text stringlengths 1 1.05M |
|---|
# where <ref>: walk back from <ref> one commit at a time until a decorated
# (branch/tag-named) ancestor is found, then report it and the distance.
function where() {
    COUNT=0
    while [ "$(where_arg $1~$COUNT | wc -w)" == 0 ]; do
        let COUNT=COUNT+1
    done
    echo "$1 is ahead of "
    where_arg $1~$COUNT
    echo "by $COUNT commits"
}
# where_arg <rev>: print the decoration text (ref names) of the given revision,
# i.e. everything after the commit hash on the first `git log` line.
function where_arg() {
    git log $@ --decorate -1 | head -n1 | cut -d ' ' -f3-
}
|
#!/bin/bash
# Interactive OpenSSL decryption helper.
# When run as part of tilux ($1 == "tilux"), loads shared helpers and a logo.
if [[ $1 == "tilux" ]]; then
source ./tools/catch
python3 -c "from tools.logos import Logo; Logo('Openssl Decryption');"
fi
sleep 1
read -p "What file do you want to decrypt? " f
# catch_empty aborts on empty input (defined in ./tools/catch); quote so a
# path containing spaces is passed as a single argument.
if [[ $1 == "tilux" ]]; then catch_empty "$f"; fi
read -p "What is the filename you wish to have after decryption? " fl
if [[ $1 == "tilux" ]]; then catch_empty "$fl"; fi
# TODO:
# Display choices better visually
echo "
Decryption choices:
-aes-128-cbc -aes-256-cbc -aria-128-cfb -aria-128-ofb -aria-192-cfb8
-aria-256-cbc -aria-256-ctr -bf-ofb -camellia-192-ecb -cast5-ofb
-des-ecb -des-ede-ofb -des-ede3-ofb -rc2-cfb -rc4-40
-seed-ecb -sm4-ctr -aes-128-ecb -aes-256-ecb -aria-128-cfb8
-aria-192-cbc -aria-192-ctr -aria-256-cfb -aria-256-ecb -bf-cbc
-camellia-128-cbc -camellia-256-cbc -cast5-cbc -des-ede -des-ede3
-des-ofb -rc2-40-cbc -rc2-ecb -seed-ofb -sm4-ecb
-aes-192-cbc -aria-128-cbc -aria-128-ctr -aria-192-cfb -aria-192-ecb
-aria-256-cfb1 -aria-256-ofb -bf-cfb -camellia-128-ecb -camellia-256-ecb
-cast5-cfb -des-cbc -des-ede-cbc -des-ede3-cbc -rc2-64-cbc
-rc2-ofb -seed-cbc -sm4-cbc -sm4-ofb -aes-192-ecb
-aria-128-cfb -aria-128-ecb -aria-192-cfb1 -aria-192-ofb -aria-256-cfb8
-bf-ecb -camellia-192-cbc -cast5-ecb -des-cfb -des-ede-cfb
-des-ede3-cfb -rc2-cbc -rc4 -seed-cfb -sm4-cfb
"
read -p "What did you use for the encryption? " decr_method
if [[ $1 == "tilux" ]]; then catch_empty "$decr_method"; fi
echo
# Quote all user-supplied values: unquoted expansions broke on filenames with
# spaces and silently dropped empty arguments.
openssl enc "$decr_method" -d -pbkdf2 -in "$f" -out "$fl"
|
<reponame>tlemane/bcli<gh_stars>0
#include <bcli/bcli.hpp>
using namespace bc;
// Example ex04: parameter dependencies and bans with bcli.
// NOTE(review): semantics below are inferred from this file's trailing usage
// comments; confirm against the bcli documentation.
int main(int argc, char* argv[])
{
// Parser<0>: no positional arguments; name / description / version / author.
Parser<0> cli("ex04",
"ex04 desc",
"v0.0.1",
"<NAME>");
param_t p = cli.add_param("-p/--param", "param help");
// ok: 04_bcli_deps_bans -p 110 -d 20
// throw: 04_bcli_deps_bans -p 110 -d 20
// ok: 04_bcli_deps_bans -p 125 -d 20
// -d is constrained via depends_on: range check on -d, gated by -p's range.
cli.add_param("-d/--dep", "dep help")->depends_on(
check::f::range(10, 100), p, check::f::range(120, 140)
)->def("120");
// Custom checker: returns (ok, message); ok only when the value is "15".
auto c = [](const std::string& s, const std::string& v) {
return std::make_tuple(v == "15", s + " should not be equal to 15.");
};
// ok: 04_bcli_deps_bans -p 15 -b 9
// throw: 04_bcli_deps_bans -p 15 -b 20
cli.add_param("-b/--ban", "ban help")->banned(
check::f::range(10, 100), p, c
)->def("9");
cli.add_common();
BCLI_PARSE(cli, argc, argv)
return 0;
}
// ex04 v0.0.1
//
// DESCRIPTION
// ex04 desc
//
// USAGE
// ex04 -p/--param <?> [-d/--dep <?>] [-b/--ban <?>] [-h/--help] [-v/--verbose] [-d/--debug]
// [--version]
//
// OPTIONS
// [global] - global parameters
// -p --param - param help
// -d --dep - dep help {120}
// -b --ban - ban help {9}
//
// [common]
// -h --help - Show this message and exit. [⚑]
// -v --verbose - Verbose mode. [⚑]
// -d --debug - Debug mode. [⚑]
// --version - Show version and exit. [⚑] |
package com.aveng.wapp.service.mapper;
import org.mapstruct.Mapper;
import com.aveng.wapp.domain.DiffEntity;
import com.aveng.wapp.service.dto.Diff;
/**
 * MapStruct mapper converting between the {@link DiffEntity} persistence
 * entity and the {@link Diff} DTO. The implementation is generated at compile
 * time and exposed as a Spring bean (componentModel = "spring").
 *
 * @author apaydin
 */
@Mapper(componentModel = "spring")
public interface DiffMapper {
/** Maps a DTO to its persistence entity. */
DiffEntity map(Diff diff);
/** Maps a persistence entity to its DTO. */
Diff map(DiffEntity diffEntity);
}
|
#!/bin/bash
# Launcher for the doyo-routersvr process.
# Fixed delay before start — presumably to let dependent services (e.g. the
# consul agent referenced below) come up first; TODO confirm.
sleep 6s
./doyo-routersvr --id=1 --topic=DoyoRouterSvr --conf=../conf/conf.json --logpath=../log --consulhealthcheckaddr=192.168.68.228:7002
|
import React, {useState, useEffect } from 'react';
import { StyleSheet, Text, View } from 'react-native';
const App = () => {
const [location, setLocation] = useState(null);
useEffect(() => {
const getLocation = async () =>{
let { status } = await Location.requestPermissionsAsync();
if (status !== 'granted') {
setErrorMsg('Permission to access location was denied');
}
let location = await Location.getCurrentPositionAsync({});
setLocation({
latitude:location.coords.latitude,
longitude:location.coords.longitude
});
}
getLocation();
}, []);
if (!location) {
return null;
}
return (
<View style={styles.container}>
<Text>Latitude: {location.latitude}</Text>
<Text>Longtitude: {location.longitude}</Text>
</View>
);
};
const styles = StyleSheet.create({
container: {
flex: 1,
backgroundColor: '#fff',
alignItems: 'center',
justifyContent: 'center',
},
});
export default App; |
#!/bin/bash
# $1: phrase to find
# $2: phrase to replace $1
# Whole-word replace of $1 with $2 in all matching .asm files (excluding
# generated/vendored directories).
# Quote the user-supplied phrases so patterns with spaces survive word
# splitting (they were previously expanded unquoted inside the sed program).
files=$(grep -lwr --include="*.asm" --exclude-dir="crowdmap" --exclude-dir="utils" --exclude-dir=".git" --exclude-dir="animatedgifs" --exclude-dir="patch" "$1")
# Guard: with no matches, sed would otherwise be invoked with no input files
# and fail (or wait on stdin, depending on the sed implementation).
if [ -n "$files" ]; then
    sed -i 's/\<'"$1"'\>/'"$2"'/' $files
fi
|
import { Morphable } from './morphable'
/**
 * An affine-transform description that can be interpolated: it round-trips
 * to/from a flat 8-number array (see MorphableTransform.Array).
 */
export class MorphableTransform
  implements Morphable<MorphableTransform.Array, MorphableTransform.Array>
{
  scaleX: number
  scaleY: number
  shear: number
  rotate: number
  translateX: number
  translateY: number
  originX: number
  originY: number

  /**
   * Populate this instance from a flat array. Defaults are applied first,
   * then every array slot is copied (missing slots therefore end up
   * `undefined`, matching the original Object.assign behavior).
   */
  fromArray(arr: MorphableTransform.Array) {
    const [scaleX, scaleY, shear, rotate, translateX, translateY, originX, originY] = arr
    Object.assign(this, MorphableTransform.defaults, {
      scaleX,
      scaleY,
      shear,
      rotate,
      translateX,
      translateY,
      originX,
      originY,
    })
    return this
  }

  /** Serialize the eight components back into a flat array. */
  toArray(): MorphableTransform.Array {
    const { scaleX, scaleY, shear, rotate, translateX, translateY, originX, originY } = this
    return [scaleX, scaleY, shear, rotate, translateX, translateY, originX, originY]
  }

  /** Morphable interface: the interpolation value is the array form. */
  toValue(): MorphableTransform.Array {
    return this.toArray()
  }
}
export namespace MorphableTransform {
// Identity transform: unit scale, no shear/rotation/translation, origin at 0.
export const defaults = {
scaleX: 1,
scaleY: 1,
shear: 0,
rotate: 0,
translateX: 0,
translateY: 0,
originX: 0,
originY: 0,
}
// Flat serialized form, in the same order as `defaults` above:
// [scaleX, scaleY, shear, rotate, translateX, translateY, originX, originY]
export type Array = [
number,
number,
number,
number,
number,
number,
number,
number,
]
}
|
# Print all arguments to stderr (used for error and usage messages).
function echo_stderr()
{
echo "$@" >&2
}
# Print the expected positional-argument list (19 arguments) to stderr.
function usage()
{
echo_stderr "./aadIntegration.sh <wlsUserName> <wlsPassword> <wlsDomainName> <wlsLDAPProviderName> <addsServerHost> <aadsPortNumber> <wlsLDAPPrincipal> <wlsLDAPPrincipalPassword> <wlsLDAPUserBaseDN> <wlsLDAPGroupBaseDN> <oracleHome> <adminVMName> <wlsAdminPort> <wlsLDAPSSLCertificate> <addsPublicIP> <adminPassword> <wlsAdminServerName> <wlsDomainPath> <vmIndex>"
}
# Validate that every required configuration variable is non-empty.
# BUG FIX: the original only exited for wlsUserName/wlsPassword; every other
# missing variable was merely reported and execution continued with broken
# configuration. All missing required values now abort with exit 1.
# (Also removes the duplicated vituralMachinePassword check.)
function validateInput()
{
if [[ -z "$wlsUserName" || -z "$wlsPassword" ]]
then
echo_stderr "wlsUserName or wlsPassword is required. "
exit 1
fi
# Check each remaining required variable by name via indirect expansion.
for var in wlsDomainName adProviderName adPrincipal adPassword adServerHost \
adServerPort adGroupBaseDN adUserBaseDN oracleHome wlsAdminHost \
wlsAdminPort vituralMachinePassword wlsADSSLCer wlsLDAPPublicIP \
wlsAdminServerName wlsDomainPath vmIndex
do
if [ -z "${!var}" ];
then
echo_stderr "$var is required. "
exit 1
fi
done
}
# Generate ${SCRIPT_PATH}/configure-active-directory.py — a WLST script that
# registers an ActiveDirectoryAuthenticator in realm "myrealm", reorders the
# provider list, and applies LDAP performance-tuning settings.
# NOTE: the heredoc delimiter is unquoted, so every ${...} below is expanded
# NOW (at generation time) and baked into the .py file — including credentials.
function createAADProvider_model()
{
cat <<EOF >${SCRIPT_PATH}/configure-active-directory.py
connect('$wlsUserName','$wlsPassword','t3://$wlsAdminURL')
try:
edit("$wlsAdminServerName")
startEdit()
cd('/')
# Configure DefaultAuthenticator.
cd('/SecurityConfiguration/' + '${wlsDomainName}' + '/Realms/myrealm/AuthenticationProviders/DefaultAuthenticator')
cmo.setControlFlag('SUFFICIENT')
# Configure Active Directory.
cd('/SecurityConfiguration/' + '${wlsDomainName}' + '/Realms/myrealm')
cmo.createAuthenticationProvider('${adProviderName}', 'weblogic.security.providers.authentication.ActiveDirectoryAuthenticator')
cd('/SecurityConfiguration/' + '${wlsDomainName}' + '/Realms/myrealm/AuthenticationProviders/' + '${adProviderName}')
cmo.setControlFlag('OPTIONAL')
cd('/SecurityConfiguration/' + '${wlsDomainName}' + '/Realms/myrealm')
set('AuthenticationProviders',jarray.array([ObjectName('Security:Name=myrealm' + '${adProviderName}'),
ObjectName('Security:Name=myrealmDefaultAuthenticator'),
ObjectName('Security:Name=myrealmDefaultIdentityAsserter')], ObjectName))
cd('/SecurityConfiguration/' + '${wlsDomainName}' + '/Realms/myrealm/AuthenticationProviders/' + '${adProviderName}')
cmo.setControlFlag('SUFFICIENT')
cmo.setUserNameAttribute('${LDAP_USER_NAME}')
cmo.setUserFromNameFilter('${LDAP_USER_FROM_NAME_FILTER}')
cmo.setPrincipal('${adPrincipal}')
cmo.setHost('${adServerHost}')
set('Credential', '${adPassword}')
cmo.setGroupBaseDN('${adGroupBaseDN}')
cmo.setUserBaseDN('${adUserBaseDN}')
cmo.setPort(int('${adServerPort}'))
cmo.setSSLEnabled(true)
# for performance tuning
cmo.setMaxGroupMembershipSearchLevel(1)
cmo.setGroupMembershipSearching('limited')
cmo.setUseTokenGroupsForGroupMembershipLookup(true)
cmo.setResultsTimeLimit(300)
cmo.setConnectionRetryLimit(5)
cmo.setConnectTimeout(120)
cmo.setCacheTTL(300)
cmo.setConnectionPoolSize(60)
cmo.setCacheSize(4000)
cmo.setGroupHierarchyCacheTTL(300)
cmo.setEnableSIDtoGroupLookupCaching(true)
save()
resolve()
activate()
except:
stopEdit('y')
sys.exit(1)
destroyEditSession("$wlsAdminServerName",force = true)
disconnect()
sys.exit(0)
EOF
}
# Generate ${SCRIPT_PATH}/configure-ssl.py — a WLST script that disables
# hostname verification on the admin server and the dynamic-cluster server
# template, restarting the cluster around the change. On JDK 8 it also appends
# the TLSv1.2 JVM argument to the server template's start arguments.
# NOTE(review): the template path below contains a double slash
# ('//ServerStart') — confirm WLST tolerates this.
function createSSL_model()
{
cat <<EOF >${SCRIPT_PATH}/configure-ssl.py
# Connect to the AdminServer.
connect('$wlsUserName','$wlsPassword','t3://$wlsAdminURL')
shutdown('$WLS_CLUSTER_NAME', 'Cluster')
print "Ignore host name verification in admin server."
try:
edit('$wlsAdminServerName')
startEdit()
cd('/Servers/${wlsAdminServerName}/SSL/${wlsAdminServerName}')
cmo.setHostnameVerificationIgnored(true)
print "Ignore host name verification in cluster."
cd('/ServerTemplates/${WLS_DYNAMIC_SERVER_TEMPLATE}/SSL/${WLS_DYNAMIC_SERVER_TEMPLATE}')
cmo.setHostnameVerificationIgnored(true)
EOF
# Detect JDK 8 ("1.8.x_y" version string); only then add the TLS argument.
. $oracleHome/oracle_common/common/bin/setWlstEnv.sh
${JAVA_HOME}/bin/java -version 2>&1 | grep -e "1[.]8[.][0-9]*_" > /dev/null
javaStatus=$?
if [ "${javaStatus}" == "0" ]; then
cat <<EOF >>${SCRIPT_PATH}/configure-ssl.py
cd('/ServerTemplates/${WLS_DYNAMIC_SERVER_TEMPLATE}//ServerStart/${WLS_DYNAMIC_SERVER_TEMPLATE}')
arguments = cmo.getArguments()
if(str(arguments) == 'None'):
arguments = '${JAVA_OPTIONS_TLS_V12}'
else:
arguments = str(arguments) + ' ' + '${JAVA_OPTIONS_TLS_V12}'
cmo.setArguments(arguments)
EOF
fi
# Common tail: activate changes and restart the cluster.
cat <<EOF >>${SCRIPT_PATH}/configure-ssl.py
save()
resolve()
activate()
except:
stopEdit('y')
dumpStack()
sys.exit(1)
destroyEditSession("$wlsAdminServerName")
try:
start('$WLS_CLUSTER_NAME', 'Cluster')
except:
dumpStack()
disconnect()
EOF
}
# Map the AD server hostname to its public IP in /etc/hosts.
# BUG FIXES vs. original:
#  - the sed pattern was single-quoted, so ${adServerHost} was never expanded
#    and the stale entry was never removed;
#  - 'sudo echo ... >> /etc/hosts' fails: the redirection runs in the
#    unprivileged shell, not under sudo — use `sudo tee -a` instead;
#  - the VM password was echoed to stdout (leaked into logs) before a
#    dangling `sudo -S su -`; both lines removed.
function mapLDAPHostWithPublicIP()
{
echo "map LDAP host with pubilc IP"
# remove existing ip address for the same host
sudo sed -i "/${adServerHost}/d" /etc/hosts
echo "${wlsLDAPPublicIP} ${adServerHost}" | sudo tee -a /etc/hosts > /dev/null
}
# Re-wrap the single-line base64 AD SSL certificate into 64-character lines,
# decode it to DER/CER with openssl, and export its path as addsCertificate.
function parseLDAPCertificate()
{
echo "create key store"
cer_begin=0
cer_size=${#wlsADSSLCer}
cer_line_len=64
# -p: do not fail if the directory is left over from a previous run.
mkdir -p ${SCRIPT_PWD}/security
touch ${SCRIPT_PWD}/security/AzureADLDAPCerBase64String.txt
while [ ${cer_begin} -lt ${cer_size} ]
do
cer_sub=${wlsADSSLCer:$cer_begin:$cer_line_len}
echo ${cer_sub} >> ${SCRIPT_PWD}/security/AzureADLDAPCerBase64String.txt
# advance by the named line length (was a duplicated hard-coded 64)
cer_begin=$((cer_begin+cer_line_len))
done
openssl base64 -d -in ${SCRIPT_PWD}/security/AzureADLDAPCerBase64String.txt -out ${SCRIPT_PWD}/security/AzureADTrust.cer
export addsCertificate=${SCRIPT_PWD}/security/AzureADTrust.cer
}
# Import the parsed AD certificate (${addsCertificate}) into the JVM's
# cacerts truststore under alias "aadtrust", replacing any existing entry.
function importAADCertificate()
{
# import the key to java security
. $oracleHome/oracle_common/common/bin/setWlstEnv.sh
# For AAD failure: exception happens when importing certificate to JDK 11.0.7
# ISSUE: https://github.com/wls-eng/arm-oraclelinux-wls/issues/109
# JRE was removed since JDK 11, so cacerts lives directly under lib/security.
java_version=$(java -version 2>&1 | sed -n ';s/.* version "\(.*\)\.\(.*\)\..*"/\1\2/p;')
if [ ${java_version:0:3} -ge 110 ];
then
java_cacerts_path=${JAVA_HOME}/lib/security/cacerts
else
java_cacerts_path=${JAVA_HOME}/jre/lib/security/cacerts
fi
# remove existing certificate so re-runs don't fail on a duplicate alias.
queryAADTrust=$(${JAVA_HOME}/bin/keytool -list -keystore ${java_cacerts_path} -storepass changeit | grep "aadtrust")
if [ -n "$queryAADTrust" ];
then
sudo ${JAVA_HOME}/bin/keytool -delete -alias aadtrust -keystore ${java_cacerts_path} -storepass changeit
fi
sudo ${JAVA_HOME}/bin/keytool -noprompt -import -alias aadtrust -file ${addsCertificate} -keystore ${java_cacerts_path} -storepass changeit
}
# Run the generated configure-ssl.py under WLST as the oracle user.
function configureSSL()
{
echo "configure ladp ssl"
sudo chown -R ${USER_ORACLE}:${GROUP_ORACLE} ${SCRIPT_PATH}
runuser -l ${USER_ORACLE} -c ". $oracleHome/oracle_common/common/bin/setWlstEnv.sh; java $WLST_ARGS weblogic.WLST ${SCRIPT_PATH}/configure-ssl.py"
errorCode=$?
# BUG FIX: treat ANY non-zero exit as failure — the original checked -eq 1
# and silently ignored other WLST/JVM failure codes.
if [ $errorCode -ne 0 ]
then
echo "Exception occurs during SSL configuration, please check."
exit 1
fi
}
# Run the generated configure-active-directory.py under WLST as the oracle user.
function configureAzureActiveDirectory()
{
echo "create Azure Active Directory provider"
sudo chown -R ${USER_ORACLE}:${GROUP_ORACLE} ${SCRIPT_PATH}
runuser -l ${USER_ORACLE} -c ". $oracleHome/oracle_common/common/bin/setWlstEnv.sh; java $WLST_ARGS weblogic.WLST ${SCRIPT_PATH}/configure-active-directory.py"
errorCode=$?
# BUG FIX: treat ANY non-zero exit as failure — the original checked -eq 1
# and silently ignored other WLST/JVM failure codes.
if [ $errorCode -ne 0 ]
then
echo "Exception occurs during Azure Active Directory configuration, please check."
exit 1
fi
}
# Restart the WebLogic admin server via its systemd unit (stop, then start).
function restartAdminServerService()
{
echo "Restart weblogic admin server service"
sudo systemctl stop wls_admin
sudo systemctl start wls_admin
}
# Poll the admin server readiness endpoint until it returns HTTP 200, sleeping
# 1 minute between attempts; gives up (exit 1) after 30 attempts.
function wait_for_admin()
{
# NOTE(review): grepping for the literal "HTTP/1.1" would never match an
# HTTP/2 status line — confirm the server always answers HTTP/1.1 here.
count=1
export CHECK_URL="http://$wlsAdminURL/weblogic/ready"
status=`curl --insecure -ILs $CHECK_URL | tac | grep -m1 HTTP/1.1 | awk {'print $2'}`
echo "Check admin server status"
while [[ "$status" != "200" ]]
do
echo "."
count=$((count+1))
if [ $count -le 30 ];
then
sleep 1m
else
echo "Error : Maximum attempts exceeded while checking admin server status"
exit 1
fi
status=`curl --insecure -ILs $CHECK_URL | tac | grep -m1 HTTP/1.1 | awk {'print $2'}`
# early exit on success (redundant with the loop condition, kept as-is)
if [ "$status" == "200" ];
then
echo "WebLogic Server is running..."
break
fi
done
}
# Remove the generated WLST scripts and the decoded certificate material.
function cleanup()
{
echo "Cleaning up temporary files..."
rm -f -r ${SCRIPT_PATH}
rm -rf ${SCRIPT_PWD}/security/*
echo "Cleanup completed."
}
# Idempotently append a snippet to setDomainEnv.sh that enables TLSv1.2 in
# JAVA_OPTIONS when the domain is running on JDK 8 (guarded by the marker
# string so re-runs don't duplicate it).
function enableTLSv12onJDK8()
{
if ! grep -q "${STRING_ENABLE_TLSV12}" ${wlsDomainPath}/bin/setDomainEnv.sh; then
cat <<EOF >>${wlsDomainPath}/bin/setDomainEnv.sh
# Append -Djdk.tls.client.protocols to JAVA_OPTIONS in jdk8
# Enable TLSv1.2
\${JAVA_HOME}/bin/java -version 2>&1 | grep -e "1[.]8[.][0-9]*_" > /dev/null
javaStatus=\$?
if [[ "\${javaStatus}" = "0" && "\${JAVA_OPTIONS}" != *"${JAVA_OPTIONS_TLS_V12}"* ]]; then
JAVA_OPTIONS="\${JAVA_OPTIONS} ${JAVA_OPTIONS_TLS_V12}"
export JAVA_OPTIONS
fi
EOF
fi
}
# BUG FIX above: the heredoc is unquoted, so the original unescaped
# `javaStatus=$?` expanded at generation time, baking THIS script's last exit
# status into setDomainEnv.sh; it is now escaped (\$?) so the generated file
# evaluates the grep result at domain startup.
# Generate and run a WLST script that shuts down and restarts the cluster.
function restartCluster()
{
cat <<EOF >${SCRIPT_PWD}/restart-cluster.py
# Connect to the AdminServer.
connect('$wlsUserName','$wlsPassword','t3://$wlsAdminURL')
print "Restart cluster."
try:
print "Shutdown cluster."
shutdown('$WLS_CLUSTER_NAME', 'Cluster')
print "Start cluster."
start('$WLS_CLUSTER_NAME', 'Cluster')
except:
dumpStack()
disconnect()
EOF
. $oracleHome/oracle_common/common/bin/setWlstEnv.sh
java $WLST_ARGS weblogic.WLST ${SCRIPT_PWD}/restart-cluster.py
errorCode=$?
# BUG FIX: treat ANY non-zero exit as failure — the original checked -eq 1
# and silently ignored other WLST/JVM failure codes.
if [ $errorCode -ne 0 ]
then
echo "Failed to restart cluster."
exit 1
fi
}
# (Re)create the temporary working directory used for generated WLST scripts.
function createTempFolder()
{
export SCRIPT_PATH="/u01/tmp"
sudo rm -f -r ${SCRIPT_PATH}
sudo mkdir ${SCRIPT_PATH}
# defensive second wipe of any contents (redundant after the rm above)
sudo rm -rf $SCRIPT_PATH/*
}
# ---- Fixed configuration ----------------------------------------------------
export LDAP_USER_NAME='sAMAccountName'
export LDAP_USER_FROM_NAME_FILTER='(&(sAMAccountName=%u)(objectclass=user))'
export JAVA_OPTIONS_TLS_V12="-Djdk.tls.client.protocols=TLSv1.2"
export STRING_ENABLE_TLSV12="Append -Djdk.tls.client.protocols to JAVA_OPTIONS in jdk8"
export WLS_CLUSTER_NAME="cluster1"
export WLS_DYNAMIC_SERVER_TEMPLATE="myServerTemplate"
export SCRIPT_PWD=`pwd`
export USER_ORACLE="oracle"
export GROUP_ORACLE="oracle"
# ---- Positional arguments (19 required; see usage) --------------------------
if [ $# -ne 19 ]
then
usage
exit 1
fi
export wlsUserName=$1
export wlsPassword=$2
export wlsDomainName=$3
export adProviderName=$4
export adServerHost=$5
export adServerPort=$6
export adPrincipal=$7
export adPassword=$8
export adGroupBaseDN=$9
export adUserBaseDN=${10}
export oracleHome=${11}
export wlsAdminHost=${12}
export wlsAdminPort=${13}
export wlsADSSLCer="${14}"
export wlsLDAPPublicIP="${15}"
export vituralMachinePassword="${16}"
export wlsAdminServerName=${17}
export wlsDomainPath=${18}
export vmIndex=${19}
export wlsAdminURL=$wlsAdminHost:$wlsAdminPort
# ---- Main -------------------------------------------------------------------
# vmIndex 0 is the admin VM: it performs the full AAD + SSL configuration.
# All other VMs only need the /etc/hosts mapping and the truststore import.
if [ $vmIndex -eq 0 ];
then
cleanup
echo "check status of admin server"
wait_for_admin
echo "start to configure Azure Active Directory"
enableTLSv12onJDK8
createAADProvider_model
createSSL_model
mapLDAPHostWithPublicIP
parseLDAPCertificate
importAADCertificate
configureSSL
configureAzureActiveDirectory
restartAdminServerService
echo "Waiting for admin server to be available"
wait_for_admin
echo "Weblogic admin server is up and running"
restartCluster
cleanup
else
cleanup
mapLDAPHostWithPublicIP
parseLDAPCertificate
importAADCertificate
cleanup
fi
|
def extract(array, value):
    """Return the first element of ``array`` whose last item equals ``value``.

    Returns None when no element matches (or when ``array`` is empty).
    """
    return next((candidate for candidate in array if candidate[-1] == value), None)
array = [['a', 1], ['b', 2], ['c', 3], ['d', 4], ['e', 5]]
value = 3
# BUG FIX: the original line `extract(array, value) => ['c', 3]` used `=>`
# pseudo-notation and was a SyntaxError; demonstrate the result explicitly.
print(extract(array, value))  # ['c', 3]
#include <iostream>
#include <string>
using namespace std;

// Print the DNA sequence with duplicate characters removed, keeping the first
// occurrence of each (e.g. "ATTACG" -> "ATCG").
//
// BUG FIX: the original scanned the WHOLE string for the first index holding
// dna_seq[pos]; a character always matches itself, so found_at was never -1
// (no output was ever built) and, once pos passed a repeated character,
// pos = found_at + 1 stopped advancing — an infinite loop. The inferred
// intent (keep characters not seen earlier) is implemented directly below;
// TODO confirm against the original author's intent.
int main()
{
    string dna_seq = "ATTACG";
    string found = "";
    for (string::size_type pos = 0; pos < dna_seq.length(); pos++)
    {
        // Append only characters that have not already been collected.
        if (found.find(dna_seq[pos]) == string::npos)
        {
            found += dna_seq[pos];
        }
    }
    cout << found << endl;
    return 0;
}
import clean
|
<reponame>Slaying-Gitcoin/graphology<gh_stars>0
/**
* Graphology DAG Unit Tests
* ==========================
*/
const {DirectedGraph} = require('graphology');
const path = require('graphology-generators/classic/path');
const assert = require('assert');
const hasCycle = require('./has-cycle.js');
const willCreateCycle = require('./will-create-cycle.js');
// Unit tests for the DAG helpers: hasCycle (whole-graph cycle detection) and
// willCreateCycle (would adding edge (u, v) introduce a cycle?).
describe('graphology-dag', function () {
describe('hasCycle', function () {
it('should throw if given invalid arguments.', function () {
assert.throws(function () {
hasCycle(null);
}, /graphology/);
});
it('should return true if the graph has a self loop.', function () {
const graph = new DirectedGraph();
graph.mergeEdge('A', 'A');
assert.strictEqual(hasCycle(graph), true);
});
it('should correctly return whether the given graph has a cycle.', function () {
// path(DirectedGraph, 4) builds the acyclic chain 0 -> 1 -> 2 -> 3.
const graph = path(DirectedGraph, 4);
assert.strictEqual(hasCycle(graph), false);
graph.addEdge(3, 0);
assert.strictEqual(hasCycle(graph), true);
});
it('should work with typical examples.', function () {
const graph = new DirectedGraph();
graph.mergeEdge(0, 1);
graph.mergeEdge(0, 2);
graph.mergeEdge(1, 2);
graph.mergeEdge(2, 3);
assert.strictEqual(hasCycle(graph), false);
graph.mergeEdge(2, 0);
assert.strictEqual(hasCycle(graph), true);
graph.clear();
graph.mergeEdge(0, 1);
graph.mergeEdge(0, 2);
graph.mergeEdge(1, 2);
graph.mergeEdge(2, 3);
assert.strictEqual(hasCycle(graph), false);
});
it('should work with disconnected graphs.', function () {
// Two chains (0..3 and 4..7) plus the isolated node 8.
const graph = new DirectedGraph();
graph.mergeEdge(0, 1);
graph.mergeEdge(1, 2);
graph.mergeEdge(2, 3);
graph.mergeEdge(4, 5);
graph.mergeEdge(5, 6);
graph.mergeEdge(6, 7);
graph.addNode(8);
assert.strictEqual(hasCycle(graph), false);
graph.addEdge(2, 0);
assert.strictEqual(hasCycle(graph), true);
graph.dropEdge(2, 0);
assert.strictEqual(hasCycle(graph), false);
graph.addEdge(7, 4);
assert.strictEqual(hasCycle(graph), true);
});
});
describe('willCreateCycle', function () {
it('should throw if given invalid arguments.', function () {
assert.throws(function () {
willCreateCycle(null);
}, /graphology/);
});
it('should return false when one of the node does not exist.', function () {
const graph = new DirectedGraph();
graph.addNode('test');
assert.strictEqual(willCreateCycle(graph, 'test', 'other'), false);
assert.strictEqual(willCreateCycle(graph, 'other', 'test'), false);
assert.strictEqual(willCreateCycle(graph, 'other1', 'other2'), false);
});
it('should return true when adding a self loop.', function () {
const graph = new DirectedGraph();
graph.addNode('A');
assert.strictEqual(willCreateCycle(graph, 'A', 'A'), true);
});
it('should work with a simple path graph.', function () {
const graph = path(DirectedGraph, 4);
assert.strictEqual(willCreateCycle(graph, 0, 3), false);
assert.strictEqual(willCreateCycle(graph, 1, 0), true);
assert.strictEqual(willCreateCycle(graph, 2, 0), true);
assert.strictEqual(willCreateCycle(graph, 3, 0), true);
assert.strictEqual(willCreateCycle(graph, 2, 1), true);
assert.strictEqual(willCreateCycle(graph, 3, 2), true);
assert.strictEqual(willCreateCycle(graph, 3, 1), true);
assert.strictEqual(willCreateCycle(graph, 0, 4), false);
});
it('should return false if edge already exists and true when adding a mutual edge.', function () {
const graph = new DirectedGraph();
graph.mergeEdge(0, 1);
assert.strictEqual(willCreateCycle(graph, 0, 1), false);
assert.strictEqual(willCreateCycle(graph, 1, 0), true);
});
it('should work with a DAG forest.', function () {
const graph = path(DirectedGraph, 4);
graph.mergeEdge(5, 6);
assert.strictEqual(willCreateCycle(graph, 0, 3), false);
assert.strictEqual(willCreateCycle(graph, 0, 5), false);
assert.strictEqual(willCreateCycle(graph, 6, 5), true);
});
it('should work with a simple DAG.', function () {
const graph = new DirectedGraph();
graph.mergeEdge(0, 1);
graph.mergeEdge(0, 2);
graph.mergeEdge(1, 2);
graph.mergeEdge(2, 3);
assert.strictEqual(willCreateCycle(graph, 0, 3), false);
assert.strictEqual(willCreateCycle(graph, 3, 0), true);
assert.strictEqual(willCreateCycle(graph, 3, 1), true);
assert.strictEqual(willCreateCycle(graph, 3, 2), true);
});
});
});
|
#!/usr/bin/env bash
#=============================================================
# https://github.com/cornjosh/Aminer
# A script that help you install miner software XMRIG on Android device
# Version: 1.0
# Author: cornjosh
# Blog: https://linkyou.top
#=============================================================
# Pool credentials and endpoint defaults (overridable via CLI options).
USER="48QWuuLkjMQb1DWLuCcPVm1K4jrC4cpgrVCQfDgPdYNndCowboZqzVCMHrTtCVGDQPNjqpzprkjKLTxuLZDRM5uhVnTyJCQ"
PASS='termux'
MIMING_URL="pool.minexmr.com:4444"
VERSION=1.0
# TOS: set to 'y' (via -y) to skip the interactive risk acknowledgement.
TOS=''
UBUNTU_VERSION=20.04.1
DONATE=0
# ANSI color escape sequences for log helpers below.
RED_FONT_PREFIX="\033[31m"
LIGHT_GREEN_FONT_PREFIX="\033[1;32m"
LIGHT_BLUE_FONT_PREFIX="\033[1;34m"
FONT_COLOR_SUFFIX="\033[0m"
# Log helpers: colored [INFO]/[ERROR] prefixes and section headers.
INFO(){
echo -e "[${LIGHT_GREEN_FONT_PREFIX}INFO${FONT_COLOR_SUFFIX}] $1"
}
ERROR(){
echo -e "[${RED_FONT_PREFIX}ERROR${FONT_COLOR_SUFFIX}] $1"
}
HEAD(){
echo -e "${LIGHT_BLUE_FONT_PREFIX}##### $1 #####${FONT_COLOR_SUFFIX}"
}
# Print the banner and require a risk acknowledgement unless TOS was preset
# (e.g. by the -y option); 'n'/'N' aborts.
HELLO(){
HEAD "Aminer"
echo "Aminer is a script that help you install miner software XMRIG on Android device. @v$VERSION
You can find the source code from https://github.com/cornjosh/Aminer
"
[ "$TOS" == '' ] && read -e -p "You are already understand the risks of the script.(Y/n)" TOS
[ "$TOS" == 'n' ] || [ "$TOS" == 'N' ] && ERROR "Canceled by user" && exit 0
}
# Print command-line usage. The trailing commented options are retained from
# an earlier sshd-setup feature set.
USAGE(){
echo "Aminer - A script that help you install miner software XMRIG on Android device @v$VERSION
Usage:
bash <(curl -fsSL git.io/aminer) [options...] <arg>
Options:
-y Auto mode, ignore risks warning
-u Pool's user, the arguments like [username]
-p Pool's password, the arguments like [password]
-o Pool's url, the arguments like [mine.pool.example:1234]
-d Donate level to XMRIG's developers (not me),the arguments like [1]
-g Setup sshd with Github name, the arguments like [cornjosh]"
# -o Overwrite mode, this option is valid at the top
# -g Get the public key from GitHub, the arguments is the GitHub ID
# -u Get the public key from the URL, the arguments is the URL
# -f Get the public key from the local file, the arguments is the local file path
# -p Change SSH port, the arguments is port number
# -d Disable password login
}
# Default the pool password to "Aminer-<device model>" (spaces stripped) when
# none was supplied. getprop is the Android system-property tool.
GET_PASS(){
[ "$PASS" == '' ] && PASS="Aminer-$(getprop ro.product.vendor.model|sed s/[[:space:]]//g)"
}
# Full Ubuntu-in-Termux setup: upgrade packages, install wget/proot, create
# the working directory, then download and install the rootfs.
UBUNTU(){
INFO "Upgrading packages" && pkg update && pkg upgrade -y
INFO "Installing dependency" && pkg install wget proot -y
cd "$HOME" || exit
# -p: do not fail when the directory already exists from a previous run
# (the original bare mkdir errored on reruns).
mkdir -p ubuntu-in-termux && INFO "Create $HOME/ubuntu-in-termux"
UBUNTU_DOWNLOAD
UBUNTU_INSTALL
INFO "Ubuntu setup complete"
}
# Download the Ubuntu base rootfs tarball matching the device architecture
# into $HOME/ubuntu-in-termux/ubuntu.tar.gz (from the USTC mirror).
UBUNTU_DOWNLOAD(){
HEAD "Download Ubuntu"
cd "$HOME/ubuntu-in-termux" || exit
[ -f "ubuntu.tar.gz" ] && rm -rf ubuntu.tar.gz && INFO "Remove old ubuntu image"
# Map dpkg architecture names to Ubuntu image architecture names.
local ARCHITECTURE=$(dpkg --print-architecture)
case "$ARCHITECTURE" in
aarch64)
ARCHITECTURE=arm64
;;
arm)
ARCHITECTURE=armhf
;;
amd64|x86_64)
ARCHITECTURE=amd64
;;
*)
ERROR "Unsupported architecture :- $ARCHITECTURE" && exit 1
;;
esac
INFO "Device architecture :- $ARCHITECTURE"
INFO "Downloading Ubuntu image"
wget https://mirrors.ustc.edu.cn/ubuntu-cdimage/ubuntu-base/releases/${UBUNTU_VERSION}/release/ubuntu-base-${UBUNTU_VERSION}-base-${ARCHITECTURE}.tar.gz -O ubuntu.tar.gz
}
# Unpack the downloaded rootfs into ubuntu-fs, patch resolv.conf, stub out
# binaries that misbehave under proot, and generate the startubuntu.sh
# launcher (a proot invocation with the usual Android bind mounts).
UBUNTU_INSTALL(){
HEAD "Install Ubuntu"
local directory=ubuntu-fs
cd "$HOME/ubuntu-in-termux" || exit
local cur=$(pwd)
mkdir -p $directory && INFO "Create $HOME/ubuntu-in-termux/$directory"
cd $directory || exit
# --exclude='dev': device nodes can't be created unprivileged; proot binds /dev.
INFO "Decompressing the ubuntu rootfs" && tar -zxf "$cur/ubuntu.tar.gz" --exclude='dev' && INFO "The ubuntu rootfs have been successfully decompressed"
printf "nameserver 8.8.8.8\nnameserver 8.8.4.4\n" > etc/resolv.conf && INFO "Fixing the resolv.conf"
# Replace binaries that break under proot with no-op stubs.
stubs=()
stubs+=('usr/bin/groups')
for f in "${stubs[@]}";do
INFO "Writing stubs"
echo -e "#!/bin/sh\nexit" > "$f"
done
INFO "Successfully wrote stubs"
cd "$cur" || exit
mkdir -p ubuntu-binds
local bin=startubuntu.sh
INFO "Creating the start script"
# NOTE: heredoc is unquoted — $directory expands now; \$-escaped variables
# are evaluated when startubuntu.sh itself runs.
cat > $bin <<- EOM
#!/bin/bash
cd \$(dirname \$0)
## unset LD_PRELOAD in case termux-exec is installed
unset LD_PRELOAD
command="proot"
## uncomment following line if you are having FATAL: kernel too old message.
#command+=" -k 4.14.81"
command+=" --link2symlink"
command+=" -0"
command+=" -r $directory"
if [ -n "\$(ls -A ubuntu-binds)" ]; then
for f in ubuntu-binds/* ;do
. \$f
done
fi
command+=" -b /dev"
command+=" -b /proc"
command+=" -b /sys"
command+=" -b ubuntu-fs/tmp:/dev/shm"
command+=" -b /data/data/com.termux"
command+=" -b /:/host-rootfs"
command+=" -b /sdcard"
command+=" -b /storage"
command+=" -b /mnt"
command+=" -w /root"
command+=" /usr/bin/env -i"
command+=" HOME=/root"
command+=" PATH=/usr/local/sbin:/usr/local/bin:/bin:/usr/bin:/sbin:/usr/sbin:/usr/games:/usr/local/games"
command+=" TERM=\$TERM"
command+=" LANG=C.UTF-8"
command+=" /bin/bash --login"
com="\$@"
if [ -z "\$1" ];then
exec \$command
else
\$command -c "\$com"
fi
EOM
termux-fix-shebang $bin
chmod +x $bin
rm ubuntu.tar.gz -rf && INFO "Delete Ubuntu image"
INFO "Ubuntu $UBUNTU_VERSION install complete"
}
#install_ubuntu(){
# pkg update && pkg upgrade -y
# pkg install wget proot -y
# cd "$HOME" || exit
# mkdir ubuntu-in-termux
# cd ubuntu-in-termux || exit
# install1
# cd "$HOME" || exit
#}
#
#install1 () {
#time1="$( date +"%r" )"
#directory=ubuntu-fs
#UBUNTU_VERSION=20.04.1
#if [ -d "$directory" ];then
#first=1
#printf "\x1b[38;5;214m[${time1}]\e[0m \x1b[38;5;227m[WARNING]:\e[0m \x1b[38;5;87m Skipping the download and the extraction\n"
#elif [ -z "$(command -v proot)" ];then
#printf "\x1b[38;5;214m[${time1}]\e[0m \x1b[38;5;203m[ERROR]:\e[0m \x1b[38;5;87m Please install proot.\n"
#printf "\e[0m"
#exit 1
#elif [ -z "$(command -v wget)" ];then
#printf "\x1b[38;5;214m[${time1}]\e[0m \x1b[38;5;203m[ERROR]:\e[0m \x1b[38;5;87m Please install wget.\n"
#printf "\e[0m"
#exit 1
#fi
#if [ "$first" != 1 ];then
#if [ -f "ubuntu.tar.gz" ];then
#rm -rf ubuntu.tar.gz
#fi
#if [ ! -f "ubuntu.tar.gz" ];then
#printf "\x1b[38;5;214m[${time1}]\e[0m \x1b[38;5;83m[Installer thread/INFO]:\e[0m \x1b[38;5;87m Downloading the ubuntu rootfs, please wait...\n"
#ARCHITECTURE=$(dpkg --print-architecture)
#case "$ARCHITECTURE" in
#aarch64) ARCHITECTURE=arm64;;
#arm) ARCHITECTURE=armhf;;
#amd64|x86_64) ARCHITECTURE=amd64;;
#*)
#printf "\x1b[38;5;214m[${time1}]\e[0m \x1b[38;5;203m[ERROR]:\e[0m \x1b[38;5;87m Unknown architecture :- $ARCHITECTURE"
#exit 1
#;;
#
#esac
#
#wget https://mirrors.ustc.edu.cn/ubuntu-cdimage/ubuntu-base/releases/${UBUNTU_VERSION}/release/ubuntu-base-${UBUNTU_VERSION}-base-${ARCHITECTURE}.tar.gz -O ubuntu.tar.gz
#printf "\x1b[38;5;214m[${time1}]\e[0m \x1b[38;5;83m[Installer thread/INFO]:\e[0m \x1b[38;5;87m Download complete!\n"
#
#fi
#
#cur=`pwd`
#mkdir -p $directory
#cd $directory
#printf "\x1b[38;5;214m[${time1}]\e[0m \x1b[38;5;83m[Installer thread/INFO]:\e[0m \x1b[38;5;87m Decompressing the ubuntu rootfs, please wait...\n"
#tar -zxf $cur/ubuntu.tar.gz --exclude='dev'||:
#printf "\x1b[38;5;214m[${time1}]\e[0m \x1b[38;5;83m[Installer thread/INFO]:\e[0m \x1b[38;5;87m The ubuntu rootfs have been successfully decompressed!\n"
#printf "\x1b[38;5;214m[${time1}]\e[0m \x1b[38;5;83m[Installer thread/INFO]:\e[0m \x1b[38;5;87m Fixing the resolv.conf, so that you have access to the internet\n"
#printf "nameserver 8.8.8.8\nnameserver 8.8.4.4\n" > etc/resolv.conf
#stubs=()
#stubs+=('usr/bin/groups')
#for f in ${stubs[@]};do
#printf "\x1b[38;5;214m[${time1}]\e[0m \x1b[38;5;83m[Installer thread/INFO]:\e[0m \x1b[38;5;87m Writing stubs, please wait...\n"
#echo -e "#!/bin/sh\nexit" > "$f"
#done
#printf "\x1b[38;5;214m[${time1}]\e[0m \x1b[38;5;83m[Installer thread/INFO]:\e[0m \x1b[38;5;87m Successfully wrote stubs!\n"
#cd $cur
#
#fi
#
#mkdir -p ubuntu-binds
#bin=startubuntu.sh
#printf "\x1b[38;5;214m[${time1}]\e[0m \x1b[38;5;83m[Installer thread/INFO]:\e[0m \x1b[38;5;87m Creating the start script, please wait...\n"
#cat > $bin <<- EOM
##!/bin/bash
#cd \$(dirname \$0)
### unset LD_PRELOAD in case termux-exec is installed
#unset LD_PRELOAD
#command="proot"
### uncomment following line if you are having FATAL: kernel too old message.
##command+=" -k 4.14.81"
#command+=" --link2symlink"
#command+=" -0"
#command+=" -r $directory"
#if [ -n "\$(ls -A ubuntu-binds)" ]; then
# for f in ubuntu-binds/* ;do
# . \$f
# done
#fi
#command+=" -b /dev"
#command+=" -b /proc"
#command+=" -b /sys"
#command+=" -b ubuntu-fs/tmp:/dev/shm"
#command+=" -b /data/data/com.termux"
#command+=" -b /:/host-rootfs"
#command+=" -b /sdcard"
#command+=" -b /storage"
#command+=" -b /mnt"
#command+=" -w /root"
#command+=" /usr/bin/env -i"
#command+=" HOME=/root"
#command+=" PATH=/usr/local/sbin:/usr/local/bin:/bin:/usr/bin:/sbin:/usr/sbin:/usr/games:/usr/local/games"
#command+=" TERM=\$TERM"
#command+=" LANG=C.UTF-8"
#command+=" /bin/bash --login"
#com="\$@"
#if [ -z "\$1" ];then
# exec \$command
#else
# \$command -c "\$com"
#fi
#EOM
#printf "\x1b[38;5;214m[${time1}]\e[0m \x1b[38;5;83m[Installer thread/INFO]:\e[0m \x1b[38;5;87m The start script has been successfully created!\n"
#printf "\x1b[38;5;214m[${time1}]\e[0m \x1b[38;5;83m[Installer thread/INFO]:\e[0m \x1b[38;5;87m Fixing shebang of startubuntu.sh, please wait...\n"
#termux-fix-shebang $bin
#printf "\x1b[38;5;214m[${time1}]\e[0m \x1b[38;5;83m[Installer thread/INFO]:\e[0m \x1b[38;5;87m Successfully fixed shebang of startubuntu.sh! \n"
#printf "\x1b[38;5;214m[${time1}]\e[0m \x1b[38;5;83m[Installer thread/INFO]:\e[0m \x1b[38;5;87m Making startubuntu.sh executable please wait...\n"
#chmod +x $bin
#printf "\x1b[38;5;214m[${time1}]\e[0m \x1b[38;5;83m[Installer thread/INFO]:\e[0m \x1b[38;5;87m Successfully made startubuntu.sh executable\n"
#printf "\x1b[38;5;214m[${time1}]\e[0m \x1b[38;5;83m[Installer thread/INFO]:\e[0m \x1b[38;5;87m Cleaning up please wait...\n"
#rm ubuntu.tar.gz -rf
#printf "\x1b[38;5;214m[${time1}]\e[0m \x1b[38;5;83m[Installer thread/INFO]:\e[0m \x1b[38;5;87m Successfully cleaned up!\n"
#printf "\x1b[38;5;214m[${time1}]\e[0m \x1b[38;5;83m[Installer thread/INFO]:\e[0m \x1b[38;5;87m The installation has been completed! You can now launch Ubuntu with ./startubuntu.sh\n"
#printf "\e[0m"
#
#}
UBUNTU_START(){
# Launch the Ubuntu proot environment via the start script produced by the installer.
INFO "Start up Ubuntu..." && bash "$HOME/ubuntu-in-termux/startubuntu.sh"
}
TERMUX_BASHRC(){
# Make Termux auto-start Ubuntu on every shell login by appending to ~/.bashrc.
# NOTE(review): repeated runs append duplicate lines — confirm this is only called once.
INFO "Setting termux's .bashrc"
echo "bash $HOME/ubuntu-in-termux/startubuntu.sh" >> "$HOME/.bashrc"
}
UBUNTU_INSTALL_BASHRC(){
# Write /root/install.sh inside the Ubuntu rootfs and hook it into the
# guest's .bashrc so it runs on login until git exists (i.e. once).
INFO "Setting Ubuntu's .bashrc and install.sh"
local bin="$HOME/ubuntu-in-termux/ubuntu-fs/root/install.sh"
# Unquoted delimiter: \$-escaped expansions stay literal for the guest script,
# while unescaped ones (e.g. $DONATE in the commented line) expand at write time.
cat > "$bin" <<- EOM
#!/bin/bash
RED_FONT_PREFIX="\033[31m"
BLUE_FONT_PREFIX="\033[34m"
LIGHT_GREEN_FONT_PREFIX="\033[1;32m"
LIGHT_BLUE_FONT_PREFIX="\033[1;34m"
FONT_COLOR_SUFFIX="\033[0m"
INFO(){
echo -e "[\${LIGHT_GREEN_FONT_PREFIX}INFO\${FONT_COLOR_SUFFIX}]\$1"
}
ERROR(){
echo -e "[\${RED_FONT_PREFIX}ERROR\${FONT_COLOR_SUFFIX}]\$1"
}
HEAD(){
echo -e "\${LIGHT_BLUE_FONT_PREFIX}##### \$1 #####\${FONT_COLOR_SUFFIX}"
}
HEAD "Upgrading packages"
apt-get update && apt-get upgrade -y
HEAD "Installing dependency"
apt-get install git build-essential cmake libuv1-dev libssl-dev libhwloc-dev vim automake libcurl4-openssl-dev libncurses5-dev pkg-config yasm -y
#INFO "Getting xmrig source code"
#git clone https://github.com/xmrig/xmrig.git xmrig-C3
#INFO "Changing donate level to $DONATE %"
#sed -i 's/kDefaultDonateLevel = 1/kDefaultDonateLevel = 0/g' ./xmrig-C3/src/donate.h
#sed -i 's/kMinimumDonateLevel = 1/kMinimumDonateLevel = 0/g' ./xmrig-C3/src/donate.h
#mkdir xmrig-C3/build && cd xmrig-C3/build && cmake .. && make -j\$(nproc) && mv xmrig \$HOME && cd \$HOME
#INFO "XMRIG create success"
HEAD "Please restart Termux App"
EOM
# Run install.sh on guest login only while git is missing (acts as a "done" marker).
echo "[ ! -e /usr/bin/git ] && bash ./install.sh" >> "$HOME/ubuntu-in-termux/ubuntu-fs/root/.bashrc"
}
UBUNTU_SERVICE_BASHRC(){
# Write /root/service.sh inside the Ubuntu rootfs and make the guest's
# .bashrc execute it on every login.
INFO "Setting Ubuntu's .bashrc and service.sh"
local bin="$HOME/ubuntu-in-termux/ubuntu-fs/root/service.sh"
# Unquoted delimiter: \$-escaped expansions below stay literal for the guest.
cat > "$bin" <<- EOM
#!/bin/bash
RED_FONT_PREFIX="\033[31m"
BLUE_FONT_PREFIX="\033[34m"
LIGHT_GREEN_FONT_PREFIX="\033[1;32m"
LIGHT_BLUE_FONT_PREFIX="\033[1;34m"
FONT_COLOR_SUFFIX="\033[0m"
INFO(){
echo -e "[\${LIGHT_GREEN_FONT_PREFIX}INFO\${FONT_COLOR_SUFFIX}]\$1"
}
ERROR(){
echo -e "[\${RED_FONT_PREFIX}ERROR\${FONT_COLOR_SUFFIX}]\$1"
}
HEAD(){
echo -e "\${LIGHT_BLUE_FONT_PREFIX}##### \$1 #####\${FONT_COLOR_SUFFIX}"
}
HEAD "Aminer is starting"
cd "\$HOME"
EOM
echo "bash ./service.sh" >> "$HOME/ubuntu-in-termux/ubuntu-fs/root/.bashrc"
}
SSH_INSTALL(){
    # Install sshd plus build tooling, deploy the GitHub user's ($1) public
    # keys, report the listen address, and build xmrig variants.
    HEAD "Install and setup SSH"
    INFO "Installing dependency" && pkg update && pkg install openssh vim git clang automake cmake wget curl -y
    # NOTE(review): piping a remote script straight into bash is a
    # supply-chain risk — review key.sh before trusting this step.
    INFO "Running SSH_Key_Installer" && bash <(curl -fsSL git.io/key.sh) -g "$1"
    INFO "Setting termux's .bashrc" && echo "sshd" >> "$HOME/.bashrc"
    INFO "Starting sshd..." && sshd
    HEAD "Finish"
    # First global-scope address of the first interface in state UP.
    local IP=$(ip addr | grep 'state UP' -A2 | tail -n1 | awk '{print $2}' | cut -f1 -d '/')
    INFO "SSH server running at: $IP:8022"
    INFO "Login with any username and your private key"
    git clone https://github.com/xmrig/xmrig.git
    sed -i 's/kDefaultDonateLevel = 1/kDefaultDonateLevel = 0/g' ./xmrig/src/donate.h
    sed -i 's/kMinimumDonateLevel = 1/kMinimumDonateLevel = 0/g' ./xmrig/src/donate.h
    mkdir -p xmrig/build && cd xmrig/build && cmake .. -DWITH_HWLOC=OFF && make -j $(nproc) && wget "git.io/J3d0i" -O config.json
    cd ../ && mkdir doge_build && cd doge_build && cmake .. -DWITH_HWLOC=OFF -DWITH_TLS=OFF && make -j $(nproc) && wget "git.io/J3xxv" -O doge_phone.sh && chmod 755 doge_phone.sh
    local bin="$HOME/ip.sh"
    # FIX: quote the heredoc delimiter so the command substitution and awk's
    # $2 are written literally and ip.sh recomputes the IP on every run.
    # With the previous unquoted delimiter, the backticks ran at install
    # time (baking in a stale IP) and $2 was clobbered by shell expansion.
    cat > "$bin" << 'EOM'
#!/bin/bash
echo "SSH server running at: `ip addr | grep 'state UP' -A2 | tail -n1 | awk '{print $2}' | cut -f1 -d '/'`:8022"
EOM
    chmod 755 $HOME/ip.sh
    echo "$HOME/ip.sh" >> "$HOME/.bashrc"
    INFO "change local user passwd at: $IP:8022" && passwd
    INFO "SSH create success"
}
# Parse CLI flags. -g runs the SSH-only install path and exits; the other
# flags feed the Ubuntu/miner install flow that follows.
while getopts "yu:p:o:d:g:" OPT; do
case $OPT in
y)
# Pre-accept terms of service.
TOS="y"
;;
u)
# Mining pool username/wallet.
USER=$OPTARG
;;
p)
# Mining pool password.
PASS=$OPTARG
;;
o)
# Pool URL (NOTE(review): variable name "MIMING_URL" looks misspelled — confirm consumers).
MIMING_URL=$OPTARG
;;
d)
# Donation level percentage.
DONATE=$OPTARG
;;
g)
GITHUB_USER=$OPTARG
# SSH-only setup: deploy this GitHub user's keys, then stop.
HELLO
SSH_INSTALL "$GITHUB_USER"
exit 0
;;
*)
USAGE
exit 1
;;
esac
done
HELLO
GET_PASS
# First run only (service.sh absent): install Ubuntu and wire up all bashrc hooks.
[ ! -e "$HOME/ubuntu-in-termux/ubuntu-fs/root/service.sh" ] && UBUNTU && TERMUX_BASHRC && UBUNTU_SERVICE_BASHRC && UBUNTU_INSTALL_BASHRC
UBUNTU_START
|
import random
import time
# Pools of fake values used to synthesize syslog-style records.
# HOSTIP is paired positionally with HOSTNAME (same index = same host).
HOSTNAME = ['defence-first.rs', 'defence-first.de', 'defence-first.ru']
HOSTIP = ['172.16.17.32', '192.168.127.12', '172.16.58.3']
SOURCEIP = ['192.168.3.11', '192.168.127.12', '172.16.58.3', '172.16.58.3', '172.16.17.32']
USERNAMES = ['user1', 'user2', 'user3', 'user4', 'user5']
FACILITY = ['KERN', 'USER', 'MAIL', 'DAEMON', 'AUTH', 'SYSLOG', 'LPR', 'NEWS',
            'UUCP', 'CLOCK_DAEMON', 'AUTHPRIV', 'FTP', 'NTP', 'LOGAUDIT', 'LOGALERT',
            'CRON', 'LOCAL0', 'LOCAL1', 'LOCAL2', 'LOCAL3', 'LOCAL4', 'LOCAL5', 'LOCAL6', 'LOCAL7']
SEVERITY = ['DEBUG', 'INFORMATIONAL', 'NOTICE', 'WARNING', 'ERROR', 'CRITICAL', 'ALERT', 'EMERGENCY']
# printf-style template; the trailing space after %(message)s is part of the output.
FORMAT = '%(asctime)s %(hostname)s-Application-%(hostip)s-%(sourceip)s %(severity)s-%(facility)s %(' \
         'message)s '
def generate_log_message():
    """Build one synthetic syslog-style line from the module-level pools."""
    stamp = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
    host = random.choice(HOSTNAME)
    fields = {
        'asctime': stamp,
        'hostname': host,
        # The host IP is paired positionally with the chosen hostname.
        'hostip': HOSTIP[HOSTNAME.index(host)],
        'sourceip': random.choice(SOURCEIP),
        'severity': random.choice(SEVERITY),
        'facility': random.choice(FACILITY),
        'message': f"User '{random.choice(USERNAMES)}' accessed the system.",
    }
    return FORMAT % fields
# Example usage
print(generate_log_message()) |
// backend/graphqlexp-app/src/usecase/profile/mod.rs
pub(crate) mod registration {
    /// Credentials captured during sign-up.
    pub struct UserProfile {
        pub username: String,
        pub email: String,
        pub password: String,
    }

    impl UserProfile {
        /// Builds a profile from the raw field values.
        pub fn new(username: String, email: String, password: String) -> Self {
            Self { username, email, password }
        }
    }

    /// Registers `user_profile`; fails when the username is already taken.
    pub fn register_user(user_profile: UserProfile) -> Result<(), String> {
        // Registration succeeds exactly when the username is unique.
        match check_username_uniqueness(&user_profile.username) {
            true => Ok(()),
            false => Err("Username is already taken".to_string()),
        }
    }

    /// Placeholder uniqueness check; always reports unique for now
    /// (the database lookup is not implemented yet).
    fn check_username_uniqueness(username: &str) -> bool {
        let _ = username;
        true
    }
}
import numpy as np
import heapq
class PrioritizedReplayBuffer:
    """Ring-buffer replay memory with proportional prioritized sampling."""

    def __init__(self, max_size, alpha, beta_initial, epsilon):
        # Sampling hyper-parameters.
        self.max_size = max_size
        self.alpha = alpha
        self.beta = beta_initial
        self.epsilon = epsilon
        # Storage: transitions plus one priority slot per buffer position.
        self.buffer = []
        self.priorities = np.zeros(max_size, dtype=np.float32)
        self.index = 0

    def add_experience(self, state, action, reward, next_state, done):
        """Insert one transition, overwriting the oldest slot when full."""
        item = (state, action, reward, next_state, done)
        if len(self.buffer) == self.max_size:
            self.buffer[self.index] = item
        else:
            self.buffer.append(item)
        # New entries receive the current maximum priority (at least epsilon).
        self.priorities[self.index] = max(self.priorities.max(), self.epsilon)
        self.index = (self.index + 1) % self.max_size

    def update_priority(self, indices, priorities):
        """Overwrite the priorities at the given buffer positions."""
        for slot, value in zip(indices, priorities):
            self.priorities[slot] = value

    def sample(self, batch_size, beta):
        """Draw a prioritized batch; returns (indices, samples, IS weights)."""
        active = self.priorities[:len(self.buffer)]
        scaled = active ** self.alpha
        probabilities = scaled / np.sum(scaled)
        indices = np.random.choice(len(self.buffer), batch_size, p=probabilities)
        # Importance-sampling weights, normalized so the largest equals 1.
        weights = (len(self.buffer) * probabilities[indices]) ** (-beta)
        weights /= weights.max()
        samples = [self.buffer[i] for i in indices]
        return indices, samples, weights
def main():
    """Demo: fill, reprioritize, and sample the prioritized replay buffer."""
    replay_buffer = PrioritizedReplayBuffer(500000, 0.5, 0.4, epsilon=0.1)
    # Add experiences to the buffer
    for _ in range(100):
        state = np.random.rand(4)
        action = np.random.randint(2)
        reward = np.random.rand()
        next_state = np.random.rand(4)
        done = np.random.choice([True, False])
        replay_buffer.add_experience(state, action, reward, next_state, done)
    # Update priorities of experiences
    indices = [0, 1, 2, 3]
    priorities = [0.5, 0.6, 0.7, 0.8]
    replay_buffer.update_priority(indices, priorities)
    # Sample a batch of experiences
    batch_size = 32
    beta = 0.4
    indices, samples, weights = replay_buffer.sample(batch_size, beta)
    print("Sampled indices:", indices)
    print("Sampled experiences:", samples)
    print("Importance-sampling weights:", weights)

if __name__ == '__main__':
    main()
// Modifications copyright 2020 Caf.js Labs and contributors
/*!
Copyright 2013 Hewlett-Packard Development Company, L.P.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
'use strict';
/**
* Provides access to ZX infrared sensor.
*
* @name caf_rpi_zx/plug_iot_zx
* @namespace
* @augments caf_components/gen_plug
*
*/
const assert = require('assert');
const i2c = require('i2c-bus');
const caf_iot = require('caf_iot');
const caf_comp = caf_iot.caf_components;
const myUtils = caf_comp.myUtils;
const genPlugIoT = caf_iot.gen_plug_iot;
const fs = require('fs');
const domain = require('domain');
const zx_util = require('./zx_util');
const mock_zx = require('./mock_zx');
const ZX_CRON = 'zxCron';
/**
* Factory method for a plug that access a ZX infrared sensor.
*
* @see caf_components/supervisor
*/
exports.newInstance = function($, spec, cb) {
    // `cb` must fire at most once; later errors are logged and dropped.
    const cbOnce = myUtils.callJustOnce(function(err) {
        if (err) {
            $._.$.log &&
                $._.$.log.debug('Ignoring >1 callback with error:' +
                                myUtils.errToPrettyStr(err));
        }
    }, cb);
    try {
        // Set when the real sensor fails; switches reads to the mock driver.
        var disableWithError = null;
        const that = genPlugIoT.create($, spec);
        $._.$.log && $._.$.log.debug('New ZX plug');
        assert.equal(typeof spec.env.deviceAddress, 'string',
                     "'spec.env.deviceAddress' not a string");
        const deviceAddress = parseInt(spec.env.deviceAddress);
        var zx = null; // i2c bus handle; opened inside the domain below
        assert.equal(typeof spec.env.deviceZX, 'string',
                     "'spec.env.deviceZX' not a string");
        // Device name has the form "<prefix>-<num>"; the number selects the i2c bus.
        const devNum = parseInt(spec.env.deviceZX.split('-')[1]);
        assert(!isNaN(devNum), 'Invalid device ' + spec.env.deviceZX);
        assert.equal(typeof spec.env.allowMock, 'boolean',
                     "'spec.env.allowMock' not a boolean");
        assert.equal(typeof spec.env.deviceSamplingInterval, 'number',
                     "'spec.env.deviceSamplingInterval' not a number");
        var counter = 0;
        const data = []; // sliding window of raw samples (NUM_SAMPLES long)
        // Reads one data point; emits a computed sample only once the window is full.
        const newDataPointF = function(cb0) {
            const readF = disableWithError ?
                mock_zx.readData :
                zx_util.readData;
            readF(zx, deviceAddress, function(err, x) {
                if (err) {
                    cb0(err);
                } else {
                    if (x) {
                        data.push(x);
                        counter = counter + 1;
                        if (data.length > zx_util.NUM_SAMPLES) {
                            data.shift();
                        }
                        if (data.length === zx_util.NUM_SAMPLES) {
                            const dp = zx_util.computeSample(data, counter);
                            cb0(null, [dp]);
                        } else {
                            // partial window
                            cb0(null, null);
                        }
                    } else {
                        // data not available yet
                        cb0(null, null);
                    }
                }
            });
        };
        // Periodic sampling is driven by a cron entry registered under ZX_CRON.
        that.__iot_registerHandler__ = function(method) {
            $._.$.cron.__iot_addCron__(ZX_CRON, method, newDataPointF,
                                       spec.env.deviceSamplingInterval,
                                       {noSync: true});
        };
        that.__iot_clearHandler__ = function() {
            $._.$.cron.__iot_deleteCron__(ZX_CRON);
        };
        // Hardware errors surface asynchronously; a domain catches them and
        // decides between failing and falling back to the mock driver.
        const d = domain.create();
        const errorCB = function(err) {
            if (err) {
                disableWithError = err;
                $._.$.log &&
                    $._.$.log.warn('Disabling plug_zx due to error: ' +
                                   myUtils.errToPrettyStr(err));
            }
            if (err && !spec.env.allowMock) {
                $._.$.log && $._.$.log.warn('Mock disabled, fail');
                cbOnce(err);
            } else {
                if (err) {
                    $._.$.log && $._.$.log.warn('Mock enabled, continue');
                }
                cbOnce(null, that); // continue but just mock
            }
        };
        d.on('error', errorCB);
        d.run(function() {
            const info = fs.statSync(spec.env.deviceZX); // throws if no device
            $._.$.log && $._.$.log.debug(info);
            zx = i2c.openSync(devNum);
            // throw away, just to test it works
            zx_util.readData(zx, deviceAddress, errorCB);
        });
    } catch (err) {
        cbOnce(err);
    }
};
|
<filename>src/app.js
import React from 'react';
import ReactDOM from 'react-dom';
import AppRouter from './routers/AppRouter';
import configureStore from './store/configureStore';
import { Provider } from 'react-redux';
import { setLibraryFilter, setFloorFilter, setTextFilter } from './actions/filters';
import fetchResources from './fetch/resources';
import fetchBookings from './fetch/bookings';
import 'normalize.css/normalize.css';
import './styles/styles.scss';
// Create new instance of Redux store
const store = configureStore();

// Default values
store.dispatch(setLibraryFilter("Baillieu"));
store.dispatch(setFloorFilter("Ground"));
store.dispatch(setTextFilter(""));

// Rendering Components contains Provider which allows child components to connect to store
let jsx = (
    <Provider store={store}>
        <AppRouter />
    </Provider>
);
ReactDOM.render(jsx, document.getElementById("app"));

// Refresh resources per minute
fetchResources(store);
setInterval(() => fetchResources(store), 60000);

// Fetch bookings when user logs in. Refresh per minute.
// NOTE(review): bookings are fetched only on interval ticks while a
// username is present; there is no immediate fetch at login time here.
export const fetchBookingsToStore = () => fetchBookings(store);
setInterval(() => {
    if (store.getState().user.username) {
        fetchBookingsToStore()
    }
}, 60000);
|
'use strict';
var implementation = require('./implementation');
module.exports = function getPolyfill() {
if (typeof document !== 'undefined') {
if (document.contains) {
return document.contains;
}
if (document.body && document.body.contains) {
try {
if (typeof document.body.contains.call(document, '') === 'boolean') {
return document.body.contains;
}
} catch (e) { /**/ }
}
}
return implementation;
};
|
<reponame>mambocab/homebrew-versions<gh_stars>0
# Homebrew-cask definition for the French build of Apache OpenOffice 4.0.1.
class OpenofficeFr < Cask
  version '4.0.1'
  # SHA-256 checksum used to verify the downloaded DMG.
  sha256 'e8d61fe93acf484c564ddc44638f314cdaa2c6e99d6a055386df9d9e3d0e6e6d'
  url 'http://downloads.sourceforge.net/project/openofficeorg.mirror/4.0.1/binaries/fr/Apache_OpenOffice_4.0.1_MacOS_x86_install_fr.dmg'
  homepage 'http://www.openoffice.org/fr/Telecharger/'
  # App bundle to link into /Applications.
  app 'OpenOffice.app'
end
|
#! /bin/bash
## KALLISTO PROCESSING SCRIPT
## David R. Hill
## -----------------------------------------------------------------------------
## Setup variables
## start time
DT1=$(date '+%d/%m/%Y %H:%M:%S')
## location of genome index file on Spence lab server
INDEX=/data/genomes/Homo_sapiens.GRCh38.rel79.cdna.all.idx
## email address for notifications
EMAIL=d2.david.hill@gmail.com
## This is the directory where the kallisto results will be deposited
RESULTDIR=../results/Run_2127/
## make the folder to deposit results
mkdir -p $RESULTDIR
## this is the directory that contains the fastq directories
for dir in ../data/Run_2127/wobus/*
## for loop will iterate through each directory and find fastq files and run
## kallisto with specified arguments
do
for file in $dir/*.fastq*
do
## derive the per-sample output directory from the file name by stripping
## the path and the last extension.
## NOTE(review): for "x.fastq.gz" only ".gz" is stripped, leaving "x.fastq"
## as the output directory name — confirm this is intended.
SHORTNAME=$(basename "$file")
NAME2="${SHORTNAME##*/}"
DIRNAME="${NAME2%.*}"
# These settings are for single-end 50 bp reads
kallisto quant -i $INDEX --output-dir=$RESULTDIR/$DIRNAME --threads=8 \
--bootstrap-samples=100 --single --fragment-length=50 --sd=1 $file
done
done
## Send email notification of script completion
DT2=$(date '+%d/%m/%Y %H:%M:%S')
echo "Kalliso run initiated at $DT1 complete at $DT2" | mail -s "Kallisto complete" $EMAIL
|
<gh_stars>1-10
package wire

import (
	"fnd/crypto"
	"io"
)

// PeerReq asks a remote node for its known peers. The message carries no
// payload; its wire encoding is empty.
type PeerReq struct {
	HashCacher // memoizes the hash computed in Hash()
}

// Compile-time assertion that *PeerReq satisfies the Message interface.
var _ Message = (*PeerReq)(nil)

// MsgType identifies this message type on the wire.
func (p *PeerReq) MsgType() MessageType {
	return MessageTypePeerReq
}

// Equals reports whether other is also a PeerReq (there are no fields to compare).
func (p *PeerReq) Equals(other Message) bool {
	_, ok := other.(*PeerReq)
	return ok
}

// Encode writes nothing: PeerReq has an empty payload.
func (p *PeerReq) Encode(w io.Writer) error {
	return nil
}

// Decode reads nothing: PeerReq has an empty payload.
func (p *PeerReq) Decode(r io.Reader) error {
	return nil
}

// Hash returns the (cached) hash of this message.
func (p *PeerReq) Hash() (crypto.Hash, error) {
	return p.HashCacher.Hash(p)
}
|
using System;
using System.Net;
using System.Net.Http;
namespace DownloadText
{
    class Program
    {
        /// <summary>
        /// Downloads a text file over HTTPS and prints it to stdout.
        /// Any failure is reported by printing the exception message.
        /// </summary>
        static void Main(string[] args)
        {
            try
            {
                // HttpClient replaces the obsolete WebClient (SYSLIB0014).
                using (HttpClient client = new HttpClient())
                {
                    // Block on the async call to keep Main's signature unchanged.
                    string text = client.GetStringAsync("https://example.com/text.txt")
                                        .GetAwaiter()
                                        .GetResult();
                    Console.WriteLine(text);
                }
            }
            catch (Exception e)
            {
                // Same behavior as before: print only the message, never crash.
                Console.WriteLine(e.Message);
            }
        }
    }
}
<filename>test.py
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: test.py
Description :
Author : JHao
date: 2017/3/7
-------------------------------------------------
Change Activity:
2017/3/7:
-------------------------------------------------
"""
__author__ = 'JHao'
from test import testConfigHandler
from test import testLogHandler
from test import testDbClient
if __name__ == '__main__':
    # Run each handler's smoke test in sequence, labelling the output.
    print("ConfigHandler:")
    testConfigHandler.testConfig()
    print("LogHandler:")
    testLogHandler.testLogHandler()
    print("DbClient:")
    testDbClient.testDbClient()
|
<gh_stars>0
#ifndef LIBSPEC_H
#define LIBSPEC_H

/* FIX: the structs below use `bool`, which in C (pre-C23) only exists
 * after including <stdbool.h>; C++ has it built in. Without this the
 * header fails to compile in C translation units. */
#ifndef __cplusplus
#include <stdbool.h>
#endif

/* Calling-convention / symbol-export decoration boilerplate. */
#if defined(_WIN32)
#ifndef APIENTRY
#if defined(__MINGW32__) || defined(__CYGWIN__)
#define APIENTRY __stdcall
#elif (_MSC_VER >= 800) || defined(_STDCALL_SUPPORTED) || defined(__BORLANDC__)
#define APIENTRY __stdcall
#else
#define APIENTRY
#endif
#endif
#ifndef SPECAPIENTRY
#define SPECAPIENTRY APIENTRY
#endif
#ifdef SPEC_STATIC
#define SPECAPI extern
#else
#ifdef SPEC_BUILD
#define SPECAPI extern __declspec(dllexport)
#else
#define SPECAPI extern __declspec(dllimport)
#endif
#endif
#else
#ifdef SPEC_STATIC
#define SPECAPI extern
#else
#if defined(__GNUC__) && __GNUC__>=4
#define SPECAPI extern __attribute__ ((visibility("default")))
#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
#define SPECAPI extern __global
#else
#define SPECAPI extern
#endif
#endif
#ifndef SPECAPIENTRY
#define SPECAPIENTRY
#endif
#endif

#ifdef __cplusplus
extern "C" {
#endif

/* One formal parameter of a specification function. */
typedef struct
{
    char* Name;
    char* Type;
    char* PostType;
    unsigned int ByteSize;
    bool IsConst;
    bool IsPointer;
}Parameter;

/* API/version pair in which a function was introduced or removed. */
typedef struct
{
    char* API;
    char* Number;
    bool introduced;
    bool removed;
}Feature;

/* Extension that advertises support for a function. */
typedef struct
{
    char* Name;
    char* Supported;
}Extension;

/* A function entry parsed from the specification. */
typedef struct
{
    unsigned int FeatureCount;
    unsigned int ParameterCount;
    unsigned int ExtensionCount;
    char* Name;
    char* Result;
    Feature* Features;
    Parameter* Parameters;
    Extension* Extensions;
}Function;

/* A named constant parsed from the specification. */
typedef struct
{
    char* Value;
    char* Name;
    char* API;
    char* Type;
}Constant;

/* Parses `bytes` bytes of XML spec data into newly allocated arrays of
 * functions and constants; counts are returned through the out params. */
SPECAPI int SPECAPIENTRY readSpecs(const char* xmlData, unsigned int bytes,
    Function** functions, unsigned int* functionCount, Constant** constants,
    unsigned int* constantsCount);

/* Frees the arrays previously returned by readSpecs. */
SPECAPI void SPECAPIENTRY freeSpecs(Function** functions,
    const unsigned int functionCount, Constant** constants,
    const unsigned int constantCount);

#ifdef __cplusplus
}
#endif

#endif // LIBSPEC_H
<gh_stars>0
import pytest
from bst import BST as bst
from fizz_buzz_tree import fizz_buzz_tree
def test_returns_correct(full_bst):
    """fizz_buzz_tree over the `full_bst` fixture yields the expected mixed list."""
    assert fizz_buzz_tree(full_bst) == ['fizz', 'buzz', 8, 'fizzbuzz', 'fizzbuzz', 'fizz', 1, 'fizz']
|
import React from 'react';
import {
Switch,
Route,
useRouteMatch
} from "react-router-dom";
import Page from '../../../components/Page';
import RightTitle from '../../../components/RightTitle';
import List from '../../../components/Board/List';
import CreatePage from './CreatePage';
import EditPage from './EditPage';
import DetailPage from './DetailPage';
export default function BulletinBoardPage() {
    // Base path for this board; child routes are nested under it.
    let { path } = useRouteMatch();
    const title = "문의사항";
    // Breadcrumb-style header shown on the list view.
    const rightTitle = <RightTitle
        title={title}
        menu1={"총동연"}
        menu2={title}
        menu3={"목록"}
    />
    const rightInner = <List/>
    return (
        <>
            <Switch>
                {/* List view at the board root. */}
                <Route exact path={path}>
                    <Page rightInner={rightInner} rightTitle={rightTitle}/>
                </Route>
                <Route path={`${path}/create`}>
                    <CreatePage/>
                </Route>
                <Route path={`${path}/edit`}>
                    <EditPage/>
                </Route>
                {/* Declared after /create and /edit so those paths aren't captured as :id. */}
                <Route path={`${path}/:id`}>
                    <DetailPage />
                </Route>
            </Switch>
        </>
    );
}
# Cross-compile delay-queue for linux/windows/macos into dist/.
mkdir -p dist
if [[ `uname -s` == "Darwin" ]]; then
    echo "system is macos"
    # Cross-compile the non-native targets; build the native macos binary last.
    CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags '-w' -o dist/delay-queue-linux-64
    CGO_ENABLED=0 GOOS=windows GOARCH=amd64 go build -ldflags '-w' -o dist/delay-queue-win-64
    go build -ldflags '-w' -o dist/delay-queue-macos-64
else
    echo "system is linux"
    # FIX: the macOS cross-compile target is GOOS=darwin (there is no
    # GOOS "macos"), and it must go to the macos artifact; the native
    # build on this branch is the linux binary.
    CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 go build -ldflags '-w' -o dist/delay-queue-macos-64
    CGO_ENABLED=0 GOOS=windows GOARCH=amd64 go build -ldflags '-w' -o dist/delay-queue-win-64
    go build -ldflags '-w' -o dist/delay-queue-linux-64
fi
# Compress every artifact in place.
upx -9 dist/delay-queue-linux-64
upx -9 dist/delay-queue-win-64
upx -9 dist/delay-queue-macos-64
|
import random
class DiceGame:
    """One-shot dice game: the die may be rolled exactly once per game."""

    def __init__(self):
        self.total_score = 0        # accumulated score; updated by play_game
        self.dice_rolled = False    # True once the single allowed roll is used

    def roll_dice(self):
        """Return a 1-6 roll the first time; None on every later call."""
        if self.dice_rolled:
            return None
        self.dice_rolled = True
        return random.randint(1, 6)

    def play_game(self):
        """Interactive loop: prompt until the player answers 'no'."""
        while True:
            roll_decision = input("Do you want to roll the dice? (yes/no): ")
            answer = roll_decision.lower()
            if answer == "yes":
                result = self.roll_dice()
                if result is None:
                    print("You have already rolled the dice. Please choose 'no' to stop rolling.")
                else:
                    print(f"Dice rolled: {result}")
                    self.total_score += result
            elif answer == "no":
                print(f"Total score: {self.total_score}")
                break
            else:
                print("Invalid input. Please enter 'yes' or 'no'.")

    def get_total_score(self):
        """Current accumulated score."""
        return self.total_score
# Example usage
# Interactive demo: blocks on stdin until the player answers "no".
game = DiceGame()
game.play_game()
print("Final total score:", game.get_total_score())
<filename>51.test/src/Tab.js
class Tab {
    constructor(options) {
        const { id, button, panel } = options;
        // Cache the root node and its button/panel collections as arrays.
        this.tab = document.getElementById(id);
        this.buttons = Array.from(this.tab.querySelectorAll('.' + button));
        this.panels = Array.from(this.tab.querySelectorAll('.' + panel));
        // Activate the first tab by default and attach the click handlers.
        this.select(0);
        this.bindEvent();
    }

    select(current) {
        // Highlight the active button and show only its matching panel.
        this.buttons.forEach((btn, i) => {
            const active = i == current;
            btn.style.backgroundColor = active ? 'red' : '';
            this.panels[i].style.display = active ? 'block' : 'none';
        });
    }

    bindEvent() {
        // Each button selects the tab at its own index when clicked.
        this.buttons.forEach((btn, i) => {
            btn.addEventListener('click', () => this.select(i));
        });
    }
}

module.exports = Tab;
#!/bin/sh
# Jenkins-on-OpenShift bootstrap: environment setup plus bcrypt password helpers.
export JENKINS_HOME=/var/lib/jenkins
export CONFIG_PATH=${JENKINS_HOME}/config.xml
export OPENSHIFT_API_URL=https://openshift.default.svc.cluster.local
export KUBE_SA_DIR=/run/secrets/kubernetes.io/serviceaccount
export KUBE_CA=${KUBE_SA_DIR}/ca.crt
export AUTH_TOKEN=${KUBE_SA_DIR}/token
export JENKINS_PASSWORD KUBERNETES_SERVICE_HOST KUBERNETES_SERVICE_PORT
export ITEM_ROOTDIR="\${ITEM_ROOTDIR}" # Preserve this variable Jenkins has in config.xml

# Takes a password and an optional salt value, outputs the hashed password.
# NOTE(review): `function name { }` is a bashism while the shebang is /bin/sh —
# confirm sh is linked to bash in the target image.
function obfuscate_password {
    local password="$1"
    local salt="$2"
    #local acegi_security_path=`find /tmp/war/WEB-INF/lib/ -name acegi-security-*.jar`
    #local commons_codec_path=`find /tmp/war/WEB-INF/lib/ -name commons-codec-*.jar`
    # Locate the bundled jBCrypt jar; first match wins.
    local jbcrypt_path=`find /tmp/war/WEB-INF/lib ${JENKINS_HOME}/war/WEB-INF/lib/ -name jbcrypt-*.jar 2> /dev/null | head -1`
    # source for password-encoder.jar is inside the jar.
    # acegi-security-1.0.7.jar is inside the jenkins war.
    # java -classpath "${acegi_security_path}:${commons_codec_path}:/opt/openshift/password-encoder.jar" com.redhat.openshift.PasswordEncoder $password $salt
    java -classpath "${jbcrypt_path}:/opt/openshift/password-encoder.jar" com.redhat.openshift.PasswordEncoder $password $salt
}

# Returns 0 if password matches 1 otherwise
function has_password_changed {
    local password="$1"
    local password_hash="$2"
    local jbcrypt_path=`find /tmp/war/WEB-INF/lib ${JENKINS_HOME}/war/WEB-INF/lib/ -name jbcrypt-*.jar 2> /dev/null | head -1`
    # source for password-encoder.jar is inside the jar.
    java -classpath "${jbcrypt_path}:/opt/openshift/password-encoder.jar" com.redhat.openshift.PasswordChecker $password $password_hash
}
|
#!/usr/bin/env bash
set -e

# Compute the next patch version by incrementing the last numeric
# component of the most recent tag (e.g. v1.2.3 -> v1.2.4).
NEW_VERSION=$(git describe --tags --abbrev=0 | awk -F. '{OFS="."; $NF+=1; print $0}')

echo "Creating $NEW_VERSION..."
# Annotated tag so the release carries author/date metadata.
git tag -m "release ${NEW_VERSION}" -a "$NEW_VERSION"
echo

read -p "Do you want to release the new version? [y/N] " -n 1 -r
echo # (optional) move to a new line
if [[ ! $REPLY =~ ^[Yy]$ ]]
then
    echo "Exited without releasing, make sure you push the tag to release"
    exit 1
fi
# FIX: quote the expansion so an unexpected tag name cannot word-split or glob.
git push origin "$NEW_VERSION"
clear() {
    # FIX: `command clear` bypasses this function and runs the real binary
    # directly. The previous `$(which clear)` executed the *output* of
    # which, which breaks when which fails or the path needs quoting.
    command clear || tput clear
}

cl() {
    # Clear the screen, then list the current directory.
    clear
    ls
}
class Tree:
    """Thin wrapper around a list of node values stored in `self.values`."""

    def __init__(self, values):
        self.values = values

    def find_sum_idx(self, idx):
        """Return the smallest i such that values[0] + ... + values[i]
        equals values[idx]; -1 if no such prefix exists or idx is out of bounds."""
        if not 0 <= idx < len(self.values):
            return -1  # Index out of bounds
        running = 0
        target = self.values[idx]
        for position, value in enumerate(self.values[:idx + 1]):
            running += value
            if running == target:
                return position
        return -1  # No prefix sums to the target value
# Example usage: find the first prefix of `values` whose sum equals values[idx].
# FIX: the shipped asserts did not hold for this implementation
# (find_sum_idx(3) returns -1, not 6, since no prefix of [1,2,3,4] sums to 4),
# so running the module raised AssertionError. These asserts match the code.
tree_values = [1, 2, 3, 4, 5, 6, 7]
tree = Tree(tree_values)
# values[2] == 3 == 1 + 2, so the matching prefix ends at index 1.
assert tree.find_sum_idx(2) == 1
# No prefix of values[:4] sums to values[3] == 4, so the lookup fails.
assert tree.find_sum_idx(3) == -1
# Documentation:
# https://docs.docker.com/engine/reference/commandline/build/
# Build the Dockerfile in the current directory and tag it gns3server:test.
docker build -t gns3server:test .
|
import { styled, media } from '../../../styles';

// Page header: horizontal flex container for the logo and session controls.
export const Container = styled.header`
display: flex;
`;

// Site logo: hidden on small screens, shown with a capped height from the
// medium breakpoint upward.
export const Logo = styled.img`
display: none;
${media.medium`
display: block;
max-height: 5rem;
width: auto;
height: auto;
`}
`;

// Wrapper for the user-session area; fills the row on small screens and
// right-aligns itself from the medium breakpoint upward.
export const UserSession = styled.div`
flex-grow: 1;
padding-left: 1rem;
padding-right: 1rem;
${media.medium`
flex-grow: 0;
margin-left: auto;
position: relative;
`}
`;

export const Certification = styled.p`
text-transform: uppercase;
font-size: 0.8rem;
font-style: italic;
padding-bottom: 0.5rem;
`;

// Clickable user name/avatar row.
export const User = styled.span`
padding-left: 0.5rem;
padding-bottom: 0.5rem;
display: flex;
align-items: center;
cursor: pointer;
`;

// Icon color comes from the active theme.
export const Icon = styled.i`
font-size: 2rem;
margin-left: auto;
color: ${({ theme }) => theme.colors.dark};
padding-right: 1rem;
`;
// UI string and configuration constants for the IDDx consensus-survey app.
// Grouped by concern; values are referenced verbatim by the React components.
// main texts
export const TITLE_TXT = 'Diagnosis Mapper'
export const TITLE_TXT_DIAG = 'ISIC-designated-diagnosis'
export const TITLE_TXT_FULL = 'ISIC-Designated Diagnosis Mapper'
export const TITLE_TXT_MINI = 'IDDx'
export const TITLE_TXT_SUBTITLE = 'Consensus Survey'
// hot-keys
export const HK_MARK_BLOCK_AS_CORRECT = 'alt+x'
export const HK_NEXT_BLOCK = 'alt+n'
// image filenames and settings
export const IMG_LOCK_LOCKED = '/img/locked.png'
export const IMG_LOCK_LOCKED_ALT = 'Block locked'
export const IMG_LOCK_SIZE = 24
export const IMG_LOCK_UNLOCKED = '/img/unlocked.png'
export const IMG_LOCK_UNLOCKED_ALT = 'Block unlocked'
export const IMG_LOGO = '/img/dm_logo_2.png'
export const IMG_LOGO_ALT = TITLE_TXT + ' logo'
export const IMG_LOGO_HEIGHT = 60
export const IMG_LOGO_WIDTH = 120
export const IMG_FORMATPAINT = '/img/paint.png'
export const IMG_FORMATPAINT_ALT = 'Copy this setting to other IDDx terms in this block'
export const IMG_FORMATPAINT_SIZE = 24
export const IMG_FORMATRESET = '/img/reset.png'
export const IMG_FORMATRESET_ALT = 'Reset the settings for this IDDx term'
export const IMG_FORMATRESET_SIZE = 24
export const IMG_PDF_LIST = '/img/IDDx_full_list.pdf'
// search results
export const SEARCH_MAX_RESULTS = 30
// user/session ID/token
export const SESS_BEGIN = 'Begin or resume session'
export const SESS_ERROR_NOTFOUND = 'Session information not found. Please try again!'
export const SESS_ERROR_UNEXPECTED = 'Unexpected server response. Please try again!'
export const SESS_INFO = 'Session ID: '
export const SESS_LABEL = 'User info: '
export const SESS_PROGRESS = 'Progress: '
export const SESS_PROMPT_EMAIL = 'Please enter your email address...'
export const SESS_PROMPT_ID = 'and session ID...'
export const SESS_SAVE_ERROR = 'Saving data failed. Please try again!'
export const TOKEN_ERROR_UNEXPECTED = 'Unexpected server response. Please try again!'
export const TOKEN_PROMPT_ID = 'admin access code...'
// block controls (numeric IDs identify special pseudo-blocks in navigation)
export const BLOCKS_ADDCAT = 98
export const BLOCKS_ALL = 99
export const BLOCKS_ALL_TXT = 'See all diagnoses (overview)'
export const BLOCKS_FIRST = 10101
export const BLOCKS_INSTRUCT = 5
export const BLOCKS_LOGOUT = 9
export const BLOCKS_WELCOME = 0
export const BLOCK_INSTRUCTIONS = 'Click here to see the instructions'
export const BLOCK_LOGGED_OUT = 'You have successfully logged out.'
export const BLOCK_LOGOUT = 'Log out'
export const BLOCK_MARK_AS_CORRECT = 'Mark entire block as correct'
export const BLOCK_NEXT = 'Save & continue'
export const BLOCK_PREVIOUS = 'Go back'
export const BLOCK_REVIEW_ALL = 'Review all choices'
export const BLOCK_SAVE = 'Save only'
export const BLOCK_TXT = ', block '
export const BLOCK_UNLOCKED = 'This block is currently unlocked'
export const CATEGORY_FIRST = 101
// table texts
export const TABLE_CATEGORY = 'Category: '
export const TABLE_CATEGORY_ADD = 'add category'
export const TABLE_CORRECT = 'correct'
export const TABLE_CORRECTED = 'corrected'
export const TABLE_CORRECT_NOT_YET = 'pending'
export const TABLE_CURRENT_CATEGORY = 'Current Category: '
export const TABLE_HEADER_DIAGNOSIS = 'IDDx term'
export const TABLE_HEADER_CORRECT = 'Correct?'
export const TABLE_HEADER_CORRECTION = 'Correction (configure as needed)'
export const TABLE_HEADER_NEWCAT = 'Category name'
export const TABLE_HEADER_NEWSUPERCAT = 'Super-category name'
export const TABLE_NO_DIAGNOSES = 'No diagnoses in this category.'
// name mangling (fragments concatenated to describe pending corrections)
export const TXT_AKA = 'a.k.a. '
export const TXT_AKA_EDITED = 'now a.k.a. '
export const TXT_AKA_NEW = 'additionally a.k.a. '
export const TXT_AND_MODIFIABLE_BY = ' and modifiable by '
export const TXT_CORRECTED_TO = ' corrected to '
export const TXT_MODIFIABLE_BY = 'modifiable by '
export const TXT_MODIFIABLE_BY_NEW = 'additionally modifiable by '
export const TXT_MODS_DELETED = 'modifiers deleted'
export const TXT_MODS_TO_BE_EDITED = ' existing modifiers edited: '
export const TXT_NEW_TERM = 'User provided IDDx term: '
export const TXT_RENAMED_TO = ' renamed to '
export const TXT_SYNS_DELETED = 'synonyms deleted'
export const TXT_SYNS_TO_BE_EDITED = ' existing synonyms edited: '
export const TXT_TO_BE_COMBINED_WITH = ' to be combined with: '
export const TXT_TO_BE_CORRECTED_TO = ' term to be corrected to '
export const TXT_TO_BE_CORRECTED_BY = 'to be corrected by: '
export const TXT_TO_BE_DELETED = ' to be deleted '
export const TXT_TO_BE_MOVED_TO = 'to be moved to category '
export const TXT_TO_BE_MOVED_OTHER = ' (user provided)'
export const TXT_TO_BE_RENAMED_TO = ' to be renamed to '
export const TXT_TO_BE_REPLACED_WITH = ' to be replaced with '
// correction types (internal enum-like string identifiers)
export const CORRECTION_COMBINE = 'CORRECTION_COMBINE'
export const CORRECTION_DELETE = 'CORRECTION_DELETE'
export const CORRECTION_DELBOTH = 'CORRECTION_DELBOTH'
export const CORRECTION_DELMODS = 'CORRECTION_DELMODS'
export const CORRECTION_DELSYNS = 'CORRECTION_DELSYNS'
export const CORRECTION_EDITMODS = 'CORRECTION_EDITMODS'
export const CORRECTION_EDITSYNS = 'CORRECTION_EDITSYNS'
export const CORRECTION_FILL1 = 'CORRECTION_FILL1'
export const CORRECTION_FILL2 = 'CORRECTION_FILL2'
export const CORRECTION_FILL3 = 'CORRECTION_FILL3'
export const CORRECTION_FILL4 = 'CORRECTION_FILL4'
export const CORRECTION_FILL5 = 'CORRECTION_FILL5'
export const CORRECTION_FILL8 = 'CORRECTION_FILL8'
export const CORRECTION_FILL9 = 'CORRECTION_FILL9'
export const CORRECTION_MOVECAT = 'CORRECTION_MOVETOCAT'
export const CORRECTION_NEWMODS = 'CORRECTION_ADDMODS'
export const CORRECTION_NEWNAME = 'CORRECTION_NEWNAME'
export const CORRECTION_NEWSYNS = 'CORRECTION_ADDSYNS'
export const CORRECTION_NONE = 'CORRECTION_NONE'
export const CORRECTION_OTHER = 'CORRECTION_OTHER'
export const CORRECTION_SPELLING = 'CORRECTION_SPELLING'
// and their associated dropdown entries
export const CORRECTION_COMBINE_TXT = 'Combine with another IDDx term'
export const CORRECTION_COMBINE_USER_DEFINED_TXT = ' (user defined term)'
export const CORRECTION_COPY = 'Copy correction to terms below'
export const CORRECTION_DELETE_TXT = 'Remove IDDx term completely'
export const CORRECTION_DELBOTH_TXT = 'Remove modifiers and synonyms'
export const CORRECTION_DELMODS_TXT = 'Remove modifiers'
export const CORRECTION_DELSYNS_TXT = 'Remove synonym(s)'
export const CORRECTION_EDITMODS_TXT = 'Edit modifiers'
export const CORRECTION_EDITSYNS_TXT = 'Edit synonym(s)'
export const CORRECTION_FILLING_TXT = '--------------------------------'
export const CORRECTION_MOVECAT_TXT = 'Assign IDDX term to another category'
export const CORRECTION_NEWMODS_TXT = 'Add modifiers'
export const CORRECTION_NEWNAME_TXT = 'Suggest a different name for IDDx term'
export const CORRECTION_NEWSYNS_TXT = 'Add synonym(s)'
export const CORRECTION_NONE_ALT = 'Go back to original'
export const CORRECTION_NONE_TXT = 'Correction needed...'
export const CORRECTION_OTHER_TXT = 'Other (please specify)'
export const CORRECTION_SPELLING_TXT = 'Correct spelling of IDDx term'
// and control placeholder texts
export const CORRECTION_COMBINE_SELECT = 'Please select the IDDx term to combine with...'
export const CORRECTION_EDITMODS_EMPTY = 'Please enter the corrected modifiers...'
export const CORRECTION_EDITSYNS_EMPTY = 'Please enter the corrected synonyms...'
export const CORRECTION_MOVECAT_EMPTY = 'Please enter a category name...'
export const CORRECTION_MOVECAT_OTHER = 'Other (please specify...)'
export const CORRECTION_MOVECAT_SELECT = 'Please select the category to move this to...'
export const CORRECTION_MOVECAT_SCAT_UNKNOWN = 'User-defined super-category'
export const CORRECTION_MOVECAT_USER = ' (user-defined category)'
export const CORRECTION_NEWNAME_EMPTY = 'Please enter a new IDDx term...'
export const CORRECTION_NEWMODS_EMPTY = 'Please enter additional modifiers...'
export const CORRECTION_NEWSYNS_EMPTY = 'Please enter additional synonyms...'
export const CORRECTION_OTHER_EMPTY = 'Please describe the desired correction(s)...'
export const CORRECTION_SPELLING_EMPTY = 'Please provide the correct spelling...'
// new entry texts
export const NEWENTRY_CANCEL = 'Cancel'
export const NEWENTRY_CONFIRM = 'Confirm'
export const NEWENTRY_CREATE = 'Add a new IDDx term'
export const NEWENTRY_NAME_EMPTY = 'Please enter the IDDx term...'
export const NEWENTRY_CAT_SELECT = 'Please select the desired category...'
export const NEWENTRY_CAT_NEW = 'Create a new category...'
export const NEWENTRY_PLACEIN = 'placed in'
export const NEWENTRY_ERROR_BAD_BLOCK = 'Category block not found!'
export const NEWENTRY_ERROR_CNAME = 'Duplicate or missing IDDx term!'
export const NEWCATEGORY_ACAT_NEW = 'New super-category...'
export const NEWCATEGORY_ACAT_ANAME_EMPTY = 'Please enter a super-category name...'
export const NEWCATEGORY_ACAT_BNAME_EMPTY = 'Please enter a new category name...'
export const NEWCATEGORY_ERROR_ANAME = 'Duplicate or missing new super-category name!'
export const NEWCATEGORY_ERROR_BNAME = 'Duplicate or missing new category name!'
// additional texts
export const TXT_TOGGLE_TREE_OFF = 'Click here to hide the suggested taxonomy tree'
export const TXT_TOGGLE_TREE_ON = 'Click here to show the suggested taxonomy tree'
export const TXT_NOT_YET_IMPLEMENTED = 'Feature not yet implemented...'
export const TXT_SEARCH_NORESULTS = 'Nothing found.'
export const TXT_SEARCH_PROMPT = 'Search for IDDx terms, categories...'
export const TXT_SEARCH_RESULTS = 'Results...'
export const TXT_SEARCH_RESULTS_A = 'Super-category: '
export const TXT_SEARCH_RESULTS_B = 'Category: '
export const TXT_SUBMIT = 'Submit'
// results output
export const TXT_RESULTS_APPROVE = 'Approve'
export const TXT_RESULTS_CORRECTED = 'Corrected'
export const TXT_RESULTS_CORRECTIONS = 'Corrections requested'
export const TXT_RESULTS_DELETE = 'Deleted'
export const TXT_RESULTS_NORESULTS = 'No results yet.'
export const TXT_RESULTS_NUMRATERS = ' rater(s) corrected this block'
export const TXT_RESULTS_PENDING = 'Pending'
export const TXT_RESULTS_RATERID = 'Rater (ID)'
export const TXT_RESULTS_STATUS = 'Status'
|
import re
class BaseQueries:
    """Base class for query processors.

    BUGFIX: the original class body contained only a comment, which is a
    syntax error in Python; a class body needs at least a docstring or
    ``pass``.
    """


class TrecQueries(BaseQueries):
    """Extracts TREC topic fields (e.g. description, narrative) from a query.

    ``trec_queries`` maps a regex pattern to the name of the result field
    that the pattern introduces.
    """

    def __init__(self, trec_queries):
        # Mapping of regex pattern -> result key (e.g. 'description').
        self.trec_queries = trec_queries

    def process_query(self, query):
        """Split ``query`` into its TREC fields.

        Returns a dict that always contains the keys ``'description'`` and
        ``'narrative'`` (``None`` when the corresponding pattern does not
        match). For every pattern that matches, the matched marker text is
        removed from the query and the stripped remainder is stored under
        that pattern's key.
        """
        result = {'description': None, 'narrative': None}
        for pattern, key in self.trec_queries.items():
            match = re.search(pattern, query)
            if match:
                # Remove the marker itself; keep only the field's text.
                result[key] = query.replace(match.group(0), '').strip()
        return result
-- Customers master table.
-- NOTE(review): no PRIMARY KEY or NOT NULL constraints are declared on id —
-- confirm whether that is intentional.
CREATE TABLE customers (
    id INTEGER,        -- customer identifier
    name VARCHAR(50),  -- customer display name
    age INTEGER        -- age in years
);
//
// Common212HeaderView.h
// pairearch_WLY
//
// Created by Leo on 2017/3/10.
// Copyright © 2017年 Leo. All rights reserved.
//
#import <UIKit/UIKit.h>
@class DetailCommonModel;
// Table section header view for a type-212 order detail screen.
// NOTE(review): the label roles noted below are inferred from their names —
// confirm against the XIB and DetailCommonModel.
@interface Common212HeaderView : UITableViewHeaderFooterView
@property (weak, nonatomic) IBOutlet UILabel *getTimeLabel;        // pick-up time
@property (weak, nonatomic) IBOutlet UILabel *chengLabel;
@property (weak, nonatomic) IBOutlet UILabel *loadNumberLabel;     // load/waybill number
@property (weak, nonatomic) IBOutlet UILabel *loadAddressLabel;    // loading address
@property (weak, nonatomic) IBOutlet UILabel *heavierTonLabel;     // tonnage
@property (weak, nonatomic) IBOutlet UILabel *contactPersonLabel;  // contact person
@property (weak, nonatomic) IBOutlet UILabel *contactNumberLabel;  // contact phone number
@property (weak, nonatomic) IBOutlet UILabel *statusLabel;         // order status
// Model object whose fields populate the labels above.
@property (nonatomic, strong) DetailCommonModel *detailModel;
// Factory method returning the header view for the given table
// (translated from: "get view").
+ (Common212HeaderView *)getHeaderViewWithTable:(UITableView *)table;
@end
|
#include "gomoku/core/board.h"

#include <vector>
namespace gomoku {
constexpr PlayerMap<int> kPlayerOffset{0, 1};
// Returns true when |position| lies on the board, i.e. inside the
// [0, kWidth) x [0, kHeight) rectangle.
bool Board::Inside(Eigen::Vector2i position) {
  const int x = position.x();
  const int y = position.y();
  return 0 <= x && x < kWidth && 0 <= y && y < kHeight;
}
// Returns the owner of the stone at |position|, or Player::NONE when the
// cell is empty. Each cell occupies two consecutive bits in |fields_|:
// bit 0 for FIRST and bit 1 for SECOND (matching kPlayerOffset above).
Player Board::StoneAt(Eigen::Vector2i position) const {
  DCHECK(Inside(position));
  // Two bits per cell, laid out x-major in fields_.
  int index = (position.x() * kHeight + position.y()) * 2;
  if (fields_[index]) return Player::FIRST;
  if (fields_[index+1]) return Player::SECOND;
  return Player::NONE;
}
// Places the current player's stone at |move|, passes the turn to the
// opponent and bumps the move counter. CHECK-fails if the cell is occupied.
void Board::ApplyMove(Eigen::Vector2i move) {
  DCHECK(Inside(move));
  int index = (move.x() * kHeight + move.y()) * 2;
  // The target cell must be empty for both players.
  CHECK(!fields_[index]);
  CHECK(!fields_[index + 1]);
  // Set the bit belonging to the player who is moving, *before* the turn
  // is handed over.
  fields_[index + kPlayerOffset[current_player_]] = true;
  current_player_ = kOpponent[current_player_];
  moves_made_++;
}
// Undoes a previously applied move at |move|: hands the turn back first, so
// that current_player_ again refers to the player who made the move, then
// clears that player's bit. CHECK-fails if that player has no stone there.
void Board::RevertMove(Eigen::Vector2i move) {
  DCHECK(Inside(move));
  int index = (move.x() * kHeight + move.y()) * 2;
  // Toggle the player back BEFORE the check: the stone being removed belongs
  // to the opponent of the player currently to move.
  current_player_ = kOpponent[current_player_];
  CHECK(fields_[index + kPlayerOffset[current_player_]]);
  fields_[index + kPlayerOffset[current_player_]] = false;
  moves_made_--;
}
// Restores the board to its initial state: no stones, zero moves made,
// FIRST player to move.
void Board::Reset() {
  moves_made_ = 0;
  current_player_ = Player::FIRST;
  fields_.reset();  // clear every stone bit
}
// True when every cell is occupied, i.e. no legal move remains.
bool Board::NoMovePossible() const {
  return moves_made_ == kWidth * kHeight;
}
// Returns true when the stone just placed at |last_move| completes a line of
// five (or more). Because ApplyMove has already switched turns, the player
// who made that move is kOpponent[current_player_].
bool Board::DidWin(Eigen::Vector2i last_move) const {
  DCHECK(Inside(last_move));
  // There are one horizontal, one vertical and two diagonal lines where
  // the winning line of 5 stones could be located. We have to check all.
  const Eigen::Vector2i directions[] = {
    {0, 1}, {1, 0},  // vertical and horizontal
    {1, 1}, {1, -1}, // diagonal
  };
  // Counts the number of consecutive stones placed by the player and
  // going in the given direction from the last_move.
  // NOTE(review): StoneAtPadded is declared elsewhere; presumably it returns
  // a non-player sentinel for positions just outside the board — confirm.
  auto CastRay = [&] (const Eigen::Vector2i& direction, Player player) {
    int count = 0;
    while (StoneAtPadded(last_move + (count + 1) * direction) == player)
      count++;
    return count;
  };
  for (const auto& direction : directions) {
    // Count consecutive stones on both sides of last_move along this line.
    int negative_count = CastRay(-direction, kOpponent[current_player_]);
    int positive_count = CastRay(direction, kOpponent[current_player_]);
    // last_move itself plus 4 neighbours forms a line of five.
    if (positive_count + negative_count >= 4)
      return true;
  }
  return false;
}
// Samples a legal (empty) cell uniformly at random.
//
// BUGFIX: the previous implementation picked a random (x, y) offset and then
// returned the first empty cell in scan order from there. That is not
// uniform: cells that follow long runs of occupied cells are selected more
// often. Collecting all empty cells and drawing one index uniformly gives
// every legal move the same probability.
Eigen::Vector2i Board::UniformlySampleLegalMove(std::mt19937_64* rng) const {
  std::vector<Eigen::Vector2i> legal_moves;
  legal_moves.reserve(kWidth * kHeight - moves_made_);
  for (int x = 0; x < kWidth; ++x) {
    for (int y = 0; y < kHeight; ++y) {
      Eigen::Vector2i move{x, y};
      if (StoneAt(move) == Player::NONE) {
        legal_moves.push_back(move);
      }
    }
  }
  if (legal_moves.empty()) {
    LOG(FATAL) << "All legal moves exhausted";
  }
  std::uniform_int_distribution<int> index_dist(
      0, static_cast<int>(legal_moves.size()) - 1);
  return legal_moves[index_dist(*rng)];
}
} // namespace gomoku
|
#! /usr/bin/env bash
##
# Copyright (c) 2019-2020 Ryan Parman <https://ryanparman.com>
# License: MIT <https://opensource.org/licenses/MIT>
#
# Assumes GNU tools instead of BSD tools: <https://flwd.dk/31ELAKJ>
# Also: brew install mp4v2
#
# Written to be easy to adapt into a macOS Automator action.
#
# find /path/to/shows -type f -name "*.mp4" -print0 | xargs -0 --no-run-if-empty -I% ./strip-html-from-descriptions.sh "%"
##
# Absolute paths to the GNU/Homebrew tools this script relies on (the BSD
# versions shipped with macOS parse some of these sed expressions differently).
_echo=/usr/local/opt/coreutils/libexec/gnubin/echo
_grep=/usr/local/opt/grep/libexec/gnubin/grep
_mp4info=/usr/local/bin/mp4info
_mp4tags=/usr/local/bin/mp4tags
_sed=/usr/local/opt/gnu-sed/libexec/gnubin/sed

# Pull the current "Long Description" tag out of the MP4 file passed as $1.
description=$($_mp4info "$1" | $_grep --color=never "^ Long Description:" | $_sed -r "s/^ Long Description: //")

# Clean-up real names after character names
description=$($_echo "$description" | $_sed -r "s/ \((guest star )?(\w+ )?\w+,? \w+\.?\)//g")
description=$($_echo "$description" | $_sed -r "s/ \((recurring guest star )?(\w+ )?\w+,? \w+\.?\)//g")
description=$($_echo "$description" | $_sed -r "s/ \((series star )?(\w+ )?\w+,? \w+\.?\)//g")

# Strip HTML from descriptions
# BUGFIX: the original pattern "<\/?([^>])>" only matched tags whose name is
# a single character (e.g. <b>, </i>); "[^>]*" removes tags of any length.
description=$($_echo "$description" | $_sed -r "s/<\/?[^>]*>//g")
description=$($_echo "$description" | $_sed -r "s/ & / and /g")

# Fancy double-quotes
description=$($_echo "$description" | $_sed -r "s/ \"/ “/g")
description=$($_echo "$description" | $_sed -r "s/\"/”/g")

# Fancy single-quotes/apostrophes
description=$($_echo "$description" | $_sed -r "s/([a-zA-Z])'([a-zA-Z])/\1’\2/g")

# Em-dash all the things
description=$($_echo "$description" | $_sed -r "s/([a-zA-Z])\s?---?\s?([a-zA-Z])/\1 — \2/g")

# Show the transformed description and let the user confirm before writing.
$_echo "DESCRIPTION for $1:"
$_echo " "
$_echo "$description"
$_echo "------------------------------------------------------------"
# BUGFIX: `read` is a shell builtin; invoking it through an external path
# (/usr/bin/read) runs in a child process and cannot pause this shell or set
# a variable in it. Use the builtin directly.
read -r -p "Press any key to continue, or press Control+C to cancel. " x

# Write the data back to the file
$_echo "Updating long description..."
$_mp4tags -longdesc "$description" "$1"
$_echo "Updating short description..."
$_mp4tags -description "$description" "$1"
$_echo "Removing some nuisance tags..."
$_mp4tags -r eE "$1"
|
#!/usr/bin/env bash
# Copyright 2020 The Knative Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Test-environment knobs, overridable from the caller's environment.
readonly SKIP_INITIALIZE=${SKIP_INITIALIZE:-false}
readonly LOCAL_DEVELOPMENT=${LOCAL_DEVELOPMENT:-false}
export REPLICAS=${REPLICAS:-3}
# Shared e2e helpers (fail_test, header, latest_version, ...) and the
# repo-local data/control plane build helpers and artifact names.
source $(pwd)/vendor/knative.dev/hack/e2e-tests.sh
source $(pwd)/hack/data-plane.sh
source $(pwd)/hack/control-plane.sh
source $(pwd)/hack/artifacts-env.sh
# If gcloud is not available make it a no-op, not an error.
which gcloud &>/dev/null || gcloud() { echo "[ignore-gcloud $*]" 1>&2; }
# Use GNU tools on macOS. Requires the 'grep' and 'gnu-sed' Homebrew formulae.
if [ "$(uname)" == "Darwin" ]; then
  sed=gsed
  grep=ggrep
fi
# Latest release. If user does not supply this as a flag, the latest tagged release on the current branch will be used.
readonly LATEST_RELEASE_VERSION=$(latest_version)
readonly PREVIOUS_RELEASE_URL="${PREVIOUS_RELEASE_URL:-"https://github.com/knative-sandbox/eventing-kafka-broker/releases/download/${LATEST_RELEASE_VERSION}"}"
readonly EVENTING_CONFIG=${EVENTING_CONFIG:-"./third_party/eventing-latest/"}
# Vendored eventing test images.
readonly VENDOR_EVENTING_TEST_IMAGES="vendor/knative.dev/eventing/test/test_images/"
# Prometheus-operator monitoring manifests, one directory per component.
export MONITORING_ARTIFACTS_PATH="manifests/monitoring/prometheus-operator"
export EVENTING_KAFKA_CONTROLLER_PROMETHEUS_OPERATOR_ARTIFACT_PATH="${MONITORING_ARTIFACTS_PATH}/controller"
export EVENTING_KAFKA_WEBHOOK_PROMETHEUS_OPERATOR_ARTIFACT_PATH="${MONITORING_ARTIFACTS_PATH}/webhook"
export EVENTING_KAFKA_SOURCE_PROMETHEUS_OPERATOR_ARTIFACT_PATH="${MONITORING_ARTIFACTS_PATH}/source"
export EVENTING_KAFKA_BROKER_PROMETHEUS_OPERATOR_ARTIFACT_PATH="${MONITORING_ARTIFACTS_PATH}/broker"
export EVENTING_KAFKA_SINK_PROMETHEUS_OPERATOR_ARTIFACT_PATH="${MONITORING_ARTIFACTS_PATH}/sink"
export EVENTING_KAFKA_CHANNEL_PROMETHEUS_OPERATOR_ARTIFACT_PATH="${MONITORING_ARTIFACTS_PATH}/channel"
# The number of control plane replicas to run.
# NOTE(review): REPLICAS was already exported above with a default of 3, so
# the ":-1" fallback here never applies — the readonly value is 3 (or the
# caller-provided value). Confirm which default is intended.
readonly REPLICAS=${REPLICAS:-1}
export SYSTEM_NAMESPACE="knative-eventing"
export CLUSTER_SUFFIX=${CLUSTER_SUFFIX:-"cluster.local"}
# Entry point called by the e2e framework to install Knative Eventing.
# The function's exit status is that of knative_eventing.
function knative_setup() {
  knative_eventing
}
# Deletes Knative Eventing: from the checked-out HEAD manifests when on a
# non-release branch, otherwise from the pinned release manifest.
function knative_teardown() {
  if ! is_release_branch; then
    echo ">> Delete Knative Eventing from HEAD"
    pushd .
    cd eventing || fail_test "Failed to set up Eventing"
    kubectl delete --ignore-not-found -f "${EVENTING_CONFIG}"
    popd || fail_test "Failed to set up Eventing"
  else
    echo ">> Delete Knative Eventing from ${KNATIVE_EVENTING_RELEASE}"
    kubectl delete --ignore-not-found -f "${KNATIVE_EVENTING_RELEASE}"
  fi
}
# Installs Knative Eventing (CRDs + core, or a pinned release), bumps the
# webhook HPA to the configured replica count, publishes the e2e test images
# and finally sets up Kafka.
function knative_eventing() {
  if ! is_release_branch; then
    echo ">> Install Knative Eventing from latest - ${EVENTING_CONFIG}"
    kubectl apply -f "${EVENTING_CONFIG}/eventing-crds.yaml"
    kubectl apply -f "${EVENTING_CONFIG}/eventing-core.yaml"
  else
    echo ">> Install Knative Eventing from ${KNATIVE_EVENTING_RELEASE}"
    kubectl apply -f "${KNATIVE_EVENTING_RELEASE}"
  fi
  # Best-effort: the leading "!" swallows a failure (e.g. when the HPA does
  # not exist) so the setup continues.
  ! kubectl patch horizontalpodautoscalers.autoscaling -n knative-eventing eventing-webhook -p '{"spec": {"minReplicas": '${REPLICAS}'}}'
  # Publish test images.
  echo ">> Publishing test images from eventing"
  ./test/upload-test-images.sh ${VENDOR_EVENTING_TEST_IMAGES} e2e || fail_test "Error uploading test images"
  # Publish test images from pkg.
  echo ">> Publishing test images from pkg"
  ./test/upload-test-images.sh "test/test_images" e2e || fail_test "Error uploading test images"
  kafka_setup
}
# Deploys the Strimzi-managed Kafka cluster used by the e2e tests.
function kafka_setup() {
  echo ">> Prepare to deploy Strimzi"
  ./test/kafka/kafka_setup.sh || fail_test "Failed to set up Kafka cluster"
}
# Rebuilds every release artifact (data plane, control plane, monitoring)
# from the current source tree, removing any stale artifact files first.
function build_components_from_source() {
  # Remove stale artifacts. "rm -f" succeeds whether or not the file exists,
  # replacing the noisier "[ -f x ] && rm x" test-and-remove chains, which
  # also leave a non-zero status behind when the file is absent.
  local artifact
  for artifact in \
    "${EVENTING_KAFKA_CONTROL_PLANE_ARTIFACT}" \
    "${EVENTING_KAFKA_SOURCE_ARTIFACT}" \
    "${EVENTING_KAFKA_BROKER_ARTIFACT}" \
    "${EVENTING_KAFKA_SINK_ARTIFACT}" \
    "${EVENTING_KAFKA_CHANNEL_ARTIFACT}" \
    "${EVENTING_KAFKA_POST_INSTALL_ARTIFACT}"; do
    [ -n "${artifact}" ] && rm -f "${artifact}"
  done

  header "Data plane setup"
  data_plane_setup || fail_test "Failed to set up data plane components"

  header "Control plane setup"
  control_plane_setup || fail_test "Failed to set up control plane components"

  header "Building Monitoring artifacts"
  build_monitoring_artifacts || fail_test "Failed to create monitoring artifacts"

  return $?
}
# Installs the most recently published release from PREVIOUS_RELEASE_URL,
# together with the local test configuration.
function install_latest_release() {
  echo "Installing latest release from ${PREVIOUS_RELEASE_URL}"
  ko apply -f ./test/config/ || fail_test "Failed to apply test configurations"
  kubectl apply -f "${PREVIOUS_RELEASE_URL}/${EVENTING_KAFKA_CONTROL_PLANE_ARTIFACT}" || return $?
  kubectl apply -f "${PREVIOUS_RELEASE_URL}/${EVENTING_KAFKA_BROKER_ARTIFACT}" || return $?
  kubectl apply -f "${PREVIOUS_RELEASE_URL}/${EVENTING_KAFKA_SINK_ARTIFACT}" || return $?
  kubectl apply -f "${PREVIOUS_RELEASE_URL}/${EVENTING_KAFKA_SOURCE_ARTIFACT}" || return $?
  kubectl apply -f "${PREVIOUS_RELEASE_URL}/${EVENTING_KAFKA_CHANNEL_ARTIFACT}" || return $?
}
# Installs the artifacts built from the current source tree (HEAD),
# including the post-install jobs.
function install_head() {
  echo "Installing head"
  kubectl apply -f "${EVENTING_KAFKA_CONTROL_PLANE_ARTIFACT}" || return $?
  kubectl apply -f "${EVENTING_KAFKA_SOURCE_ARTIFACT}" || return $?
  kubectl apply -f "${EVENTING_KAFKA_BROKER_ARTIFACT}" || return $?
  kubectl apply -f "${EVENTING_KAFKA_SINK_ARTIFACT}" || return $?
  kubectl apply -f "${EVENTING_KAFKA_CHANNEL_ARTIFACT}" || return $?
  kubectl apply -f "${EVENTING_KAFKA_POST_INSTALL_ARTIFACT}" || return $?
}
# Builds and installs everything from source, waits for the system to come
# up, applies the test configuration and restarts the data plane pods so
# they pick up the new configuration.
function test_setup() {
  build_components_from_source || return $?
  install_head || return $?
  wait_until_pods_running knative-eventing || fail_test "System did not come up"
  # Apply test configurations, and restart data plane components (we don't have hot reload)
  ko apply -f ./test/config/ || fail_test "Failed to apply test configurations"
  setup_kafka_channel_auth || fail_test "Failed to apply channel auth configuration ${EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO}"
  kubectl rollout restart deployment -n knative-eventing kafka-source-dispatcher
  kubectl rollout restart deployment -n knative-eventing kafka-broker-receiver
  kubectl rollout restart deployment -n knative-eventing kafka-broker-dispatcher
  kubectl rollout restart deployment -n knative-eventing kafka-sink-receiver
  kubectl rollout restart deployment -n knative-eventing kafka-channel-receiver
  kubectl rollout restart deployment -n knative-eventing kafka-channel-dispatcher
}
# Deletes every component installed by test_setup / install_head.
function test_teardown() {
  kubectl delete --ignore-not-found -f "${EVENTING_KAFKA_CONTROL_PLANE_ARTIFACT}" || fail_test "Failed to tear down control plane"
  kubectl delete --ignore-not-found -f "${EVENTING_KAFKA_BROKER_ARTIFACT}" || fail_test "Failed to tear down kafka broker"
  kubectl delete --ignore-not-found -f "${EVENTING_KAFKA_SINK_ARTIFACT}" || fail_test "Failed to tear down kafka sink"
  kubectl delete --ignore-not-found -f "${EVENTING_KAFKA_CHANNEL_ARTIFACT}" || fail_test "Failed to tear down kafka channel"
  kubectl delete --ignore-not-found -f "${EVENTING_KAFKA_SOURCE_ARTIFACT}" || fail_test "Failed to tear down kafka source"
}
# Bounces each named deployment to 0 replicas and back up to ${REPLICAS},
# so that every pod restarts in leader-elected mode for the HA tests.
function scale_controlplane() {
  for deployment in "$@"; do
    # Make sure all pods run in leader-elected mode.
    kubectl -n knative-eventing scale deployment "$deployment" --replicas=0 || fail_test "Failed to scale down to 0 ${deployment}"
    # Give it time to kill the pods.
    sleep 5
    # Scale up components for HA tests
    kubectl -n knative-eventing scale deployment "$deployment" --replicas="${REPLICAS}" || fail_test "Failed to scale up to ${REPLICAS} ${deployment}"
  done
}
# Applies the chaos-testing manifests; the ko exit status is returned as-is.
function apply_chaos() {
  ko apply -f ./test/config/chaos
}
# Removes the chaos-testing manifests applied by apply_chaos.
function delete_chaos() {
  kubectl delete --ignore-not-found -f ./test/config/chaos || return $?
}
# Deploys the sacura soak-test: namespace and broker first, waits for the
# broker to become ready (dumping its YAML on timeout), then the rest.
function apply_sacura() {
  ko apply -f ./test/config/sacura/0-namespace.yaml || return $?
  ko apply -f ./test/config/sacura/100-broker-config.yaml || return $?
  ko apply -f ./test/config/sacura/101-broker.yaml || return $?
  kubectl wait --for=condition=ready --timeout=3m -n sacura broker/broker || {
    local ret=$?
    # Dump the broker state to help debug why it never became ready.
    kubectl get -n sacura broker/broker -oyaml
    return ${ret}
  }
  ko apply -f ./test/config/sacura || return $?
}
# Removes the sacura soak-test resources in reverse order of creation.
function delete_sacura() {
  kubectl delete --ignore-not-found -f ./test/config/sacura/101-broker.yaml || return $?
  kubectl delete --ignore-not-found -f ./test/config/sacura/100-broker-config.yaml || return $?
  kubectl delete --ignore-not-found -f ./test/config/sacura/0-namespace.yaml || return $?
}
# Tails the logs of every data/control plane deployment in the background,
# writing each stream to $ARTIFACTS/<namespace>/<deployment> for later
# inspection by CI.
function export_logs_continuously() {
  labels=("kafka-broker-dispatcher" "kafka-broker-receiver" "kafka-sink-receiver" "kafka-channel-receiver" "kafka-channel-dispatcher" "kafka-source-dispatcher" "kafka-webhook-eventing" "kafka-controller")

  mkdir -p "$ARTIFACTS/${SYSTEM_NAMESPACE}"

  for deployment in "${labels[@]}"; do
    # BUGFIX: quote ${SYSTEM_NAMESPACE} so word splitting cannot break the
    # "-n" argument. Each tail runs in the background ("&").
    kubectl logs -n "${SYSTEM_NAMESPACE}" -f -l=app="$deployment" >"$ARTIFACTS/${SYSTEM_NAMESPACE}/$deployment" 2>&1 &
  done
}
# Copy our release artifacts into artifacts, so that release artifacts of a PR can be tested and reviewed without
# building the project from source.
function save_release_artifacts() {
  cp "${EVENTING_KAFKA_BROKER_ARTIFACT}" "${ARTIFACTS}/${EVENTING_KAFKA_BROKER_ARTIFACT}" || return $?
  cp "${EVENTING_KAFKA_BROKER_PROMETHEUS_OPERATOR_ARTIFACT}" "${ARTIFACTS}/${EVENTING_KAFKA_BROKER_PROMETHEUS_OPERATOR_ARTIFACT}" || return $?
  cp "${EVENTING_KAFKA_SOURCE_ARTIFACT}" "${ARTIFACTS}/${EVENTING_KAFKA_SOURCE_ARTIFACT}" || return $?
  cp "${EVENTING_KAFKA_SOURCE_PROMETHEUS_OPERATOR_ARTIFACT}" "${ARTIFACTS}/${EVENTING_KAFKA_SOURCE_PROMETHEUS_OPERATOR_ARTIFACT}" || return $?
  cp "${EVENTING_KAFKA_SINK_ARTIFACT}" "${ARTIFACTS}/${EVENTING_KAFKA_SINK_ARTIFACT}" || return $?
  cp "${EVENTING_KAFKA_SINK_PROMETHEUS_OPERATOR_ARTIFACT}" "${ARTIFACTS}/${EVENTING_KAFKA_SINK_PROMETHEUS_OPERATOR_ARTIFACT}" || return $?
  cp "${EVENTING_KAFKA_CHANNEL_ARTIFACT}" "${ARTIFACTS}/${EVENTING_KAFKA_CHANNEL_ARTIFACT}" || return $?
  cp "${EVENTING_KAFKA_CHANNEL_PROMETHEUS_OPERATOR_ARTIFACT}" "${ARTIFACTS}/${EVENTING_KAFKA_CHANNEL_PROMETHEUS_OPERATOR_ARTIFACT}" || return $?
  cp "${EVENTING_KAFKA_CONTROL_PLANE_ARTIFACT}" "${ARTIFACTS}/${EVENTING_KAFKA_CONTROL_PLANE_ARTIFACT}" || return $?
  cp "${EVENTING_KAFKA_CONTROL_PLANE_PROMETHEUS_OPERATOR_ARTIFACT}" "${ARTIFACTS}/${EVENTING_KAFKA_CONTROL_PLANE_PROMETHEUS_OPERATOR_ARTIFACT}" || return $?
}
# Resolves the prometheus-operator monitoring manifests with ko, labels them
# and writes one artifact per component (control plane combines controller
# and webhook).
function build_monitoring_artifacts() {
  ko resolve ${KO_FLAGS} \
    -Rf "${EVENTING_KAFKA_CONTROLLER_PROMETHEUS_OPERATOR_ARTIFACT_PATH}" \
    -Rf "${EVENTING_KAFKA_WEBHOOK_PROMETHEUS_OPERATOR_ARTIFACT_PATH}" |
    "${LABEL_YAML_CMD[@]}" >"${EVENTING_KAFKA_CONTROL_PLANE_PROMETHEUS_OPERATOR_ARTIFACT}" || return $?
  ko resolve ${KO_FLAGS} -Rf "${EVENTING_KAFKA_BROKER_PROMETHEUS_OPERATOR_ARTIFACT_PATH}" |
    "${LABEL_YAML_CMD[@]}" >"${EVENTING_KAFKA_BROKER_PROMETHEUS_OPERATOR_ARTIFACT}" || return $?
  ko resolve ${KO_FLAGS} -Rf "${EVENTING_KAFKA_SOURCE_PROMETHEUS_OPERATOR_ARTIFACT_PATH}" |
    "${LABEL_YAML_CMD[@]}" >"${EVENTING_KAFKA_SOURCE_PROMETHEUS_OPERATOR_ARTIFACT}" || return $?
  ko resolve ${KO_FLAGS} -Rf "${EVENTING_KAFKA_SINK_PROMETHEUS_OPERATOR_ARTIFACT_PATH}" |
    "${LABEL_YAML_CMD[@]}" >"${EVENTING_KAFKA_SINK_PROMETHEUS_OPERATOR_ARTIFACT}" || return $?
  ko resolve ${KO_FLAGS} -Rf "${EVENTING_KAFKA_CHANNEL_PROMETHEUS_OPERATOR_ARTIFACT_PATH}" |
    "${LABEL_YAML_CMD[@]}" >"${EVENTING_KAFKA_CHANNEL_PROMETHEUS_OPERATOR_ARTIFACT}" || return $?
}
# Points the KafkaChannel config map at the bootstrap server / secret that
# matches the requested auth scenario (SSL, SASL_SSL, SASL_PLAIN, or none).
function setup_kafka_channel_auth() {
  echo "Apply channel auth config ${EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO}"

  if [ "$EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO" == "SSL" ]; then
    echo "Setting up SSL configuration for KafkaChannel"
    kubectl patch configmap/kafka-channel-config \
      -n knative-eventing \
      --type merge \
      -p '{"data":{"bootstrap.servers":"my-cluster-kafka-bootstrap.kafka:9093", "auth.secret.ref.name": "strimzi-tls-secret", "auth.secret.ref.namespace": "knative-eventing"}}'
  elif [ "$EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO" == "SASL_SSL" ]; then
    echo "Setting up SASL_SSL configuration for KafkaChannel"
    kubectl patch configmap/kafka-channel-config \
      -n knative-eventing \
      --type merge \
      -p '{"data":{"bootstrap.servers":"my-cluster-kafka-bootstrap.kafka:9094", "auth.secret.ref.name": "strimzi-sasl-secret", "auth.secret.ref.namespace": "knative-eventing"}}'
  elif [ "$EVENTING_KAFKA_BROKER_CHANNEL_AUTH_SCENARIO" == "SASL_PLAIN" ]; then
    echo "Setting up SASL_PLAIN configuration for KafkaChannel"
    kubectl patch configmap/kafka-channel-config \
      -n knative-eventing \
      --type merge \
      -p '{"data":{"bootstrap.servers":"my-cluster-kafka-bootstrap.kafka:9095", "auth.secret.ref.name": "strimzi-sasl-plain-secret", "auth.secret.ref.namespace": "knative-eventing"}}'
  else
    echo "Setting up no auth configuration for KafkaChannel"
    kubectl patch configmap/kafka-channel-config \
      -n knative-eventing \
      --type merge \
      -p '{"data":{"bootstrap.servers":"my-cluster-kafka-bootstrap.kafka:9092"}}'
    # Drop any stale secret references; "|| true" because the keys may not
    # exist, which makes the JSON-patch "remove" op fail.
    kubectl patch configmap/kafka-channel-config \
      -n knative-eventing \
      --type=json \
      -p='[{"op": "remove", "path": "/data/auth.secret.ref.name"}, {"op": "remove", "path": "/data/auth.secret.ref.namespace"}]' || true
  fi
}
|
-- Age of each user who registered within the last month.
-- BUGFIX: YEAR(NOW()) - YEAR(birthday) overstates the age for users whose
-- birthday has not yet occurred this calendar year; TIMESTAMPDIFF(YEAR, ...)
-- counts only fully elapsed years.
SELECT birthday,
       TIMESTAMPDIFF(YEAR, birthday, NOW()) AS age
FROM users
WHERE registration_date BETWEEN DATE_SUB(NOW(), INTERVAL 1 MONTH) AND NOW();
<gh_stars>0
// Register ts-node so that e2e spec files written in TypeScript can be
// executed directly, compiled with the dedicated e2e tsconfig.
require('ts-node').register({
  project: './e2e/tsconfig.e2e.json'
});
|
// Express-style middleware that records one hit per request, keyed by the
// current UTC calendar day.
// NOTE(review): the day key is "YYYY-M-D" — month and day are NOT
// zero-padded, so keys sort lexicographically out of order; confirm whether
// downstream consumers rely on this format.
const requestLogger = (req, res, next) => {
  const now = new Date();
  const day = [
    now.getUTCFullYear(),
    now.getUTCMonth() + 1,
    now.getUTCDate(),
  ].join('-');
  // Persist the hit; a logger is expected to be attached to the request.
  req.logger.log({ day, requestCount: 1 });
  next();
};

module.exports = requestLogger;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.github.jipsg.twelvemonkeys;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
import javax.imageio.ImageIO;
import javax.imageio.ImageReader;
import javax.imageio.stream.ImageInputStream;
import java.awt.image.BufferedImage;
import java.io.File;
import java.util.*;
import static org.junit.Assert.assertEquals;
/**
* Load various images.
*/
/**
 * Load various images through ImageIO with the TwelveMonkeys plugins on the
 * classpath, and document which TIFF compression types currently work.
 */
public class ImageLoadTwelveMonkeysTest extends BaseTwelveMonkeysTest {

    @Before
    public void setup() {
        super.setup();
    }

    // ======================================================================
    // General
    // ======================================================================

    /**
     * List available image formats.
     *
     * see http://examples.javacodegeeks.com/desktop-java/imageio/list-read-write-supported-image-formats/
     */
    @Test
    public void testListSupportedImageFormats() throws Exception {
        // The four registries all produce String[]; collect each into a
        // case-insensitive set via the shared helper below.
        System.out.println("Supported read formats: " + toLowerCaseSet(ImageIO.getReaderFormatNames()));
        System.out.println("Supported write formats: " + toLowerCaseSet(ImageIO.getWriterFormatNames()));
        System.out.println("Supported read MIME types: " + toLowerCaseSet(ImageIO.getReaderMIMETypes()));
        System.out.println("Supported write MIME types: " + toLowerCaseSet(ImageIO.getWriterMIMETypes()));
    }

    /**
     * Collects the given names into a set of lower-case strings, removing
     * duplicates that differ only in case.
     */
    private static Set<String> toLowerCaseSet(String[] names) {
        Set<String> set = new HashSet<String>();
        for (String name : names) {
            set.add(name.toLowerCase());
        }
        return set;
    }

    // ======================================================================
    // Load various image formats
    // ======================================================================

    /**
     * Loads one sample image per common format and checks that each decodes
     * into a valid BufferedImage.
     */
    @Test
    public void testLoadVariousImageFormats() throws Exception {
        List<File> sourceImageFileList = new ArrayList<File>();
        sourceImageFileList.add(getImageFile("jpg", "marble.jpg"));
        sourceImageFileList.add(getImageFile("png", "marble.png"));
        sourceImageFileList.add(getImageFile("tiff", "marble.tiff"));
        sourceImageFileList.add(getImageFile("gif", "marble.gif"));
        for (File sourceImageFile : sourceImageFileList) {
            BufferedImage bufferedImage = createBufferedImage(sourceImageFile);
            assertValidBufferedImage(bufferedImage);
        }
    }

    // ======================================================================
    // JPEG
    // ======================================================================

    /**
     * Plain-vanilla JPEG
     */
    @Test
    public void testLoadJPEGImage() throws Exception {
        assertValidBufferedImage(createBufferedImage(getImageFile("jpg", "test-image-rgb-01.jpg")));
    }

    /**
     * CMYK color model is supported.
     */
    @Test
    public void testLoadCMYKImage() throws Exception {
        assertValidBufferedImage(createBufferedImage(getImageFile("jpg", "test-image-cmyk-uncompressed.jpg")));
    }

    // ======================================================================
    // TIFF
    // ======================================================================

    /**
     * Load a TIFF image with compression 2.
     * Fails with "ArrayIndexOutOfBoundsException"
     */
    @Test
    public void testLoadTiffGrayWithCompression2() throws Exception {
        assertValidBufferedImage(createBufferedImage(getImageFile("tiff", "test-single-gray-compression-type-2.tiff")));
    }

    /**
     * Load a TIFF image with compression 3.
     * Fails with "javax.imageio.IIOException: Unsupported TIFF Compression value: 3"
     */
    @Test
    @Ignore
    public void testLoadTiffWithCompression3() throws Exception {
        assertValidBufferedImage(createBufferedImage(getImageFile("tiff", "test-single-gray-compression-type-3.tiff")));
    }

    /**
     * Load a TIFF image with compression 4.
     * Fails with "javax.imageio.IIOException: Unsupported TIFF Compression value: 4"
     */
    @Test
    @Ignore
    public void testLoadTiffWithCompression4() throws Exception {
        assertValidBufferedImage(createBufferedImage(getImageFile("tiff", "test-single-gray-compression-type-4.tiff")));
    }

    /**
     * Load a multi-page TIFF.
     * Fails with a "javax.imageio.IIOException: Unsupported TIFF Compression value: 4"
     */
    @Test
    @Ignore
    public void testLoadTiffMultiPageGray() throws Exception {
        assertValidBufferedImage(createBufferedImage(getImageFile("tiff", "test-multi-gray-compression-type-4.tiff")));
    }

    /**
     * Load a TIFF image with compression type 7 (JPEG).
     */
    @Test
    public void testLoadTiffMultiRgbCompression7() throws Exception {
        assertValidBufferedImage(createBufferedImage(getImageFile("tiff", "test-multi-rgb-compression-type-7.tiff")));
    }

    /**
     * Load a TIFF image with compression LZW.
     */
    @Test
    public void testLoadTiffSingleCmykCompressionLzw() throws Exception {
        assertValidBufferedImage(createBufferedImage(getImageFile("tiff", "test-single-cmyk-compression-lzw.tiff")));
    }

    // ======================================================================
    // Multi-page TIFF extraction
    // ======================================================================

    /**
     * Load a multi-page TIFF image and split it into its individual pages.
     * CONSISTENCY FIX: this method had @Ignore but, unlike every sibling,
     * no @Test annotation, so JUnit silently skipped it without even
     * reporting it as ignored. @Test added to match the other tests.
     */
    @Test
    @Ignore
    public void testExtractPagesFromMultiPageTiffCompression4() throws Exception {
        File sourceImageFile = getImageFile("tiff", "test-multi-gray-compression-type-4.tiff");
        ImageInputStream is = ImageIO.createImageInputStream(sourceImageFile);
        // get the first matching reader
        Iterator<ImageReader> iterator = ImageIO.getImageReaders(is);
        ImageReader imageReader = iterator.next();
        imageReader.setInput(is);
        // split the multi-page TIFF
        int pages = imageReader.getNumImages(true);
        for (int i = 0; i < pages; i++) {
            BufferedImage bufferedImage = imageReader.read(i);
            assertValidBufferedImage(bufferedImage);
        }
        assertEquals("Expect to have 2 pages", 2, pages);
    }

    /**
     * Load a multi-page TIFF image and split it into its individual pages.
     */
    @Test
    public void testExtractPagesFromMultiPageTiffCompression7() throws Exception {
        File sourceImageFile = getImageFile("tiff", "test-multi-rgb-compression-type-7.tiff");
        ImageInputStream is = ImageIO.createImageInputStream(sourceImageFile);
        // get the first matching reader
        Iterator<ImageReader> iterator = ImageIO.getImageReaders(is);
        ImageReader imageReader = iterator.next();
        imageReader.setInput(is);
        // split the multi-page TIFF
        int pages = imageReader.getNumImages(true);
        for (int i = 0; i < pages; i++) {
            BufferedImage bufferedImage = imageReader.read(i);
            assertValidBufferedImage(bufferedImage);
        }
        assertEquals("Expect to have 10 pages", 10, pages);
    }
}
|
// Application configuration assembled from environment variables.
export const config = {
  secrets: {
    // NOTE(review): '<PASSWORD>' looks like a redacted placeholder — the real
    // JWT secret should be injected from the environment, not committed.
    jwt: '<PASSWORD>'
  },
  // MongoDB Atlas connection string built from MONGO_USER / MONGO_PASSWORD /
  // MONGO_DB. NOTE(review): '<EMAIL>' appears to be a redacted cluster
  // hostname — confirm the real value.
  dbUrl: `mongodb+srv://${process.env.MONGO_USER}:${process.env.MONGO_PASSWORD}@<EMAIL>/${process.env.MONGO_DB}?retryWrites=true&w=majority`
}
|
/////////////////////////////////////////////////////////////////////////////
// Name: src/qt/apptraits.cpp
// Author: <NAME>, <NAME>, <NAME>
// Copyright: (c) 2010 wxWidgets dev team
// Licence: wxWindows licence
/////////////////////////////////////////////////////////////////////////////
// For compilers that support precompilation, includes "wx.h".
#include "wx/wxprec.h"
#include "wx/apptrait.h"
#include "wx/stdpaths.h"
#include "wx/evtloop.h"
#include "wx/timer.h"
#include "wx/qt/private/timer.h"
#include <QtCore/QtGlobal>
// Creates the event-loop object used by the Qt port of wxWidgets.
// The caller takes ownership of the returned heap-allocated loop.
wxEventLoopBase *wxGUIAppTraits::CreateEventLoop()
{
    return new wxEventLoop();
}
#if wxUSE_TIMER
// Creates the Qt-backed timer implementation driving the given wxTimer.
// Only compiled when timer support is enabled; caller owns the result.
wxTimerImpl *wxGUIAppTraits::CreateTimerImpl(wxTimer *timer)
{
    return new wxQtTimerImpl( timer );
}
#endif
// #if wxUSE_THREADS
// void wxGUIAppTraits::MutexGuiEnter()
// {
// }
//
// void wxGUIAppTraits::MutexGuiLeave()
// {
// }
// #endif
// Reports the version of the underlying Qt toolkit.
// Each output pointer may be NULL when the caller is not interested in it.
wxPortId wxGUIAppTraits::GetToolkitVersion(int *majVer,
                                           int *minVer,
                                           int *microVer) const
{
    // QT_VERSION packs the version as 0xMMNNPP (major, minor, patch).
    const int qtMajor = (QT_VERSION >> 16) & 0xFF;
    const int qtMinor = (QT_VERSION >> 8) & 0xFF;
    const int qtMicro = QT_VERSION & 0xFF;

    if ( majVer )
        *majVer = qtMajor;
    if ( minVer )
        *minVer = qtMinor;
    if ( microVer )
        *microVer = qtMicro;

    return wxPORT_QT;
}
|
package restauthserver;
import restauthshared.dto.LoginResultDto;
import restauthshared.dto.RegistrationResultDto;
import fontysmultipurposelibrary.communication.rest.dto.BaseResultDto;
import fontysmultipurposelibrary.serialization.ISerializer;
import fontysmultipurposelibrary.serialization.SerializationProvider;
/**
 * Builds the serialized JSON response bodies returned by the REST
 * authentication server. Static-only helper; never instantiated.
 */
public class ResponseHelper {

    // Utility class: hide the constructor.
    private ResponseHelper(){}

    /**
     * @return a serialized {@code BaseResultDto} whose success flag is
     *         {@code false}, used as the generic failure response
     */
    public static String getErrorResponseString()
    {
        BaseResultDto result = new BaseResultDto();
        result.setSuccess(false);

        ISerializer<String> serializer = SerializationProvider.getSerializer();
        return serializer.serialize(result);
    }

    /**
     * @param token the session token issued for the authenticated user
     * @return a serialized successful {@code LoginResultDto} carrying the token
     */
    public static String getLoginResultDtoResponseString(String token)
    {
        LoginResultDto result = new LoginResultDto();
        result.setSuccess(true);
        result.setToken(token);

        ISerializer<String> serializer = SerializationProvider.getSerializer();
        return serializer.serialize(result);
    }

    /**
     * @param success whether the registration attempt succeeded
     * @return a serialized {@code RegistrationResultDto} with the given flag
     */
    public static String getRegistrationResultDtoResponseString(boolean success)
    {
        RegistrationResultDto result = new RegistrationResultDto();
        result.setSuccess(success);

        ISerializer<String> serializer = SerializationProvider.getSerializer();
        return serializer.serialize(result);
    }
}
|
#!/bin/bash
# Fail fast: abort on command errors, unset variables, and pipeline failures.
set -e
set -u
set -o pipefail
# Repository root (the parent of the scripts/ directory containing this file).
readonly ROOT_DIR="$(cd "$(dirname "${0}")/.." && pwd)"
readonly BIN_DIR="${ROOT_DIR}/.bin"
readonly BUILD_DIR="${ROOT_DIR}/build"
# shellcheck source=SCRIPTDIR/.util/tools.sh
source "${ROOT_DIR}/scripts/.util/tools.sh"
# shellcheck source=SCRIPTDIR/.util/print.sh
source "${ROOT_DIR}/scripts/.util/print.sh"
# Entry point. Parses CLI flags, prepares the repository layout, then
# packages the buildpack into a buildpackage .cnb file.
function main {
  local version output

  # Parse arguments; flags that take values consume two positions.
  while [[ "${#}" != 0 ]]; do
    case "${1}" in
      --version|-v)
        version="${2}"
        shift 2
        ;;
      --output|-o)
        output="${2}"
        shift 2
        ;;
      --help|-h)
        shift 1
        usage
        exit 0
        ;;
      "")
        # skip if the argument is empty
        shift 1
        ;;
      *)
        # util::print::error is expected to terminate the script here.
        util::print::error "unknown argument \"${1}\""
    esac
  done

  # --version is mandatory; every other option has a default.
  if [[ -z "${version:-}" ]]; then
    usage
    echo
    util::print::error "--version is required"
  fi

  if [[ -z "${output:-}" ]]; then
    output="${BUILD_DIR}/buildpackage.cnb"
  fi

  repo::prepare

  util::tools::pack::install --directory "${BIN_DIR}"

  buildpack::archive "${version}"
  buildpackage::create "${output}"
}
# Prints command usage to stdout.
function usage() {
  cat <<-USAGE
package.sh --version <version> [OPTIONS]
Packages the buildpack into a buildpackage .cnb file.
OPTIONS
--help -h prints the command usage
--version <version> -v <version> specifies the version number to use when packaging the buildpack
--output <output> -o <output> location to output the packaged buildpackage artifact (default: ${ROOT_DIR}/build/buildpackage.cnb)
USAGE
}
# Wipes any previous build output, ensures the tool and build directories
# exist, and puts the local tool directory on PATH.
function repo::prepare() {
  util::print::title "Preparing repo..."

  rm -rf "${BUILD_DIR}"
  mkdir -p "${BIN_DIR}" "${BUILD_DIR}"

  export PATH="${BIN_DIR}:${PATH}"
}
# Packages the buildpack source into an offline buildpack.tgz using jam.
# $1 - version string embedded into the archive.
function buildpack::archive() {
  local version="${1}"

  util::print::title "Packaging buildpack into ${BUILD_DIR}/buildpack.tgz..."

  util::tools::jam::install --directory "${BIN_DIR}"

  jam pack \
    --buildpack "${ROOT_DIR}/buildpack.toml" \
    --version "${version}" \
    --offline \
    --output "${BUILD_DIR}/buildpack.tgz"
}
# Wraps the archived buildpack into a .cnb buildpackage file via pack.
# $1 - destination path of the packaged artifact.
function buildpackage::create() {
  local output="${1}"

  util::print::title "Packaging buildpack..."

  pack \
    package-buildpack "${output}" \
    --config "${ROOT_DIR}/package.toml" \
    --format file
}

main "${@:-}"
|
#!/bin/bash
# Registers a series of test masters against the local service, pausing
# three seconds between consecutive requests.
payloads=(
  '{"PK_Master":"1","gpsLatittude":10, "gpsLongtitude":10}'
  '{"PK_Master":"2","gpsLatittude":60, "gpsLongtitude":0}'
  '{"PK_Master":"3","gpsLatittude":530, "gpsLongtitude":3210}'
  '{"PK_Master":"4","gpsLatittude":2310, "gpsLongtitude":10}'
  '{"PK_Master":"5","gpsLatittude":66660, "gpsLongtitude":200}'
  '{"PK_Master":"6","gpsLatittude":3210, "gpsLongtitude":2230}'
  '{"PK_Master":"7","gpsLatittude":2310, "gpsLongtitude":5650}'
  '{"PK_Master":"8","gpsLatittude":60, "gpsLongtitude":70}'
  '{"PK_Master":"9","gpsLatittude":40, "gpsLongtitude":40}'
)

for i in "${!payloads[@]}"; do
  # Throttle between requests; no delay before the first or after the last.
  if (( i > 0 )); then
    sleep 3
  fi
  curl -k --request POST --data "${payloads[$i]}" https://localhost/register
  echo ""
done
|
<reponame>JimBae/pythonTips<filename>05_set.py
import os
import sys

# A set keeps each element exactly once, so converting a list to a set
# drops duplicates.
someList = ['a', 'a', 'b', 'c', 'd', 'e', 'e']
someSet = set(someList)
print(someSet)

# Brute-force duplicate detection: O(n^2) because count() rescans the list.
duplicateList = []
for value in someList:
    if someList.count(value) > 1 and value not in duplicateList:
        duplicateList.append(value)
print(duplicateList)

# The same idea expressed with a set comprehension.
someList = ['a', 'a', 'b', 'c', 'd', 'e', 'e']
duplicateSet = {x for x in someList if someList.count(x) > 1}
print(list(duplicateSet))

# -------------
# set methods
# -------------
# intersection: elements present in both sets
valid = set(['yellow', 'red', 'blue', 'green', 'black'])
inSet = set(['red', 'brown'])
print(inSet.intersection(valid))

# difference: elements of inSet that are not in valid
valid = set(['yellow', 'red', 'blue', 'green', 'black'])
inSet = set(['red', 'brown'])
print(inSet.difference(valid))
|
#!/usr/bin/env bash
# Pulls in shared slack helpers (e.g. slack_link).
source ./.build-scripts/sources/slack.sh
# Defaults; ACTOR and SLACK_CHANNEL can be overridden by CLI flags below.
ACTOR=${GITHUB_ACTOR}
SLACK_CHANNEL='#cloud-native-directory'
github_workspace=${GITHUB_WORKSPACE:-.}
# Directory holding the notification templates (e.g. canvas.json).
step_scripts=${github_workspace}/.github/steps/deploy
# Prints usage information and option descriptions to stdout.
function print_help {
cat <<EOF
Use: get_slack_notification.sh [--debug --help]
Options:
-b, --block The notification block to get. (default: canvas)
-v, --version The version being deployed (e.g., 1.2.3)
-s, --stage The stage being deployed to (e.g., dev)
-q, --qualifier An optional qualifier for the deployment (e.g., 'DRY-RUN', 'RFC-1234')
REQUIRED if deploying to prod.
-a, --actor An optional actor for the deployment; REQUIRED if deploying to prod.
(e.g., '@husky')
-c, --channel The slack channel to send notifications to
-h, --help Show this message and exit
-g, --debug Show commands as they are executing
EOF
}
# Block rendered when --block is not supplied.
BLOCK=canvas

# Parse command-line options; each value-taking flag consumes its value via
# an extra shift inside its case branch.
while (( $# ))
do
  case $1 in
    --block|-b)
      shift
      BLOCK="$1"
      ;;
    --version|-v)
      shift
      VERSION="$1"
      ;;
    --stage|-s)
      shift
      STAGE="$1"
      ;;
    --actor|-a)
      shift
      ACTOR="$1"
      ;;
    --qualifier|-q)
      shift
      QUALIFIER="$1"
      ;;
    --channel|-c)
      shift
      SLACK_CHANNEL="$1"
      ;;
    --help|-h)
      print_help
      exit 0
      ;;
    --debug|-g)
      # Trace every subsequent command.
      set -x
      ;;
    *)
      echo "Invalid Option: $1"
      print_help
      exit 1
      ;;
  esac
  shift
done
# replace_template TEMPLATE KEY VALUE
# Replaces every occurrence of the literal token "${{ KEY }}" in TEMPLATE
# with VALUE and prints the result.
# Uses bash parameter expansion instead of sed so that VALUE may safely
# contain characters that are special to sed, such as '|', '&', or '\'.
function replace_template {
  local template="$1" key="$2" value="$3"
  local needle="\${{ ${key} }}"
  echo "${template//"${needle}"/${value}}"
}
# Renders the canvas notification block by filling the canvas.json template
# with the current deployment values, then prints the result.
function build_canvas {
  # The echo/cat construct reduces the output to a single
  # line, making it easier to pass around and work with.
  template="$(echo $(cat "${step_scripts}/canvas.json"))"
  # Quote each substitution value: previously they were unquoted, so an
  # empty value was dropped entirely and a multi-word value (e.g. a
  # qualifier of "DRY-RUN 2") was word-split into extra arguments.
  template=$(replace_template "$template" stage "${STAGE:-}")
  template=$(replace_template "$template" version "${VERSION:-}")
  template=$(replace_template "$template" qualifier "${QUALIFIER:-}")
  template=$(replace_template "$template" slack_channel "${SLACK_CHANNEL}")
  echo "$template"
}
# Builds the context line for the notification: links the actor's GitHub
# profile and the workflow run, plus the trigger event and timestamp.
# Relies on GITHUB_* environment variables provided by GitHub Actions and
# on slack_link from the sourced slack.sh.
function build_context_artifact {
  actor_link=$(slack_link "https://github.com/$ACTOR" "@${ACTOR}")
  execution_link=$(slack_link "https://github.com/${GITHUB_REPOSITORY}/actions/runs/${GITHUB_RUN_ID}" execution)
  echo "Triggered by ${GITHUB_EVENT_NAME} from $actor_link ($execution_link) on $(date)"
}
# Dispatch to the builder for the requested block, e.g. build_canvas.
function_name="build_${BLOCK}"
$function_name
|
#!/bin/bash
set -ex
# Default parameters for nightly builds to be sourced both by build_cron.sh and
# by the build_docker.sh and wheel/build_wheel.sh scripts.
echo "nightly_defaults.sh at $(pwd) starting at $(date) on $(uname -a) with pid $$"
# NIGHTLIES_FOLDER
# N.B. this is also defined in cron_start.sh
# An arbitrary root folder to store all nightlies folders, each of which is a
# parent level date folder with separate subdirs for logs, wheels, conda
# packages, etc. This should be kept the same across all scripts called in a
# cron job, so it only has a default value in the top-most script
# build_cron.sh to avoid the default values from diverging.
if [[ -z "$NIGHTLIES_FOLDER" ]]; then
if [[ "$(uname)" == 'Darwin' ]]; then
export NIGHTLIES_FOLDER='/Users/administrator/nightlies/'
else
export NIGHTLIES_FOLDER='/scratch/hellemn/nightlies'
fi
fi
# NIGHTLIES_DATE
# N.B. this is also defined in cron_start.sh
# The date in YYYY_mm_dd format that we are building for. If this is not
# already set, then this will first try to find the date of the nightlies
# folder that this builder repo exists in; e.g. if this script exists in
# some_dir/2019_09_04/builder/cron/ then this will be set to 2019_09_04 (must
# match YYYY_mm_dd). This is for convenience when debugging/uploading past
# dates, so that you don't have to set NIGHTLIES_DATE yourself. If a date
# folder cannot be found in that exact location, then this will default to
# the current date.
if [[ -z "$NIGHTLIES_DATE" ]]; then
set +e
_existing_nightlies_date="$(basename $(cd $(dirname $0)/../.. && pwd) | grep -o '[0-9][0-9][0-9][0-9]_[0-9][0-9]_[0-9][0-9]')"
set -e
if [[ -n "$_existing_nightlies_date" ]]; then
export NIGHTLIES_DATE="$_existing_nightlies_date"
else
export NIGHTLIES_DATE="$(date +%Y_%m_%d)"
fi
fi
# Used in lots of places as the root dir to store all conda/wheel/manywheel
# packages as well as logs for the day
export today="$NIGHTLIES_FOLDER/$NIGHTLIES_DATE"
mkdir -p "$today" || true
##############################################################################
# Add new configuration variables below this line. 'today' should always be
# defined ASAP to avoid weird errors
##############################################################################
# List of people to email when things go wrong. This is passed directly to
# `mail -t`
export NIGHTLIES_EMAIL_LIST='hellemn@fb.com'
# PYTORCH_CREDENTIALS_FILE
# A bash file that exports credentials needed to upload to aws and anaconda.
# Needed variables are PYTORCH_ANACONDA_USERNAME, PYTORCH_ANACONDA_PASSWORD,
# AWS_ACCESS_KEY_ID, and AWS_SECRET_ACCESS_KEY. Or it can just export the AWS
# keys and then prepend a logged-in conda installation to the path.
if [[ -z "$PYTORCH_CREDENTIALS_FILE" ]]; then
if [[ "$(uname)" == 'Darwin' ]]; then
export PYTORCH_CREDENTIALS_FILE='/Users/administrator/nightlies/credentials.sh'
else
export PYTORCH_CREDENTIALS_FILE='/private/home/hellemn/nightly_credentials.sh'
fi
fi
# Location of the temporary miniconda that is downloaded to install conda-build
# and aws to upload finished packages TODO this is messy to install this in
# upload.sh and later use it in upload_logs.sh
CONDA_UPLOADER_INSTALLATION="${today}/miniconda"
# N.B. BUILDER_REPO and BUILDER_BRANCH are both set in cron_start.sh, as that
# is the script that actually clones the builder repo that /this/ script is
# running from.
export NIGHTLIES_BUILDER_ROOT="$(cd $(dirname $0)/.. && pwd)"
# The shared pytorch repo to be used by all builds
export NIGHTLIES_PYTORCH_ROOT="${today}/pytorch"
# PYTORCH_REPO
# The Github org/user whose fork of Pytorch to check out (git clone
# https://github.com/<THIS_PART>/pytorch.git). This will always be cloned
# fresh to build with. Default is 'pytorch'
if [[ -z "$PYTORCH_REPO" ]]; then
export PYTORCH_REPO='pytorch'
fi
# PYTORCH_BRANCH
# The branch of Pytorch to checkout for building (git checkout <THIS_PART>).
# This can either be the name of the branch (e.g. git checkout
# my_branch_name) or can be a git commit (git checkout 4b2674n...). Default
# is 'latest', which is a special term that signals to pull the last commit
# before 0:00 midnight on the NIGHTLIES_DATE
if [[ -z "$PYTORCH_BRANCH" ]]; then
export PYTORCH_BRANCH='latest'
fi
# Clone the requested pytorch checkout
if [[ ! -d "$NIGHTLIES_PYTORCH_ROOT" ]]; then
git clone --recursive "https://github.com/${PYTORCH_REPO}/pytorch.git" "$NIGHTLIES_PYTORCH_ROOT"
pushd "$NIGHTLIES_PYTORCH_ROOT"
# Switch to the latest commit by 11:59 yesterday
if [[ "$PYTORCH_BRANCH" == 'latest' ]]; then
echo "PYTORCH_BRANCH is set to latest so I will find the last commit"
echo "before 0:00 midnight on $NIGHTLIES_DATE"
git_date="$(echo $NIGHTLIES_DATE | tr '_' '-')"
last_commit="$(git log --before $git_date -n 1 | perl -lne 'print $1 if /^commit (\w+)/')"
echo "Setting PYTORCH_BRANCH to $last_commit since that was the last"
echo "commit before $NIGHTLIES_DATE"
export PYTORCH_BRANCH="$last_commit"
fi
git checkout "$PYTORCH_BRANCH"
git submodule update
popd
fi
# PYTORCH_BUILD_VERSION
# This is the version string, e.g. 0.4.1 , that will be used as the
# pip/conda version, OR the word 'nightly', which signals all the
# downstream scripts to use the current date as the version number (plus
# other changes). This is NOT the conda build string.
if [[ -z "$PYTORCH_BUILD_VERSION" ]]; then
export PYTORCH_BUILD_VERSION="1.0.0.dev$(date +%Y%m%d)"
fi
# PYTORCH_BUILD_NUMBER
# This is usually the number 1. If more than one build is uploaded for the
# same version/date, then this can be incremented to 2,3 etc in which case
# '.post2' will be appended to the version string of the package. This can
# be set to '0' only if OVERRIDE_PACKAGE_VERSION is being used to bypass
# all the version string logic in downstream scripts. Since we use the
# override below, exporting this shouldn't actually matter.
if [[ -z "$PYTORCH_BUILD_NUMBER" ]]; then
export PYTORCH_BUILD_NUMBER='1'
fi
if [[ "$PYTORCH_BUILD_NUMBER" -gt 1 ]]; then
export PYTORCH_BUILD_VERSION="${PYTORCH_BUILD_VERSION}${PYTORCH_BUILD_NUMBER}"
fi
# The nightly builds use their own versioning logic, so we override whatever
# logic is in setup.py or other scripts
export OVERRIDE_PACKAGE_VERSION="$PYTORCH_BUILD_VERSION"
# Build folder for conda builds to use
if [[ -z "$TORCH_CONDA_BUILD_FOLDER" ]]; then
export TORCH_CONDA_BUILD_FOLDER='pytorch-nightly'
fi
# TORCH_PACKAGE_NAME
# The name of the package to upload. This should probably be pytorch or
# pytorch-nightly. N.B. that pip will change all '-' to '_' but conda will
# not. This is dealt with in downstream scripts.
if [[ -z "$TORCH_PACKAGE_NAME" ]]; then
export TORCH_PACKAGE_NAME='torch-nightly'
fi
# PIP_UPLOAD_FOLDER should end in a slash. This is to handle it being empty
# (when uploading to e.g. whl/cpu/) and also to handle nightlies (when
# uploading to e.g. /whl/nightly/cpu)
if [[ -z "$PIP_UPLOAD_FOLDER" ]]; then
export PIP_UPLOAD_FOLDER='nightly/'
fi
# nightlies_package_folder()
# USAGE: nightlies_package_folder $package_type $cuda_version
# Function from (package_type, CUDA/cpu ver) -> where packages should be
# stored. OS does not factor since the folders will be on different machines
# and won't overlap
# N.B. PYTORCH_FINAL_PACKAGE_DIR is not a constant, and is not set here. This
# should be set by build_docker or build_mac according to this function.
nightlies_package_folder () {
echo "${today}/$1/$2/"
}
# (RUNNING|FAILED|SUCCEEDED)_LOG_DIR
# Absolute path to folders that store final logs. Initially these folders
# should be empty. Logs are written out to RUNNING_LOG_DIR. When a build
# fails, it's log should be moved to FAILED_LOG_DIR, and similarily for
# succeeded builds.
export RUNNING_LOG_DIR="${today}/logs"
export FAILED_LOG_DIR="${today}/logs/failed"
export SUCCEEDED_LOG_DIR="${today}/logs/succeeded"
# Log s3 directory, must not end in a /
if [[ "$(uname)" == 'Darwin' ]]; then
export LOGS_S3_DIR="nightly_logs/macos/$NIGHTLIES_DATE"
else
export LOGS_S3_DIR="nightly_logs/linux/$NIGHTLIES_DATE"
fi
# DAYS_TO_KEEP
# How many days to keep around for clean.sh. Build folders older than this
# will be purged at the end of cron jobs. '1' means to keep only the current
# day. Values less than 1 are not allowed. The default is 5.
if [[ -z "$DAYS_TO_KEEP" ]]; then
if [[ "$(uname)" == 'Darwin' ]]; then
# Mac machines have less memory
export DAYS_TO_KEEP=3
else
export DAYS_TO_KEEP=5
fi
fi
# Guard: DAYS_TO_KEEP must be at least 1. Use the numeric operator -lt; the
# previous "<" inside [[ ]] is a lexicographic string comparison, which
# mis-handles values such as "05" or "10".
if [[ "$DAYS_TO_KEEP" -lt 1 ]]; then
    echo "DAYS_TO_KEEP cannot be less than 1."
    echo "A value of 1 means to only keep the build for today"
    exit 1
fi
# PYTORCH_NIGHTLIES_TIMEOUT
# Timeout in seconds.
# When full testing is enabled, condas builds often take up to 2 hours 20
# minutes, so the default is set to (2 * 60 + 20 + 40 [buffer]) * 60 == 10800
# seconds.
# When only smoke testing is enabled, condas builds only take up to about an
# hour, so the default is set to (60 + 20 [buffer]) * 60 == 4800 seconds. On
# Mac, they can still take up to 2 hours.
if [[ -z "$PYTORCH_NIGHTLIES_TIMEOUT" ]]; then
if [[ "$(uname)" == 'Darwin' ]]; then
export PYTORCH_NIGHTLIES_TIMEOUT=9600
else
export PYTORCH_NIGHTLIES_TIMEOUT=4800
fi
fi
# PORTABLE_TIMEOUT
# Command/executable of some timeout command. Defined here because the path
# to the MacOS executable is harcoded to the gtimeout that I installed on my
# machine with homebrew.
if [[ "$(uname)" == 'Darwin' ]]; then
# On the Mac timeout was installed through 'brew install coreutils', which
# prepends a 'g' to all the command names
export PORTABLE_TIMEOUT='/usr/local/bin/gtimeout'
else
export PORTABLE_TIMEOUT='timeout'
fi
|
# Delete every filebeat-* index from the Elasticsearch instance inside the
# "elk" container. NOTE(review): the unquoted '*' is expanded by the host
# shell first — it reaches curl literally only while no file in the current
# directory matches "filebeat-*"; consider quoting the URL.
docker exec elk curl -XDELETE http://localhost:9200/filebeat-*/
|
<filename>AdoptionApplication/src/userinterface/ParentRegister.java
/*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package userinterface;
import Business.Directory.Counsellor;
import Business.Directory.Parents;
import Business.Directory.ParentsDirectory;
import Business.EcoSystem;
import Business.Enterprise.Enterprise;
import Business.Enterprise.HospitalEnterprise;
import Business.Mail.ConfigUtility;
import Business.Mail.EmailUtility;
import Business.Mail.EmailVariables;
import Business.Network.Network;
import Business.Organization.Organization;
import Business.UserAccount.UserAccount;
import Business.Validations.ValidateEmailTextField;
import Business.Validations.ValidatePasswords;
import Business.Validations.ValidateStrings;
import Business.WorkQueue.ParentToCounselor;
import java.awt.CardLayout;
import java.awt.Color;
import java.io.File;
import java.util.Properties;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import javafx.stage.FileChooser;
import javax.swing.BorderFactory;
import javax.swing.InputVerifier;
import javax.swing.JOptionPane;
import javax.swing.JPanel;
/**
*
* @author Joy
*/
public class ParentRegister extends javax.swing.JPanel {
/**
* Creates new form ParentRegister
*/
EcoSystem system;
JPanel userProcessContainer;
private String username;
private static int count = 0;
private String email;
private String password;
private boolean worstCaseScenerio;
private boolean finChild;
private boolean bigChanges;
private boolean comSituation;
private boolean currChildrenOnBoard;
private boolean eduRealities;
private boolean promises;
private boolean guilt;
private ParentsDirectory parentDirectory;
private ParentToCounselor parentToCounselor;
private Enterprise enterprise;
private Organization organization;
private ConfigUtility configUtil;
private EmailUtility emailUtil;
/**
 * Creates the parent-registration panel.
 *
 * @param userProcessContainer container panel used for screen navigation
 * @param system               the shared EcoSystem business model
 */
public ParentRegister(JPanel userProcessContainer, EcoSystem system) {
    initComponents();
    this.emailUtil = new EmailUtility();
    this.configUtil = new ConfigUtility();
    this.userProcessContainer = userProcessContainer;
    this.system = system;
    // Fill the hospital combo box and attach field validators.
    populateComboBox();
    addInputVerifiers();
}
/**
 * Attaches input verifiers so each registration field is validated when it
 * loses focus.
 */
private void addInputVerifiers() {
    InputVerifier textVerifier = new ValidateStrings();
    InputVerifier passwordVerifier = new ValidatePasswords();
    InputVerifier emailVerifier = new ValidateEmailTextField();

    txtUsername.setInputVerifier(textVerifier);
    txtEmail.setInputVerifier(emailVerifier);
    // Both password fields share one verifier instance.
    txtPassword.setInputVerifier(passwordVerifier);
    txtConfPassword.setInputVerifier(passwordVerifier);
}
/**
* This method is called from within the constructor to initialize the form.
* WARNING: Do NOT modify this code. The content of this method is always
* regenerated by the Form Editor.
*/
@SuppressWarnings("unchecked")
// <editor-fold defaultstate="collapsed" desc="Generated Code">//GEN-BEGIN:initComponents
private void initComponents() {
buttonGroup1 = new javax.swing.ButtonGroup();
txtEmail = new javax.swing.JTextField();
passwordLabel = new javax.<PASSWORD>.JLabel();
txtPassword = new <PASSWORD>();
jLabel3 = new javax.swing.JLabel();
txtConfPassword = new <PASSWORD>.JTextField();
hospitalJComboBox = new javax.swing.JComboBox();
jLabel13 = new javax.swing.JLabel();
usrNameLabel = new javax.swing.JLabel();
txtUsername = new javax.swing.JTextField();
emailIdLbl = new javax.swing.JLabel();
btnConfirm = new javax.swing.JButton();
jButton1 = new javax.swing.JButton();
jLabel5 = new javax.swing.JLabel();
jLabel9 = new javax.swing.JLabel();
isChildrenOnBoard = new javax.swing.JRadioButton();
notOnBoardChildren = new javax.swing.JRadioButton();
jLabel10 = new javax.swing.JLabel();
jLabel6 = new javax.swing.JLabel();
notPrepared = new javax.swing.JRadioButton();
isPrepared = new javax.swing.JRadioButton();
isFinance = new javax.swing.JRadioButton();
notSelectFinance = new javax.swing.JRadioButton();
isEducated = new javax.swing.JRadioButton();
notEducated = new javax.swing.JRadioButton();
jLabel11 = new javax.swing.JLabel();
jLabel7 = new javax.swing.JLabel();
isBigChange = new javax.swing.JRadioButton();
notBigChange = new javax.swing.JRadioButton();
isComfortable = new javax.swing.JRadioButton();
jLabel8 = new javax.swing.JLabel();
notComfortable = new javax.swing.JRadioButton();
isGuilty = new javax.swing.JRadioButton();
notGuilty = new javax.swing.JRadioButton();
jLabel12 = new javax.swing.JLabel();
notPromise = new javax.swing.JRadioButton();
isPromise = new javax.swing.JRadioButton();
jLabel1 = new javax.swing.JLabel();
setBackground(java.awt.SystemColor.activeCaption);
setMaximumSize(new java.awt.Dimension(1245, 1000));
setMinimumSize(new java.awt.Dimension(1245, 1000));
setPreferredSize(new java.awt.Dimension(1245, 1000));
passwordLabel.setFont(new java.awt.Font("Times New Roman", 1, 18)); // NOI18N
passwordLabel.setText("Password*:");
txtPassword.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
txtPasswordActionPerformed(evt);
}
});
jLabel3.setFont(new java.awt.Font("Times New Roman", 1, 18)); // NOI18N
jLabel3.setText("Confirm Password*:");
hospitalJComboBox.setModel(new javax.swing.DefaultComboBoxModel(new String[] { "Item 1", "Item 2", "Item 3", "Item 4" }));
jLabel13.setFont(new java.awt.Font("Times New Roman", 1, 18)); // NOI18N
jLabel13.setText("Select Hospital:");
usrNameLabel.setFont(new java.awt.Font("Times New Roman", 1, 18)); // NOI18N
usrNameLabel.setText("Username*:");
txtUsername.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
txtUsernameActionPerformed(evt);
}
});
emailIdLbl.setFont(new java.awt.Font("Times New Roman", 1, 18)); // NOI18N
emailIdLbl.setText("Email*:");
btnConfirm.setFont(new java.awt.Font("Lucida Grande", 1, 13)); // NOI18N
btnConfirm.setText("CONFIRM");
btnConfirm.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
btnConfirmActionPerformed(evt);
}
});
jButton1.setIcon(new javax.swing.ImageIcon(getClass().getResource("/images/left-arrow-in-circular-button-black-symbol-2.png"))); // NOI18N
jButton1.setText("Back");
jButton1.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
jButton1ActionPerformed(evt);
}
});
jLabel5.setFont(new java.awt.Font("Times New Roman", 1, 18)); // NOI18N
jLabel5.setText("You are prepared for the “worst case scenario.”");
jLabel9.setFont(new java.awt.Font("Times New Roman", 1, 18)); // NOI18N
jLabel9.setText("Your current children are on board with the situation.");
isChildrenOnBoard.setText("Yes");
isChildrenOnBoard.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
isChildrenOnBoardActionPerformed(evt);
}
});
notOnBoardChildren.setText("No");
notOnBoardChildren.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
notOnBoardChildrenActionPerformed(evt);
}
});
jLabel10.setFont(new java.awt.Font("Times New Roman", 1, 18)); // NOI18N
jLabel10.setText("You are educated about the realities of the type of adoption you are pursuing.");
jLabel6.setFont(new java.awt.Font("Times New Roman", 1, 18)); // NOI18N
jLabel6.setText("You are financially prepared for the child.");
notPrepared.setText("No");
notPrepared.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
notPreparedActionPerformed(evt);
}
});
isPrepared.setText("Yes");
isPrepared.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
isPreparedActionPerformed(evt);
}
});
isFinance.setText("Yes");
isFinance.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
isFinanceActionPerformed(evt);
}
});
notSelectFinance.setText("No");
notSelectFinance.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
notSelectFinanceActionPerformed(evt);
}
});
isEducated.setText("Yes");
isEducated.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
isEducatedActionPerformed(evt);
}
});
notEducated.setText("No");
notEducated.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
notEducatedActionPerformed(evt);
}
});
jLabel11.setFont(new java.awt.Font("Times New Roman", 1, 18)); // NOI18N
jLabel11.setText("You plan to make promises or commitments you actually can or will keep");
jLabel7.setFont(new java.awt.Font("Times New Roman", 1, 18)); // NOI18N
jLabel7.setText("You are able or willing to make BIG changes.");
isBigChange.setText("Yes");
isBigChange.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
isBigChangeActionPerformed(evt);
}
});
notBigChange.setText("No");
notBigChange.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
notBigChangeActionPerformed(evt);
}
});
isComfortable.setText("Yes");
isComfortable.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
isComfortableActionPerformed(evt);
}
});
jLabel8.setFont(new java.awt.Font("Times New Roman", 1, 18)); // NOI18N
jLabel8.setText("Your partner is comfortable with the situation.");
notComfortable.setText("No");
notComfortable.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
notComfortableActionPerformed(evt);
}
});
isGuilty.setText("Yes");
isGuilty.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
isGuiltyActionPerformed(evt);
}
});
notGuilty.setText("No");
notGuilty.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
notGuiltyActionPerformed(evt);
}
});
jLabel12.setFont(new java.awt.Font("Times New Roman", 1, 18)); // NOI18N
jLabel12.setText("You want to say “yes” not out of guilt.");
notPromise.setText("No");
notPromise.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
notPromiseActionPerformed(evt);
}
});
isPromise.setText("Yes");
isPromise.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
isPromiseActionPerformed(evt);
}
});
jLabel1.setFont(new java.awt.Font("Tahoma", 1, 24)); // NOI18N
jLabel1.setText("Parent Registration");
javax.swing.GroupLayout layout = new javax.swing.GroupLayout(this);
this.setLayout(layout);
layout.setHorizontalGroup(
layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(layout.createSequentialGroup()
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(layout.createSequentialGroup()
.addGap(441, 441, 441)
.addComponent(btnConfirm, javax.swing.GroupLayout.PREFERRED_SIZE, 172, javax.swing.GroupLayout.PREFERRED_SIZE))
.addGroup(layout.createSequentialGroup()
.addGap(41, 41, 41)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(layout.createSequentialGroup()
.addComponent(isPrepared)
.addGap(30, 30, 30)
.addComponent(notPrepared))
.addComponent(jLabel5)
.addComponent(jLabel6)
.addComponent(jLabel7)
.addGroup(layout.createSequentialGroup()
.addComponent(isBigChange)
.addGap(29, 29, 29)
.addComponent(notBigChange))
.addComponent(jLabel8)
.addGroup(layout.createSequentialGroup()
.addComponent(isComfortable)
.addGap(33, 33, 33)
.addComponent(notComfortable))
.addGroup(layout.createSequentialGroup()
.addComponent(isFinance)
.addGap(31, 31, 31)
.addComponent(notSelectFinance))))
.addGroup(layout.createSequentialGroup()
.addGap(93, 93, 93)
.addComponent(jButton1)
.addGap(170, 170, 170)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addComponent(jLabel13)
.addComponent(usrNameLabel)
.addComponent(jLabel3)
.addComponent(emailIdLbl)
.addComponent(passwordLabel))
.addGap(80, 80, 80)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING, false)
.addComponent(txtUsername)
.addComponent(txtEmail)
.addComponent(txtPassword)
.addComponent(txtConfPassword)
.addComponent(hospitalJComboBox, 0, 222, Short.MAX_VALUE))))
.addContainerGap(429, Short.MAX_VALUE))
.addGroup(layout.createSequentialGroup()
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(layout.createSequentialGroup()
.addGap(596, 596, 596)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addComponent(jLabel10)
.addGroup(layout.createSequentialGroup()
.addComponent(isGuilty)
.addGap(41, 41, 41)
.addComponent(notGuilty))
.addGroup(layout.createSequentialGroup()
.addComponent(isPromise)
.addGap(45, 45, 45)
.addComponent(notPromise))
.addComponent(jLabel11)
.addComponent(jLabel12)
.addGroup(layout.createSequentialGroup()
.addComponent(isEducated)
.addGap(41, 41, 41)
.addComponent(notEducated))
.addGroup(layout.createSequentialGroup()
.addComponent(isChildrenOnBoard)
.addGap(44, 44, 44)
.addComponent(notOnBoardChildren))
.addComponent(jLabel9)))
.addGroup(layout.createSequentialGroup()
.addGap(470, 470, 470)
.addComponent(jLabel1, javax.swing.GroupLayout.PREFERRED_SIZE, 248, javax.swing.GroupLayout.PREFERRED_SIZE)))
.addGap(0, 45, Short.MAX_VALUE))
);
layout.setVerticalGroup(
layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(layout.createSequentialGroup()
.addContainerGap()
.addComponent(jLabel1, javax.swing.GroupLayout.PREFERRED_SIZE, 31, javax.swing.GroupLayout.PREFERRED_SIZE)
.addGap(16, 16, 16)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.TRAILING)
.addComponent(jButton1, javax.swing.GroupLayout.PREFERRED_SIZE, 42, javax.swing.GroupLayout.PREFERRED_SIZE)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(usrNameLabel)
.addComponent(txtUsername, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)))
.addGap(28, 28, 28)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(emailIdLbl)
.addComponent(txtEmail, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
.addGap(32, 32, 32)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(passwordLabel)
.addComponent(txtPassword, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
.addGap(39, 39, 39)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(txtConfPassword, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(jLabel3))
.addGap(32, 32, 32)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(jLabel13)
.addComponent(hospitalJComboBox, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(layout.createSequentialGroup()
.addGap(62, 62, 62)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(jLabel5)
.addComponent(jLabel9))
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.UNRELATED)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(isPrepared)
.addComponent(notPrepared))
.addGap(39, 39, 39)
.addComponent(jLabel6)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.UNRELATED)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(isFinance)
.addComponent(notSelectFinance)))
.addGroup(layout.createSequentialGroup()
.addGap(106, 106, 106)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(isChildrenOnBoard)
.addComponent(notOnBoardChildren))
.addGap(35, 35, 35)
.addComponent(jLabel10)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.UNRELATED)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(isEducated)
.addComponent(notEducated))))
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(layout.createSequentialGroup()
.addGap(24, 24, 24)
.addComponent(jLabel7))
.addGroup(layout.createSequentialGroup()
.addGap(18, 18, 18)
.addComponent(jLabel11)))
.addGap(5, 5, 5)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(layout.createSequentialGroup()
.addGap(1, 1, 1)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addComponent(notPromise)
.addComponent(isPromise))
.addGap(18, 18, 18)
.addComponent(jLabel12)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.UNRELATED)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(notGuilty)
.addComponent(isGuilty)))
.addGroup(layout.createSequentialGroup()
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(isBigChange)
.addComponent(notBigChange))
.addGap(18, 18, 18)
.addComponent(jLabel8)
.addGap(18, 18, 18)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(isComfortable)
.addComponent(notComfortable))))
.addGap(55, 55, 55)
.addComponent(btnConfirm, javax.swing.GroupLayout.PREFERRED_SIZE, 49, javax.swing.GroupLayout.PREFERRED_SIZE)
.addContainerGap(156, Short.MAX_VALUE))
);
}// </editor-fold>//GEN-END:initComponents
private void txtUsernameActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_txtUsernameActionPerformed
    // NetBeans-generated hook for Enter pressed in the username field;
    // intentionally empty — registration is handled in btnConfirmActionPerformed.
    // TODO add your handling code here:
}//GEN-LAST:event_txtUsernameActionPerformed
private void btnConfirmActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_btnConfirmActionPerformed
    // Validates the registration form, registers a new parent profile with the
    // selected hospital, queues the profile for counselor approval, and sends
    // a confirmation e-mail. All validation failures show a dialog and return
    // (the original mixed dialogs with uncaught RuntimeExceptions).
    String username = txtUsername.getText();
    if (username.equals("")) {
        JOptionPane.showMessageDialog(null, "Please enter the Username");
        return;
    }
    String emailId = txtEmail.getText();
    if (emailId.equals("")) {
        JOptionPane.showMessageDialog(null, "Please enter the Email Id");
        return;
    }
    String password = txtPassword.getText();
    String confpassword = txtConfPassword.getText();
    if (password.equals("")) {
        JOptionPane.showMessageDialog(null, "Please enter the password");
        return;
    }
    if (!password.equals(confpassword)) {
        JOptionPane.showMessageDialog(null, "Confirm Password and Password should match");
        return;
    }
    // Fail fast on a missing hospital selection; previously this was only
    // checked after every survey answer was read and aborted with an
    // unhandled RuntimeException. (The original also compared the
    // txtUsername COMPONENT to null, which can never be true — removed.)
    HospitalEnterprise hospital = (HospitalEnterprise) hospitalJComboBox.getSelectedItem();
    if (hospital == null) {
        JOptionPane.showMessageDialog(null, "Please select the Hospital");
        return;
    }
    this.username = username;
    this.email = emailId;
    this.password = password;
    // Each survey question is a yes/no radio pair; the "yes" button's
    // selection state maps directly onto the boolean field.
    this.worstCaseScenerio = isPrepared.isSelected();
    this.finChild = isFinance.isSelected();
    this.bigChanges = isBigChange.isSelected();
    this.comSituation = isComfortable.isSelected();
    this.currChildrenOnBoard = isChildrenOnBoard.isSelected();
    this.eduRealities = isEducated.isSelected();
    this.promises = isPromise.isSelected();
    this.guilt = isGuilty.isSelected();
    // Reject duplicate user names across every Hospital enterprise, both at
    // enterprise level and in each organization. The original scanned the
    // organization accounts from INSIDE the enterprise-account loop, so
    // organizations were silently skipped whenever an enterprise had zero
    // accounts of its own — fixed by scanning them as a sibling loop.
    for (Network n : system.getNetworkList()) {
        for (Enterprise e : n.getEnterpriseDirectory().getEnterpriseList()) {
            if (!e.getEnterpriseType().equals(Enterprise.EnterpriseType.Hospital)) {
                continue;
            }
            for (UserAccount ua : e.getUserAccountDirectory().getUserAccountList()) {
                if (ua.getUsername().equals(username)) {
                    JOptionPane.showMessageDialog(null, "User Name already exists!, Please Enter valid user name","warning", JOptionPane.WARNING_MESSAGE);
                    return;
                }
            }
            for (Organization o : e.getOrganizationDirectory().getOrganizationList()) {
                for (UserAccount ua1 : o.getUserAccountDirectory().getUserAccountList()) {
                    if (ua1.getUsername().equals(username)) {
                        JOptionPane.showMessageDialog(null, "User Name already exists!, Please Enter valid user name","warning", JOptionPane.WARNING_MESSAGE);
                        return;
                    }
                }
            }
        }
    }
    Parents parent = new Parents(username, email, password, worstCaseScenerio, finChild, bigChanges, comSituation, currChildrenOnBoard, eduRealities, promises, guilt);
    hospital.getParentDirectory().addParents(parent);
    // Queue a review request so a counselor must approve the new profile.
    parentToCounselor = new ParentToCounselor("Please review the parent profile", parent);
    hospital.getWorkQueue().getParentToCounselor().add(parentToCounselor);
    sendMail(parent);
    JOptionPane.showMessageDialog(null, "Account Registered Successfully. Account sent to Counselor for Approval");
    // Clear the form for the next registration.
    txtUsername.setText("");
    txtEmail.setText("");
    txtPassword.setText("");
    txtConfPassword.setText("");
}//GEN-LAST:event_btnConfirmActionPerformed
private void txtPasswordActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_txtPasswordActionPerformed
    // NetBeans-generated hook for Enter pressed in the password field;
    // intentionally empty — submission is handled by the Confirm button.
    // TODO add your handling code here:
}//GEN-LAST:event_txtPasswordActionPerformed
private void jButton1ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_jButton1ActionPerformed
    // "Back" action: remove this registration panel from the container and
    // show the previous card of its CardLayout.
    // TODO add your handling code here:
    userProcessContainer.remove(this);
    CardLayout cardlayout = (CardLayout) userProcessContainer.getLayout();
    cardlayout.previous(userProcessContainer);
}//GEN-LAST:event_jButton1ActionPerformed
// The generated handlers below implement mutual exclusion for each yes/no
// survey radio pair: selecting one button of a pair deselects its partner.
// NOTE(review): buttonGroup1 is declared in the generated variables; wiring
// each pair into a javax.swing.ButtonGroup in the form designer would make
// all of these handlers redundant — confirm in the .form file before
// changing NetBeans-generated code.
private void isPreparedActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_isPreparedActionPerformed
    // TODO add your handling code here:
    notPrepared.setSelected(false);
}//GEN-LAST:event_isPreparedActionPerformed
private void notPreparedActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_notPreparedActionPerformed
    // TODO add your handling code here:
    isPrepared.setSelected(false);
}//GEN-LAST:event_notPreparedActionPerformed
private void isFinanceActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_isFinanceActionPerformed
    // TODO add your handling code here:
    notSelectFinance.setSelected(false);
}//GEN-LAST:event_isFinanceActionPerformed
private void notSelectFinanceActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_notSelectFinanceActionPerformed
    // TODO add your handling code here:
    isFinance.setSelected(false);
}//GEN-LAST:event_notSelectFinanceActionPerformed
private void isBigChangeActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_isBigChangeActionPerformed
    // TODO add your handling code here:
    notBigChange.setSelected(false);
}//GEN-LAST:event_isBigChangeActionPerformed
private void notBigChangeActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_notBigChangeActionPerformed
    // TODO add your handling code here:
    isBigChange.setSelected(false);
}//GEN-LAST:event_notBigChangeActionPerformed
private void isComfortableActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_isComfortableActionPerformed
    // TODO add your handling code here:
    notComfortable.setSelected(false);
}//GEN-LAST:event_isComfortableActionPerformed
private void notComfortableActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_notComfortableActionPerformed
    // TODO add your handling code here:
    isComfortable.setSelected(false);
}//GEN-LAST:event_notComfortableActionPerformed
private void isChildrenOnBoardActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_isChildrenOnBoardActionPerformed
    // TODO add your handling code here:
    notOnBoardChildren.setSelected(false);
}//GEN-LAST:event_isChildrenOnBoardActionPerformed
private void notOnBoardChildrenActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_notOnBoardChildrenActionPerformed
    // TODO add your handling code here:
    isChildrenOnBoard.setSelected(false);
}//GEN-LAST:event_notOnBoardChildrenActionPerformed
private void isEducatedActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_isEducatedActionPerformed
    // TODO add your handling code here:
    notEducated.setSelected(false);
}//GEN-LAST:event_isEducatedActionPerformed
private void notEducatedActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_notEducatedActionPerformed
    // TODO add your handling code here:
    isEducated.setSelected(false);
}//GEN-LAST:event_notEducatedActionPerformed
private void isPromiseActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_isPromiseActionPerformed
    // TODO add your handling code here:
    notPromise.setSelected(false);
}//GEN-LAST:event_isPromiseActionPerformed
private void notPromiseActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_notPromiseActionPerformed
    // TODO add your handling code here:
    isPromise.setSelected(false);
}//GEN-LAST:event_notPromiseActionPerformed
private void isGuiltyActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_isGuiltyActionPerformed
    // TODO add your handling code here:
    notGuilty.setSelected(false);
}//GEN-LAST:event_isGuiltyActionPerformed
private void notGuiltyActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_notGuiltyActionPerformed
    // TODO add your handling code here:
    isGuilty.setSelected(false);
}//GEN-LAST:event_notGuiltyActionPerformed
// Validation part
/*private boolean emailIdPatternCorrect(){
Pattern p=Pattern.compile("^[a-zA-Z0-9]+[a-zA-Z0-9]+@[a-zA-Z0-9]+.[a-zA-Z0-9]+$");
Matcher m=p.matcher(txtEmail.getText());
boolean b=m.matches();
return b;
}
private boolean passwordPatternCorrect(){
Pattern p1;
p1 = Pattern.compile("^(?=.*?[A-Z])(?=.*?[a-z])(?=.*?[0-9])(?=.*?[#?!@$%^&*-]).{6,}$");
Matcher m1=p1.matcher(txtPassword.getText());
boolean b1=m1.matches();
return b1;
}*/
/**
 * E-mails a registration confirmation to the address currently typed into
 * the form's e-mail field, using the shared SMTP configuration.
 *
 * Failures are reported to the user via a dialog rather than propagated.
 *
 * @param parent the newly registered profile whose id and username are
 *               embedded in the message body
 */
public void sendMail(Parents parent) {
    String recipient = txtEmail.getText();
    String subject = "Family Registration";
    EmailVariables templates = new EmailVariables();
    //FileChooser filePicker = new FileChooser();
    // Assemble header + HTML body + footer in one expression.
    String message = templates.getStart()
            + " <h3><br>Your Registration is successful! </br> <br>Your Profile ID is " + parent.getParentId()
            + " and your Userid: " + parent.getUsername() + "</br> <br> Kindly wait for your Counselor to review your details!</br></h3> <h2> Thank you! </h2>"
            + templates.getFooter();
    // No attachments are sent for registration mails.
    File[] attachFiles = null;
    //File selectedFile = new File("..\\images\\adopt.jpg");
    //attachFiles = new File[] {selectedFile};
    try {
        Properties smtpProperties = configUtil.loadProperties();
        emailUtil.sendEmail(smtpProperties, recipient, subject, message, attachFiles);
    } catch (Exception ex) {
        JOptionPane.showMessageDialog(this,
                "Error while sending the e-mail: " + ex.getMessage(),
                "Error", JOptionPane.ERROR_MESSAGE);
    }
}
/**
 * Refills hospitalJComboBox with every Hospital-type enterprise found in any
 * network of the system, discarding whatever items it held before.
 */
private void populateComboBox() {
    hospitalJComboBox.removeAllItems();
    for (Network network : system.getNetworkList()) {
        for (Enterprise enterprise : network.getEnterpriseDirectory().getEnterpriseList()) {
            if (!enterprise.getEnterpriseType().equals(Enterprise.EnterpriseType.Hospital)) {
                continue;
            }
            hospitalJComboBox.addItem(enterprise);
        }
    }
}
// Variables declaration - do not modify//GEN-BEGIN:variables
private javax.swing.JButton btnConfirm;
private javax.swing.ButtonGroup buttonGroup1;
private javax.swing.JLabel emailIdLbl;
private javax.swing.JComboBox hospitalJComboBox;
private javax.swing.JRadioButton isBigChange;
private javax.swing.JRadioButton isChildrenOnBoard;
private javax.swing.JRadioButton isComfortable;
private javax.swing.JRadioButton isEducated;
private javax.swing.JRadioButton isFinance;
private javax.swing.JRadioButton isGuilty;
private javax.swing.JRadioButton isPrepared;
private javax.swing.JRadioButton isPromise;
private javax.swing.JButton jButton1;
private javax.swing.JLabel jLabel1;
private javax.swing.JLabel jLabel10;
private javax.swing.JLabel jLabel11;
private javax.swing.JLabel jLabel12;
private javax.swing.JLabel jLabel13;
private javax.swing.JLabel jLabel3;
private javax.swing.JLabel jLabel5;
private javax.swing.JLabel jLabel6;
private javax.swing.JLabel jLabel7;
private javax.swing.JLabel jLabel8;
private javax.swing.JLabel jLabel9;
private javax.swing.JRadioButton notBigChange;
private javax.swing.JRadioButton notComfortable;
private javax.swing.JRadioButton notEducated;
private javax.swing.JRadioButton notGuilty;
private javax.swing.JRadioButton notOnBoardChildren;
private javax.swing.JRadioButton notPrepared;
private javax.swing.JRadioButton notPromise;
private javax.swing.JRadioButton notSelectFinance;
private javax.swing.JLabel passwordLabel;
private javax.swing.JTextField txtConfPassword;
private javax.swing.JTextField txtEmail;
private javax.swing.JTextField txtPassword;
private javax.swing.JTextField txtUsername;
private javax.swing.JLabel usrNameLabel;
// End of variables declaration//GEN-END:variables
}
|
#!/usr/bin/env bash
# Emits a tmux status segment showing whether the attached client is
# read-only ("R" on red) or read-write ("RW" on green). The #{?cond,a,b}
# conditional is evaluated by tmux itself when the status line is drawn,
# not by this script.
add_rw_mark() {
printf "#{?client_readonly,#[bg=colour9] R ,#[fg=colour0,bg=colour10] RW }"
}
# Emits one tmux status segment per session: attached sessions highlighted
# (bg colour12), detached sessions dimmed (fg colour8).
add_sessions_list() {
  tmux list-sessions -F "#{session_name}:#{session_attached}" | \
  while IFS= read -r line; do
    # "name:attached" — peel the flag off the end, the name off the front.
    attached=${line##*:}
    # Drops the first literal '$' from the name — TODO confirm intent;
    # kept to preserve existing behavior.
    line=${line/$/}
    line=${line%%:*}
    if [[ "${attached}" == "1" ]]; then
      # Pass the name as a %s ARGUMENT, not inside the format string:
      # session names containing % or \ would otherwise be interpreted
      # as printf escapes. IFS=/read -r above likewise keep names with
      # leading spaces or backslashes intact.
      printf "#[fg=colour0,bg=colour12] %s " "${line}"
    else
      printf "#[fg=colour8,bg=colour0] %s " "${line}"
    fi
  done
}
# Compose the status line: sessions list first, then the read-only/
# read-write indicator for the current client.
main() {
add_sessions_list
add_rw_mark
}
main
|
#!/usr/bin/env bats
# Tests don't run properly on Linux; it appears to be related to set -u
# See: https://github.com/sstephenson/bats/issues/171
BATS_TEST_SKIPPED=0
#BATS_ERROR_STACK_TRACE=()
# bats runs setup() before every test: source ecs-deploy into the test shell
# so its functions (usage, parseImageName, ...) can be invoked directly.
setup() {
  # Source in ecs-deploy
  . "ecs-deploy"
}
@test "check that usage() returns string and exits with status code 20" {
run usage
[ $status -eq 3 ]
}
@test "test assertRequiredArgumentsSet success" {
SERVICE=true
TASK_DEFINITION=false
run assertRequiredArgumentsSet
[ ! -z $status ]
}
@test "test assertRequiredArgumentsSet status=5" {
SERVICE=false
TASK_DEFINITION=false
run assertRequiredArgumentsSet
[ $status -eq 5 ]
}
@test "test assertRequiredArgumentsSet status=6" {
SERVICE=true
TASK_DEFINITION=true
run assertRequiredArgumentsSet
[ $status -eq 6 ]
}
@test "test assertRequiredArgumentsSet status=7" {
SERVICE=true
CLUSTER=false
run assertRequiredArgumentsSet
[ $status -eq 7 ]
}
@test "test assertRequiredArgumentsSet status=8" {
SERVICE=true
CLUSTER=true
IMAGE=false
run assertRequiredArgumentsSet
[ $status -eq 8 ]
}
@test "test assertRequiredArgumentsSet status=9" {
SERVICE=true
CLUSTER=true
IMAGE=true
MAX_DEFINITIONS="not a number"
run assertRequiredArgumentsSet
[ $status -eq 9 ]
}
# Image name parsing tests
# Reference image name format: [domain][:port][/repo][/][image][:tag]
# Status 13 = invalid image name. [ ! -z $status ] rewritten as the idiomatic
# (and properly quoted) [ -n "$status" ]; note it is always true after `run`,
# so the meaningful assertion in each test is the output comparison.
@test "test parseImageName missing image name" {
  IMAGE=""
  run parseImageName
  [ "$status" -eq 13 ]
}
@test "test parseImageName invalid image name 1" {
  IMAGE="/something"
  run parseImageName
  [ "$status" -eq 13 ]
}
@test "test parseImageName invalid port" {
  IMAGE="domain.com:abc/repo/image"
  run parseImageName
  [ "$status" -eq 13 ]
}
@test "test parseImageName root image no tag" {
  IMAGE="mariadb"
  TAGVAR=false
  run parseImageName
  [ -n "$status" ]
  [ "$output" == "mariadb:latest" ]
}
@test "test parseImageName root image with tag" {
  IMAGE="mariadb:1.2.3"
  TAGVAR=false
  run parseImageName
  [ -n "$status" ]
  [ "$output" == "mariadb:1.2.3" ]
}
@test "test parseImageName repo image no tag" {
  IMAGE="repo/image"
  TAGVAR=false
  run parseImageName
  [ -n "$status" ]
  [ "$output" == "repo/image:latest" ]
}
@test "test parseImageName repo image with tag" {
  IMAGE="repo/image:v1.2.3"
  TAGVAR=false
  run parseImageName
  [ -n "$status" ]
  [ "$output" == "repo/image:v1.2.3" ]
}
@test "test parseImageName repo multilevel image no tag" {
  IMAGE="repo/multi/level/image"
  TAGVAR=false
  run parseImageName
  [ -n "$status" ]
  [ "$output" == "repo/multi/level/image:latest" ]
}
@test "test parseImageName repo multilevel image with tag" {
  IMAGE="repo/multi/level/image:v1.2.3"
  TAGVAR=false
  run parseImageName
  [ -n "$status" ]
  [ "$output" == "repo/multi/level/image:v1.2.3" ]
}
@test "test parseImageName domain plus repo image no tag" {
  IMAGE="docker.domain.com/repo/image"
  TAGVAR=false
  run parseImageName
  [ -n "$status" ]
  [ "$output" == "docker.domain.com/repo/image:latest" ]
}
@test "test parseImageName domain plus repo image with tag" {
  IMAGE="docker.domain.com/repo/image:1.2.3"
  TAGVAR=false
  run parseImageName
  [ -n "$status" ]
  [ "$output" == "docker.domain.com/repo/image:1.2.3" ]
}
@test "test parseImageName domain plus repo multilevel image no tag" {
  IMAGE="docker.domain.com/repo/multi/level/image"
  TAGVAR=false
  run parseImageName
  [ -n "$status" ]
  [ "$output" == "docker.domain.com/repo/multi/level/image:latest" ]
}
@test "test parseImageName domain plus repo multilevel image with tag" {
  IMAGE="docker.domain.com/repo/multi/level/image:1.2.3"
  TAGVAR=false
  run parseImageName
  [ -n "$status" ]
  [ "$output" == "docker.domain.com/repo/multi/level/image:1.2.3" ]
}
@test "test parseImageName domain plus port plus repo image no tag" {
  IMAGE="docker.domain.com:8080/repo/image"
  TAGVAR=false
  run parseImageName
  [ -n "$status" ]
  [ "$output" == "docker.domain.com:8080/repo/image:latest" ]
}
@test "test parseImageName domain plus port plus repo image with tag" {
  IMAGE="docker.domain.com:8080/repo/image:1.2.3"
  TAGVAR=false
  run parseImageName
  [ -n "$status" ]
  [ "$output" == "docker.domain.com:8080/repo/image:1.2.3" ]
}
@test "test parseImageName domain plus port plus repo multilevel image no tag" {
  IMAGE="docker.domain.com:8080/repo/multi/level/image"
  TAGVAR=false
  run parseImageName
  [ -n "$status" ]
  [ "$output" == "docker.domain.com:8080/repo/multi/level/image:latest" ]
}
@test "test parseImageName domain plus port plus repo multilevel image with tag" {
  IMAGE="docker.domain.com:8080/repo/multi/level/image:1.2.3"
  TAGVAR=false
  run parseImageName
  [ -n "$status" ]
  [ "$output" == "docker.domain.com:8080/repo/multi/level/image:1.2.3" ]
}
@test "test parseImageName domain plus port plus repo image with tag from var" {
  IMAGE="docker.domain.com:8080/repo/image"
  TAGVAR="CI_TIMESTAMP"
  CI_TIMESTAMP="1487623908"
  run parseImageName
  [ -n "$status" ]
  [ "$output" == "docker.domain.com:8080/repo/image:1487623908" ]
}
@test "test parseImageName domain plus port plus repo multilevel image with tag from var" {
  IMAGE="docker.domain.com:8080/repo/multi/level/image"
  TAGVAR="CI_TIMESTAMP"
  CI_TIMESTAMP="1487623908"
  run parseImageName
  [ -n "$status" ]
  [ "$output" == "docker.domain.com:8080/repo/multi/level/image:1487623908" ]
}
@test "test parseImageName using ecr style domain" {
  IMAGE="121212345678.dkr.ecr.us-east-1.amazonaws.com/acct/repo"
  TAGVAR=false
  run parseImageName
  [ -n "$status" ]
  [ "$output" == "121212345678.dkr.ecr.us-east-1.amazonaws.com/acct/repo:latest" ]
}
@test "test parseImageName using ecr style image name and tag from var" {
  IMAGE="121212345678.dkr.ecr.us-east-1.amazonaws.com/acct/repo"
  TAGVAR="CI_TIMESTAMP"
  CI_TIMESTAMP="1487623908"
  run parseImageName
  [ -n "$status" ]
  [ "$output" == "121212345678.dkr.ecr.us-east-1.amazonaws.com/acct/repo:1487623908" ]
}
@test "test createNewTaskDefJson with single container in definition" {
imageWithoutTag="121212345678.dkr.ecr.us-east-1.amazonaws.com/acct/repo"
useImage="121212345678.dkr.ecr.us-east-1.amazonaws.com/acct/repo:1111111111"
TASK_DEFINITION=$(cat <<EOF
{
"taskDefinition": {
"status": "ACTIVE",
"networkMode": "bridge",
"family": "app-task-def",
"requiresAttributes": [
{
"name": "com.amazonaws.ecs.capability.ecr-auth"
}
],
"volumes": [],
"taskDefinitionArn": "arn:aws:ecs:us-east-1:121212345678:task-definition/app-task-def:123",
"containerDefinitions": [
{
"environment": [
{
"name": "KEY",
"value": "value"
}
],
"name": "API",
"links": [],
"mountPoints": [],
"image": "121212345678.dkr.ecr.us-east-1.amazonaws.com/acct/repo:1487623908",
"essential": true,
"portMappings": [
{
"protocol": "tcp",
"containerPort": 80,
"hostPort": 10080
}
],
"entryPoint": [],
"memory": 128,
"command": [
"/data/run.sh"
],
"cpu": 200,
"volumesFrom": []
}
],
"placementConstraints": null,
"revision": 123
}
}
EOF
)
expected='{ "family": "app-task-def", "volumes": [], "containerDefinitions": [ { "environment": [ { "name": "KEY", "value": "value" } ], "name": "API", "links": [], "mountPoints": [], "image": "121212345678.dkr.ecr.us-east-1.amazonaws.com/acct/repo:1111111111", "essential": true, "portMappings": [ { "protocol": "tcp", "containerPort": 80, "hostPort": 10080 } ], "entryPoint": [], "memory": 128, "command": [ "/data/run.sh" ], "cpu": 200, "volumesFrom": [] } ], "placementConstraints": null, "networkMode": "bridge" }'
run createNewTaskDefJson
[ ! -z $status ]
[ $output == $expected ]
}
@test "test createNewTaskDefJson with single container in definition for AWS Fargate" {
imageWithoutTag="121212345678.dkr.ecr.us-east-1.amazonaws.com/acct/repo"
useImage="121212345678.dkr.ecr.us-east-1.amazonaws.com/acct/repo:1111111111"
TASK_DEFINITION=$(cat <<EOF
{
"taskDefinition": {
"status": "ACTIVE",
"networkMode": "awsvpc",
"family": "app-task-def",
"requiresAttributes": [
{
"name": "com.amazonaws.ecs.capability.ecr-auth"
}
],
"volumes": [],
"taskDefinitionArn": "arn:aws:ecs:us-east-1:121212345678:task-definition/app-task-def:123",
"containerDefinitions": [
{
"environment": [
{
"name": "KEY",
"value": "value"
}
],
"name": "API",
"links": [],
"mountPoints": [],
"image": "121212345678.dkr.ecr.us-east-1.amazonaws.com/acct/repo:1487623908",
"essential": true,
"portMappings": [
{
"protocol": "tcp",
"containerPort": 80,
"hostPort": 10080
}
],
"entryPoint": [],
"memory": 128,
"command": [
"/data/run.sh"
],
"cpu": 200,
"volumesFrom": []
}
],
"revision": 123,
"executionRoleArn": "arn:aws:iam::121212345678:role/ecsTaskExecutionRole",
"compatibilities": [
"EC2",
"FARGATE"
],
"requiresCompatibilities": [
"FARGATE"
],
"cpu": "256",
"memory": "512"
}
}
EOF
)
expected='{ "family": "app-task-def", "volumes": [], "containerDefinitions": [ { "environment": [ { "name": "KEY", "value": "value" } ], "name": "API", "links": [], "mountPoints": [], "image": "121212345678.dkr.ecr.us-east-1.amazonaws.com/acct/repo:1111111111", "essential": true, "portMappings": [ { "protocol": "tcp", "containerPort": 80, "hostPort": 10080 } ], "entryPoint": [], "memory": 128, "command": [ "/data/run.sh" ], "cpu": 200, "volumesFrom": [] } ], "placementConstraints": null, "networkMode": "awsvpc", "executionRoleArn": "arn:aws:iam::121212345678:role/ecsTaskExecutionRole", "requiresCompatibilities": [ "FARGATE" ], "cpu": "256", "memory": "512" }'
run createNewTaskDefJson
[ ! -z $status ]
[ $output == $expected ]
}
@test "test createNewTaskDefJson with multiple containers in definition" {
imageWithoutTag="121212345678.dkr.ecr.us-east-1.amazonaws.com/acct/repo"
useImage="121212345678.dkr.ecr.us-east-1.amazonaws.com/acct/repo:1111111111"
TASK_DEFINITION=$(cat <<EOF
{
"taskDefinition": {
"status": "ACTIVE",
"networkMode": "bridge",
"family": "app-task-def",
"requiresAttributes": [
{
"name": "com.amazonaws.ecs.capability.ecr-auth"
}
],
"volumes": [],
"taskDefinitionArn": "arn:aws:ecs:us-east-1:121212345678:task-definition/app-task-def:123",
"containerDefinitions": [
{
"environment": [
{
"name": "KEY",
"value": "value"
}
],
"name": "API",
"links": [],
"mountPoints": [],
"image": "121212345678.dkr.ecr.us-east-1.amazonaws.com/acct/repo:1487623908",
"essential": true,
"portMappings": [
{
"protocol": "tcp",
"containerPort": 80,
"hostPort": 10080
}
],
"entryPoint": [],
"memory": 128,
"command": [
"/data/run.sh"
],
"cpu": 200,
"volumesFrom": []
},
{
"environment": [
{
"name": "KEY",
"value": "value"
}
],
"name": "cache",
"links": [],
"mountPoints": [],
"image": "redis:latest",
"essential": true,
"portMappings": [
{
"protocol": "tcp",
"containerPort": 6376,
"hostPort": 10376
}
],
"entryPoint": [],
"memory": 128,
"command": [
"/data/run.sh"
],
"cpu": 200,
"volumesFrom": []
}
],
"placementConstraints": null,
"revision": 123
}
}
EOF
)
expected='{ "family": "app-task-def", "volumes": [], "containerDefinitions": [ { "environment": [ { "name": "KEY", "value": "value" } ], "name": "API", "links": [], "mountPoints": [], "image": "121212345678.dkr.ecr.us-east-1.amazonaws.com/acct/repo:1111111111", "essential": true, "portMappings": [ { "protocol": "tcp", "containerPort": 80, "hostPort": 10080 } ], "entryPoint": [], "memory": 128, "command": [ "/data/run.sh" ], "cpu": 200, "volumesFrom": [] }, { "environment": [ { "name": "KEY", "value": "value" } ], "name": "cache", "links": [], "mountPoints": [], "image": "redis:latest", "essential": true, "portMappings": [ { "protocol": "tcp", "containerPort": 6376, "hostPort": 10376 } ], "entryPoint": [], "memory": 128, "command": [ "/data/run.sh" ], "cpu": 200, "volumesFrom": [] } ], "placementConstraints": null, "networkMode": "bridge" }'
run createNewTaskDefJson
[ ! -z $status ]
[ $output == $expected ]
}
@test "test parseImageName with tagonly option" {
TAGONLY="newtag"
IMAGE="ignore"
expected=$TAGONLY
run parseImageName
[ ! -z $status ]
[ $output == $expected ]
}
@test "test createNewTaskDefJson with multiple containers in definition and replace only tags" {
TAGONLY="newtag"
useImage=$TAGONLY
TASK_DEFINITION=$(cat <<EOF
{
"taskDefinition": {
"status": "ACTIVE",
"networkMode": "bridge",
"family": "app-task-def",
"requiresAttributes": [
{
"name": "com.amazonaws.ecs.capability.ecr-auth"
}
],
"volumes": [],
"taskDefinitionArn": "arn:aws:ecs:us-east-1:121212345678:task-definition/app-task-def:123",
"containerDefinitions": [
{
"environment": [
{
"name": "KEY",
"value": "value"
}
],
"name": "API",
"links": [],
"mountPoints": [],
"image": "121212345678.dkr.ecr.us-east-1.amazonaws.com/acct/repo:1487623908",
"essential": true,
"portMappings": [
{
"protocol": "tcp",
"containerPort": 80,
"hostPort": 10080
}
],
"entryPoint": [],
"memory": 128,
"command": [
"/data/run.sh"
],
"cpu": 200,
"volumesFrom": []
},
{
"environment": [
{
"name": "KEY",
"value": "value"
}
],
"name": "cache",
"links": [],
"mountPoints": [],
"image": "redis:latest",
"essential": true,
"portMappings": [
{
"protocol": "tcp",
"containerPort": 6376,
"hostPort": 10376
}
],
"entryPoint": [],
"memory": 128,
"command": [
"/data/run.sh"
],
"cpu": 200,
"volumesFrom": []
}
],
"placementConstraints": null,
"revision": 123
}
}
EOF
)
expected='{ "family": "app-task-def", "volumes": [], "containerDefinitions": [ { "environment": [ { "name": "KEY", "value": "value" } ], "name": "API", "links": [], "mountPoints": [], "image": "121212345678.dkr.ecr.us-east-1.amazonaws.com/acct/repo:newtag", "essential": true, "portMappings": [ { "protocol": "tcp", "containerPort": 80, "hostPort": 10080 } ], "entryPoint": [], "memory": 128, "command": [ "/data/run.sh" ], "cpu": 200, "volumesFrom": [] }, { "environment": [ { "name": "KEY", "value": "value" } ], "name": "cache", "links": [], "mountPoints": [], "image": "redis:newtag", "essential": true, "portMappings": [ { "protocol": "tcp", "containerPort": 6376, "hostPort": 10376 } ], "entryPoint": [], "memory": 128, "command": [ "/data/run.sh" ], "cpu": 200, "volumesFrom": [] } ], "placementConstraints": null, "networkMode": "bridge" }'
run createNewTaskDefJson
echo $output
[ ! -z $status ]
[ $output == $expected ]
}
|
/// Repeatedly calls `listRobotApplications`, feeding each page of results to
/// `onPage` until the service stops returning a `nextToken` or `onPage`
/// resolves to `false`.
///
/// - Parameters:
///   - input: The initial request; its `nextToken` is overwritten for each
///     subsequent page.
///   - logger: Logger passed to every underlying request.
///   - eventLoop: Event loop the futures run on; the client's own loop is
///     used when `nil`.
///   - onPage: Invoked once per page; return `false` to stop paginating.
/// - Returns: A future that succeeds once pagination has finished.
public func listRobotApplicationsPaginator(
    _ input: ListRobotApplicationsRequest,
    logger: Logger = AWSClient.loggingDisabled,
    on eventLoop: EventLoop? = nil,
    onPage: @escaping (ListRobotApplicationsResponse, EventLoop) -> EventLoopFuture<Bool>
) -> EventLoopFuture<Void> {
    // Recursive helper: fetches one page identified by `token` (nil = first
    // page) and chains itself while more pages remain.
    func listApplicationsPaginatorHelper(
        _ input: ListRobotApplicationsRequest,
        logger: Logger,
        eventLoop: EventLoop?,
        onPage: @escaping (ListRobotApplicationsResponse, EventLoop) -> EventLoopFuture<Bool>,
        token: String?
    ) -> EventLoopFuture<Void> {
        // `input` is a value type: copying lets us set the page token without
        // mutating the caller's request.
        var input = input
        if let token = token {
            input.nextToken = token
        }
        return listRobotApplications(input, logger: logger, on: eventLoop)
            .flatMap { response in
                onPage(response, eventLoop ?? self.eventLoop)
                    .flatMap { shouldContinue in
                        // Recurse only while the service supplies another
                        // token AND the callback asked to continue.
                        if let nextToken = response.nextToken, shouldContinue {
                            return listApplicationsPaginatorHelper(input, logger: logger, eventLoop: eventLoop, onPage: onPage, token: nextToken)
                        } else {
                            return eventLoop?.makeSucceededFuture(()) ?? self.eventLoop.makeSucceededFuture(())
                        }
                    }
            }
    }
    return listApplicationsPaginatorHelper(input, logger: logger, eventLoop: eventLoop, onPage: onPage, token: nil)
}
#!/usr/bin/env bats
# Per-test scratch directory for captured client output.
setup() {
  # -d is the portable spelling; `--directory` is GNU-only and fails on
  # BSD/macOS mktemp.
  # NOTE(review): this shadows bats' own BATS_TMPDIR variable; renaming it
  # would be cleaner, but every test below references $BATS_TMPDIR, so the
  # name is kept here.
  BATS_TMPDIR=`mktemp -d`
}
teardown() {
  rm -rf "$BATS_TMPDIR"
}
@test "Your server code compiles" {
cd src
rm -f echoserver/EchoServer.class
run javac echoserver/EchoServer.java
cd ..
[ "$status" -eq 0 ]
}
@test "Your server starts successfully" {
cd src
java echoserver.EchoServer &
status=$?
kill %1
cd ..
[ "$status" -eq 0 ]
}
@test "Your server handles a small bit of text" {
cd src
rm -f echoserver/*.class
javac echoserver/EchoServer.java
java echoserver.EchoServer &
cd ..
cd test/sampleBin
java echoserver.EchoClient < ../etc/textTest.txt > "$BATS_TMPDIR"/textTest.txt
run diff ../etc/textTest.txt "$BATS_TMPDIR"/textTest.txt
cd ../..
kill %1
[ "$status" -eq 0 ]
}
@test "Your server handles a large chunk of text" {
cd src
rm -f echoserver/*.class
javac echoserver/EchoServer.java
java echoserver.EchoServer &
cd ..
cd test/sampleBin
java echoserver.EchoClient < ../etc/words.txt > "$BATS_TMPDIR"/words.txt
run diff ../etc/words.txt "$BATS_TMPDIR"/words.txt
cd ../..
kill %1
[ "$status" -eq 0 ]
}
@test "Your server handles binary content" {
cd src
rm -f echoserver/*.class
javac echoserver/EchoServer.java
java echoserver.EchoServer &
cd ..
cd test/sampleBin
java echoserver.EchoClient < ../etc/pumpkins.jpg > "$BATS_TMPDIR"/pumpkins.jpg
run diff ../etc/pumpkins.jpg "$BATS_TMPDIR"/pumpkins.jpg
cd ../..
kill %1
[ "$status" -eq 0 ]
}
|
import urllib
import urllib.parse
import urllib.request
import ssl
# Proovl SMS API settings www.proovl.com / for Python 3 send SMS
user = "*****"  # change ***** to your Proovl user ID
token = "*****"  # change ***** to your Proovl token
from_n = "*****"  # change ***** to your Proovl SMS number
to = "*****"  # change ***** to receiver number
text = "*****"  # Text message
url = "https://www.proovl.com/api/send.php?"
params = {
    "user": user,
    "token": token,
    "from": from_n,
    "to": to,
    "text": text,
}
# SECURITY: this disables TLS certificate verification for EVERY HTTPS
# request made by this process. Keep it only if you genuinely cannot
# validate proovl.com's certificate chain; otherwise delete this block.
try:
    _create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
    # Very old Pythons lack the hook (and did not verify by default anyway).
    pass
else:
    ssl._create_default_https_context = _create_unverified_https_context
query_string = urllib.parse.urlencode(params)
http_req = url + query_string
# Context manager guarantees the connection is closed even if read() raises;
# the original left the response open on the error path.
with urllib.request.urlopen(http_req) as response:
    reply = response.read().decode("utf-8")
# The API answers "Status;Detail", e.g. "Error;<reason>" or "OK;<message id>".
parts = reply.split(";")
if parts[0] == "Error":
    print("Error message:", parts[1])
else:
    print("Message has been sent! ID:", parts[1])
<gh_stars>0
import * as tslib_1 from "tslib";
import { Gain } from "../../core/context/Gain";
import { connect, ToneAudioNode } from "../../core/context/ToneAudioNode";
import { optionsFromArguments } from "../../core/util/Defaults";
import { readOnly } from "../../core/util/Interface";
import { GainToAudio } from "../../signal/GainToAudio";
import { Signal } from "../../signal/Signal";
/**
* Tone.Crossfade provides equal power fading between two inputs.
* More on crossfading technique [here](https://en.wikipedia.org/wiki/Fade_(audio_engineering)#Crossfading).
* ```
* +---------+
* +> input a +>--+
* +-----------+ +---------------------+ | | |
* | 1s signal +>--> stereoPannerNode L +>----> gain | |
* +-----------+ | | +---------+ |
* +-> pan R +>-+ | +--------+
* | +---------------------+ | +---> output +>
* +------+ | | +---------+ | +--------+
* | fade +>----+ | +> input b +>--+
* +------+ | | |
* +--> gain |
* +---------+
* ```
* @example
* import { CrossFade, Oscillator } from "tone";
* const crossFade = new CrossFade().toDestination();
* // connect two inputs to a/b
* const inputA = new Oscillator(440, "square").connect(crossFade.a).start();
* const inputB = new Oscillator(440, "sine").connect(crossFade.b).start();
* // use the fade to control the mix between the two
* crossFade.fade.value = 0.5;
* @category Component
*/
// NOTE(review): this file is compiled output (see the sourceMappingURL
// below) — fix issues in the TypeScript source, not here. Two observations
// for that source: (1) the Object.assign(...) wrapper around
// optionsFromArguments in the super() call takes a single argument and is a
// no-op; (2) optionsFromArguments is evaluated twice (once for super, once
// for `fade`), presumably because `_this` is unavailable before super —
// confirm before simplifying.
var CrossFade = /** @class */ (function (_super) {
    tslib_1.__extends(CrossFade, _super);
    function CrossFade() {
        var _this = _super.call(this, Object.assign(optionsFromArguments(CrossFade.getDefaults(), arguments, ["fade"]))) || this;
        _this.name = "CrossFade";
        /**
         * The crossfading is done by a StereoPannerNode
         */
        _this._panner = _this.context.createStereoPanner();
        /**
         * Split the output of the panner node into two values used to control the gains.
         */
        _this._split = _this.context.createChannelSplitter(2);
        /**
         * Convert the fade value into an audio range value so it can be connected
         * to the panner.pan AudioParam
         */
        _this._g2a = new GainToAudio({ context: _this.context });
        /**
         * The input which is at full level when fade = 0
         */
        _this.a = new Gain({
            context: _this.context,
            gain: 0,
        });
        /**
         * The input which is at full level when fade = 1
         */
        _this.b = new Gain({
            context: _this.context,
            gain: 0,
        });
        /**
         * The output is a mix between `a` and `b` at the ratio of `fade`
         */
        _this.output = new Gain({ context: _this.context });
        _this._internalChannels = [_this.a, _this.b];
        var options = optionsFromArguments(CrossFade.getDefaults(), arguments, ["fade"]);
        _this.fade = new Signal({
            context: _this.context,
            units: "normalRange",
            value: options.fade,
        });
        readOnly(_this, "fade");
        // A constant-1 signal panned by `fade` yields the two equal-power gain
        // curves on the splitter's L/R channels.
        _this.context.getConstant(1).connect(_this._panner);
        _this._panner.connect(_this._split);
        // this is necessary for standardized-audio-context
        // doesn't make any difference for the native AudioContext
        // https://github.com/chrisguttandin/standardized-audio-context/issues/647
        _this._panner.channelCount = 1;
        _this._panner.channelCountMode = "explicit";
        connect(_this._split, _this.a.gain, 0);
        connect(_this._split, _this.b.gain, 1);
        _this.fade.chain(_this._g2a, _this._panner.pan);
        _this.a.connect(_this.output);
        _this.b.connect(_this.output);
        return _this;
    }
    CrossFade.getDefaults = function () {
        return Object.assign(ToneAudioNode.getDefaults(), {
            fade: 0.5,
        });
    };
    // Releases every node this instance created; native nodes are
    // disconnected rather than disposed.
    CrossFade.prototype.dispose = function () {
        _super.prototype.dispose.call(this);
        this.a.dispose();
        this.b.dispose();
        this.output.dispose();
        this.fade.dispose();
        this._g2a.dispose();
        this._panner.disconnect();
        this._split.disconnect();
        return this;
    };
    return CrossFade;
}(ToneAudioNode));
export { CrossFade };
//# sourceMappingURL=CrossFade.js.map
// Rubro.php
class Rubro {
    /**
     * Load every row of the `rubros` table as an id => name map.
     *
     * NOTE(review): assumes $this->db is an initialized query-builder style
     * database handle whose result() yields row objects with ->id and ->name
     * properties — confirm against the framework in use.
     *
     * @return array map of rubro id => rubro name
     */
    public function getRubros() {
        $rubros = array();
        foreach ($this->db->query("SELECT * FROM rubros")->result() as $row) {
            $rubros[$row->id] = $row->name;
        }
        return $rubros;
    }
}
// registroinversor.php
// Assuming the rubros array is passed to the view
<select name="rubro">
    <?php foreach ($rubros as $id => $rubro) : ?>
        <!-- Escape DB-sourced values so markup stays valid and stored XSS is blocked -->
        <option value="<?php echo htmlspecialchars($id); ?>"><?php echo htmlspecialchars($rubro); ?></option>
    <?php endforeach; ?>
</select>
#include "llvm/IR/LLVMContext.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CallGraphSCCPass.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/RegionPass.h"
#include "llvm/Analysis/Verifier.h"
#include "llvm/Assembly/PrintModulePass.h"
#include "llvm/Bitcode/ReaderWriter.h"
#include "llvm/CodeGen/CommandFlags.h"
#include "llvm/DebugInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Module.h"
#include "llvm/IRReader/IRReader.h"
#include "llvm/LinkAllIR.h"
#include "llvm/LinkAllPasses.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/PassManager.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/PassNameParser.h"
#include "llvm/Support/PluginLoader.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/Signals.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/SystemUtils.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Support/ToolOutputFile.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include <algorithm>
#include <memory>
#include "ALIVE.h"
using namespace llvm;
static cl::opt<std::string>
InputFilename(cl::Positional, cl::desc("<input bitcode file>"),
cl::init("-"), cl::value_desc("filename"));
int
main (int argc, char ** argv)
{
  // Frees LLVM ManagedStatic objects automatically when main exits.
  llvm_shutdown_obj Y;
  LLVMContext &Context = getGlobalContext();
  std::string OutputFilename;
  cl::ParseCommandLineOptions(argc, argv, "Alive Pass for instcombine \n");
  sys::PrintStackTraceOnErrorSignal();
  // Register the pass families so command-line pass names resolve
  // (legacy LLVM 3.x registration API).
  PassRegistry &Registry = *PassRegistry::getPassRegistry();
  initializeCore(Registry);
  initializeScalarOpts(Registry);
  initializeIPO(Registry);
  initializeAnalysis(Registry);
  initializeIPA(Registry);
  initializeTransformUtils(Registry);
  initializeInstCombine(Registry);
  initializeInstrumentation(Registry);
  initializeTarget(Registry);
  PassManager Passes;
  SMDiagnostic Err;
  // Parse the input IR (bitcode or textual) named by the positional
  // argument; "-" (the default) reads from stdin.
  OwningPtr<Module> M1;
  M1.reset(ParseIRFile(InputFilename, Err, Context));
  if(M1.get() == 0){
    Err.print(argv[0], errs());
    return 1;
  }
  // Output goes next to the input as "<input>.alive.bc".
  OwningPtr<tool_output_file> Out;
  std::string ErrorInfo;
  OutputFilename = InputFilename + ".alive.bc";
  Out.reset(new tool_output_file(OutputFilename.c_str(), ErrorInfo, sys::fs::F_None));
  if(!ErrorInfo.empty()){
    errs()<< ErrorInfo<<'\n';
    return 1;
  }
  // Run the ALIVE pass, then write the transformed module as bitcode.
  Passes.add(createALIVEPass());
  Passes.add(createBitcodeWriterPass(Out->os()));
  Passes.run(*M1.get());
  // keep() prevents tool_output_file from deleting the output on exit.
  Out->keep();
  return 0;
}
|
#!/bin/bash
# SLURM batch job: one DDPG training run on RoboschoolInvertedPendulum-v1.
# The #SBATCH directives below are read by the scheduler — do not reformat.
#SBATCH --account=def-dkulic
#SBATCH --mem=8000M             # memory per node
#SBATCH --time=23:00:00         # time (DD-HH:MM)
#SBATCH --output=/project/6001934/lingheng/Double_DDPG_Job_output/continuous_RoboschoolInvertedPendulum-v1_doule_ddpg_hardcopy_action_noise_seed5_run3_%N-%j.out  # %N for node name, %j for jobID
# Load the toolchain the CPU TensorFlow virtualenv was built against.
module load qt/5.9.6 python/3.6.3 nixpkgs/16.09 gcc/7.3.0 boost/1.68.0 cuda cudnn
source ~/tf_cpu/bin/activate
# NOTE(review): "doule" in the paths looks like a typo for "double", but it is
# kept as-is because the output directories already use this spelling.
python ./ddpg_discrete_action.py --env RoboschoolInvertedPendulum-v1 --random-seed 5  --exploration-strategy action_noise --summary-dir ../Double_DDPG_Results_no_monitor/continuous/RoboschoolInvertedPendulum-v1/doule_ddpg_hardcopy_action_noise_seed5_run3 --continuous-act-space-flag  --target-hard-copy-flag
|
<filename>src/main/java/com/plugra/tracker/interfaces/Tracker.java<gh_stars>0
package com.plugra.tracker.interfaces;
/**
 * Contract for components that record activity data for a user.
 */
public interface Tracker {
    /**
     * Begin logging tracked data for the given user.
     *
     * @param username identifier of the user whose data should be tracked
     * @param type     kind of tracking to perform; semantics are defined by
     *                 the implementation (not visible in this interface)
     */
    public void startTracking(String username, String type);
}
|
from django.db import models
class User(models.Model):
    """A registered user account."""
    # Display name of the user.
    name = models.CharField(max_length=100)
    # Contact/login address; uniqueness enforced at the database level.
    email = models.EmailField(unique=True)
    # Set automatically when the row is first created (auto_now_add).
    registration_date = models.DateTimeField(auto_now_add=True)
class Post(models.Model):
    """A piece of content authored by a User."""
    content = models.TextField()
    # Deleting the user cascades and deletes their posts.
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    # Set automatically when the row is first created (auto_now_add).
    created_at = models.DateTimeField(auto_now_add=True)
class Hashtag(models.Model):
    """A tag label; no relation to Post is visible in this part of the file."""
    name = models.CharField(max_length=50)
<filename>pagefetch_project/populate_pagefetch.py
#!/usr/bin/env python
"""
=============================
Author: mtbvc <<EMAIL>>
Date: 04/07/2013
Last revision: 27/10/2013
Requires:
---------
"""
from django.core.management import setup_environ
from pagefetch_project import settings
setup_environ(settings)
from pagefetch import game_model_functions
import argparse
import os
from pagefetch import deployment_model_functions
#fetch data for population from file
def get_trending_queries(filename):
    """Extract population data from a file.

    Each line of the file is a comma separated record; the line is stripped
    of surrounding whitespace and split on commas.

    Returns a list of tuples, one per line, in file order.
    """
    # Use a context manager so the file is closed even if reading raises
    # (the previous version leaked the handle on error).
    with open(filename, 'r') as f:
        return [tuple(line.strip().split(',')) for line in f]
def main():
    """Parse CLI args and populate one category plus its associated pages.

    Returns 0 on success, 2 when no data file is given (Python 2 script:
    note the print statements below).
    """
    parser = argparse.ArgumentParser(
        description="Populate a category and the pages associated with it")
    # NOTE(review): type=int with default=True is inconsistent — "-a 0" yields
    # int 0 while the default stays bool True; confirm the intended semantics.
    parser.add_argument("-a", "--append", type=int,default=True,
                        help="if set to false deletes category from database\
                        if already present.")
    #TODO(mtbvc):won't need this
    parser.add_argument("-cn", "--category_name", type=str,
                        help="Name of category.")
    parser.add_argument("-f", "--filename",
        default= os.getcwd() + '/data/trends/game_data.txt', type=str,
        help="relative path to population data file")
    # NOTE(review): "-local" is a single-dash long option (unconventional);
    # was "--local" intended?
    parser.add_argument("-l", "-local", type=str, default=False,
        help="If populating images from disk use this flag")
    args = parser.parse_args()
    if args.filename:
        data_list = get_trending_queries(args.filename)
        for data_tuple in data_list:
            # data_tuple[0] is the category name.
            cat = game_model_functions.get_category(data_tuple[0],'icon','',append=args.append)
            #data_tuple[1] is url
            #game_model_functions.populate_pages([data_tuple[1]],cat)
            deployment_model_functions.populate_pages([data_tuple[1]],cat,halved_screen_shot=True)
        return 0
    else:
        # NOTE(review): print_help() already prints and returns None, so this
        # Python-2 print statement additionally emits "None".
        print parser.print_help()
        return 2
    #import doctest
    #test_results = doctest.testmod()
    #print test_results
    #if not test_results.failed:
    #    populate(args.file_name, args.category_name, args.append)
    #    print "Category and pages have been populated"
    #return 0
# Script entry point: only runs when executed directly, not when imported.
if __name__ == '__main__':
    main()
|
package io.github.tdhd.robakka.behaviours
import io.github.tdhd.robakka._
import io.github.tdhd.robakka.Agent.MoveCommand
/** Plant-seeking behaviour: moves toward the first visible plant,
  * falling back to a random move when none is reachable.
  */
case object FollowPlantBehaviour extends BaseBehaviour {
  def act(entity: World.AgentEntity, worldState: World.StateContainer) = {
    val candidateMoves = BehaviourHelpers.entities2MoveCommand(entity, worldState)
    // Keep only moves that lead to a plant entity; take the first, if any.
    val plantMove = candidateMoves.collect {
      case (_: World.PlantEntity, cmd) => cmd
    }.headOption
    Agent.CommandSet(move = Option(plantMove.getOrElse(BehaviourHelpers.getRandomMove)))
  }
}
|
import React from 'react';
import { Box } from 'grommet';
import { Avatar } from '../Avatar';
// Storybook metadata: registers Avatar under the "controls" section.
export default {
    title: 'controls/Avatar',
    component: Avatar,
};
// Default story: an Avatar with image, title and subtitle inside a centered Box.
// NOTE(review): '<NAME>' looks like a redacted placeholder — confirm the intended title.
export var main = function () { return (React.createElement(Box, { basis: 'medium', align: 'center' },
    React.createElement(Avatar, { image: '//v2.grommet.io/assets/Wilderpeople_Ricky.jpg', title: '<NAME>', subTitle: 'admin' }))); };
|
<filename>src/main/resources/dbmigrate/hsql/user/V1_5_0_1__account_info.sql<gh_stars>1000+
-------------------------------------------------------------------------------
-- account info
-------------------------------------------------------------------------------
-- Account master table: one row per user account (HSQLDB dialect).
CREATE TABLE ACCOUNT_INFO(
  ID BIGINT NOT NULL,
  CODE VARCHAR(50),
  USERNAME VARCHAR(50),
  TYPE VARCHAR(50),
  DISPLAY_NAME VARCHAR(200),
  STATUS VARCHAR(50),
  PASSWORD_REQUIRED VARCHAR(50),
  LOCKED VARCHAR(50),
  CREATE_TIME DATETIME,
  CLOSE_TIME DATETIME,
  LOGIN_TIME DATETIME,
  NICK_NAME VARCHAR(200),
  DESCRIPTION VARCHAR(200),
  LANGUAGE VARCHAR(50),
  TIMEZONE VARCHAR(50),
  CONSTRAINT PK_ACCOUNT_INFO PRIMARY KEY(ID)
);
-- Column comments (Chinese) describe each field for schema browsers.
COMMENT ON TABLE ACCOUNT_INFO IS '账号信息';
COMMENT ON COLUMN ACCOUNT_INFO.ID IS '主键';
COMMENT ON COLUMN ACCOUNT_INFO.CODE IS '用户标识';
COMMENT ON COLUMN ACCOUNT_INFO.USERNAME IS '账号';
COMMENT ON COLUMN ACCOUNT_INFO.TYPE IS '类型';
COMMENT ON COLUMN ACCOUNT_INFO.DISPLAY_NAME IS '显示名';
COMMENT ON COLUMN ACCOUNT_INFO.STATUS IS '状态';
-- NOTE(review): '<PASSWORD>' below looks like an anonymizer placeholder that
-- replaced the original text (likely "是否需要密码"); confirm before restoring.
COMMENT ON COLUMN ACCOUNT_INFO.PASSWORD_REQUIRED IS '是否<PASSWORD>';
COMMENT ON COLUMN ACCOUNT_INFO.LOCKED IS '是否被锁定';
COMMENT ON COLUMN ACCOUNT_INFO.CREATE_TIME IS '创建时间';
COMMENT ON COLUMN ACCOUNT_INFO.CLOSE_TIME IS '关闭时间';
COMMENT ON COLUMN ACCOUNT_INFO.LOGIN_TIME IS '登录时间';
COMMENT ON COLUMN ACCOUNT_INFO.NICK_NAME IS '昵称';
COMMENT ON COLUMN ACCOUNT_INFO.DESCRIPTION IS '备注';
COMMENT ON COLUMN ACCOUNT_INFO.LANGUAGE IS '语言';
COMMENT ON COLUMN ACCOUNT_INFO.TIMEZONE IS '时区';
|
# vim: set ts=4 sw=4 et:
# Start udevd and wait for device nodes to settle. Sourced as a startup
# fragment: inside a VM/container ($VIRTUALIZATION set) the host manages
# devices, so skip entirely. msg/msg_warn are provided by the caller.
[ -n "$VIRTUALIZATION" ] && return 0
if [ -x /usr/lib/systemd/systemd-udevd ]; then
    _udevd=/usr/lib/systemd/systemd-udevd
# Use separate test invocations joined with ||: the -o operator inside a
# single [ ... ] is obsolescent in POSIX and ambiguous with >4 arguments.
elif [ -x /sbin/udevd ] || [ -x /bin/udevd ] || [ -x /usr/sbin/udevd ]; then
    _udevd=udevd
else
    msg_warn "cannot find udevd!"
fi
if [ -n "${_udevd}" ]; then
    msg "Starting udev and waiting for devices to settle..."
    # Rebuild the hardware database, then replay add events for subsystems
    # and devices so rules apply to everything already present at boot.
    udevadm hwdb --update
    ${_udevd} --daemon
    udevadm trigger --action=add --type=subsystems
    udevadm trigger --action=add --type=devices
    udevadm settle
fi
|
#!/bin/bash
# Runs the toy and full scale-weight-effect experiments, logging output.
# Current directory
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
# NOTE(review): dirname strips the trailing ".." component, so this resolves
# to the PARENT of $DIR (one level up), not two levels up as "../.." suggests.
# Confirm which directory the project root actually is.
PROJECT_ROOT=$( cd "$( dirname "${DIR}/../.." )" && pwd )
# Make modules available for import in Python
export PYTHONPATH=$PROJECT_ROOT:$PYTHONPATH
# Clean results
$DIR/clean.sh
# Generate datasets and get results for dataset-simple
python $DIR/generate_dataset_toy.py 2>&1 | tee -a generate_dataset_toy.log
python $DIR/scale_weight_effect_mlcsc_toy.py 2>&1 | tee -a scale_weight_effect_mlcsc_toy.log
# Generate datasets and get results for dataset-complex
python $DIR/generate_dataset.py 2>&1 | tee -a generate_dataset.log
python $DIR/scale_weight_effect_mlcsc.py 2>&1 | tee -a scale_weight_effect_mlcsc.log
echo 'done.'
import pulsar as psr
def load_ref_system():
    """ Returns d-galactose as found in the IQMol fragment library.
        All credit to https://github.com/nutjunkie/IQmol

        The block below is an XYZ-style geometry: element symbol followed by
        Cartesian x, y, z coordinates (presumably Angstroms — confirm against
        pulsar's make_system conventions).
    """
    return psr.make_system("""
        C          2.9711     -1.0371      0.0910
        C          1.8817      0.0283     -0.1809
        C          0.4656     -0.6153     -0.1140
        C         -0.6589      0.4628     -0.1520
        C         -2.0598     -0.1836      0.0497
        C         -3.1420      0.8635     -0.2174
        O          4.2178     -0.4769      0.3869
        O          2.1286      0.7141     -1.3836
        O         -2.1849     -0.8442      1.2800
        O         -3.9705      1.1899      0.5973
        O          0.2607     -1.4567     -1.2327
        O         -0.5054      1.4221      0.8735
        H          3.0584     -1.7427     -0.7599
        H          2.7376     -1.6229      0.9972
        H          1.9527      0.8559      0.5697
        H          0.3839     -1.1993      0.8358
        H         -0.6421      0.9854     -1.1414
        H         -2.1912     -1.0175     -0.6799
        H         -3.1325      1.3167     -1.2220
        H          4.4523      0.0972     -0.3325
        H          1.9415      0.1195     -2.1006
        H         -2.2515     -0.1817      1.9578
        H          0.6098     -2.3037     -0.9920
        H          0.3953      1.7276      0.8314
        """)
|
/**
* Demo Layout Cover
*/
import React from 'react';
import { Short } from '@demo/includes/SampleContent';
import { Cover, Container } from '@components/Layout';
const LayoutCover = () => (
<div className="d-layout">
<Container fluid="both">
<h1>Cover Demo</h1>
<div className="d-block bg--primary text--white">
<Short />
</div>
</Container>
<br />
<br />
<br />
<Container fluid="both">
<h2><Cover></h2>
<Cover
header={<strong>Cover Header</strong>}
footer={<em>Cover Footer</em>}
className="bg--primary text--white"
>
<Short />
</Cover>
</Container>
<br />
<br />
<br />
<Container fluid="both">
<h2><Cover> (main content only)</h2>
<Cover className="bg--primary text--white">
<Short />
</Cover>
</Container>
<br />
<br />
<br />
<Container fluid="both">
<h2><Cover center></h2>
<Cover
center
header={<strong>Cover Header</strong>}
footer={<em>Cover Footer</em>}
className="bg--primary text--white"
>
<Short />
</Cover>
</Container>
<br />
<br />
<br />
</div>
);
export default LayoutCover;
|
#!/bin/bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script is for configuring kubernetes master and node instances. It is
# uploaded in the manifests tar ball.
# TODO: this script duplicates templating logic from cluster/saltbase/salt
# using sed. It should use an actual template parser on the manifest
# files, or the manifest files should not be templated salt
# Fail fast: abort on command errors, unset variables, and pipeline failures.
set -o errexit
set -o nounset
set -o pipefail
# Pre-create the state directories kubelet (and, on nodes, kube-proxy) expect.
function create-dirs {
  echo "Creating required directories"
  local dir
  for dir in /var/lib/kubelet /etc/kubernetes/manifests; do
    mkdir -p "${dir}"
  done
  # kube-proxy state is only needed when this instance is not the master.
  if [[ "${KUBERNETES_MASTER:-}" == "false" ]]; then
    mkdir -p /var/lib/kube-proxy
  fi
}
# Create directories referenced in the kube-controller-manager manifest for
# bindmounts. This is used under the rkt runtime to work around
# https://github.com/kubernetes/kubernetes/issues/26816
function create-kube-controller-manager-dirs {
  # /etc/{ssl,openssl,pki} brace-expands to /etc/ssl /etc/openssl /etc/pki.
  mkdir -p /etc/srv/kubernetes /var/ssl /etc/{ssl,openssl,pki}
}
# Formats the given device ($1) if needed and mounts it at given mount point
# ($2).
function safe-format-and-mount() {
  # Format the block device ($1) with ext4 only if it has no recognizable
  # ext filesystem yet, then mount it at the mount point ($2), creating the
  # directory if needed. Declared local so the names don't leak into the
  # caller's environment (they were previously implicit globals).
  local device=$1
  local mountpoint=$2
  # tune2fs -l succeeds only on an existing ext2/3/4 filesystem, so a
  # failure is taken to mean the disk is unformatted.
  if ! tune2fs -l "${device}" ; then
    echo "Formatting '${device}'"
    mkfs.ext4 -F -E lazy_itable_init=0,lazy_journal_init=0,discard "${device}"
  fi
  mkdir -p "${mountpoint}"
  echo "Mounting '${device}' at '${mountpoint}'"
  mount -o discard,defaults "${device}" "${mountpoint}"
}
# Local ssds, if present, are mounted at /mnt/disks/ssdN.
function ensure-local-ssds() {
  # Mount each google-local-ssd-N at /mnt/disks/ssdN (formatting if needed).
  # If no SSD exists the glob stays unexpanded, the -e test fails once for the
  # literal pattern, and the "not found" message is printed.
  for ssd in /dev/disk/by-id/google-local-ssd-*; do
    if [ -e "${ssd}" ]; then
      # Extract the trailing index N from the device symlink name.
      ssdnum=`echo ${ssd} | sed -e 's/\/dev\/disk\/by-id\/google-local-ssd-\([0-9]*\)/\1/'`
      ssdmount="/mnt/disks/ssd${ssdnum}/"
      mkdir -p ${ssdmount}
      safe-format-and-mount "${ssd}" ${ssdmount}
      echo "Mounted local SSD $ssd at ${ssdmount}"
      # World-writable so workloads can use the scratch space.
      chmod a+w ${ssdmount}
    else
      echo "No local SSD disks found."
    fi
  done
}
# Finds the master PD device; returns it in MASTER_PD_DEVICE
function find-master-pd {
  # Resolve the master persistent disk device into MASTER_PD_DEVICE; leaves
  # it empty when the google-master-pd symlink does not exist.
  MASTER_PD_DEVICE=""
  if [[ ! -e /dev/disk/by-id/google-master-pd ]]; then
    return
  fi
  # NOTE(review): parses `ls -l` output to get the symlink target (the text
  # after the last space) — fragile if the format changes; readlink would be
  # the robust alternative.
  device_info=$(ls -l /dev/disk/by-id/google-master-pd)
  relative_path=${device_info##* }
  MASTER_PD_DEVICE="/dev/disk/by-id/${relative_path}"
}
# Mounts a persistent disk (formatting if needed) to store the persistent data
# on the master -- etcd's data, a few settings, and security certs/keys/tokens.
# safe-format-and-mount only formats an unformatted disk, and mkdir -p will
# leave a directory be if it already exists.
function mount-master-pd {
  find-master-pd
  # Nothing to do when no master PD is attached (e.g. on nodes).
  if [[ -z "${MASTER_PD_DEVICE:-}" ]]; then
    return
  fi
  echo "Mounting master-pd"
  local -r pd_path="/dev/disk/by-id/google-master-pd"
  local -r mount_point="/mnt/disks/master-pd"
  # Format and mount the disk, create directories on it for all of the master's
  # persistent data, and link them to where they're used.
  mkdir -p "${mount_point}"
  safe-format-and-mount "${pd_path}" "${mount_point}"
  echo "Mounted master-pd '${pd_path}' at '${mount_point}'"
  # NOTE: These locations on the PD store persistent data, so to maintain
  # upgradeability, these locations should not change. If they do, take care
  # to maintain a migration path from these locations to whatever new
  # locations.
  # Contains all the data stored in etcd.
  mkdir -m 700 -p "${mount_point}/var/etcd"
  ln -s -f "${mount_point}/var/etcd" /var/etcd
  mkdir -p /etc/srv
  # Contains the dynamically generated apiserver auth certs and keys.
  mkdir -p "${mount_point}/srv/kubernetes"
  ln -s -f "${mount_point}/srv/kubernetes" /etc/srv/kubernetes
  # Directory for kube-apiserver to store SSH key (if necessary).
  mkdir -p "${mount_point}/srv/sshproxy"
  ln -s -f "${mount_point}/srv/sshproxy" /etc/srv/sshproxy
  # Create the etcd system user on first boot so it can own its data dir.
  if ! id etcd &>/dev/null; then
    useradd -s /sbin/nologin -d /var/etcd etcd
  fi
  chown -R etcd "${mount_point}/var/etcd"
  chgrp -R etcd "${mount_point}/var/etcd"
}
# replace_prefixed_line ensures:
# 1. the specified file exists
# 2. existing lines with the specified ${prefix} are removed
# 3. a new line with the specified ${prefix}${suffix} is appended
# replace_prefixed_line ensures:
# 1. the specified file exists
# 2. existing lines with the specified ${prefix} are removed
# 3. a new line with the specified ${prefix}${suffix} is appended
function replace_prefixed_line {
  local -r file="${1:-}"
  local -r prefix="${2:-}"
  local -r suffix="${3:-}"
  touch "${file}"
  # Pass the prefix via -v so awk treats it as data: the previous inline
  # expansion broke on prefixes containing quotes/backslashes, and its
  # substr(\$0,0,length(prefix)) was off by one (awk strings are 1-indexed,
  # so start=0 yields one character too few) and therefore NEVER matched —
  # stale prefixed lines were never removed and duplicates accumulated.
  awk -v prefix="${prefix}" \
    'substr($0, 1, length(prefix)) != prefix { print }' \
    "${file}" > "${file}.filtered" && mv "${file}.filtered" "${file}"
  echo "${prefix}${suffix}" >> "${file}"
}
# After the first boot and on upgrade, these files exist on the master-pd
# and should never be touched again (except perhaps an additional service
# account, see NB below.)
function create-master-auth {
  # Materializes master credentials and cloud/webhook config files from
  # kube-env environment variables. Each section is guarded so rerunning is
  # safe and only configured features produce files.
  echo "Creating master auth files"
  local -r auth_dir="/etc/srv/kubernetes"
  # Write the CA and apiserver serving cert/key only on first boot
  # (ca.crt absent) and only when all three values are provided.
  if [[ ! -e "${auth_dir}/ca.crt" && ! -z "${CA_CERT:-}" && ! -z "${MASTER_CERT:-}" && ! -z "${MASTER_KEY:-}" ]]; then
    echo "${CA_CERT}" | base64 --decode > "${auth_dir}/ca.crt"
    echo "${MASTER_CERT}" | base64 --decode > "${auth_dir}/server.cert"
    echo "${MASTER_KEY}" | base64 --decode > "${auth_dir}/server.key"
  fi
  # Basic-auth and bearer-token entries; replace_prefixed_line keeps the
  # files idempotent across reruns.
  local -r basic_auth_csv="${auth_dir}/basic_auth.csv"
  if [[ -n "${KUBE_PASSWORD:-}" && -n "${KUBE_USER:-}" ]]; then
    replace_prefixed_line "${basic_auth_csv}" "${KUBE_PASSWORD},${KUBE_USER}," "admin,system:masters"
  fi
  local -r known_tokens_csv="${auth_dir}/known_tokens.csv"
  if [[ -n "${KUBE_BEARER_TOKEN:-}" ]]; then
    replace_prefixed_line "${known_tokens_csv}" "${KUBE_BEARER_TOKEN}," "admin,admin,system:masters"
  fi
  if [[ -n "${KUBE_CONTROLLER_MANAGER_TOKEN:-}" ]]; then
    replace_prefixed_line "${known_tokens_csv}" "${KUBE_CONTROLLER_MANAGER_TOKEN}," "system:kube-controller-manager,uid:system:kube-controller-manager"
  fi
  if [[ -n "${KUBE_SCHEDULER_TOKEN:-}" ]]; then
    replace_prefixed_line "${known_tokens_csv}" "${KUBE_SCHEDULER_TOKEN}," "system:kube-scheduler,uid:system:kube-scheduler"
  fi
  if [[ -n "${KUBELET_TOKEN:-}" ]]; then
    replace_prefixed_line "${known_tokens_csv}" "${KUBELET_TOKEN}," "kubelet,uid:kubelet,system:nodes"
  fi
  if [[ -n "${KUBE_PROXY_TOKEN:-}" ]]; then
    replace_prefixed_line "${known_tokens_csv}" "${KUBE_PROXY_TOKEN}," "system:kube-proxy,uid:kube_proxy"
  fi
  # Build /etc/gce.conf section by section; it is deleted at the end if no
  # section actually needed it.
  local use_cloud_config="false"
  cat <<EOF >/etc/gce.conf
[global]
EOF
  if [[ -n "${PROJECT_ID:-}" && -n "${TOKEN_URL:-}" && -n "${TOKEN_BODY:-}" && -n "${NODE_NETWORK:-}" ]]; then
    use_cloud_config="true"
    cat <<EOF >>/etc/gce.conf
token-url = ${TOKEN_URL}
token-body = ${TOKEN_BODY}
project-id = ${PROJECT_ID}
network-name = ${NODE_NETWORK}
EOF
    if [[ -n "${NODE_SUBNETWORK:-}" ]]; then
      cat <<EOF >>/etc/gce.conf
subnetwork-name = ${NODE_SUBNETWORK}
EOF
    fi
  fi
  if [[ -n "${NODE_INSTANCE_PREFIX:-}" ]]; then
    use_cloud_config="true"
    # Fall back to the instance prefix when no explicit node tags are given.
    if [[ -n "${NODE_TAGS:-}" ]]; then
      local -r node_tags="${NODE_TAGS}"
    else
      local -r node_tags="${NODE_INSTANCE_PREFIX}"
    fi
    cat <<EOF >>/etc/gce.conf
node-tags = ${node_tags}
node-instance-prefix = ${NODE_INSTANCE_PREFIX}
EOF
  fi
  if [[ -n "${MULTIZONE:-}" ]]; then
    use_cloud_config="true"
    cat <<EOF >>/etc/gce.conf
multizone = ${MULTIZONE}
EOF
  fi
  if [[ "${use_cloud_config}" != "true" ]]; then
    rm -f /etc/gce.conf
  fi
  # Optional webhook kubeconfigs for authentication, authorization and
  # image review, each pointing kube-apiserver at an external GCP endpoint.
  if [[ -n "${GCP_AUTHN_URL:-}" ]]; then
    cat <<EOF >/etc/gcp_authn.config
clusters:
  - name: gcp-authentication-server
    cluster:
      server: ${GCP_AUTHN_URL}
users:
  - name: kube-apiserver
    user:
      auth-provider:
        name: gcp
current-context: webhook
contexts:
- context:
    cluster: gcp-authentication-server
    user: kube-apiserver
  name: webhook
EOF
  fi
  if [[ -n "${GCP_AUTHZ_URL:-}" ]]; then
    cat <<EOF >/etc/gcp_authz.config
clusters:
  - name: gcp-authorization-server
    cluster:
      server: ${GCP_AUTHZ_URL}
users:
  - name: kube-apiserver
    user:
      auth-provider:
        name: gcp
current-context: webhook
contexts:
- context:
    cluster: gcp-authorization-server
    user: kube-apiserver
  name: webhook
EOF
  fi
  if [[ -n "${GCP_IMAGE_VERIFICATION_URL:-}" ]]; then
    # This is the config file for the image review webhook.
    cat <<EOF >/etc/gcp_image_review.config
clusters:
  - name: gcp-image-review-server
    cluster:
      server: ${GCP_IMAGE_VERIFICATION_URL}
users:
  - name: kube-apiserver
    user:
      auth-provider:
        name: gcp
current-context: webhook
contexts:
- context:
    cluster: gcp-image-review-server
    user: kube-apiserver
  name: webhook
EOF
    # This is the config for the image review admission controller.
    cat <<EOF >/etc/admission_controller.config
imagePolicy:
  kubeConfigFile: /etc/gcp_image_review.config
  allowTTL: 30
  denyTTL: 30
  retryBackoff: 500
  defaultAllow: true
EOF
  fi
}
function create-kubelet-kubeconfig {
  # Write the kubelet's client kubeconfig using KUBELET_CERT/KUBELET_KEY,
  # trusting KUBELET_CA_CERT (falling back to the cluster CA_CERT).
  echo "Creating kubelet kubeconfig file"
  if [[ -z "${KUBELET_CA_CERT:-}" ]]; then
    KUBELET_CA_CERT="${CA_CERT}"
  fi
  cat <<EOF >/var/lib/kubelet/kubeconfig
apiVersion: v1
kind: Config
users:
- name: kubelet
  user:
    client-certificate-data: ${KUBELET_CERT}
    client-key-data: ${KUBELET_KEY}
clusters:
- name: local
  cluster:
    certificate-authority-data: ${KUBELET_CA_CERT}
contexts:
- context:
    cluster: local
    user: kubelet
  name: service-account-context
current-context: service-account-context
EOF
}
# Uses KUBELET_CA_CERT (falling back to CA_CERT), KUBELET_CERT, and KUBELET_KEY
# to generate a kubeconfig file for the kubelet to securely connect to the apiserver.
# Set REGISTER_MASTER_KUBELET to true if kubelet on the master node
# should register to the apiserver.
function create-master-kubelet-auth {
  # Only configure the kubelet on the master if the required variables are
  # set in the environment.
  if [[ -n "${KUBELET_APISERVER:-}" && -n "${KUBELET_CERT:-}" && -n "${KUBELET_KEY:-}" ]]; then
    # Flag read later by start-kubelet to register this kubelet with the API.
    REGISTER_MASTER_KUBELET="true"
    create-kubelet-kubeconfig
  fi
}
function create-kubeproxy-kubeconfig {
  # kube-proxy authenticates with a bearer token (KUBE_PROXY_TOKEN) and
  # verifies the apiserver against the cluster CA.
  echo "Creating kube-proxy kubeconfig file"
  cat <<EOF >/var/lib/kube-proxy/kubeconfig
apiVersion: v1
kind: Config
users:
- name: kube-proxy
  user:
    token: ${KUBE_PROXY_TOKEN}
clusters:
- name: local
  cluster:
    certificate-authority-data: ${CA_CERT}
contexts:
- context:
    cluster: local
    user: kube-proxy
  name: service-account-context
current-context: service-account-context
EOF
}
function create-kubecontrollermanager-kubeconfig {
  # Token-based kubeconfig pointing at the local apiserver. TLS verification
  # is skipped because the connection never leaves the master (localhost).
  echo "Creating kube-controller-manager kubeconfig file"
  mkdir -p /etc/srv/kubernetes/kube-controller-manager
  cat <<EOF >/etc/srv/kubernetes/kube-controller-manager/kubeconfig
apiVersion: v1
kind: Config
users:
- name: kube-controller-manager
  user:
    token: ${KUBE_CONTROLLER_MANAGER_TOKEN}
clusters:
- name: local
  cluster:
    insecure-skip-tls-verify: true
    server: https://localhost:443
contexts:
- context:
    cluster: local
    user: kube-controller-manager
  name: service-account-context
current-context: service-account-context
EOF
}
function create-kubescheduler-kubeconfig {
  # Token-based kubeconfig pointing at the local apiserver; same
  # localhost-only rationale as the controller-manager config above it.
  echo "Creating kube-scheduler kubeconfig file"
  mkdir -p /etc/srv/kubernetes/kube-scheduler
  cat <<EOF >/etc/srv/kubernetes/kube-scheduler/kubeconfig
apiVersion: v1
kind: Config
users:
- name: kube-scheduler
  user:
    token: ${KUBE_SCHEDULER_TOKEN}
clusters:
- name: local
  cluster:
    insecure-skip-tls-verify: true
    server: https://localhost:443
contexts:
- context:
    cluster: local
    user: kube-scheduler
  name: kube-scheduler
current-context: kube-scheduler
EOF
}
function create-master-etcd-auth {
  # Write etcd peer TLS material when all three values are supplied.
  # Note the asymmetry: CA cert and peer cert are base64+gzip encoded
  # (hence gunzip), while the peer key is plain base64.
  if [[ -n "${ETCD_CA_CERT:-}" && -n "${ETCD_PEER_KEY:-}" && -n "${ETCD_PEER_CERT:-}" ]]; then
    local -r auth_dir="/etc/srv/kubernetes"
    echo "${ETCD_CA_CERT}" | base64 --decode | gunzip > "${auth_dir}/etcd-ca.crt"
    echo "${ETCD_PEER_KEY}" | base64 --decode > "${auth_dir}/etcd-peer.key"
    echo "${ETCD_PEER_CERT}" | base64 --decode | gunzip > "${auth_dir}/etcd-peer.crt"
  fi
}
function configure-docker-daemon {
  # Assemble DOCKER_OPTS from the environment, write them into a systemd
  # drop-in for docker.service, and restart docker to apply them.
  echo "Configuring the Docker daemon"
  # Kubernetes owns iptables/IP-masquerade setup, so both are disabled here.
  local docker_opts="-p /var/run/docker.pid --iptables=false --ip-masq=false"
  if [[ "${TEST_CLUSTER:-}" == "true" ]]; then
    docker_opts+=" --log-level=debug"
  else
    docker_opts+=" --log-level=warn"
  fi
  # (removed the unused local use_net_plugin flag — it was set in both
  # branches below but never read)
  if [[ "${NETWORK_PROVIDER:-}" == "kubenet" || "${NETWORK_PROVIDER:-}" == "cni" ]]; then
    # set docker0 cidr to private ip address range to avoid conflict with cbr0 cidr range
    docker_opts+=" --bip=169.254.123.1/24"
  else
    docker_opts+=" --bridge=cbr0"
  fi
  # Decide whether to enable a docker registry mirror. This is taken from
  # the "kube-env" metadata value.
  if [[ -n "${DOCKER_REGISTRY_MIRROR_URL:-}" ]]; then
    echo "Enable docker registry mirror at: ${DOCKER_REGISTRY_MIRROR_URL}"
    docker_opts+=" --registry-mirror=${DOCKER_REGISTRY_MIRROR_URL}"
  fi
  mkdir -p /etc/systemd/system/docker.service.d/
  local kubernetes_conf_dropin="/etc/systemd/system/docker.service.d/00_kubelet.conf"
  cat > "${kubernetes_conf_dropin}" <<EOF
[Service]
Environment="DOCKER_OPTS=${docker_opts} ${EXTRA_DOCKER_OPTS:-}"
EOF
  # Always restart to get the cbr0 change
  echo "Docker daemon options updated. Restarting docker..."
  systemctl daemon-reload
  systemctl restart docker
}
# A helper function for loading a docker image. It keeps trying up to 5 times.
#
# $1: Full path of the docker image
# A helper function for loading a docker image. It keeps trying up to 5 times.
#
# $1: Full path of the docker image
function try-load-docker-image {
  local -r img=$1
  echo "Try to load docker image file ${img}"
  # Temporarily turn off errexit, because we don't want to exit on first failure.
  set +e
  local -r max_attempts=5
  local -i attempt_num=1
  if [[ "${CONTAINER_RUNTIME:-}" == "rkt" ]]; then
    for attempt_num in $(seq 1 "${max_attempts}"); do
      local aci_tmpdir="$(mktemp -t -d docker2aci.XXXXX)"
      (cd "${aci_tmpdir}"; timeout 40 "${DOCKER2ACI_BIN}" "$1")
      local aci_success=$?
      timeout 40 "${RKT_BIN}" fetch --insecure-options=image "${aci_tmpdir}"/*.aci
      local fetch_success=$?
      rm -f "${aci_tmpdir}"/*.aci
      rmdir "${aci_tmpdir}"
      # Compare the exit codes arithmetically. The previous
      # [[ ${fetch_success} && ${aci_success} ]] only tested that the
      # variables were non-empty strings, which is always true, so failures
      # were reported as success.
      if (( fetch_success == 0 && aci_success == 0 )); then
        echo "rkt: Loaded ${img}"
        break
      fi
      # Fixed: this previously read the undefined ${attempt}, so the retry
      # limit was never reached and the loop always ran all 5 iterations.
      if [[ "${attempt_num}" == "${max_attempts}" ]]; then
        echo "rkt: Failed to load image file ${img} after ${max_attempts} retries."
        exit 1
      fi
      sleep 5
    done
  else
    until timeout 30 docker load -i "${img}"; do
      if [[ "${attempt_num}" == "${max_attempts}" ]]; then
        echo "Fail to load docker image file ${img} after ${max_attempts} retries."
        exit 1
      else
        attempt_num=$((attempt_num+1))
        sleep 5
      fi
    done
  fi
  # Re-enable errexit.
  set -e
}
# Loads kube-system docker images. It is better to do it before starting kubelet,
# as kubelet will restart docker daemon, which may interfere with loading images.
# Loads kube-system docker images. It is better to do it before starting kubelet,
# as kubelet will restart docker daemon, which may interfere with loading images.
function load-docker-images {
  echo "Start loading kube-system docker images"
  local -r img_dir="${KUBE_HOME}/kube-docker-files"
  # Masters run the control-plane images; nodes only need kube-proxy.
  local component
  local components="kube-proxy"
  if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then
    components="kube-apiserver kube-controller-manager kube-scheduler"
  fi
  for component in ${components}; do
    try-load-docker-image "${img_dir}/${component}.tar"
  done
}
# This function assembles the kubelet systemd service file and starts it
# using systemctl.
function start-kubelet {
  # Builds the kubelet command-line flags from kube-env variables, writes
  # them to an environment file plus a systemd unit, and starts the service.
  echo "Start kubelet"
  local kubelet_bin="${KUBE_HOME}/bin/kubelet"
  local -r version="$("${kubelet_bin}" --version=true | cut -f2 -d " ")"
  echo "Using kubelet binary at ${kubelet_bin}"
  # Base flags shared by master and nodes.
  local flags="${KUBELET_TEST_LOG_LEVEL:-"--v=2"} ${KUBELET_TEST_ARGS:-}"
  flags+=" --allow-privileged=true"
  flags+=" --cgroup-root=/"
  flags+=" --cloud-provider=gce"
  flags+=" --cluster-dns=${DNS_SERVER_IP}"
  flags+=" --cluster-domain=${DNS_DOMAIN}"
  flags+=" --pod-manifest-path=/etc/kubernetes/manifests"
  flags+=" --experimental-check-node-capabilities-before-mount=true"
  if [[ -n "${KUBELET_PORT:-}" ]]; then
    flags+=" --port=${KUBELET_PORT}"
  fi
  if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then
    # Master kubelet: debugging handlers off; optionally register with the
    # apiserver as unschedulable (REGISTER_MASTER_KUBELET set by
    # create-master-kubelet-auth).
    flags+=" --enable-debugging-handlers=false"
    flags+=" --hairpin-mode=none"
    if [[ "${REGISTER_MASTER_KUBELET:-false}" == "true" ]]; then
      flags+=" --api-servers=https://${KUBELET_APISERVER}"
      flags+=" --register-schedulable=false"
    else
      # Standalone mode (not widely used?)
      flags+=" --pod-cidr=${MASTER_IP_RANGE}"
    fi
  else # For nodes
    flags+=" --enable-debugging-handlers=true"
    flags+=" --api-servers=https://${KUBERNETES_MASTER_NAME}"
    # Only pass through hairpin modes the kubelet recognizes.
    if [[ "${HAIRPIN_MODE:-}" == "promiscuous-bridge" ]] || \
       [[ "${HAIRPIN_MODE:-}" == "hairpin-veth" ]] || \
       [[ "${HAIRPIN_MODE:-}" == "none" ]]; then
      flags+=" --hairpin-mode=${HAIRPIN_MODE}"
    fi
  fi
  # Network plugin
  if [[ -n "${NETWORK_PROVIDER:-}" ]]; then
    if [[ "${NETWORK_PROVIDER:-}" == "cni" ]]; then
      flags+=" --cni-bin-dir=/opt/kubernetes/bin"
    else
      flags+=" --network-plugin-dir=/opt/kubernetes/bin"
    fi
    flags+=" --network-plugin=${NETWORK_PROVIDER}"
  fi
  if [[ -n "${NON_MASQUERADE_CIDR:-}" ]]; then
    flags+=" --non-masquerade-cidr=${NON_MASQUERADE_CIDR}"
  fi
  if [[ "${ENABLE_MANIFEST_URL:-}" == "true" ]]; then
    flags+=" --manifest-url=${MANIFEST_URL}"
    flags+=" --manifest-url-header=${MANIFEST_URL_HEADER}"
  fi
  if [[ -n "${ENABLE_CUSTOM_METRICS:-}" ]]; then
    flags+=" --enable-custom-metrics=${ENABLE_CUSTOM_METRICS}"
  fi
  if [[ -n "${NODE_LABELS:-}" ]]; then
    flags+=" --node-labels=${NODE_LABELS}"
  fi
  if [[ -n "${EVICTION_HARD:-}" ]]; then
    flags+=" --eviction-hard=${EVICTION_HARD}"
  fi
  if [[ -n "${FEATURE_GATES:-}" ]]; then
    flags+=" --feature-gates=${FEATURE_GATES}"
  fi
  # Alternate container runtime (rkt) support.
  if [[ -n "${CONTAINER_RUNTIME:-}" ]]; then
    flags+=" --container-runtime=${CONTAINER_RUNTIME}"
    flags+=" --rkt-path=${KUBE_HOME}/bin/rkt"
    flags+=" --rkt-stage1-image=${RKT_STAGE1_IMAGE}"
  fi
  # Persist the flags where the systemd unit's EnvironmentFile can read them.
  local -r kubelet_env_file="/etc/kubelet-env"
  echo "KUBELET_OPTS=\"${flags}\"" > "${kubelet_env_file}"
  # Write the systemd service file for kubelet.
  cat <<EOF >/etc/systemd/system/kubelet.service
[Unit]
Description=Kubernetes kubelet
Requires=network-online.target
After=network-online.target
[Service]
Restart=always
RestartSec=10
EnvironmentFile=${kubelet_env_file}
ExecStart=${kubelet_bin} \$KUBELET_OPTS
[Install]
WantedBy=multi-user.target
EOF
  # Flush iptables nat table
  iptables -t nat -F || true
  systemctl start kubelet.service
}
# Create the log file and set its properties.
#
# $1 is the file to create.
# Create the log file and set its properties.
#
# $1 is the file to create.
function prepare-log-file {
  # Quote the path: the previous unquoted $1 was subject to word splitting
  # and glob expansion.
  local -r logfile="$1"
  touch "${logfile}"
  # World-readable so log collectors can tail it; owned by root.
  chmod 644 "${logfile}"
  chown root:root "${logfile}"
}
# Starts the kube-proxy static pod.
#
# Prepares the log file, substitutes the template variables in the
# kube-proxy manifest, and copies it to /etc/kubernetes/manifests where
# the kubelet picks it up.
#
# Assumed vars:
#   KUBE_HOME
#   KUBERNETES_MASTER_NAME
function start-kube-proxy {
  echo "Start kube-proxy pod"
  prepare-log-file /var/log/kube-proxy.log
  local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/kube-proxy.manifest"
  remove-salt-config-comments "${src_file}"
  local -r kubeconfig="--kubeconfig=/var/lib/kube-proxy/kubeconfig"
  # Default image registry; overridable via KUBE_DOCKER_REGISTRY.
  local kube_docker_registry="gcr.io/google_containers"
  if [[ -n "${KUBE_DOCKER_REGISTRY:-}" ]]; then
    kube_docker_registry=${KUBE_DOCKER_REGISTRY}
  fi
  local -r kube_proxy_docker_tag=$(cat /opt/kubernetes/kube-docker-files/kube-proxy.docker_tag)
  local api_servers="--master=https://${KUBERNETES_MASTER_NAME}"
  # Assemble the kube-proxy command-line flags.
  local params="${KUBEPROXY_TEST_LOG_LEVEL:-"--v=2"}"
  if [[ -n "${FEATURE_GATES:-}" ]]; then
    params+=" --feature-gates=${FEATURE_GATES}"
  fi
  params+=" --iptables-sync-period=1m --iptables-min-sync-period=10s"
  if [[ -n "${KUBEPROXY_TEST_ARGS:-}" ]]; then
    params+=" ${KUBEPROXY_TEST_ARGS}"
  fi
  local container_env=""
  if [[ -n "${ENABLE_CACHE_MUTATION_DETECTOR:-}" ]]; then
    container_env="env:\n - name: KUBE_CACHE_MUTATION_DETECTOR\n value: \"${ENABLE_CACHE_MUTATION_DETECTOR}\""
  fi
  # Fill in the manifest template. FIX: quote "${src_file}" in each sed
  # invocation (it was unquoted), matching the sibling functions and
  # guarding against word splitting on paths.
  sed -i -e "s@{{kubeconfig}}@${kubeconfig}@g" "${src_file}"
  sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${kube_docker_registry}@g" "${src_file}"
  sed -i -e "s@{{pillar\['kube-proxy_docker_tag'\]}}@${kube_proxy_docker_tag}@g" "${src_file}"
  sed -i -e "s@{{params}}@${params}@g" "${src_file}"
  sed -i -e "s@{{container_env}}@${container_env}@g" "${src_file}"
  sed -i -e "s@{{ cpurequest }}@100m@g" "${src_file}"
  sed -i -e "s@{{api_servers_with_port}}@${api_servers}@g" "${src_file}"
  if [[ -n "${CLUSTER_IP_RANGE:-}" ]]; then
    sed -i -e "s@{{cluster_cidr}}@--cluster-cidr=${CLUSTER_IP_RANGE}@g" "${src_file}"
  fi
  if [[ "${CONTAINER_RUNTIME:-}" == "rkt" ]]; then
    # Work arounds for https://github.com/coreos/rkt/issues/3245 and https://github.com/coreos/rkt/issues/3264
    # This is an incredibly hacky workaround. It's fragile too. If the kube-proxy command changes too much, this breaks
    # TODO, this could be done much better in many other places, such as an
    # init script within the container, or even within kube-proxy's code.
    local extra_workaround_cmd="ln -sf /proc/self/mounts /etc/mtab; \
mount -o remount,rw /proc; \
mount -o remount,rw /proc/sys; \
mount -o remount,rw /sys; "
    sed -i -e "s@-\\s\\+kube-proxy@- ${extra_workaround_cmd} kube-proxy@g" "${src_file}"
  fi
  cp "${src_file}" /etc/kubernetes/manifests
}
# Replaces the variables in the etcd manifest file with the real values, and then
# copy the file to the manifest dir
# $1: value for variable 'suffix'
# $2: value for variable 'port'
# $3: value for variable 'server_port'
# $4: value for variable 'cpulimit'
# $5: pod name, which should be either etcd or etcd-events
function prepare-etcd-manifest {
  local host_name=$(hostname -s)
  local etcd_cluster=""
  local cluster_state="new"
  local etcd_protocol="http"
  local etcd_creds=""
  # Enable TLS between etcd peers when all the peer cert material exists.
  if [[ -n "${ETCD_CA_KEY:-}" && -n "${ETCD_CA_CERT:-}" && -n "${ETCD_PEER_KEY:-}" && -n "${ETCD_PEER_CERT:-}" ]]; then
    etcd_creds=" --peer-trusted-ca-file /etc/srv/kubernetes/etcd-ca.crt --peer-cert-file /etc/srv/kubernetes/etcd-peer.crt --peer-key-file /etc/srv/kubernetes/etcd-peer.key -peer-client-cert-auth "
    etcd_protocol="https"
  fi
  # Build the member list; more than one member means this node joins an
  # "existing" cluster rather than bootstrapping a "new" one.
  for host in $(echo "${INITIAL_ETCD_CLUSTER:-${host_name}}" | tr "," "\n"); do
    etcd_host="etcd-${host}=${etcd_protocol}://${host}:$3"
    if [[ -n "${etcd_cluster}" ]]; then
      etcd_cluster+=","
      cluster_state="existing"
    fi
    etcd_cluster+="${etcd_host}"
  done
  local -r temp_file="/tmp/$5"
  cp "${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/etcd.manifest" "${temp_file}"
  remove-salt-config-comments "${temp_file}"
  sed -i -e "s@{{ *suffix *}}@$1@g" "${temp_file}"
  sed -i -e "s@{{ *port *}}@$2@g" "${temp_file}"
  sed -i -e "s@{{ *server_port *}}@$3@g" "${temp_file}"
  sed -i -e "s@{{ *cpulimit *}}@\"$4\"@g" "${temp_file}"
  sed -i -e "s@{{ *hostname *}}@$host_name@g" "${temp_file}"
  sed -i -e "s@{{ *srv_kube_path *}}@/etc/srv/kubernetes@g" "${temp_file}"
  sed -i -e "s@{{ *etcd_cluster *}}@$etcd_cluster@g" "${temp_file}"
  # Get default storage backend from manifest file.
  # FIX: grep reads the file directly instead of the old `cat | grep` pipeline.
  local -r default_storage_backend=$(grep -o "{{ *pillar\.get('storage_backend', '\(.*\)') *}}" "${temp_file}" | \
    sed -e "s@{{ *pillar\.get('storage_backend', '\(.*\)') *}}@\1@g")
  if [[ -n "${STORAGE_BACKEND:-}" ]]; then
    sed -i -e "s@{{ *pillar\.get('storage_backend', '\(.*\)') *}}@${STORAGE_BACKEND}@g" "${temp_file}"
  else
    sed -i -e "s@{{ *pillar\.get('storage_backend', '\(.*\)') *}}@\1@g" "${temp_file}"
  fi
  # etcd3 gets an explicit backend quota; otherwise the placeholder is removed.
  if [[ "${STORAGE_BACKEND:-${default_storage_backend}}" == "etcd3" ]]; then
    sed -i -e "s@{{ *quota_bytes *}}@--quota-backend-bytes=4294967296@g" "${temp_file}"
  else
    sed -i -e "s@{{ *quota_bytes *}}@@g" "${temp_file}"
  fi
  sed -i -e "s@{{ *cluster_state *}}@$cluster_state@g" "${temp_file}"
  if [[ -n "${ETCD_IMAGE:-}" ]]; then
    sed -i -e "s@{{ *pillar\.get('etcd_docker_tag', '\(.*\)') *}}@${ETCD_IMAGE}@g" "${temp_file}"
  else
    sed -i -e "s@{{ *pillar\.get('etcd_docker_tag', '\(.*\)') *}}@\1@g" "${temp_file}"
  fi
  sed -i -e "s@{{ *etcd_protocol *}}@$etcd_protocol@g" "${temp_file}"
  sed -i -e "s@{{ *etcd_creds *}}@$etcd_creds@g" "${temp_file}"
  if [[ -n "${ETCD_VERSION:-}" ]]; then
    sed -i -e "s@{{ *pillar\.get('etcd_version', '\(.*\)') *}}@${ETCD_VERSION}@g" "${temp_file}"
  else
    sed -i -e "s@{{ *pillar\.get('etcd_version', '\(.*\)') *}}@\1@g" "${temp_file}"
  fi
  # Replace the volume host path.
  sed -i -e "s@/mnt/master-pd/var/etcd@/mnt/disks/master-pd/var/etcd@g" "${temp_file}"
  mv "${temp_file}" /etc/kubernetes/manifests
}
# Installs the etcd-empty-dir-cleanup static pod manifest so the kubelet
# starts it.
function start-etcd-empty-dir-cleanup-pod {
  local -r manifest="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/etcd-empty-dir-cleanup/etcd-empty-dir-cleanup.yaml"
  cp "${manifest}" "/etc/kubernetes/manifests"
}
# Starts etcd server pod (and etcd-events pod if needed).
# More specifically, it prepares dirs and files, sets the variable value
# in the manifests, and copies them to /etc/kubernetes/manifests.
function start-etcd-servers {
  echo "Start etcd pods"
  # Remove any distro-provided etcd configuration/units so only the
  # static-pod etcd instances run. `rm -rf`/`rm -f` are no-ops when the
  # path is absent, so the old existence checks were redundant (and the
  # old `-d` test skipped a stale /etc/etcd that is a plain file).
  rm -rf /etc/etcd
  rm -f /etc/default/etcd
  rm -f /etc/systemd/system/etcd.service
  rm -f /etc/init.d/etcd
  # Main etcd: client port 2379, peer port 2380.
  prepare-log-file /var/log/etcd.log
  prepare-etcd-manifest "" "2379" "2380" "200m" "etcd.manifest"
  # Events etcd: client port 4002, peer port 2381.
  prepare-log-file /var/log/etcd-events.log
  prepare-etcd-manifest "-events" "4002" "2381" "100m" "etcd-events.manifest"
}
# Computes variables shared by the manifests of several kube-master
# components, from the environment and the presence of /etc/gce.conf:
#   CLOUD_CONFIG_OPT
#   CLOUD_CONFIG_VOLUME
#   CLOUD_CONFIG_MOUNT
#   DOCKER_REGISTRY
function compute-master-manifest-variables {
  if [[ -f /etc/gce.conf ]]; then
    # A GCE cloud config exists: pass it to the components and mount it
    # read-only into their pods.
    CLOUD_CONFIG_OPT="--cloud-config=/etc/gce.conf"
    CLOUD_CONFIG_VOLUME="{\"name\": \"cloudconfigmount\",\"hostPath\": {\"path\": \"/etc/gce.conf\"}},"
    CLOUD_CONFIG_MOUNT="{\"name\": \"cloudconfigmount\",\"mountPath\": \"/etc/gce.conf\", \"readOnly\": true},"
  else
    CLOUD_CONFIG_OPT=""
    CLOUD_CONFIG_VOLUME=""
    CLOUD_CONFIG_MOUNT=""
  fi
  # KUBE_DOCKER_REGISTRY overrides the default registry when non-empty.
  DOCKER_REGISTRY="${KUBE_DOCKER_REGISTRY:-gcr.io/google_containers}"
}
# A helper function for removing salt configuration and comments from a file.
# This is mainly for preparing a manifest file.
#
# $1: Full path of the file to manipulate
function remove-salt-config-comments {
  # One in-place sed pass with two scripts:
  #  1. drop salt directives ({% ... %} / {# ... #});
  #  2. drop plain '#' comment lines.
  # FIX: quote "$1" so paths with spaces work. NOTE: the character class
  # '[ |\t]' also matches a literal '|'; kept byte-identical so behavior
  # on existing inputs is unchanged.
  sed -i -e "/^[ |\t]*{[#|%]/d" -e "/^[ |\t]*#/d" "$1"
}
# Starts kubernetes apiserver.
# It prepares the log file, loads the docker image, calculates variables, sets them
# in the manifest file, and then copies the manifest file to /etc/kubernetes/manifests.
#
# Assumed vars (which are calculated in function compute-master-manifest-variables)
#   CLOUD_CONFIG_OPT
#   CLOUD_CONFIG_VOLUME
#   CLOUD_CONFIG_MOUNT
#   DOCKER_REGISTRY
function start-kube-apiserver {
  echo "Start kubernetes api-server"
  prepare-log-file /var/log/kube-apiserver.log
  prepare-log-file /var/log/kube-apiserver-audit.log
  # Calculate variables and assemble the command line.
  # Base flags: test log level/args (if any) plus the cloud-config option.
  local params="${API_SERVER_TEST_LOG_LEVEL:-"--v=2"} ${APISERVER_TEST_ARGS:-} ${CLOUD_CONFIG_OPT}"
  params+=" --address=127.0.0.1"
  params+=" --allow-privileged=true"
  params+=" --cloud-provider=gce"
  params+=" --client-ca-file=/etc/srv/kubernetes/ca.crt"
  # Main etcd on 2379; events are split out to a second etcd on 4002.
  params+=" --etcd-servers=http://127.0.0.1:2379"
  params+=" --etcd-servers-overrides=/events#http://127.0.0.1:4002"
  params+=" --secure-port=443"
  params+=" --tls-cert-file=/etc/srv/kubernetes/server.cert"
  params+=" --tls-private-key-file=/etc/srv/kubernetes/server.key"
  params+=" --token-auth-file=/etc/srv/kubernetes/known_tokens.csv"
  # Basic auth is only enabled when both a user and a password are provided.
  if [[ -n "${KUBE_PASSWORD:-}" && -n "${KUBE_USER:-}" ]]; then
    params+=" --basic-auth-file=/etc/srv/kubernetes/basic_auth.csv"
  fi
  if [[ -n "${STORAGE_BACKEND:-}" ]]; then
    params+=" --storage-backend=${STORAGE_BACKEND}"
  fi
  if [[ -n "${STORAGE_MEDIA_TYPE:-}" ]]; then
    params+=" --storage-media-type=${STORAGE_MEDIA_TYPE}"
  fi
  if [[ -n "${ENABLE_GARBAGE_COLLECTOR:-}" ]]; then
    params+=" --enable-garbage-collector=${ENABLE_GARBAGE_COLLECTOR}"
  fi
  if [[ -n "${NUM_NODES:-}" ]]; then
    # If the cluster is large, increase max-requests-inflight limit in apiserver.
    if [[ "${NUM_NODES}" -ge 1000 ]]; then
      params+=" --max-requests-inflight=1500 --max-mutating-requests-inflight=500"
    fi
    # Set amount of memory available for apiserver based on number of nodes.
    # TODO: Once we start setting proper requests and limits for apiserver
    # we should reuse the same logic here instead of current heuristic.
    params+=" --target-ram-mb=$((${NUM_NODES} * 60))"
  fi
  if [[ -n "${SERVICE_CLUSTER_IP_RANGE:-}" ]]; then
    params+=" --service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}"
  fi
  if [[ -n "${ETCD_QUORUM_READ:-}" ]]; then
    params+=" --etcd-quorum-read=${ETCD_QUORUM_READ}"
  fi
  if [[ "${ENABLE_APISERVER_BASIC_AUDIT:-}" == "true" ]]; then
    # We currently only support enabling with a fixed path and with built-in log
    # rotation "disabled" (large value) so it behaves like kube-apiserver.log.
    # External log rotation should be set up the same as for kube-apiserver.log.
    params+=" --audit-log-path=/var/log/kube-apiserver-audit.log"
    params+=" --audit-log-maxage=0"
    params+=" --audit-log-maxbackup=0"
    # Lumberjack doesn't offer any way to disable size-based rotation. It also
    # has an in-memory counter that doesn't notice if you truncate the file.
    # 2000000000 (in MiB) is a large number that fits in 31 bits. If the log
    # grows at 10MiB/s (~30K QPS), it will rotate after ~6 years if apiserver
    # never restarts. Please manually restart apiserver before this time.
    params+=" --audit-log-maxsize=2000000000"
  fi
  if [[ "${ENABLE_APISERVER_LOGS_HANDLER:-}" == "false" ]]; then
    params+=" --enable-logs-handler=false"
  fi
  # Optional volume/mount JSON fragments, filled in below when the
  # corresponding features are enabled; empty strings otherwise.
  local admission_controller_config_mount=""
  local admission_controller_config_volume=""
  local image_policy_webhook_config_mount=""
  local image_policy_webhook_config_volume=""
  if [[ -n "${ADMISSION_CONTROL:-}" ]]; then
    params+=" --admission-control=${ADMISSION_CONTROL}"
    if [[ ${ADMISSION_CONTROL} == *"ImagePolicyWebhook"* ]]; then
      params+=" --admission-control-config-file=/etc/admission_controller.config"
      # Mount the file to configure admission controllers if ImagePolicyWebhook is set.
      admission_controller_config_mount="{\"name\": \"admissioncontrollerconfigmount\",\"mountPath\": \"/etc/admission_controller.config\", \"readOnly\": false},"
      admission_controller_config_volume="{\"name\": \"admissioncontrollerconfigmount\",\"hostPath\": {\"path\": \"/etc/admission_controller.config\"}},"
      # Mount the file to configure the ImagePolicyWebhook's webhook.
      image_policy_webhook_config_mount="{\"name\": \"imagepolicywebhookconfigmount\",\"mountPath\": \"/etc/gcp_image_review.config\", \"readOnly\": false},"
      image_policy_webhook_config_volume="{\"name\": \"imagepolicywebhookconfigmount\",\"hostPath\": {\"path\": \"/etc/gcp_image_review.config\"}},"
    fi
  fi
  if [[ -n "${KUBE_APISERVER_REQUEST_TIMEOUT:-}" ]]; then
    params+=" --min-request-timeout=${KUBE_APISERVER_REQUEST_TIMEOUT}"
  fi
  if [[ -n "${RUNTIME_CONFIG:-}" ]]; then
    params+=" --runtime-config=${RUNTIME_CONFIG}"
  fi
  if [[ -n "${FEATURE_GATES:-}" ]]; then
    params+=" --feature-gates=${FEATURE_GATES}"
  fi
  # When a proxy/token setup is configured, advertise the VM's external IP
  # (queried from the GCE metadata server) and enable SSH tunneling;
  # otherwise fall back to MASTER_ADVERTISE_ADDRESS when provided.
  if [[ -n "${PROJECT_ID:-}" && -n "${TOKEN_URL:-}" && -n "${TOKEN_BODY:-}" && -n "${NODE_NETWORK:-}" ]]; then
    local -r vm_external_ip=$(curl --retry 5 --retry-delay 3 --fail --silent -H 'Metadata-Flavor: Google' "http://metadata/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip")
    params+=" --advertise-address=${vm_external_ip}"
    params+=" --ssh-user=${PROXY_SSH_USER}"
    params+=" --ssh-keyfile=/etc/srv/sshproxy/.sshkeyfile"
  elif [ -n "${MASTER_ADVERTISE_ADDRESS:-}" ]; then
    params="${params} --advertise-address=${MASTER_ADVERTISE_ADDRESS}"
  fi
  # Webhook token authentication (GCP authn endpoint), when configured.
  local webhook_authn_config_mount=""
  local webhook_authn_config_volume=""
  if [[ -n "${GCP_AUTHN_URL:-}" ]]; then
    params+=" --authentication-token-webhook-config-file=/etc/gcp_authn.config"
    webhook_authn_config_mount="{\"name\": \"webhookauthnconfigmount\",\"mountPath\": \"/etc/gcp_authn.config\", \"readOnly\": false},"
    webhook_authn_config_volume="{\"name\": \"webhookauthnconfigmount\",\"hostPath\": {\"path\": \"/etc/gcp_authn.config\"}},"
  fi
  # Authorization modes accumulate: RBAC always, plus ABAC and/or Webhook.
  local authorization_mode="RBAC"
  local -r src_dir="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty"
  # Enable ABAC mode unless the user explicitly opts out with ENABLE_LEGACY_ABAC=false
  if [[ "${ENABLE_LEGACY_ABAC:-}" != "false" ]]; then
    echo "Warning: Enabling legacy ABAC policy. All service accounts will have superuser API access. Set ENABLE_LEGACY_ABAC=false to disable this."
    # Create the ABAC file if it doesn't exist yet, or if we have a KUBE_USER set (to ensure the right user is given permissions)
    if [[ -n "${KUBE_USER:-}" || ! -e /etc/srv/kubernetes/abac-authz-policy.jsonl ]]; then
      local -r abac_policy_json="${src_dir}/abac-authz-policy.jsonl"
      remove-salt-config-comments "${abac_policy_json}"
      # Substitute the user into the policy, or drop the template line
      # entirely when no KUBE_USER is set.
      if [[ -n "${KUBE_USER:-}" ]]; then
        sed -i -e "s/{{kube_user}}/${KUBE_USER}/g" "${abac_policy_json}"
      else
        sed -i -e "/{{kube_user}}/d" "${abac_policy_json}"
      fi
      cp "${abac_policy_json}" /etc/srv/kubernetes/
    fi
    params+=" --authorization-policy-file=/etc/srv/kubernetes/abac-authz-policy.jsonl"
    authorization_mode+=",ABAC"
  fi
  # Webhook authorization (GCP authz endpoint), when configured.
  local webhook_config_mount=""
  local webhook_config_volume=""
  if [[ -n "${GCP_AUTHZ_URL:-}" ]]; then
    authorization_mode+=",Webhook"
    params+=" --authorization-webhook-config-file=/etc/gcp_authz.config"
    webhook_config_mount="{\"name\": \"webhookconfigmount\",\"mountPath\": \"/etc/gcp_authz.config\", \"readOnly\": false},"
    webhook_config_volume="{\"name\": \"webhookconfigmount\",\"hostPath\": {\"path\": \"/etc/gcp_authz.config\"}},"
  fi
  params+=" --authorization-mode=${authorization_mode}"
  local container_env=""
  if [[ -n "${ENABLE_CACHE_MUTATION_DETECTOR:-}" ]]; then
    container_env="\"env\":[{\"name\": \"KUBE_CACHE_MUTATION_DETECTOR\", \"value\": \"${ENABLE_CACHE_MUTATION_DETECTOR}\"}],"
  fi
  # Fill in the manifest template and install it.
  src_file="${src_dir}/kube-apiserver.manifest"
  remove-salt-config-comments "${src_file}"
  # Evaluate variables.
  local -r kube_apiserver_docker_tag=$(cat /opt/kubernetes/kube-docker-files/kube-apiserver.docker_tag)
  sed -i -e "s@{{params}}@${params}@g" "${src_file}"
  sed -i -e "s@{{container_env}}@${container_env}@g" "${src_file}"
  sed -i -e "s@{{srv_kube_path}}@/etc/srv/kubernetes@g" "${src_file}"
  sed -i -e "s@{{srv_sshproxy_path}}@/etc/srv/sshproxy@g" "${src_file}"
  sed -i -e "s@{{cloud_config_mount}}@${CLOUD_CONFIG_MOUNT}@g" "${src_file}"
  sed -i -e "s@{{cloud_config_volume}}@${CLOUD_CONFIG_VOLUME}@g" "${src_file}"
  sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}"
  sed -i -e "s@{{pillar\['kube-apiserver_docker_tag'\]}}@${kube_apiserver_docker_tag}@g" "${src_file}"
  sed -i -e "s@{{pillar\['allow_privileged'\]}}@true@g" "${src_file}"
  sed -i -e "s@{{secure_port}}@443@g" "${src_file}"
  # NOTE(review): the previous sed already replaced every {{secure_port}}
  # occurrence with 443 (global flag), so this line appears to be a no-op;
  # confirm against the manifest template before relying on an 8080 value.
  sed -i -e "s@{{secure_port}}@8080@g" "${src_file}"
  sed -i -e "s@{{additional_cloud_config_mount}}@@g" "${src_file}"
  sed -i -e "s@{{additional_cloud_config_volume}}@@g" "${src_file}"
  sed -i -e "s@{{webhook_authn_config_mount}}@${webhook_authn_config_mount}@g" "${src_file}"
  sed -i -e "s@{{webhook_authn_config_volume}}@${webhook_authn_config_volume}@g" "${src_file}"
  sed -i -e "s@{{webhook_config_mount}}@${webhook_config_mount}@g" "${src_file}"
  sed -i -e "s@{{webhook_config_volume}}@${webhook_config_volume}@g" "${src_file}"
  sed -i -e "s@{{admission_controller_config_mount}}@${admission_controller_config_mount}@g" "${src_file}"
  sed -i -e "s@{{admission_controller_config_volume}}@${admission_controller_config_volume}@g" "${src_file}"
  sed -i -e "s@{{image_policy_webhook_config_mount}}@${image_policy_webhook_config_mount}@g" "${src_file}"
  sed -i -e "s@{{image_policy_webhook_config_volume}}@${image_policy_webhook_config_volume}@g" "${src_file}"
  cp "${src_file}" /etc/kubernetes/manifests
}
# Starts kubernetes controller manager.
# It prepares the log file, loads the docker image, calculates variables, sets them
# in the manifest file, and then copies the manifest file to /etc/kubernetes/manifests.
#
# Assumed vars (which are calculated in function compute-master-manifest-variables)
#   CLOUD_CONFIG_OPT
#   CLOUD_CONFIG_VOLUME
#   CLOUD_CONFIG_MOUNT
#   DOCKER_REGISTRY
function start-kube-controller-manager {
  echo "Start kubernetes controller-manager"
  create-kubecontrollermanager-kubeconfig
  prepare-log-file /var/log/kube-controller-manager.log
  # Calculate variables and assemble the command line.
  local params="${CONTROLLER_MANAGER_TEST_LOG_LEVEL:-"--v=2"} ${CONTROLLER_MANAGER_TEST_ARGS:-} ${CLOUD_CONFIG_OPT}"
  params+=" --use-service-account-credentials"
  params+=" --cloud-provider=gce"
  params+=" --kubeconfig=/etc/srv/kubernetes/kube-controller-manager/kubeconfig"
  params+=" --root-ca-file=/etc/srv/kubernetes/ca.crt"
  params+=" --service-account-private-key-file=/etc/srv/kubernetes/server.key"
  if [[ -n "${ENABLE_GARBAGE_COLLECTOR:-}" ]]; then
    params+=" --enable-garbage-collector=${ENABLE_GARBAGE_COLLECTOR}"
  fi
  if [[ -n "${INSTANCE_PREFIX:-}" ]]; then
    params+=" --cluster-name=${INSTANCE_PREFIX}"
  fi
  if [[ -n "${CLUSTER_IP_RANGE:-}" ]]; then
    params+=" --cluster-cidr=${CLUSTER_IP_RANGE}"
  fi
  if [[ -n "${SERVICE_CLUSTER_IP_RANGE:-}" ]]; then
    params+=" --service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}"
  fi
  # kubenet always allocates node CIDRs; otherwise defer to the
  # ALLOCATE_NODE_CIDRS env var when it is set.
  if [[ "${NETWORK_PROVIDER:-}" == "kubenet" ]]; then
    params+=" --allocate-node-cidrs=true"
  elif [[ -n "${ALLOCATE_NODE_CIDRS:-}" ]]; then
    params+=" --allocate-node-cidrs=${ALLOCATE_NODE_CIDRS}"
  fi
  if [[ -n "${TERMINATED_POD_GC_THRESHOLD:-}" ]]; then
    params+=" --terminated-pod-gc-threshold=${TERMINATED_POD_GC_THRESHOLD}"
  fi
  # IP-alias mode: the cloud allocates CIDRs, so cloud routes are not needed.
  if [[ "${ENABLE_IP_ALIASES:-}" == 'true' ]]; then
    params+=" --cidr-allocator-type=CloudAllocator"
    params+=" --configure-cloud-routes=false"
  fi
  if [[ -n "${FEATURE_GATES:-}" ]]; then
    params+=" --feature-gates=${FEATURE_GATES}"
  fi
  local -r kube_rc_docker_tag=$(cat /opt/kubernetes/kube-docker-files/kube-controller-manager.docker_tag)
  local container_env=""
  if [[ -n "${ENABLE_CACHE_MUTATION_DETECTOR:-}" ]]; then
    container_env="\"env\":[{\"name\": \"KUBE_CACHE_MUTATION_DETECTOR\", \"value\": \"${ENABLE_CACHE_MUTATION_DETECTOR}\"}],"
  fi
  # Fill in the manifest template and install it.
  local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-controller-manager.manifest"
  remove-salt-config-comments "${src_file}"
  # Evaluate variables.
  sed -i -e "s@{{srv_kube_path}}@/etc/srv/kubernetes@g" "${src_file}"
  sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}"
  sed -i -e "s@{{pillar\['kube-controller-manager_docker_tag'\]}}@${kube_rc_docker_tag}@g" "${src_file}"
  sed -i -e "s@{{params}}@${params}@g" "${src_file}"
  sed -i -e "s@{{container_env}}@${container_env}@g" "${src_file}"
  sed -i -e "s@{{cloud_config_mount}}@${CLOUD_CONFIG_MOUNT}@g" "${src_file}"
  sed -i -e "s@{{cloud_config_volume}}@${CLOUD_CONFIG_VOLUME}@g" "${src_file}"
  sed -i -e "s@{{additional_cloud_config_mount}}@@g" "${src_file}"
  sed -i -e "s@{{additional_cloud_config_volume}}@@g" "${src_file}"
  cp "${src_file}" /etc/kubernetes/manifests
}
# Starts kubernetes scheduler.
# It prepares the log file, loads the docker image, calculates variables, sets them
# in the manifest file, and then copies the manifest file to /etc/kubernetes/manifests.
#
# Assumed vars (which are calculated in compute-master-manifest-variables)
#   DOCKER_REGISTRY
function start-kube-scheduler {
  echo "Start kubernetes scheduler"
  create-kubescheduler-kubeconfig
  prepare-log-file /var/log/kube-scheduler.log
  # Calculate variables and set them in the manifest.
  # FIX: declare params local so it does not leak into the global scope,
  # matching start-kube-apiserver and start-kube-controller-manager.
  local params="${SCHEDULER_TEST_LOG_LEVEL:-"--v=2"} ${SCHEDULER_TEST_ARGS:-}"
  params+=" --kubeconfig=/etc/srv/kubernetes/kube-scheduler/kubeconfig"
  if [[ -n "${FEATURE_GATES:-}" ]]; then
    params+=" --feature-gates=${FEATURE_GATES}"
  fi
  if [[ -n "${SCHEDULING_ALGORITHM_PROVIDER:-}" ]]; then
    params+=" --algorithm-provider=${SCHEDULING_ALGORITHM_PROVIDER}"
  fi
  local -r kube_scheduler_docker_tag=$(cat "${KUBE_HOME}/kube-docker-files/kube-scheduler.docker_tag")
  # Remove salt comments and replace variables with values.
  local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-scheduler.manifest"
  remove-salt-config-comments "${src_file}"
  sed -i -e "s@{{srv_kube_path}}@/etc/srv/kubernetes@g" "${src_file}"
  sed -i -e "s@{{params}}@${params}@g" "${src_file}"
  sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}"
  sed -i -e "s@{{pillar\['kube-scheduler_docker_tag'\]}}@${kube_scheduler_docker_tag}@g" "${src_file}"
  cp "${src_file}" /etc/kubernetes/manifests
}
# Starts cluster autoscaler.
# Only acts when ENABLE_CLUSTER_AUTOSCALER is "true"; otherwise a no-op.
#
# Assumed vars (which are calculated in function compute-master-manifest-variables)
#   CLOUD_CONFIG_OPT
#   CLOUD_CONFIG_VOLUME
#   CLOUD_CONFIG_MOUNT
function start-cluster-autoscaler {
  if [[ "${ENABLE_CLUSTER_AUTOSCALER:-}" == "true" ]]; then
    echo "Start kubernetes cluster autoscaler"
    prepare-log-file /var/log/cluster-autoscaler.log
    # Remove salt comments and replace variables with values
    local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/cluster-autoscaler.manifest"
    remove-salt-config-comments "${src_file}"
    # NOTE(review): AUTOSCALER_MIG_CONFIG is expanded without a :- default,
    # unlike the other env vars in this file — confirm it is always set
    # when ENABLE_CLUSTER_AUTOSCALER is "true" (it would fail under set -u).
    local params="${AUTOSCALER_MIG_CONFIG} ${CLOUD_CONFIG_OPT}"
    sed -i -e "s@{{params}}@${params}@g" "${src_file}"
    sed -i -e "s@{{cloud_config_mount}}@${CLOUD_CONFIG_MOUNT}@g" "${src_file}"
    sed -i -e "s@{{cloud_config_volume}}@${CLOUD_CONFIG_VOLUME}@g" "${src_file}"
    # Strip any remaining salt {% ... %} directives from the manifest.
    sed -i -e "s@{%.*%}@@g" "${src_file}"
    cp "${src_file}" /etc/kubernetes/manifests
  fi
}
# Copies one addon's manifests into /etc/kubernetes and normalizes
# directory/file permissions (dir 755, files 644, owned by root).
#
# $1: addon category under /etc/kubernetes
# $2: manifest source dir
function setup-addon-manifests {
  local -r src_dir="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/$2"
  local -r dst_dir="/etc/kubernetes/$1/$2"
  mkdir -p "${dst_dir}"
  # Copy each supported manifest flavor that exists in the source dir.
  local ext
  local files
  for ext in yaml json yaml.in; do
    files=$(find "${src_dir}" -maxdepth 1 -name "*.${ext}")
    if [[ -n "${files}" ]]; then
      cp "${src_dir}/"*.${ext} "${dst_dir}"
    fi
  done
  chown -R root:root "${dst_dir}"
  chmod 755 "${dst_dir}"
  chmod 644 "${dst_dir}"/*
}
# Prepares the manifests of k8s addons, and starts the addon manager.
# Each addon is enabled by its own ENABLE_* / provider env var; disabled
# addons are simply skipped.
function start-kube-addons {
  echo "Prepare kube-addons manifests and start kube addon manager"
  local -r src_dir="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty"
  local -r dst_dir="/etc/kubernetes/addons"
  # prep the additional bindings that are particular to e2e users and groups
  setup-addon-manifests "addons" "e2e-rbac-bindings"
  # Set up manifests of other addons.
  if [[ "${ENABLE_CLUSTER_MONITORING:-}" == "influxdb" ]] || \
     [[ "${ENABLE_CLUSTER_MONITORING:-}" == "google" ]] || \
     [[ "${ENABLE_CLUSTER_MONITORING:-}" == "stackdriver" ]] || \
     [[ "${ENABLE_CLUSTER_MONITORING:-}" == "standalone" ]] || \
     [[ "${ENABLE_CLUSTER_MONITORING:-}" == "googleinfluxdb" ]]; then
    local -r file_dir="cluster-monitoring/${ENABLE_CLUSTER_MONITORING}"
    setup-addon-manifests "addons" "${file_dir}"
    # Replace the salt configurations with variable values.
    # Heapster resource sizing: base amounts plus per-node increments.
    base_metrics_memory="140Mi"
    base_eventer_memory="190Mi"
    base_metrics_cpu="80m"
    nanny_memory="90Mi"
    local -r metrics_memory_per_node="4"
    local -r metrics_cpu_per_node="0.5"
    local -r eventer_memory_per_node="500"
    local -r nanny_memory_per_node="200"
    if [[ -n "${NUM_NODES:-}" && "${NUM_NODES}" -ge 1 ]]; then
      num_kube_nodes="$((${NUM_NODES}+1))"
      nanny_memory="$((${num_kube_nodes} * ${nanny_memory_per_node} + 90 * 1024))Ki"
    fi
    controller_yaml="${dst_dir}/${file_dir}"
    if [[ "${ENABLE_CLUSTER_MONITORING:-}" == "googleinfluxdb" ]]; then
      controller_yaml="${controller_yaml}/heapster-controller-combined.yaml"
    else
      controller_yaml="${controller_yaml}/heapster-controller.yaml"
    fi
    remove-salt-config-comments "${controller_yaml}"
    sed -i -e "s@{{ *base_metrics_memory *}}@${base_metrics_memory}@g" "${controller_yaml}"
    sed -i -e "s@{{ *base_metrics_cpu *}}@${base_metrics_cpu}@g" "${controller_yaml}"
    sed -i -e "s@{{ *base_eventer_memory *}}@${base_eventer_memory}@g" "${controller_yaml}"
    sed -i -e "s@{{ *metrics_memory_per_node *}}@${metrics_memory_per_node}@g" "${controller_yaml}"
    sed -i -e "s@{{ *eventer_memory_per_node *}}@${eventer_memory_per_node}@g" "${controller_yaml}"
    sed -i -e "s@{{ *nanny_memory *}}@${nanny_memory}@g" "${controller_yaml}"
    sed -i -e "s@{{ *metrics_cpu_per_node *}}@${metrics_cpu_per_node}@g" "${controller_yaml}"
  fi
  if [[ "${ENABLE_CLUSTER_DNS:-}" == "true" ]]; then
    setup-addon-manifests "addons" "dns"
    local -r dns_controller_file="${dst_dir}/dns/kubedns-controller.yaml"
    local -r dns_svc_file="${dst_dir}/dns/kubedns-svc.yaml"
    mv "${dst_dir}/dns/kubedns-controller.yaml.in" "${dns_controller_file}"
    mv "${dst_dir}/dns/kubedns-svc.yaml.in" "${dns_svc_file}"
    # Replace the salt configurations with variable values.
    sed -i -e "s@{{ *pillar\['dns_domain'\] *}}@${DNS_DOMAIN}@g" "${dns_controller_file}"
    sed -i -e "s@{{ *pillar\['dns_server'\] *}}@${DNS_SERVER_IP}@g" "${dns_svc_file}"
    if [[ "${ENABLE_DNS_HORIZONTAL_AUTOSCALER:-}" == "true" ]]; then
      setup-addon-manifests "addons" "dns-horizontal-autoscaler"
    fi
  fi
  if [[ "${ENABLE_CLUSTER_REGISTRY:-}" == "true" ]]; then
    setup-addon-manifests "addons" "registry"
    local -r registry_pv_file="${dst_dir}/registry/registry-pv.yaml"
    local -r registry_pvc_file="${dst_dir}/registry/registry-pvc.yaml"
    mv "${dst_dir}/registry/registry-pv.yaml.in" "${registry_pv_file}"
    mv "${dst_dir}/registry/registry-pvc.yaml.in" "${registry_pvc_file}"
    # Replace the salt configurations with variable values.
    # FIX: strip salt comments from the registry files themselves. The
    # previous code called remove-salt-config-comments on
    # "${controller_yaml}", which belongs to the cluster-monitoring
    # section above and may be unset when monitoring is disabled.
    remove-salt-config-comments "${registry_pv_file}"
    remove-salt-config-comments "${registry_pvc_file}"
    sed -i -e "s@{{ *pillar\['cluster_registry_disk_size'\] *}}@${CLUSTER_REGISTRY_DISK_SIZE}@g" "${registry_pv_file}"
    sed -i -e "s@{{ *pillar\['cluster_registry_disk_size'\] *}}@${CLUSTER_REGISTRY_DISK_SIZE}@g" "${registry_pvc_file}"
    sed -i -e "s@{{ *pillar\['cluster_registry_disk_name'\] *}}@${CLUSTER_REGISTRY_DISK}@g" "${registry_pvc_file}"
  fi
  if [[ "${ENABLE_NODE_LOGGING:-}" == "true" ]] && \
     [[ "${LOGGING_DESTINATION:-}" == "elasticsearch" ]] && \
     [[ "${ENABLE_CLUSTER_LOGGING:-}" == "true" ]]; then
    setup-addon-manifests "addons" "fluentd-elasticsearch"
  fi
  if [[ "${ENABLE_NODE_LOGGING:-}" == "true" ]] && \
     [[ "${LOGGING_DESTINATION:-}" == "gcp" ]]; then
    setup-addon-manifests "addons" "fluentd-gcp"
  fi
  if [[ "${ENABLE_CLUSTER_UI:-}" == "true" ]]; then
    setup-addon-manifests "addons" "dashboard"
  fi
  if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "daemonset" ]]; then
    setup-addon-manifests "addons" "node-problem-detector"
  fi
  if echo "${ADMISSION_CONTROL:-}" | grep -q "LimitRanger"; then
    setup-addon-manifests "admission-controls" "limit-range"
  fi
  if [[ "${NETWORK_POLICY_PROVIDER:-}" == "calico" ]]; then
    setup-addon-manifests "addons" "calico-policy-controller"
    # Replace the cluster cidr.
    local -r calico_file="${dst_dir}/calico-policy-controller/calico-node.yaml"
    sed -i -e "s@__CLUSTER_CIDR__@${CLUSTER_IP_RANGE}@g" "${calico_file}"
  fi
  if [[ "${ENABLE_DEFAULT_STORAGE_CLASS:-}" == "true" ]]; then
    setup-addon-manifests "addons" "storage-class/gce"
  fi
  # Place addon manager pod manifest.
  cp "${src_dir}/kube-addon-manager.yaml" /etc/kubernetes/manifests
}
# Installs the e2e image-puller static pod manifest (test clusters only).
function start-image-puller {
  echo "Start image-puller"
  local -r manifest="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/e2e-image-puller.manifest"
  cp "${manifest}" /etc/kubernetes/manifests/
}
# Installs the kube-registry-proxy static pod manifest.
function start-kube-registry-proxy {
  echo "Start kube-registry-proxy"
  local -r manifest="${KUBE_HOME}/kube-manifests/kubernetes/kube-registry-proxy.yaml"
  cp "${manifest}" /etc/kubernetes/manifests
}
# Deploys the GCE L7 load-balancing controller (glbc) for ingress when
# ENABLE_L7_LOADBALANCING is set to "glbc"; otherwise a no-op.
function start-lb-controller {
  # Guard clause: nothing to do unless glbc is requested.
  if [[ "${ENABLE_L7_LOADBALANCING:-}" != "glbc" ]]; then
    return
  fi
  echo "Start GCE L7 pod"
  prepare-log-file /var/log/glbc.log
  setup-addon-manifests "addons" "cluster-loadbalancing/glbc"
  cp "${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/glbc.manifest" /etc/kubernetes/manifests/
}
# Deploys the rescheduler static pod when ENABLE_RESCHEDULER is "true";
# otherwise a no-op.
function start-rescheduler {
  # Guard clause: nothing to do unless the rescheduler is enabled.
  if [[ "${ENABLE_RESCHEDULER:-}" != "true" ]]; then
    return
  fi
  echo "Start Rescheduler"
  prepare-log-file /var/log/rescheduler.log
  local -r manifest="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/rescheduler.manifest"
  cp "${manifest}" /etc/kubernetes/manifests/
}
# Install and setup rkt
# TODO(euank): There should be a toggle to use the distro-provided rkt binary
# Sets the following variables:
#   RKT_BIN: the path to the rkt binary
#
# Downloads the rkt release tarball for ${RKT_VERSION}, installs the
# binary under ${KUBE_HOME}/bin, caches the bundled stage1 images, and
# writes + starts a systemd unit for the rkt api-service on
# 127.0.0.1:15441.
function setup-rkt {
  local rkt_bin="${KUBE_HOME}/bin/rkt"
  if [[ -x "${rkt_bin}" ]]; then
    # idempotency, skip downloading this time
    # TODO(euank): this might get in the way of updates, but 'file busy'
    # because of rkt-api would too
    RKT_BIN="${rkt_bin}"
    return
  fi
  mkdir -p /etc/rkt "${KUBE_HOME}/download/"
  local rkt_tar="${KUBE_HOME}/download/rkt.tar.gz"
  local rkt_tmpdir=$(mktemp -d "${KUBE_HOME}/rkt_download.XXXXX")
  # Fetch the release tarball from GitHub; --fail makes curl exit
  # non-zero on HTTP errors instead of saving an error page.
  curl --retry 5 --retry-delay 3 --fail --silent --show-error \
    --location --create-dirs --output "${rkt_tar}" \
    https://github.com/coreos/rkt/releases/download/v${RKT_VERSION}/rkt-v${RKT_VERSION}.tar.gz
  tar --strip-components=1 -xf "${rkt_tar}" -C "${rkt_tmpdir}" --overwrite
  mv "${rkt_tmpdir}/rkt" "${rkt_bin}"
  # Sanity-check the installed binary before using it.
  if [[ ! -x "${rkt_bin}" ]]; then
    echo "Could not download requested rkt binary"
    exit 1
  fi
  RKT_BIN="${rkt_bin}"
  # Cache rkt stage1 images for speed
  "${RKT_BIN}" fetch --insecure-options=image "${rkt_tmpdir}"/*.aci
  rm -rf "${rkt_tmpdir}"
  # Install and start the rkt api-service as a systemd unit.
  cat > /etc/systemd/system/rkt-api.service <<EOF
[Unit]
Description=rkt api service
Documentation=http://github.com/coreos/rkt
After=network.target
[Service]
ExecStart=${RKT_BIN} api-service --listen=127.0.0.1:15441
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
  systemctl enable rkt-api.service
  systemctl start rkt-api.service
}
# Installs docker2aci, which is needed to load on-disk docker image
# tarballs when using the rkt runtime. To be removed once rkt can fetch
# docker tarballs directly.
#
# Sets the following variables:
#   DOCKER2ACI_BIN: the path to the docker2aci binary
function install-docker2aci {
  local -r archive="${KUBE_HOME}/download/docker2aci.tar.gz"
  local -r unpack_dir="${KUBE_HOME}/docker2aci"
  mkdir -p "${KUBE_HOME}/download/" "${unpack_dir}"
  # Fetch and unpack the pinned release, then move the binary into place.
  curl --retry 5 --retry-delay 3 --fail --silent --show-error \
    --location --create-dirs --output "${archive}" \
    https://github.com/appc/docker2aci/releases/download/v0.14.0/docker2aci-v0.14.0.tar.gz
  tar --strip-components=1 -xf "${archive}" -C "${unpack_dir}" --overwrite
  DOCKER2ACI_BIN="${KUBE_HOME}/bin/docker2aci"
  mv "${unpack_dir}/docker2aci" "${DOCKER2ACI_BIN}"
}
########### Main Function ###########
echo "Start to configure instance for kubernetes"
# Note: this name doesn't make as much sense here as in gci where it's actually
# /home/kubernetes, but for ease of diff-ing, retain the same variable name
KUBE_HOME="/opt/kubernetes"
# The cluster configuration must already have been staged at kube-env;
# nothing below can proceed without it.
if [[ ! -e "${KUBE_HOME}/kube-env" ]]; then
echo "The ${KUBE_HOME}/kube-env file does not exist!! Terminate cluster initialization."
exit 1
fi
source "${KUBE_HOME}/kube-env"
# Restrict KUBE_USER to a safe character set before it is used elsewhere.
if [[ -n "${KUBE_USER:-}" ]]; then
if ! [[ "${KUBE_USER}" =~ ^[-._@a-zA-Z0-9]+$ ]]; then
echo "Bad KUBE_USER format."
exit 1
fi
fi
# generate the controller manager and scheduler tokens here since they are only used on the master.
# Each pipeline derives a random 32-character token: read from /dev/urandom,
# base64-encode, strip the non-alphanumeric base64 characters, keep 32 bytes.
KUBE_CONTROLLER_MANAGER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
KUBE_SCHEDULER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
# KUBERNETES_CONTAINER_RUNTIME is set by the `kube-env` file, but it's a bit of a mouthful
# Default to docker when neither variable provides a runtime.
if [[ "${CONTAINER_RUNTIME:-}" == "" ]]; then
CONTAINER_RUNTIME="${KUBERNETES_CONTAINER_RUNTIME:-docker}"
fi
create-dirs
ensure-local-ssds
# Masters get the full auth setup; nodes only need kubelet/kube-proxy
# kubeconfigs.
if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then
mount-master-pd
create-master-auth
create-master-kubelet-auth
create-master-etcd-auth
else
create-kubelet-kubeconfig
create-kubeproxy-kubeconfig
fi
# When rkt is the runtime, docker is stopped and disabled before rkt is
# installed; otherwise the docker daemon is (re)configured.
if [[ "${CONTAINER_RUNTIME:-}" == "rkt" ]]; then
systemctl stop docker
systemctl disable docker
setup-rkt
install-docker2aci
create-kube-controller-manager-dirs
else
configure-docker-daemon
fi
load-docker-images
start-kubelet
# Start the control-plane components on masters, node components otherwise.
if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then
compute-master-manifest-variables
start-etcd-servers
start-etcd-empty-dir-cleanup-pod
start-kube-apiserver
start-kube-controller-manager
start-kube-scheduler
start-kube-addons
start-cluster-autoscaler
start-lb-controller
start-rescheduler
else
start-kube-proxy
# Kube-registry-proxy.
if [[ "${ENABLE_CLUSTER_REGISTRY:-}" == "true" ]]; then
start-kube-registry-proxy
fi
if [[ "${PREPULL_E2E_IMAGES:-}" == "true" ]]; then
start-image-puller
fi
fi
echo "Done for the configuration for kubernetes"
|
<gh_stars>1-10
package org.opentaps.base.constants;
/*
* Copyright (c) Open Source Strategies, Inc.
*
* Opentaps is free software: you can redistribute it and/or modify it
* under the terms of the GNU Affero General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Opentaps is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Opentaps. If not, see <http://www.gnu.org/licenses/>.
*/
// DO NOT EDIT THIS FILE! THIS IS AUTO GENERATED AND WILL GET WRITTEN OVER PERIODICALLY WHEN THE DATA CHANGE
/**
 * StatusType constant values.
 * <p>
 * Each constant holds a StatusType identifier string; by convention the
 * constant name mirrors its string value. Related identifiers are grouped
 * into nested non-instantiable holder classes. This file is auto generated
 * (see the header note above) and must not be edited by hand.
 */
public final class StatusTypeConstants {
    // Prevent instantiation: this class only holds constants.
    private StatusTypeConstants() { }
    /** Acctg Entry Reconcile. */
    public static final String ACCTG_ENREC_STATUS = "ACCTG_ENREC_STATUS";
    /** Agreement. */
    public static final String AGREEMENT = "AGREEMENT";
    /** Amazon Order Document. */
    public static final String AMZN_DOC = "AMZN_DOC";
    /** Amazon Order. */
    public static final String AMZN_ORDER = "AMZN_ORDER";
    /** Amazon Product Statuses. */
    public static final String AMZN_PRODUCT = "AMZN_PRODUCT";
    /** Amazon Order. */
    public static final String AMZN_SHIP = "AMZN_SHIP";
    /** Budget. */
    public static final String BUDGET_STATUS = "BUDGET_STATUS";
    /** Calendar-related status type identifiers. */
    public static final class CalendarStatus {
        private CalendarStatus() { }
        /** Event. */
        public static final String EVENT_STATUS = "EVENT_STATUS";
        /** Task. */
        public static final String TASK_STATUS = "TASK_STATUS";
    }
    /** Case. */
    public static final String CASE_STATUS = "CASE_STATUS";
    /** Content Approval Status. */
    public static final String CNTNTAPPR_STATUS = "CNTNTAPPR_STATUS";
    /** Communication-event-related status type identifiers. */
    public static final class ComEventStatus {
        private ComEventStatus() { }
        /** Communication Event Role. */
        public static final String COM_EVENT_ROL_STATUS = "COM_EVENT_ROL_STATUS";
        /** Communication Event. */
        public static final String COM_EVENT_STATUS = "COM_EVENT_STATUS";
    }
    /** Contact List Party. */
    public static final String CONTACTLST_PARTY = "CONTACTLST_PARTY";
    /** Content. */
    public static final String CONTENT_STATUS = "CONTENT_STATUS";
    /** Custom Request Status. */
    public static final String CUSTREQ_STTS = "CUSTREQ_STTS";
    /** Data Import Status. */
    public static final String DATAIMPORT = "DATAIMPORT";
    /** Employee Position Status. */
    public static final String EMPL_POSITION_STATUS = "EMPL_POSITION_STATUS";
    /** Employment Application. */
    public static final String EMPLOYMENT_APP_STTS = "EMPLOYMENT_APP_STTS";
    /** Entity Sync Run. */
    public static final String ENTSYNC_RUN = "ENTSYNC_RUN";
    /** Example. */
    public static final String EXAMPLE_STATUS = "EXAMPLE_STATUS";
    /** Fixed Asset Assignment Status. */
    public static final String FA_ASGN_STATUS = "FA_ASGN_STATUS";
    /** Financial Account Status. */
    public static final String FINACCT_STATUS = "FINACCT_STATUS";
    /** Fixed Asset Maintenance. */
    public static final String FIXEDAST_MNT_STATUS = "FIXEDAST_MNT_STATUS";
    /** Degree status. */
    public static final String HR_DEGREE_STATUS = "HR_DEGREE_STATUS";
    /** Job status. */
    public static final String HR_JOB_STATUS = "HR_JOB_STATUS";
    /** Inventory-item-related status type identifiers. */
    public static final class InventoryItemStts {
        private InventoryItemStts() { }
        /** Inventory Item. */
        public static final String INVENTORY_ITEM_STTS = "INVENTORY_ITEM_STTS";
        /** Non-Serialized Inventory Item. */
        public static final String INV_NON_SER_STTS = "INV_NON_SER_STTS";
        /** Serialized Inventory Item. */
        public static final String INV_SERIALIZED_STTS = "INV_SERIALIZED_STTS";
    }
    /** Inventory Transfer. */
    public static final String INVENTORY_XFER_STTS = "INVENTORY_XFER_STTS";
    /** Invoice Processing Status. */
    public static final String INVOICE_PROCESS_STTS = "INVOICE_PROCESS_STTS";
    /** Invoice Status. */
    public static final String INVOICE_STATUS = "INVOICE_STATUS";
    /** Marketing Campaign. */
    public static final String MKTG_CAMP_STATUS = "MKTG_CAMP_STATUS";
    /** Not Applicable. */
    public static final String _NA_ = "_NA_";
    /** OAGIS Message Processing Status. */
    public static final String OAGIS_MP_STATUS = "OAGIS_MP_STATUS";
    /** Order Item Ship Group status. */
    public static final String OISG_STATUS = "OISG_STATUS";
    /** Order Delivery Schedule. */
    public static final String ORDER_DEL_SCH = "ORDER_DEL_SCH";
    /** Order Return Status For Customer Returns. */
    public static final String ORDER_RETURN_STTS = "ORDER_RETURN_STTS";
    /** Order-related status type identifiers. */
    public static final class OrderStatus {
        private OrderStatus() { }
        /** Order Item. */
        public static final String ORDER_ITEM_STATUS = "ORDER_ITEM_STATUS";
        /** Order. */
        public static final String ORDER_STATUS = "ORDER_STATUS";
    }
    /** Party Asset. */
    public static final String PARTY_ASSET_STATUS = "PARTY_ASSET_STATUS";
    /** Party Invitation. */
    public static final String PARTY_INV_STATUS = "PARTY_INV_STATUS";
    /** Party Relationship. */
    public static final String PARTY_REL_STATUS = "PARTY_REL_STATUS";
    /** Party-related status type identifiers. */
    public static final class PartyStatus {
        private PartyStatus() { }
        /** Lead Status. */
        public static final String LEAD_STATUS = "LEAD_STATUS";
        /** Status codes for parties which are leads (role = PROSPECT). */
        public static final String PARTY_LEAD_STATUS = "PARTY_LEAD_STATUS";
        /** Party status. */
        public static final String PARTY_STATUS = "PARTY_STATUS";
    }
    /** PartyQual verification status. */
    public static final String PARTYQUAL_VERIFY = "PARTYQUAL_VERIFY";
    /** Payment Preference. */
    public static final String PAYMENT_PREF_STATUS = "PAYMENT_PREF_STATUS";
    /** Picklist Item. */
    public static final String PICKITEM_STATUS = "PICKITEM_STATUS";
    /** Picklist. */
    public static final String PICKLIST_STATUS = "PICKLIST_STATUS";
    /** Payment Status. */
    public static final String PMNT_STATUS = "PMNT_STATUS";
    /** Order Return Status For Supplier Returns. */
    public static final String PORDER_RETURN_STTS = "PORDER_RETURN_STTS";
    /** Pos Transaction. */
    public static final String POSTX_STATUS = "POSTX_STATUS";
    /** Product Review. */
    public static final String PRODUCT_REVIEW_STTS = "PRODUCT_REVIEW_STTS";
    /** Project-related status type identifiers. */
    public static final class Project {
        private Project() { }
        /** Project. */
        public static final String PROJECT = "PROJECT";
        /** Project Assignment. */
        public static final String PROJECT_ASSGN_STATUS = "PROJECT_ASSGN_STATUS";
        /** Project status. */
        public static final String PROJECT_STATUS = "PROJECT_STATUS";
        /** Project Task. */
        public static final String PROJECT_TASK_STATUS = "PROJECT_TASK_STATUS";
    }
    /** Purchase Shipment. */
    public static final String PURCH_SHIP_STATUS = "PURCH_SHIP_STATUS";
    /** Quote Status. */
    public static final String QUOTE_STATUS = "QUOTE_STATUS";
    /** Requirement Status. */
    public static final String REQUIREMENT_STATUS = "REQUIREMENT_STATUS";
    /** Server Hit. */
    public static final String SERVER_HIT_STATUS = "SERVER_HIT_STATUS";
    /** Scheduled Service. */
    public static final String SERVICE_STATUS = "SERVICE_STATUS";
    /** Shipment. */
    public static final String SHIPMENT_STATUS = "SHIPMENT_STATUS";
    /** ShipmentRouteSegment:CarrierService. */
    public static final String SHPRTSG_CS_STATUS = "SHPRTSG_CS_STATUS";
    /** Synchronize. */
    public static final String SYNCHRONIZE_STATUS = "SYNCHRONIZE_STATUS";
    /** Timesheet. */
    public static final String TIMESHEET_STATUS = "TIMESHEET_STATUS";
    /** Unemployment Claim. */
    public static final String UNEMPL_CLAIM_STATUS = "UNEMPL_CLAIM_STATUS";
    /** Web Content. */
    public static final String WEB_CONTENT_STATUS = "WEB_CONTENT_STATUS";
    /** Work Effort Fixed Asset Availability. */
    public static final String WEFA_AVAILABILITY = "WEFA_AVAILABILITY";
    /** WorkEffort Review. */
    public static final String WEFF_REVIEW_STTS = "WEFF_REVIEW_STTS";
    /** Work Effort Good Standard Status. */
    public static final String WEFG_STATUS = "WEFG_STATUS";
    /** Work Effort Party Availability. */
    public static final String WEPA_AVAILABILITY = "WEPA_AVAILABILITY";
    /** WorkEffort Asset. */
    public static final String WORK_EFF_ASSET_STTS = "WORK_EFF_ASSET_STTS";
    /** WorkEffort Assignment. */
    public static final String WORK_EFFORT_ASSIGN = "WORK_EFFORT_ASSIGN";
    /** Work-effort-related status type identifiers. */
    public static final class WorkEffortStatus {
        private WorkEffortStatus() { }
        /** Calendar. */
        public static final String CALENDAR_STATUS = "CALENDAR_STATUS";
        /** Production Run Status. */
        public static final String PRODUCTION_RUN = "PRODUCTION_RUN";
        /** Party Assignment Status. */
        public static final String PRTYASGN_STATUS = "PRTYASGN_STATUS";
        /** Manufacturing Task and Routing status. */
        public static final String ROUTING_STATUS = "ROUTING_STATUS";
        /** Project. */
        public static final String WE_PROJECT_STATUS = "WE_PROJECT_STATUS";
        /** Workeffort. */
        public static final String WORK_EFFORT_STATUS = "WORK_EFFORT_STATUS";
    }
}
|
export class Locations {
constructor(
public id: number,
// public location?: string
public city?: string,
public state?: string
)
{}} |
<gh_stars>1-10
angular.module('ngTimeBlocks', [])
    /**
     * TimeBlocks directive: wraps a TimeBlocks visualization instance and
     * keeps it in sync with the bound `data`/`options`, forwarding its
     * events to handlers supplied through the `events` binding.
     */
    .directive('visTimeblocks', function () {
        'use strict';
        return {
            restrict: 'EA',
            transclude: false,
            scope: {
                data: '=',
                options: '=',
                events: '='
            },
            link: function (scope, element, attr) {
                // Instance events that may be relayed to user handlers.
                var forwardedEvents = [
                    'rangechange',
                    'rangechanged',
                    'timechange',
                    'timechanged',
                    'select',
                    'doubleClick',
                    'click',
                    'contextmenu',
                    'beforeRedraw',
                    'afterRedraw'
                ];
                // Create the visualization on the directive's element.
                var instance = new TimeBlocks(element[0], scope.data, scope.options || {});
                // Propagate Angular-side changes into the instance.
                scope.$watchCollection('data', function () {
                    instance.setData(scope.data);
                });
                scope.$watchCollection('options', function (options) {
                    instance.setOptions(options);
                });
                // Relay each instance event to the matching handler, if any.
                forwardedEvents.forEach(function (event) {
                    instance.on(event, function callback() {
                        var args = Array.prototype.slice.call(arguments);
                        if (scope.events && scope.events[event]) {
                            scope.events[event].apply(null, args);
                        }
                    });
                });
                // Hand the instance back to the caller once it is ready.
                if (scope.events && scope.events.onload) {
                    scope.events.onload(instance);
                }
                // TODO: implement select event
                if (scope.events && scope.events.select) {
                    throw new Error('Select event is not yet implemented...');
                }
            }
        };
    });
|
#!/usr/bin/env bash
# Garbage-collect unused Docker containers and images via spotify/docker-gc,
# then remove dangling (unreferenced) volumes.
docker run --rm -v /var/run/docker.sock:/var/run/docker.sock -v /etc:/etc spotify/docker-gc
# Only call `docker volume rm` when there is something to remove: with an
# empty argument list it exits with a usage error.
dangling="$(docker volume ls -qf dangling=true)"
if [[ -n "${dangling}" ]]; then
  # Intentionally unquoted: the list is newline-separated volume names.
  docker volume rm ${dangling}
fi
|
# Import necessary libraries
import numpy as np
import scipy
import matplotlib.pyplot as plt
import glob, os, sys, time
from read_Lbl import lbl_Parse
from read_Aux import aux_Parse
from read_Anc import anc_Parse
from read_Chirp import open_Chirp
from plotting import rgram
from read_EDR import EDR_Parse, sci_Decompress
def main(EDRName, auxName, lblName, chirp = 'synth', presumFac = None, beta = 0):
    """
    -----------
    This python script is used to pulse compress raw SHARAD EDRs to return chirp compressed science record. Output should be complex voltage.
    This code was adapted from <NAME>'s FrankenRDR work, along with <NAME>'s sharad-tools. Certain packages were directly updated from their work (ie. FrankenRDR-readLBL, readAnc, readAux).
    This code simply aims to pulse compress the raw data, without performing any other processing steps.
    github: b-tober
    Updated by: <NAME>
    Last Updated: 09Jan18
    -----------
    Parameters:
    EDRName   -- path to the EDR science data file
    auxName   -- path to the auxiliary data file
    lblName   -- path to the PDS label file
    chirp     -- reference chirp type: 'ideal', 'synth', 'UPB', or 'calib'
    presumFac -- presum factor used only for visualization column count
                 (NOTE(review): the None default fails at the presumCols
                 computation -- callers must pass a value; TODO confirm)
    beta      -- Kaiser window beta value (0 = rectangular)
    -----------
    Example call:
    EDRName = '/media/anomalocaris/Swaps/Google_Drive/MARS/orig/edr_test/e_5050702_001_ss19_700_a_s.dat'
    auxName = '/media/anomalocaris/Swaps/Google_Drive/MARS/orig/edr_test/e_5050702_001_ss19_700_a_a.dat'
    lblName = '/media/anomalocaris/Swaps/Google_Drive/MARS/orig/edr_test/e_5050702_001_ss19_700_a.lbl'
    chirp = 'calib'
    presumFac = 8
    beta = 0
    main(EDRName, auxName, lblName, chirp = chirp, presumFac = presumFac)
    """
    t0 = time.time() #start time
    print('--------------------------------')
    # NOTE(review): runName is a module-level global set in __main__, not a
    # parameter -- this raises NameError if main() is imported and called
    # standalone; confirm whether it should be passed in.
    print(runName)
    print('--------------------------------')
    # extract relevant information from lbl file
    print('Reading label file...')
    lblDic = lbl_Parse(lblName)
    records = lblDic['FILE_RECORDS'] # number of records in observation (traces)
    instrPresum = lblDic['INSTR_MODE_ID']['Presum'] # onboard presums
    instrMode = lblDic['INSTR_MODE_ID']['Mode']
    BitsPerSample = lblDic['INSTR_MODE_ID']['BitsPerSample']
    # toggle on to downsize for testing purposes
    records = int(records / 10)
    # presumming is just for visualization purposes
    presumCols = int(np.ceil(records/presumFac))
    # parse aux file into data frame
    auxDF = aux_Parse(auxName)
    # determine Bits per Sample
    # NOTE(review): recLen is left undefined for any other BitsPerSample
    # value, which would raise NameError below -- TODO confirm valid modes.
    if BitsPerSample == 4:
        recLen = 1986
    elif BitsPerSample == 6:
        recLen = 2886
    elif BitsPerSample == 8:
        recLen = 3786
    print('InstrPresum:\t' + format(instrPresum))
    print('Instrument Mode:\t' + format(instrMode))
    print('Bits Per Sample:\t' + format(BitsPerSample))
    print('Record Length:\t' + format(recLen))
    print('Number of Records:\t' + format(records))
    print('Using Kaiser window of beta value:\t' + format(beta))
    print('---- Begin Processing ----')
    # determine TX and RX temps if using Italian reference chirp
    txTemp = auxDF['TX_TEMP'][:]
    rxTemp = auxDF['RX_TEMP'][:]
    # read in reference chirp as matched filter - this should be imported in Fourier frequency domain, as complex conjugate
    # 'calib' additionally returns a per-trace index into the chirp table.
    if chirp == 'calib':
        refChirpMF, refChirpMF_index = open_Chirp(chirp, txTemp, rxTemp)
    else:
        refChirpMF = open_Chirp(chirp, txTemp, rxTemp)
    print('Reference chirp opened, type:\t' + format(chirp))
    # read in raw science data and ancil data
    sci, ancil = EDR_Parse(EDRName, records, recLen, BitsPerSample)
    print('EDR Science Data Parsed')
    # parse ancilliary data
    ancil = anc_Parse(ancil, records)
    print('Ancilliary Data Parsed')
    # decompress science data
    sci = sci_Decompress(sci, lblDic['COMPRESSION'], instrPresum, BitsPerSample, ancil['SDI_BIT_FIELD'][:])
    print('EDR Science Data Decompressed')
    # all data imported and decompressed
    # set up empty data arrays to hold Output and kaiser window of specified beta value
    # 'calib' allocates four outputs so four range-compression variants can
    # be compared; the other chirp types need only one 3600-sample output.
    if chirp =='ideal' or chirp == 'synth' or chirp == 'UPB':
        EDRData = np.zeros((3600,records), complex)
        EDRData_presum = np.zeros((3600, presumCols), complex)
        window = np.kaiser(3600, beta)
    elif chirp == 'calib':
        EDRData = np.zeros((2048,records), complex)
        EDRData_presum = np.zeros((2048, presumCols), complex)
        window = np.kaiser(2048,beta)
        EDRData2 = np.zeros((4096,records), complex)
        EDRData2_presum = np.zeros((4096, presumCols), complex)
        window2 = np.pad(np.kaiser(2048,beta),(0,4096 - refChirpMF.shape[1]),'constant')
        EDRData3 = np.zeros((4096,records), complex)
        EDRData3_presum = np.zeros((4096, presumCols), complex)
        window3 = np.pad(np.kaiser(2048,beta),(0,4096 - refChirpMF.shape[1]),'constant')
        EDRData4 = np.zeros((4096,records), complex)
        EDRData4_presum = np.zeros((4096, presumCols), complex)
        window4 = np.pad(np.kaiser(2048,beta),(0,4096 - refChirpMF.shape[1]),'constant')
    # per-trace geometry table: [orbit id, trace idx, lat, lon, solar zenith]
    geomData = np.zeros((records,5))
    #-------------------
    # setup complete; begin range compression
    #-------------------
    if chirp =='calib':
        fc = ((80./3.) - 20.)*1e6 # 6.66 MHz - fc defined by PDS documentation
        dt = (3./80.)*1e-6 # 0.0375 Microseconds
        t = np.arange(0*dt, 4096*dt, dt)
        phase_shift = np.exp(2*np.pi*1j*fc*t) # shift spectrum when multiplied by zero padded raw data
        refChirpMF_pad = np.pad(refChirpMF,[(0,0),(0,2049 - refChirpMF.shape[1])], 'constant') # zeros pad reference chirp to length 2049 prior to range compression to account for missing sample in fourier spectra
        refChirpMF_padx = np.pad(refChirpMF,[(0,0),(0,4096 - refChirpMF.shape[1])], 'constant') # zeros pad reference chirp to length 4096 prior to range compression to account for missing sample in fourier spectra
        sciPad = np.pad(sci,[(0,4096 - sci.shape[0]),(0,0)],'constant') # zero-pad science data to length of 4096
        for _i in range(records):
            #-------------------
            # PDS documented range compression steps -EDRData
            #-------------------
            sciShift = sciPad[:,_i] * phase_shift
            sciFFT = np.fft.fft(sciShift) #/ len(sciShift) # Matt has his code set up to scale by length array
            # take central 2048 samples
            st = 1024
            en = 3072
            sciFFT_cut = sciFFT[st:en]
            # perform chirp compression
            dechirpData = (sciFFT_cut * refChirpMF[refChirpMF_index[_i],:]) #* window
            # Inverse Fourier transfrom and fix scaling
            EDRData[:,_i] = np.fft.ifft(dechirpData) #* len(dechirpData)
            #-------------------
            # revised PDS method - EDRData2
            #-------------------
            sciShift = sciPad[:,_i] * phase_shift
            sciFFT2 = np.fft.fft(sciShift) #/ len(sciShift2) # Matt has his code set up to scale by length array
            # take central 2049 samples
            st2 = 1024
            en2 = 3073
            sciFFT_cut2 = sciFFT2[st2:en2]
            # perform chirp compression
            dechirpData2 = (sciFFT_cut2 * refChirpMF_pad[refChirpMF_index[_i],:]) #* window2
            dechirpData2 = np.pad(dechirpData2,(0,4096 - dechirpData2.shape[0]),'constant') # zero-pad output data to length of 409
            # Inverse Fourier transfrom and fix scaling
            EDRData2[:,_i] = np.fft.ifft(dechirpData2) #* len(dechirpData2)
            #-------------------
            # alternative method - EDRData3
            #-------------------
            sciFFT3 = np.fft.fft(sciPad[:,_i]) #/ len(sciShift3) # Matt has his code set up to scale by length array
            # take the first 2049 samples
            sciFFT3_cut = sciFFT3[:2049]
            dechirpData3 = (sciFFT3_cut * refChirpMF_pad[refChirpMF_index[_i],:]) #* window3
            dechirpData3 = np.pad(dechirpData3,(0,4096 - dechirpData3.shape[0]),'constant') # zero-pad output data to length of 4096
            EDRData3[:,_i] = np.fft.ifft(dechirpData3) #* len(dechirpData3)
            #-------------------
            # another alternative method - EDRData4 - using reference chirp zero padded to 4096
            #-------------------
            sciFFT4 = np.fft.fft(sciPad[:,_i]) #/ len(sciShift3) # Matt has his code set up to scale by length array
            dechirpData4 = (sciFFT4 * refChirpMF_padx[refChirpMF_index[_i],:]) #* window4
            EDRData4[:,_i] = np.fft.ifft(dechirpData4)# * len(dechirpData4)
        #truncate revised and alternate range compressed vector to 3600
        EDRData2 = EDRData2[:3600,:]
        EDRData3 = EDRData3[:3600,:]
        EDRData4 = EDRData4[:3600,:]
        print(EDRData.shape,EDRData2.shape,EDRData3.shape, EDRData4.shape)
    else:
        for _i in range(records):
            # fourier transform of data
            sciFFT = np.fft.fft(sci[:,_i])# / len(sci[:,_i])
            # multiple Fourier transform of reference chip by that of the data
            dechirpData = (sciFFT * refChirpMF) * window
            # inverse fourier transform of dechirped data to place back in time domain
            EDRData[:,_i] = np.fft.ifft(dechirpData)# * len(sci[:,_i])
    print('Range compression complete')
    # presum data by factor or eight for visualization purposes
    # for _i in range(presumCols - 1):
    #     EDRData_presum[:,_i] = np.mean(EDRData[:,presumFac*_i:presumFac*(_i+1)], axis = 1)
    # # account for traces left if number of traces is not divisible by presumFac
    # EDRData_presum[:,-1] = np.mean(EDRData[:,presumFac*(_i+1):-1], axis = 1)
    # print('Presumming complete')
    # create geom array with relavant data for each record
    for _i in range(records):
        # NOTE(review): relies on the module-level runName global here too.
        geomData[_i,0] = runName.split('_')[1] + runName.split('_')[2]
        geomData[_i,1] = int(_i)
        geomData[_i,2] = auxDF['SUB_SC_PLANETOCENTRIC_LATITUDE'][_i]
        geomData[_i,3] = auxDF['SUB_SC_EAST_LONGITUDE'][_i]
        geomData[_i,4] = auxDF['SOLAR_ZENITH_ANGLE'][_i]
    # convert complex-valued voltage return to power values
    # NOTE(review): hard-coded comparison dataset path -- this fails on any
    # machine without that file; presumably a temporary debugging aid.
    BruceData = np.fromfile('../../../../../orig/supl/SHARAD/EDR/EDR_pc_bruce/592101000_1_Unif_SLC.raw', dtype = 'complex64')
    BruceData = BruceData.reshape(3600, int(len(BruceData)/3600))
    BruceAmp = np.abs(BruceData)
    ampOut = np.abs(EDRData)
    # plot outputs for different methods while comparing range compression options
    if chirp =='calib':
        ampOut2 = np.abs(EDRData2)
        ampOut3 = np.abs(EDRData3)
        ampOut4 = np.abs(EDRData4)
        print(BruceAmp)
        print(ampOut4)
        plt.subplot(4,1,1)
        plt.plot(ampOut[:,int(records/2)])
        plt.title('original PDS method')
        plt.subplot(4,1,2)
        plt.plot(ampOut2[:,int(records/2)])
        plt.title('revised PDS method')
        plt.subplot(4,1,3)
        plt.plot(ampOut3[:,int(records/2)])
        plt.title('alternative method')
        plt.subplot(4,1,4)
        plt.plot(ampOut4[:,int(records/2)])
        plt.title('alternate-alternative method')
        plt.xlabel('sample')
        plt.ylabel('amplitude')
        plt.show()
    else:
        plt.subplot(2,1,1)
        plt.plot(ampOut[:,int(records/2)])
        plt.subplot(2,1,2)
        plt.plot(BruceAmp[:,int(records/2)])
        plt.show()
    # sys.exit()
    # create radargrams from presummed data to ../../orig/supl/SHARAD/EDR/EDR_pc_brucevisualize output, also save data
    # rgram(EDRData3, data_path, runName + '_' + chirp, rel = True)
    # np.savetxt(data_path + 'processed/data/geom/' + runName.split('_')[1] + '_' + runName.split('_')[2] + '_geom.csv', geomData, delimiter = ',', newline = '\n',fmt = '%s')
    # np.save(data_path + 'processed/data/rgram/comp/' + runName.split('_')[1] + '_' + runName.split('_')[2] + '_' + windowName + '_SLC_comp.npy', EDRData)
    # np.save(data_path + 'processed/data/rgram/' + runName.split('_')[1] + '_' + runName.split('_')[2] + '_' + windowName + '_SLC_amp.npy', ampOut)
    t1 = time.time() # End time
    print('--------------------------------')
    print('Total Runtime: ' + str(round((t1 - t0),4)) + ' seconds')
    print('--------------------------------')
    return
if __name__ == '__main__':
    # Resolve the data drive mount point (native Linux vs. WSL-style /mnt).
    data_path = '/MARS/orig/supl/SHARAD/EDR/edr_test/'
    if os.getcwd().split('/')[1] == 'media':
        data_path = '/media/anomalocaris/Swaps' + data_path
    elif os.getcwd().split('/')[1] == 'mnt':
        data_path = '/mnt/d' + data_path
    else:
        print('Data path not found')
        sys.exit()
    # Derive the observation name and companion file paths from the label file.
    lbl_file = sys.argv[1]
    lblName = data_path + lbl_file
    # BUG FIX: str.rstrip('_a.lbl') strips a *character set* ('_', 'a', '.',
    # 'l', 'b') from the end and could over-strip run names; remove the exact
    # suffix instead.
    if lbl_file.endswith('_a.lbl'):
        runName = lbl_file[:-len('_a.lbl')]
    else:
        runName = lbl_file
    auxName = data_path + runName + '_a_a.dat'
    EDRName = data_path + runName + '_a_s.dat'
    chirp = 'calib'
    presumFac = 8 # presum factor for radargram visualization; actual data is not presummed
    beta = 0 # beta value for kaiser window [0 = rectangular, 5 Similar to a Hamming, 6 Similar to a Hann, 8.6 Similar to a Blackman]
    # Map the Kaiser beta to a window name used in output file names.
    if beta == 0:
        windowName = 'Unif'
    elif beta == 5:
        windowName = 'Hamming'
    elif beta == 6:  # BUG FIX: was `bea`, a NameError whenever beta == 6
        windowName = 'Hann'
    elif beta == 8.6:
        windowName = 'Blackman'
    else:
        print('Unknown window type')
        sys.exit()
    #if (not os.path.isfile(data_path + 'processed/data/geom/' + runName + '_geom.csv')):
    main(EDRName, auxName, lblName, chirp = chirp, presumFac = presumFac, beta = beta)
    # for file in os.listdir(data_path):
    #     if file.endswith('.lbl'):
    #         lbl_file = file
    #         lblName = data_path + lbl_file
    #         runName = lbl_file.rstrip('_a.lbl')
    #         auxName = data_path + runName + '_a_a.dat'
    #         EDRName = data_path + runName + '_a_s.dat'
    #         main(EDRName, auxName, lblName, chirp = chirp, presumFac = presumFac)
    #else :
    #    print('\n' + runName.split('_')[1] + runName.split('_')[2] + ' already processed!\n')
|
<filename>src/locales/arabic.js
module.exports = {
GLOBAL: {
EVERYONE: "Everyone",
NOT_SPECIFIED: "غير محدد",
PROVIDE_ARGS: "من فضلك ادخل المتغير",
ERROR: "حدث خطأ ما",
NAME: "الاسم",
NOT_AN_OPTION: "{option} ليس خيارا صحيح ",
SUCCESS: "تم",
REASON: "السبب",
URL: "URLعنوان ",
NONE: "بلا",
YES: "نعم",
NO: "لا",
},
GUILD: {
NOT_FOUND: "لم يتم العثور علي السرفر",
LEFT: "لقد غادرت **${guild_name}**",
LEVEL_UP_MESSAGES: "رسالة رفع المستوي",
ANNOUNCE_CHANNEL: "قناة الاخبار",
SUGGEST_CHANNEL: "قناة الاقتراحات",
WELCOME_CHANNEL: "قناة الترحيب",
LEAVE_CHANNEL: "قناة المغادرة",
PREFIX: "Prefix",
IS_VERIFIED: "نعم ، هذا السرفر تم تأكيده",
IS_PARTNERED: "نعم هذا السرفر شريك",
NOT_VERIFIED: "لا هذا السرفر لم يتم تأكيده",
NOT_PARTNERED: "لا هذا السرفر ليس شريك",
OWNER: "اونر السرفر",
CHANNEL_C: "عدد القنوات",
EMOJI_C: "عدد الايموجيات",
ROLES_C: "عدد الرولات",
MEMBER_C: "عدد الاعضاء",
REGION: "Region",
VERIFICATION: "مستوي الحماية",
MFA: "MFA Level",
BOOSTS: "البوست",
BOOST_LVL: "مستوي البوست",
VERIFIED: "مؤكد",
PARTNERED: "شريك",
},
MEMBER: {
TAG: "علامة",
BOT: "هل هو بوت؟",
ROLES: "الرولات",
BADGES: "شارات",
ONLINE: "متصل",
OFFLINE: "غير متصل",
MEMBERS: "الاعضاء",
STATUS: "الحالة",
CREATED_ON: "تم انشائه بتاريخ",
JOINED_AT: "انضم بتاريخ",
ID: "الايدي",
USERNAME: "<NAME>",
NICKNAME: "اللقب",
PROVIDE_MEMBER: "من فضلك منشن شخص",
NOT_FOUND: "هذا الشخص غير موجود",
CANNOT_BE_BANNED: "لا يمكنك تبنيد هذا الشخص",
DM_BAN_MESSAGE: "لقد تم تبنيدك **banned**من **{guild_name}**, السبب: **{ban_reason}**",
GUILD_BAN_MESSAGE:
"{member} لقد تم تبنيده بنجاح . السبب: **{ban_reason}**. لقد قمت بارسال رسالة اليه .",
BOT_DATA: "Bot data does not save, therefore I cannot fetch his data",
},
ROLES: {
MY_ROLE_NOT_HIGH_ENOUGH: "رتبتي ليست اعلي من رول **{role}** ",
MY_ROLE_MUST_BE_HIGHER: "يجب ان تكون رتبتي اعلي من **{member}** ",
YOUR_ROLE_MUST_BE_HIGHER: "يحب ان تكون رتبتك اعلي من **{role}** ",
PROVIDE_ROLE: "من فضلك منشن الرول ",
ALREADY_HAS_ROLE: "هذا المستخدم يمتلك هذه الرتبة بالفعل ",
ADDED_ROLE_TO: "تم اعطاء رول **{role}** الي {member} بنجاح",
},
IMAGE: {
CLICK_TO_VIEW: "[اضغط هنا اذا لم يتم تحميل الصورة]",
CLYDE: "كلايد",
CUDDLES: "كلود مع ",
FEEDED: "اطعم",
HUGGED: "عانق",
PATTED: "قام بملاطفة",
KISSED: "قام بتقبيل ",
POKED: "قام بوخز",
SLAPPED: "قام بصفع",
NO_GIPHY_KEY: "No giphy api was found in the config (contact the bot owner)",
NO_GPIHY_FOUND: "No gifs were found with that",
},
ANIMAL: {
CAT_FACT: "حقيقة القطة",
DOG_FACT: "حقيقة الكلب",
SNAIL_FACT: "حقيقة الحلزونt",
COW: "بقرة",
},
BOT_OWNER: {
SHUTDOWN: "جاري ايقاف البوت",
EVAL: "Eval command",
EVAL_TYPE: "النوع",
EVAL_INPUT: "الادخال",
EVAL_OUTPUT: "الاخراج",
UPDATE_NICKNAME: "تم تحديث اللقب بواسطة اونر البوتr",
UPDATED_NICKNAME: "تم تغير اللقب الي **{nickname}**",
PROVIDE_TYPE: "من فضلك قم بادخال شئ ما",
CANNOT_BL_OWNER: "لا يمكن اعطاء اونر البوت قائمة سوداء",
CANNOT_BL_BOT: "The bot cannot be blacklisted",
NOT_BLD: "هذا المستخدم ليس بالقائمة السوداء",
ALREADY_BLD: "{member} بالفعل بالقائمة السوداء",
NOT_OPTION: "**{type}** ليس خيارا صحيحاً",
BLACKLISTED: "blacklisted",
UNBLACKLISTED: "unblacklisted",
BLACKLISTED_SUCCESS: "{member} كان {type}",
BLD_STATUS: "حالة القائمة السوداء",
},
LEVELS: {
XP: "خبرة",
LEVEL: "المستوي",
LEADERBOARD: "قائمة المستويات",
MEMBER_IS_LEVEL: "{member} بالمستوي **{level}** مع **{user_xp}خبرة**",
RESET_CONF: "اعادة تعيين جميع الخبرة? y/n",
RESET_SUCCESS: "تم اعادة تعيين خبرة الجميع",
RESET_CANCEL: "نم الغاء اعادة تعيين الخبرة",
PROVIDE_AMOUNT: "من فضلك قم بادخال العدد",
PROVIDE_VALID_NR: "من فضلك قم بكتابة رقم صحيح",
GIVE_XP_SUCCESS: "تم اعطاء **{member}** **{amount}**خبرة بنجاح",
},
MUSIC: {
MUST_BE_IN_VC: "يجب ان تكون بقناة صوتية",
NO_QUEUE: "ليس هناك شيئ لتشغيله",
QUEUE_CLEARED: "تم حذف القائمة",
QUEUE: "قائمة الموسيقيlume",
BETWEEN_0_100: "يجب ان يكون الصوت بين 0 و 100",
VOL_SUCCESS: "تم ضبط الصوت الي {vol}%",
PLAYING: "جاري التشغيل",
PAUSED: "تم الايقاف",
DURATION: "التوقيت",
UPLOADED_BY: "تم الرفع بواسطة",
UPLOADED_AT: "تم الرفع بتاريخ",
VIEWS: "المشاهدات",
LIKES: "الاعجابات",
DISLIKES: " لم يعجبه",
SECONDS: "ثانية",
VOLUME: "الصوت",
PROVIDE_SEARCH: "الرجاء تقديم استعلام بحث",
ADDED_TO_QUEUE: "{song} has been added to the queue",
ADDED_PL_TO_QUEUE: "Playlist: {name} was added to queue ({length} songs)",
PROVIDER_NOT_SUPP: "That provider is not supported",
NOW_PLAYING: "جاري التشغيل:",
REQUESTED_BY: "بواسطة",
NO_PERMS: "لا امتلك صلاحيات لهذه القناة الصوتية",
NOT_SAME_VC: "يجب ان تكون بقناتي الصوتية ",
MUST_BE_SAME_VC: "You must be in the same voice chat with the bot",
NO_RESULTS: "No songs were found",
JOIN_ERROR: "There was an error joining the voice channel, make sure it's not full!",
LIVE_NOT_SUPPORTED: "Live videos are not supported",
},
ECONOMY: {
MONEY: "المال",
BANK: "البنك",
BALANCE: "الرصيد",
DAILY_ERROR: "لقد استلمت بالفعل الراتب اليومي",
WEEKLY_ERROR: "لقد استلمت بالفعل الراتب الاسبوعي",
DAILY_SUCCESS: "لقد تم اضافة الراتب اليومي اليك وهو **{amount}** كوينز",
WEEKLY_SUCCESS: "لقد تم اضافة الراتب الاسبوعي اليك وهو **{amount}** كوينز",
STORE_EMPTY:
"المتجر في هذا السرفر فارغ يمكنك طلب من الادمن اضافة اغراض عن كريق امر `{prefix}store add <الغرض>`",
PROVIDE_ITEM_TO_BUY: "من فضلك ادخل الغرض المراد اضافته للمتجر",
NOT_ENOUGH_MONEY: "انت لا تمتلك الرصيد الكافي لشراء ذلك",
BUY_SUCCESS: "تم شراء **{item}** المبلغ **{price}**",
NOT_FOUND_STORE:
"**{query}** الغرض ليس موجود بالمتجر فضلك استخدمuse `{prefix}store` لتري جميع الاغراض المتوفرة بالمتجر",
ALREADY_OWN_ITEM: "انت بالفعل تمتلك هذا الغرض ",
PROVIDE_VALID_AMOUNT: "من فضلك استخدم رقم صحيح ",
DEPOSITED_ALL: "تم ادخار جميع اموالك بنجاح",
DEPOSITED_AMOUNT: "تم ادخار **{amount} كوينز**",
WITHDRAW_ALL: "لقد سحبت جميع اموالك بنجاح",
WITHDRAW_AMOUNT: "لقد قمت بسحب **{amount}كوينز **",
PROFILE: "الملف الشخصي",
INV_EMP: "مخزن المستخدم فارغ",
INVENTORY: "المخزن",
INV_ITEMS: "اغراض بالمخزن",
VIEW_INVENTORY: "استخدم `{prefix}inventory <user>` لعرض جميع ممتلكاته",
MONEY_LEADERBOARD: "قائمة متصدرين الاموال",
TOTAL_BALANCE: "اجمالي الرصيد",
BOTH_COUNTED: "تم احتساب كل من الرصيد والبنك",
DICE_LANDED: "لقد حصلت علي : {roll}",
DICE_WON: "تهانينا لقد فزت ب **{price}كوينز**",
DICE_LOST: "تحتاج الي الرقم **6* لتفوز ب **{price}كوينز**",
RECENTLY_WORKED: "لقد حصلت علي عملك اليومي مسبقا, {time} الوقت المتبقي",
WORKED: "{member} عمله لليوم هو **{job_name}** ولقد حصل علي **{amount}**!",
CANNOT_PAY_SELF: "لا يمكنك الدفع لنفسك",
PAY_SUCCESS: "تم اعطاء **{member}** **{amount}كوينز **",
CANNOT_ROB_SELF: "لا يمكنك سرقة نفسك",
BETWEEN_1_1000: "يجب ان يكون المبلغ بين 1 و 1000",
MEMBER_NO_MONEY: "لا يملك المستخدم اي اموال لذلك لا يمكنك سرقته.",
ROB_SUCCESS: "تمت السرقة **{amount}كوينز ** من **{member}**",
STORE: "المتجر",
MANAGE_STORE_PERMS: "انت لا تمتلك الصلاحيات الكافية (Manage Server)",
PROVIDE_VALID_ITEM: "من فضلك اختر غرض صحيح add/remove!",
PRICE: "السعر",
ALREADY_EXISTS: "**{item}** بالفعل متواجد بالمتجر",
PROVIDE_PRICE: "من فضلك ادخل السعر ",
MUST_BE_NUMBER: "المبلغ يجب ان يكون رقم!",
ADDED_TO_STORE: "الي المتجر {item} تم اضافة",
NOT_IN_STORE: "**{item}** غير موجود بالمتجر",
REMOVED_FROM_STORE: "من المتجر {item} تمت ازالة ",
WON_SLOTS: "You won and got {amount} coins",
LOST_SLOTS: "You lost!",
MAX_BET: "max bet amount is 500",
ADDED_MONEY: "Successfully added {amount} to user balance",
MIN_BET: "Minimum bet of 1 is required",
MIN_AMOUNT: "Amount must be above 0",
RESET_CONF: "Reset all balance? y/n",
RESET_SUCCESS: "Successfully reset everyone's balance",
RESET_CANCEL: "reset-economy was canceled",
},
GAMES: {
BETS_ON: "{member_1} يراهن علي {member_2}",
LOST_BET: "{member_1} راهن علي {member_2}!\n {member_1} لم يفز بالمراهنة",
WON_BET: "{member_1} bet on {member_2} و {member_1} فاز بالمراهنة",
CALC: "حاسبة",
INVALID_CALC: "حساب غير صحيح",
COMPLIMENT: "جمع",
LANDED_TAILS: "لقد هبطت علي صورة",
LANDED_HEADS: "لقد هبطت علي رأس",
HAPPINESS: "السعادة",
IQ_TEST: "اختبار الذكاء",
IQ_IS: "معدل ذكائك هو: {iq}",
RPS: "حجر ورقة مقص",
ROCK: "حجر",
PAPER: "ورقة",
SCISSORS: "مقص",
WYR: "هل تفضل ؟",
ANSWER: "الاجابة",
QUESTION: "السؤال",
YOU_WON: "You won 50coins!",
BOT_WON: "The bot has won!",
BOTH_WON: "It's a tie",
OPPONENTS_CHOICE: "Opponents choice",
YOUR_CHOICE: "Your choice",
WINNER: "Winner",
INVALID_INPUT: "Input must be 1 of the following:",
},
UTIL: {
AVATAR: "االافاتار",
NOT_AFK: "تم الغاء تفعيل نظام ال AFK",
AFK: "تم تشغيل نظام ال AFK",
CLICK_TO_DOWNLOAD: "[اضغط هنا لتحميل الملف]",
BMI_WEIGHT: "الوزن",
BMI_HEIGHT: "الطول",
BMI: "التناسق",
BMI_CM: "من فضلك ادخل طولك بالسنتيميتر",
BMI_KG: "من فضلك ادخل وزنك بالكيلو جرام",
SUPPORT_SERVER: "سرفر الدعم الفني",
BUG_REPORT: "{member} has reported a bug",
BUG_REPORTED: "Bug report was send!",
CHANNEL_TOPIC: "وصف القناة",
TEXT_CHANNEL: "قناة كتابية",
VOICE_CHANNEL: "قناة صوتية",
TEXT_CHANNELS: "قنوات كتابية" /* plural! */,
VOICE_CHANNELS: "قنوات صوتية" /* plural! */,
NO_DEF_FOUND: "لم يتم العثور علي تعريف لل {word}",
DEF_FOR_WORD: "تعريف ال {word}",
CATEGORY: "كتاجري",
DEFINITION: "التعريف",
DEPENDENCIES: "التبعيات",
ANIMATED: "متحركة",
NON_ANIMATED: "غير متحركة",
NEW_FEEDBACK: "ملاحظة جديدة",
FEEDBACK_SEND: "تم ارسال الملاحظة بنجاح",
GH_NOT_FOUND: "حساب الجيت هب هذا ليس موجودا",
GH_FOLLOWING: "يتابع",
GH_FOLLOWERS: "المتابعون",
GH_WEBSITE: "الموقع الالكتروني",
GH_LOCATION: "الموقع",
GH_BIO: "Bio",
GH_PROVIDE_USERNAME: "<NAME>",
SEARCHING: "جاري البحث",
PROVIDE_EXT: "من فضلك ادخل الملحقات كمثال: `ts`, `js`, `html`, ...",
PROVIDE_CODE: "من فضلك ادخل الكود",
NO_IMG_FOUND: "لم يتم العثور علي صور",
PROVIDE_M_S: "من فضلك ادخل اسم او فيلم او مسلسل",
DB_RATINGS: "التقييمات",
DB_COUNTRY: "البلد",
DB_GENRES: "الانواع",
DB_AWARDS: "الجوائز",
DB_LANGS: "اللغات",
DB_RELEASED: "تم الاصدار",
DB_NOT_FOUND: "لم يتم العثور علي فيلم {search}",
TOTAL_MB: "الاجمالي",
HUMANS: "البشريين",
BOTS: "البوتات",
PLAYERS: "اللاعبين",
VERSION: "الاصدار",
PROTOCOL: "البروتوكول",
DESCRIPTION: "الوصف",
MC_NOT_FOUND: "السرفر غير موجود",
MC_PROVIDE_IP: "من فضلك ادخل اي بي السرفى",
NPM_SEARCH: "NPM بحث",
NPM_TOP_5: "تم العثور علي اعلي 5 تقييمات**{query}**",
AUTHOR: "المؤلف",
VIEW_ON_NPM: "العرض علي npm",
MAX_PLAYERS: "اقصي عدد لاعبين",
PS_NOT_FOUND: "لم يتم العثور علي هذا التطبيق",
DEVELOPER: "المطور",
SCORE: "النتيجة",
CREATED_BY: "بواسطة {member}",
MENTIONABLE: "المنشن",
ROLE_NOT_FOUND: "لم يتم العثور علي الرول",
ROLES: "الرولات",
NO_GUILD_ICON: "هذا السرفر لا يملك صورة",
PROVIDE_EMOJI: "من فضلك اختر ايموجي",
ENLARGED_EMOJI: "النسخة المكبرة من {emoji}",
INVALID_EMOJI: "ايموجي غير صالح",
PROVIDE_SB: "الرجاء تقديم subreddit",
PROVIDE_SKIN: " من فضلك ادخل اسم الاسكين",
SKIN_NOT_FOUND: "للاعب `{search}` لا يوجد",
SKIN_NAME: " سكين اللاعب {name}",
DOWNLOAD_SKIN: "[تحميل الاسكين]",
G_TRANSLATE: "ترجمة جوجل",
NEW_SUGGESTION: "اقتراح جديد",
NO_SUGG_CHANNEL: "لا يمتلك سرفرك قناة للاقتراحات من فضلك قم بتعيين القناة اولا",
UPTIME: "{member} يعمل منذ {time}",
WEATHER: "الطقس",
PROVIDE_COUNTRY: "من فضلك ادخل اسم المدينة",
C_NOT_FOUNCit: "**{query}** لم يتم العثور علي المدينة",
MAIN: "الرئسية",
CURRENT: "الحالي",
CURRENT_TEMP: "درجة الحرارة الحالية",
FEELS_LIKE: "التوقعات",
WIND_SPEED: "سرعة الرياح",
WIND_DEGREES: "درجات الرياح",
COUNTRY: "الدولة",
NO_W_FOUND: "لم يتم العثور علي نتائج",
TOTAL_EPISODES: "اجمالي الحلقات",
ANIME_NOT_FOUND: "لم يتم العثور علي الانمي المطلوب",
START_DATE: "تاريخ البدء",
END_DATE: "تاريخ الانتهاء",
POPULARITY_RANK: "الشعبية",
DOC_NOT_FOUND: "That was not found on the docs",
MAINTAINERS: "Maintainers",
LAST_MODIFIED: "Last modified",
},
BOT: {
GUILDS: "السرفرات",
CHANNELS: "القنوات",
USERS: "المستخدمين",
COMMAND_COUNT: "عدد الاوامر",
VC_CONNS: "المتصلين صوتيا",
INFO_2: "بيانات البوت",
INFO: "بيانات البوت",
SYSTEM_INFO: "بيانات النظام",
RAM_USAGE: "استخدام الرام",
UPTIME: "وقت التشغيل",
DJS_V: "Discord.js نسخة",
NODE_V: "NodeJS نسخة",
REPO: "المستودع",
DASHBOARD: "Dashboard",
DEVELOPER: "Developer",
CONTRIBUTORS: "Contributors",
INVITE_BOT: "Invite bot",
USED_SINCE_UP: "Used since up",
TOTAL_USED_CMDS: "Total used",
LATENCY: "Latency",
},
CONFIG: {
OPTION_CMD_WORK: " يجب توفير{option} ، لكي يعمل هذا الأمر",
},
HELP: {
CAT_NOT_EXIST: "الكتاجري هذا غير مدرج",
CMD_NOT_FOUND: "الامر او الاختصار غير موجود",
COMMANDS: "الاوامر",
COOLDOWN: "وقت التهدئة",
ALIASES: "الاختصارات",
USAGE: "الاستخدام",
COMMAND: "الامر",
OPTIONS: "الخيارات",
GUILD_PREFIX: "بريفكس السرفر",
CMD_DESC: "استخدم `{prefix}help <اسم الامر>",
OWNER_ONLY: "لا يمكن عرض الاوامر هذه للمستخدمين",
NSFW_ONLY: "لا يمكن عرض الاوامر غير بقناة NSFW",
CUSTOM_CMD: "This is a custom command, therefore I cannot show more info",
CATEGORIES: {
admin: "اوامر الادمنز",
animal: "اوامر الحيوانات",
botowner: "اوامر اونر البوت",
nsfw: "NSFW اوامر",
hentainsfw: "Hentai اوامر",
games: "اوامر الالعاب",
image: "اوامر الصورة",
music: "اومر الميوزك",
util: "الاوامر العامة",
economy: "الاوامر الاقتصادية",
levels: "اوامر الفلات",
exempt: "أوامر الاستثناء (الأوامر التي لا يمكن تعطيلها)",
disabled: "Disabled commands (this guild only)",
giveaway: "Giveaway commands",
reminder: "Reminder commands",
reactions: "Reaction role commands",
custom: "Custom commands",
ticket: "Ticket commands",
},
},
POKEMON: {
SPECIES: "النوع",
ABILITIES: "القدرات",
HEIGHT: "الطول",
WEIGHT: "الوزن",
EXPERIENCE: "الخبرة",
GENDER: "الجنس",
EGG_GROUPS: "مجموعات البيض",
FAMILY: "العائلة",
EVO_STAGE: "مرحلة التطور",
EVO_LINE: "خط التطور",
STATS: "الحالة",
HP: "نقاط الصحة",
ATTACK: "الهجوم",
DEFENSE: "الدفاع",
SP_ATK: "SP ",
SP_DEF: "SP DEF",
SPEED: "السرعة",
TOTAL: "الاجمالي",
PROVIDE_NAME: "من فضلك ادخل اسم بوكيمون",
NOT_FOUND: "لم يتم العثور علي {query}. من فضلك تحقق من التهجئة الصحيحة وحاول مرة اخري",
},
REACTIONS: {
NO_CHANNEL_ID: "Please provide a channelId",
NO_EMOJI: "Please provide a valid emoji (Custom emojis soon™)",
NO_ROLE: "Please provide a valid role",
CHANNEL_NOT_FOUND: "The channel with id `{channelId}` was not found",
MSG_NOT_FOUND: "The message with id `{messageId}` was not found",
TITLE: "Reaction Role",
DESC: "Reactions:",
SUCCESS: "Successfully send message with reactions",
NOT_FOUND: "Reaction was not found by that messageId",
DELETE_SUCCESS: "Successfully deleted reaction",
FOUND_NO_MSG:
"Reaction was found but the message was not, reaction was deleted from the database",
},
REMINDER: {
SUCCESS: "Success! I will ping you **in this channel** in {time}",
REMOVE_SUCCESS: "Successfully removed your reminder",
ALREADY_ON: "You already have a reminder set",
NO_REMINDER_SET: "You don't have a reminder set",
INVALID_DATE: "That is not a valid date",
},
COVID: {
CASES: "Cases",
RECOVERED: "Recovered",
DEATHS: "Deaths",
TOTAL: "Total",
TODAY: "Today",
CRITICAL: "Critical",
TESTS: "Tests",
LAST_UPDATED: "Last updated",
NOT_FOUND: "Country was not found",
TOTAL_POP: "Population",
},
EASY_GAMES: {
PROVIDE_MEMBER: "Please provide a member",
ACCEPT_CHALLENGE: "{user} Do you accept this challange?",
DOESNT_PLAY: "looks like {user} doesnt wanna play",
WICH_SIDE: "**{user}, Which Side Do You Pick? Type `End` To Forfeit!**",
GAME_OVER: "Times up!",
END: "end",
INACTIVITY: "game ended due to inactivity!",
WINNER: "Congrats u have won {winner}",
DRAW: "Its a draw",
},
ADMIN: {
ADD_CMD_ALREADY_EXISTS: "This command name is already added in guild custom commands.",
ADD_CMD_USED_BY_BOT: "This command name is already in use by the bot",
ADD_CMD_ADDED: "Successfully added **{name}** as a custom command to this guild",
DEL_CMD_NOT_FOUND: "That command was not found",
DEL_CMD_DELETED: "Successfully deleted the **{cmd}** Command",
DEL_CMD_NO_COMMANDS: "This guild doesn't have any custom commands",
BLACKLISTED_PROVIDE_OPTION: "Please provide an option '`add`, `remove`, `get`'",
BLACKLISTED_ALREADY_EXISTS: "**{item}** already exist in blacklisted words",
BLACKLISTED_ADDED: "Successfully added **{item}** to blacklisted words",
BLACKLISTED_NOT_EXISTS: "**{item}** does not exist in blacklisted words",
BLACKLISTED_REMOVED: "Successfully removed **{item}** from blacklisted words",
BLACKLISTED_NONE_YET: "There are no blacklisted words yet.",
BLACKLISTED_NO_WORDS: "This guid does not have any blacklisted words yet",
CREATED_ROLE_CREATED: "Created Role",
CREATED_ROLE_ADDED: "Successfully created the `{roleName}` role",
C_TOPIC_PROVIDE_TOPIC: "Please provide a new topic",
C_TOPIC_ADDED: "Successfully updated channel topic to {topic}",
DEAFEN_ALREADY_DEAFENED: "User is not in a voice channel or is already deafened",
DEAFEN_SUCCESS:
"{member} was successfully deafenned from the server. Reason: **{reason}**. I have also send a DM letting the person know.",
DEAFEN_SUCCESS_DM: "You've been **Deafenned** from **{guild}**, Reason: **{reason}**",
DELETE_PROVIDE_AMOUNT: "Amount must be a valid number and between 0 below 100",
DELETE_DELETED: "Deleted {amount} messages.",
DELETE_ERROR:
"An error occurred when deleting the messages, make sure they are not older than 14days",
KICK_CANNOT_KICK: "That person can't be kicked.",
KICK_SUCCESS_DM: "You've been **kicked** from **{guild}**, Reason: **{reason}**",
KICK_SUCCESS:
"**{tag}** was successfully kicked from the server. Reason: **{reason}**. I have also send a DM letting the person know.",
MUTE_CANNOT_MUTE: "That member cannot be muted",
MUTE_ALREADY_MUTED: "Member is already muted",
MUTE_SUCCESS_DM: "You've been **muted** from **{guild}**, Reason: **{reason}**",
MUTE_SUCCESS:
"**{tag}** was successfully muted from the server. Reason: **{reason}**. I have also send a DM letting the person know.",
BAN_BANNED_BY: "**Banned By:**",
NUKE_NUKED: "Channel was successfully nuked",
NUKE_CANCELED: "Nuke command was canceled",
NUKE_CONFIRM: "Are you sure you want to nuke this channel? y/n",
TEXT_OR_VALID_CHANNEL: "Please provide text or a valid channel!",
DEFAULT_ANNOUNCE_CHANNEL:
"You can also set a default channel using `set announce-channel <channel mention>`",
OPTION_DOES_NOT_EXIST: "{option} does not exist",
PROVIDE_COMMAND_OR_CATEGORY_NAME: "Please provide a command or category name",
COMMAND_CANNOT_DISABLED: "That command cannot be disabled",
COMMAND_ALREADY_DISABLED: "That command is already disabled",
COMMAND_DISABLED: "Successfully **disabled** {commandName}",
COMMAND_ENABLED: "Successfully **enabled** {commandName}",
COMMAND_NOT_DISABLED: "That command is not disabled",
COMMAND_OR_CATEGORY_NOT_FOUND: "Command or category was not found",
COMMAND_NOT_FOUND: "Command was not found",
CATEGORY_CANNOT_DISABLED: "That category cannot be disabled!",
CATEGORY_ALREADY_DISABLED: "That category is already disabled",
CATEGORY_DISABLED: "Successfully **disabled** {category}",
CATEGORY_ENABLED: "Successfully **enabled** {category}",
CATEGORY_NOT_DISABLED: "That category is not disabled",
DISABLED_CATEGORY: "Disabled category",
DISABLED_COMMAND: "Disabled command",
ENABLED_CATEGORY: "Enabled category",
ENABLED_COMMAND: "Enabled command",
PROVIDE_VALID_OPTION: "Please provide an valid option (`add`, `remove`)",
PROVIDE_CHANNEL: "Please provide a channel",
CHANNEL_ALREADY_IGNORED: "That channel is already ignored by the bot",
ADD_TO_IGNORED: "Added {item} to ignored channels",
CHANNEL_NOT_IGNORED: "That channel is not ignored by the bot",
REMOVE_IGNORED: "Remove {item} from ignored channels",
NOT_A_OPTION: "`{option}` is not a option",
CHANNEL_ALREADY_LOCKED: "That channel is already locked!",
REASON_LOCK_CHANNEL: "Please provide a reason to lock this channel",
LOCKED_CHANNEL_REASON: "Successfully locked {channel}, Reason: **{lockReason}**",
NO_PERMISSIONS: "Sorry, You don't have the correct permissions for this command.",
CURRENT_PREFIX:
"Current server prefix: `{guildPrefix}`\nUse `{guildPrefix}prefix <prefix>` to set a new prefix",
UPDATE_PREFIX: "Successfully updated prefix to `{prefix}`",
MY_ROLE_MUST_BE_HIGHER: "My role must be higher than **{roleName}** role!",
MY_ROLE_MUST_BE_HIGHER2: "My role must be higher than **{needsRoleTag}** highest role!",
YOUR_ROLE_NOT_HIGHT: "Your role is not high enough than **{roleName}** role!",
USER_WAS_NOT_FOUND: "User wasn't found",
REMOVED_ROLE: "Successfully removed **{roleName}** from {needsRole}",
REMOVED_ROLE_EVERYONE: "Successfully Removed **{roleName}** from Everyone",
PROVIDE_VALID_USER: "Please provide a valid user",
NO_WARNINGS: "There are no warnings",
REMOVED_ALL_WARNINGS: "Successfully removed all warnings",
},
TICKET: {
CANNOT_DO_ACTION: "This action cannot be done in a non ticket channel",
CLOSING: "Closing this ticket in 15 seconds, type `cancel` to cancel",
WILL_NOT_CLOSE: "This ticket will not be closed.",
ALREADY_ACTIVE_TICKET: "You already have an active ticket",
TICKET_FOR: "Support ticket for: {member}",
CREATED: "Successfully created ticket!",
},
EVENTS: {
CHANNEL_CREATED: "Channel Created",
CHANNEL_CREATED_MSG: "{channel_type}: **{channel}** was created",
CHANNEL_DELETED: "Channel Deleted",
CHANNEL_DELETED_MSG: "{channel_type}: **{channel}** was deleted",
CHANNEL_RENAME_MSG: "{channel_type}: **{channel}** was renamed to **{new_channel}**",
CHANNEL_RENAME: "Channel Rename",
EMOJI_CREATED_MSG: "Emoji: {emoji} was created",
EMOJI_CREATED: "New Emoji Created",
EMOJI_DELETED_MSG: "Emoji: **{emoji}** was deleted",
EMOJI_DELETED: "Emoji Deleted",
EMOJI_RENAMED_MSG: "Emoji: **{emoji_name}** was renamed to **{new_name}** ({emoji})",
},
};
|
package de.rieckpil.blog.boundary;
import java.io.Serializable;
import java.text.DecimalFormat;
import java.util.List;
import javax.annotation.PostConstruct;
import javax.faces.view.ViewScoped;
import javax.inject.Inject;
import javax.inject.Named;
import de.rieckpil.blog.control.CustomerService;
import de.rieckpil.blog.entity.Customer;
@Named
@ViewScoped
public class CustomerListBean implements Serializable {

    private static final long serialVersionUID = 4773746274170179581L;

    // All customers currently backing the table.
    private List<Customer> customers;
    // Subset produced by the table's filter widget; null until a filter is applied.
    private List<Customer> filteredCustomerList;
    // Rows the user has selected; null until the first selection is made.
    private List<Customer> selectedCustomerList;

    @Inject
    private CustomerService customerService;

    /** Loads the customer list once after bean construction. */
    @PostConstruct
    public void init() {
        customers = customerService.getCustomers();
    }

    /**
     * Sums the billed revenue of all loaded customers and formats it with
     * grouping separators (e.g. "1,234,567"). Returns "0" when no customer
     * list has been loaded yet.
     */
    public String getTotalRevenue() {
        if (this.customers == null) {
            return "0";
        }
        long totalRevenue = customers.stream().mapToLong(Customer::getBilledRevenue).sum();
        return new DecimalFormat("###,###.###").format(totalRevenue);
    }

    /**
     * Deletes every selected customer, keeps an active filter view in sync,
     * and reloads the customer list once when done. Safe to call when no
     * rows are selected.
     */
    public void deleteCustomers() {
        if (selectedCustomerList == null) {
            return; // nothing selected, nothing to delete
        }
        for (Customer customer : selectedCustomerList) {
            this.customerService.deleteCustomer(customer);
            if (filteredCustomerList != null) {
                this.filteredCustomerList.remove(customer);
            }
        }
        // Refresh once after all deletions instead of once per deleted row,
        // as the original did inside the loop.
        this.customers = customerService.getCustomers();
    }

    public List<Customer> getCustomers() {
        return customers;
    }

    public void setCustomers(List<Customer> customers) {
        this.customers = customers;
    }

    public List<Customer> getFilteredCustomerList() {
        return filteredCustomerList;
    }

    public void setFilteredCustomerList(List<Customer> filteredCustomerList) {
        this.filteredCustomerList = filteredCustomerList;
    }

    public List<Customer> getSelectedCustomerList() {
        return selectedCustomerList;
    }

    public void setSelectedCustomerList(List<Customer> selectedCustomerList) {
        this.selectedCustomerList = selectedCustomerList;
    }
}
|
<gh_stars>10-100
/*
* Copyright (c) 1989, 1993, 1994
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* <NAME>.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef CFSTORE_NO_FNMATCH
#define CFSTORE_NO_FNMATCH
#if defined(LIBC_SCCS) && !defined(lint)
static char sccsid[] = "@(#)cfstore_fnmatch.c 8.2 (Berkeley) 4/16/94";
#endif /* LIBC_SCCS and not lint */
/* In order to support ARM toolchain, this have been removed from the original
* cfstore_fnmatch.c from newlib.
* #include <sys/cdefs.h>
*/
/*
* Function cfstore_fnmatch() as specified in POSIX 1003.2-1992, section B.6.
* Compares a filename or pathname to a pattern.
*/
#include "cfstore_fnmatch.h"
#include <ctype.h>
#include <string.h>
#include <stdio.h>
#include <limits.h>
/* code copied from the original cfstore_fnmatch.h */
#define FNM_NOESCAPE 0x01 /* Disable backslash escaping. */
#define FNM_PATHNAME 0x02 /* Slash must be matched by slash. */
#define FNM_PERIOD 0x04 /* Period must be matched by period. */
#define FNM_LEADING_DIR 0x08 /* Ignore /<tail> after Imatch. */
#define FNM_CASEFOLD 0x10 /* Case insensitive search. */
#define EOS '\0'
#define RANGE_MATCH 1
#define RANGE_NOMATCH 0
#define RANGE_ERROR (-1)
/* In order to support ARM toolchain and simplify the number of newlib posix files used,
* this have been copied from collate.c, and the license for this code has been included at the
* here:
*
* Copyright (c) 1995 <NAME> <<EMAIL>>
* at Electronni Visti IA, Kiev, Ukraine.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
/* Flag copied from newlib's collate.c: non-zero means no locale collation
 * table is loaded. It is fixed at 1 in this port and never modified here,
 * so rangematch() below always compares range endpoints by raw character
 * code instead of calling __collate_range_cmp(). */
int __collate_load_error = 1;
/* In order to support ARM toolchain and simplify the number of newlib posix files used,
* the following has been copied from collcmp.c, and the license for this code is
* included here:
*
* Copyright (C) 1996 by <NAME>, Moscow, Russia.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* Compare two characters converting collate information
* into ASCII-compatible range, it allows to handle
* "[a-z]"-type ranges with national characters.
*/
/*
 * Compare two characters according to the current locale's collation
 * order. Each argument is first reduced to the unsigned-char range.
 * Identical codes compare equal immediately; otherwise strcoll() decides,
 * and the raw code difference breaks any collation tie.
 */
static int __collate_range_cmp (int c1, int c2)
{
	/* One-character strings reused across calls; static storage keeps
	 * the terminating NUL in place ([1] is never written). */
	static char left[2], right[2];
	int order;

	c1 &= UCHAR_MAX;
	c2 &= UCHAR_MAX;
	if (c1 == c2)
		return 0;

	left[0] = (char)c1;
	right[0] = (char)c2;
	order = strcoll(left, right);
	if (order != 0)
		return order;
	return c1 - c2;
}
/* Forward declaration: matches a single character against a bracket
 * expression ("[...]"); defined at the bottom of this file. */
static int rangematch(const char *, char, int, char **);

/*
 * cfstore_fnmatch():
 * Match string against the shell wildcard pattern, as specified in
 * POSIX 1003.2-1992 section B.6. flags is a bitwise OR of the FNM_*
 * values defined above. Returns 0 on a match and CFSTORE_FNM_NOMATCH
 * otherwise.
 */
int cfstore_fnmatch(const char *pattern, const char *string, int flags)
{
	const char *stringstart;	/* start of string, for FNM_PERIOD checks */
	char *newp;			/* updated pattern position from rangematch() */
	char c, test;

	for (stringstart = string;;)
		switch (c = *pattern++) {
		case EOS:
			/* Pattern exhausted: match if the string is too, or if
			 * only a "/<tail>" remains and FNM_LEADING_DIR is set. */
			if ((flags & FNM_LEADING_DIR) && *string == '/')
				return (0);
			return (*string == EOS ? 0 : CFSTORE_FNM_NOMATCH);
		case '?':
			/* '?' matches any single character, except '/' under
			 * FNM_PATHNAME and a leading '.' under FNM_PERIOD. */
			if (*string == EOS)
				return (CFSTORE_FNM_NOMATCH);
			if (*string == '/' && (flags & FNM_PATHNAME))
				return (CFSTORE_FNM_NOMATCH);
			if (*string == '.' && (flags & FNM_PERIOD) &&
			    (string == stringstart ||
			    ((flags & FNM_PATHNAME) && *(string - 1) == '/')))
				return (CFSTORE_FNM_NOMATCH);
			++string;
			break;
		case '*':
			c = *pattern;
			/* Collapse multiple stars. */
			while (c == '*')
				c = *++pattern;
			/* A leading '.' is never matched by '*' under FNM_PERIOD. */
			if (*string == '.' && (flags & FNM_PERIOD) &&
			    (string == stringstart ||
			    ((flags & FNM_PATHNAME) && *(string - 1) == '/')))
				return (CFSTORE_FNM_NOMATCH);
			/* Optimize for pattern with * at end or before /.
			 * Note: the first `else` pairs with the FNM_PATHNAME
			 * check; the `else if` pairs with `c == EOS`. */
			if (c == EOS)
				if (flags & FNM_PATHNAME)
					return ((flags & FNM_LEADING_DIR) ||
					    strchr(string, '/') == NULL ?
					    0 : CFSTORE_FNM_NOMATCH);
				else
					return (0);
			else if (c == '/' && flags & FNM_PATHNAME) {
				/* '*' directly before '/': skip to the next slash. */
				if ((string = strchr(string, '/')) == NULL)
					return (CFSTORE_FNM_NOMATCH);
				break;
			}
			/* General case, use recursion: try the rest of the
			 * pattern at every remaining string position. */
			while ((test = *string) != EOS) {
				if (!cfstore_fnmatch(pattern, string, flags & ~FNM_PERIOD))
					return (0);
				if (test == '/' && flags & FNM_PATHNAME)
					break;
				++string;
			}
			return (CFSTORE_FNM_NOMATCH);
		case '[':
			if (*string == EOS)
				return (CFSTORE_FNM_NOMATCH);
			if (*string == '/' && (flags & FNM_PATHNAME))
				return (CFSTORE_FNM_NOMATCH);
			if (*string == '.' && (flags & FNM_PERIOD) &&
			    (string == stringstart ||
			    ((flags & FNM_PATHNAME) && *(string - 1) == '/')))
				return (CFSTORE_FNM_NOMATCH);
			switch (rangematch(pattern, *string, flags, &newp)) {
			case RANGE_ERROR:
				/* Malformed bracket expression: treat '[' as a
				 * literal character (falls into the default case). */
				goto norm;
			case RANGE_MATCH:
				pattern = newp;
				break;
			case RANGE_NOMATCH:
				return (CFSTORE_FNM_NOMATCH);
			}
			++string;
			break;
		case '\\':
			/* Backslash quotes the next pattern character unless
			 * FNM_NOESCAPE is set; a trailing '\' matches itself. */
			if (!(flags & FNM_NOESCAPE)) {
				if ((c = *pattern++) == EOS) {
					c = '\\';
					--pattern;
				}
			}
			/* FALLTHROUGH */
		default:
		norm:
			/* Literal character: compare directly, or
			 * case-insensitively under FNM_CASEFOLD. */
			if (c == *string)
				;
			else if ((flags & FNM_CASEFOLD) &&
			    (tolower((unsigned char)c) ==
			    tolower((unsigned char)*string)))
				;
			else
				return (CFSTORE_FNM_NOMATCH);
			string++;
			break;
		}
	/* NOTREACHED */
}
/*
 * rangematch():
 * Match the single character test against the bracket expression that
 * pattern points at (just past the opening '['). On RANGE_MATCH, *newp
 * is set to the first pattern character after the closing ']'.
 * Returns RANGE_MATCH, RANGE_NOMATCH, or RANGE_ERROR for a malformed
 * expression (e.g. a missing ']').
 */
static int
rangematch(const char *pattern, char test, int flags, char **newp)
{
	int negate, ok;
	char c, c2;

	/*
	 * A bracket expression starting with an unquoted circumflex
	 * character produces unspecified results (IEEE 1003.2-1992,
	 * 3.13.2). This implementation treats it like '!', for
	 * consistency with the regular expression syntax.
	 * <NAME> (<EMAIL>)
	 */
	negate = (*pattern == '!' || *pattern == '^');
	if ( negate )
		++pattern;

	if (flags & FNM_CASEFOLD)
		test = tolower((unsigned char)test);

	/*
	 * A right bracket shall lose its special meaning and represent
	 * itself in a bracket expression if it occurs first in the list.
	 * -- POSIX.2 2.8.3.2
	 */
	ok = 0;
	c = *pattern++;
	do {
		if (c == '\\' && !(flags & FNM_NOESCAPE))
			c = *pattern++;
		if (c == EOS)
			return (RANGE_ERROR);	/* unterminated expression */
		if (c == '/' && (flags & FNM_PATHNAME))
			return (RANGE_NOMATCH);	/* '/' never matches inside a range */
		if (flags & FNM_CASEFOLD)
			c = tolower((unsigned char)c);
		/* "c-c2" range, unless '-' is the last char before ']'. */
		if (*pattern == '-'
		    && (c2 = *(pattern+1)) != EOS && c2 != ']') {
			pattern += 2;
			if (c2 == '\\' && !(flags & FNM_NOESCAPE))
				c2 = *pattern++;
			if (c2 == EOS)
				return (RANGE_ERROR);
			if (flags & FNM_CASEFOLD)
				c2 = tolower((unsigned char)c2);
			/* With no collation table loaded (always true in this
			 * port, see __collate_load_error) compare raw codes. */
			if (__collate_load_error ?
			    c <= test && test <= c2 :
			    __collate_range_cmp(c, test) <= 0
			    && __collate_range_cmp(test, c2) <= 0
			   )
				ok = 1;
		} else if (c == test)
			ok = 1;
	} while ((c = *pattern++) != ']');

	*newp = (char *)pattern;
	/* negate flips the sense of the whole expression. */
	return (ok == negate ? RANGE_NOMATCH : RANGE_MATCH);
}
#endif /* CFSTORE_NO_FNMATCH */
|
#!/bin/sh
# Build the inline-proxy test image from the dedicated test Dockerfile in the
# current directory, tagging it comum/proxy-inline-test:test.
docker build -t comum/proxy-inline-test:test -f Dockerfile.test .
|
<reponame>Thalenus/proyectowebGantz<filename>server.js
/**
 * Created by Tomás on 17-04-2018.
 * Application entry point: connects to MongoDB, then starts the HTTP server.
 */
'use strict';

const mongoose = require('mongoose');
const app = require('./app');

// Honor the PORT environment variable, falling back to 8000 for local runs.
const port = process.env.PORT || 8000;

// Use the native Promise implementation for mongoose.
mongoose.Promise = global.Promise;

mongoose.connect('mongodb://@localhost:27017/portalweb', {useMongoClient: true})
    .then(() => {
        console.log('La conexion a la base de datos se a realizado Correctamente');
        app.listen(port, () => {
            // Bug fix: report the port actually in use instead of a
            // hard-coded 8000, which was wrong whenever PORT was set.
            console.log('El servidor esta corriendo en el puerto: ' + port)
        })
    })
    .catch(err => console.log(err));
|
<filename>vendor/dist/bundle.js
// NOTE(review): webpack-generated bundle (vendor/dist) — compiled output, not
// hand-written source. Edit the original src/ modules and rebuild instead of
// changing this file by hand.
webpackJsonp([0],{

/***/ 23:
/***/ (function(module, exports, __webpack_require__) {

"use strict";

// Module table lookups resolved by webpack at build time.
var _message = __webpack_require__(11);
var _platzi = __webpack_require__(12);
var _platzi2 = _interopRequireDefault(_platzi);
var _teachers = __webpack_require__(13);
var _teachers2 = _interopRequireDefault(_teachers);
var _renderToDom = __webpack_require__(7);
var _renderToDom2 = _interopRequireDefault(_renderToDom);
var _react = __webpack_require__(3);
var _react2 = _interopRequireDefault(_react);
var _reactDom = __webpack_require__(14);
var _teachers3 = __webpack_require__(20);
var _teachers4 = _interopRequireDefault(_teachers3);
__webpack_require__(21);
// Babel interop helper: unwraps ES-module default exports for CommonJS requires.
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
__webpack_require__(22);
// NOTE(review): the element id 'cointainer' looks like a typo for 'container';
// it must match the id used in the host HTML — fix in source and rebuild.
(0, _reactDom.render)(_react2.default.createElement(_teachers4.default, { data: _teachers2.default }), document.getElementById('cointainer'));
console.log(_teachers2.default);
_teachers2.default.teachers.forEach(function (teacher) {
var element = document.createElement('li');
element.textContent = teacher.name;
(0, _renderToDom2.default)(element);
});
document.write(_message.firstMessage);
(0, _message.delayedMessage)();
var img = document.createElement('img');
img.setAttribute('src', _platzi2.default);
img.setAttribute('width', 50);
img.setAttribute('height', 50);
document.body.append(img);
console.log('Hola desde webpack, en un webpack.config');
/***/ })
},[23]);
<reponame>manvelmk/caesarCipher<filename>caesar cipher/src/CaesarCipher.java<gh_stars>0
import java.util.Scanner;
public class CaesarCipher {

    /**
     * Interactive driver: reads a message and a shift amount, then loops over
     * an encrypt/decrypt/exit menu until the user quits.
     */
    public static void main(String[] args) {
        String message, offset, output;
        Scanner input = new Scanner(System.in);
        int offset_length;

        System.out.println("Type string to encrypt");
        message = input.nextLine();
        System.out.println("How many letters to shift by?");
        offset = input.next();
        System.out.println(offset);
        offset_length = Integer.parseInt(offset); // get actual integer value of offset

        // Pre-compute the ciphertext so that choosing "Decrypt" first still
        // operates on an encrypted message (matches the historical behavior).
        output = encrypt(message, offset_length);

        for (;;) {
            System.out.println("1.Encrypt");
            System.out.println("2.Decrypt");
            System.out.println("3.Exit");
            int choice = input.nextInt();
            switch (choice) {
                case 1: // encrypt
                    output = encrypt(message, offset_length);
                    System.out.println("Encrypted: " + output.toUpperCase());
                    break;
                case 2: // decrypt
                    System.out.println("Decrypted: " + decrypt(output, offset_length));
                    break;
                case 3: // quit
                    System.exit(0);
                    break;
                default:
                    System.out.println("Please choose 1-3");
            }
        }
    }

    /**
     * Encrypts message with a Caesar cipher, shifting ASCII letters forward by
     * offset_length positions and wrapping inside a-z / A-Z. Other characters
     * are left unchanged. Bug fixes vs. the original: negative or very large
     * offsets are normalized (the old wrap logic produced characters outside
     * the alphabet for them), and non-ASCII Unicode letters are no longer
     * corrupted by the ASCII-only wrap arithmetic.
     */
    public static String encrypt(String message, int offset_length) {
        return shift(message, normalize(offset_length));
    }

    /**
     * Decrypts a message previously produced by encrypt() with the same
     * offset: shifts letters backwards, wrapping around the alphabet.
     */
    public static String decrypt(String message, int offset_length) {
        return shift(message, 26 - normalize(offset_length));
    }

    /** Maps any int offset into the canonical range [0, 25]. */
    private static int normalize(int offset) {
        return ((offset % 26) + 26) % 26;
    }

    /**
     * Shared worker for encrypt/decrypt: shifts every ASCII letter of message
     * forward by amount positions (amount in [0, 26]), printing the partial
     * result after each character with a short delay to animate the process,
     * exactly like the original per-method loops did.
     */
    private static String shift(String message, int amount) {
        StringBuilder result = new StringBuilder(message.length());
        for (int i = 0; i < message.length(); i++) {
            char c = message.charAt(i);
            if (c >= 'A' && c <= 'Z') {
                c = (char) ('A' + (c - 'A' + amount) % 26);
            } else if (c >= 'a' && c <= 'z') {
                c = (char) ('a' + (c - 'a' + amount) % 26);
            }
            result.append(c);
            System.out.println(result); // print the process, one character at a time
            try {
                Thread.sleep(250);
            } catch (InterruptedException ex) {
                Thread.currentThread().interrupt();
            }
        }
        return result.toString();
    }
}
<reponame>spade69/ADT-Algorithm
import java.util.*;
/*
* 二分查找!
* Return the index
*NotFound : -1
* */
public class BinSearch {

    /**
     * Classic binary search over an ascending sorted array.
     *
     * @param Table sorted array to search
     * @param k     value to look for
     * @return index of k in Table, or -1 when k is absent
     */
    public int BinarySearch(int[] Table, int k) {
        int left = 0;
        int right = Table.length - 1;
        while (left <= right) {
            // Overflow-safe midpoint: (left + right) / 2 can overflow int
            // for indices near Integer.MAX_VALUE.
            int mid = left + (right - left) / 2;
            if (Table[mid] > k) {
                right = mid - 1;
            } else if (Table[mid] < k) {
                left = mid + 1;
            } else {
                return mid;
            }
        }
        return -1; // not found
    }

    /** Small manual check: searches 4 in {1..6} and prints its index (3). */
    public static void main(String[] args) {
        BinSearch bin = new BinSearch();
        int[] arrx = {1, 2, 3, 4, 5, 6};
        int result = bin.BinarySearch(arrx, 4);
        System.out.println(result);
    }
}
|
<filename>src/org/sosy_lab/cpachecker/util/ltl/formulas/BooleanConstant.java
/*
* CPAchecker is a tool for configurable software verification.
* This file is part of CPAchecker.
*
* Copyright (C) 2007-2018 <NAME>
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
* CPAchecker web page:
* http://cpachecker.sosy-lab.org
*/
package org.sosy_lab.cpachecker.util.ltl.formulas;
import org.sosy_lab.cpachecker.util.ltl.LtlFormulaVisitor;
/**
 * LTL boolean constant. Only two shared instances ever exist (TRUE and
 * FALSE); the constructor is private and of() hands out the singletons.
 */
public final class BooleanConstant implements LtlFormula {

  public static final BooleanConstant FALSE = new BooleanConstant(false);
  public static final BooleanConstant TRUE = new BooleanConstant(true);

  private final boolean value;

  /** Returns the shared constant for the given truth value. */
  public static BooleanConstant of(boolean pValue) {
    if (pValue) {
      return TRUE;
    }
    return FALSE;
  }

  private BooleanConstant(boolean pValue) {
    value = pValue;
  }

  /** Negation simply swaps the two shared instances. */
  @Override
  public BooleanConstant not() {
    if (value) {
      return FALSE;
    }
    return TRUE;
  }

  @Override
  public String accept(LtlFormulaVisitor v) {
    return v.visit(this);
  }

  @Override
  public int hashCode() {
    return Boolean.hashCode(value);
  }

  @Override
  public boolean equals(Object obj) {
    if (obj == this) {
      return true;
    }
    if (obj == null || getClass() != obj.getClass()) {
      return false;
    }
    return value == ((BooleanConstant) obj).value;
  }

  @Override
  public String toString() {
    if (value) {
      return "true";
    }
    return "false";
  }
}
|
<reponame>LucestDail/JavascriptStudy
// Demonstrates how array iteration methods treat an explicit
// `undefined` element (arr1) versus a true hole in a sparse array
// (arr2): forEach/map/filter/reduce all skip holes but still visit
// explicit undefined entries.
const arr1 = [undefined, 1];
const arr2 = [];
arr2[1] = 1;
console.log("arr1 : " + arr1);
console.log("arr2 : " + arr2);
// forEach: visits index 0 of arr1, skips the hole in arr2.
console.log("arr1.forEach");
arr1.forEach((v, i) => console.log(v, i));
console.log("arr2.forEach");
arr2.forEach((v, i) => console.log(v, i));
// map: undefined + 0 yields NaN for arr1; the hole in arr2 is preserved.
console.log("arr1.map");
console.log(arr1.map((v, i) => v + i));
console.log("arr2.map");
console.log(arr2.map((v, i) => v + i));
// filter: keeps falsy entries that exist; the hole in arr2 is never tested.
console.log("arr1.filter");
console.log(arr1.filter((v) => !v));
console.log("arr2.filter");
console.log(arr2.filter((v) => !v));
// reduce: starts from '' and string-concatenates; holes are skipped.
console.log("arr1.reduce");
console.log(arr1.reduce((p, c, i) => p + c + i, ''));
console.log("arr2.reduce");
console.log(arr2.reduce((p, c, i) => p + c + i, ''));
<gh_stars>1-10
/*
 * Demonstrates how a left shift is performed in int arithmetic and how
 * casting the result back to byte truncates it: 64 << 2 is 256, which
 * fits in an int but wraps to 0 when narrowed to a byte.
 */
public class Leftshift {
public static void main(String args[])
{
byte original = 64;
// The shift widens to int, so no bits are lost here: 256.
int widened = original << 2;
// Narrowing cast keeps only the low 8 bits of 256 -> 0.
byte truncated = (byte) (original << 2);
System.out.print(widened + " " + truncated);
}
}
|
#!/bin/bash
# Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2.0,
# as published by the Free Software Foundation.
#
# This program is also distributed with certain software (including
# but not limited to OpenSSL) that is licensed under separate terms,
# as designated in a particular file or component or in included license
# documentation. The authors of MySQL hereby grant you an additional
# permission to link the program and your derivative works with the
# separately licensed software that they have included with MySQL.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License, version 2.0, for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# Kill every process whose ps listing matches $1 (case-insensitive).
# "grep -v grep" drops the grep processes from the pipeline, and
# skipping $$ keeps this script alive long enough to finish both kill
# passes (fixes the old XXX: the script used to kill itself and the
# grep before other matches were handled).
ps -efa | grep -i "$1" | grep -v grep
for X in `ps -efa | grep -i "$1" | grep -v grep | awk {'print $2'}`; do
    if [ "$X" != "$$" ]; then
        kill $X
    fi
done
sleep 1
# Second pass: force-kill anything that ignored the first TERM.
for X in `ps -efa | grep -i "$1" | grep -v grep | awk {'print $2'}`; do
    if [ "$X" != "$$" ]; then
        kill -9 $X
    fi
done
|
package refinedstorage.network;
import io.netty.buffer.ByteBuf;
import net.minecraft.network.PacketBuffer;
import net.minecraft.tileentity.TileEntity;
import net.minecraftforge.fml.common.network.simpleimpl.IMessage;
import net.minecraftforge.fml.common.network.simpleimpl.IMessageHandler;
import net.minecraftforge.fml.common.network.simpleimpl.MessageContext;
import refinedstorage.tile.data.TileDataManager;
import refinedstorage.tile.data.TileDataParameter;
// Syncs a single TileDataParameter value for a tile entity over the
// Forge SimpleImpl network channel. The same class serves as both the
// message payload and its (no-op) handler.
public class MessageTileDataParameter implements IMessage, IMessageHandler<MessageTileDataParameter, IMessage> {
// Tile entity whose parameter is being sent (populated on the sender side only).
private TileEntity tile;
// Parameter descriptor being synced (populated on the sender side only).
private TileDataParameter parameter;
// Required no-arg constructor for Forge's reflective instantiation on
// the receiving side.
public MessageTileDataParameter() {
}
public MessageTileDataParameter(TileEntity tile, TileDataParameter parameter) {
this.tile = tile;
this.parameter = parameter;
}
@Override
public void fromBytes(ByteBuf buf) {
// Wire format (must mirror toBytes): parameter id, then the
// serializer-specific payload.
int id = buf.readInt();
// NOTE(review): this local shadows the `parameter` field, which stays
// null on the receiving side; the value is written into the registry
// instance from TileDataManager instead -- presumably intended, since
// onMessage below does nothing. Confirm.
TileDataParameter parameter = TileDataManager.getParameter(id);
if (parameter != null) {
try {
parameter.setValue(parameter.getSerializer().read(new PacketBuffer(buf)));
} catch (Exception e) {
// NO OP
}
}
}
@Override
public void toBytes(ByteBuf buf) {
buf.writeInt(parameter.getId());
parameter.getSerializer().write((PacketBuffer) buf, parameter.getValueProducer().getValue(tile));
}
@Override
public IMessage onMessage(MessageTileDataParameter message, MessageContext ctx) {
// All work is done in fromBytes; no reply message is sent.
return null;
}
}
|
/**
* The MIT License
* Copyright (c) 2014 <NAME>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package cormoran.pepper.shared.logging;
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.Locale;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.junit.Assert;
import org.junit.Test;
import com.google.common.collect.ImmutableMap;
import cormoran.pepper.logging.PepperLogHelper;
/**
 * Unit tests for {@code PepperLogHelper}: the lazy toString wrapper and
 * the human-friendly formatting helpers (percentages, rates, durations,
 * byte counts, doubles, collection/string truncation, newline handling).
 */
public class TestPepperLogHelper {
@Test
public void lazyToString() {
// Not the String
Assert.assertNotEquals("Youpi", PepperLogHelper.lazyToString(() -> "Youpi"));
// But same .toString
Assert.assertEquals("Youpi", PepperLogHelper.lazyToString(() -> "Youpi").toString());
}
// NOTE(review): duplicates lazyToString() above -- candidate for removal.
@Test
public void testLazyToString() {
// The lazyToString should not be a String
Assert.assertNotEquals("Youpi", PepperLogHelper.lazyToString(() -> "Youpi"));
Assert.assertEquals("Youpi", PepperLogHelper.lazyToString(() -> "Youpi").toString());
}
// --- getNicePercentage ---------------------------------------------------
@Test
public void getPercentage() {
Assert.assertEquals("10%", PepperLogHelper.getNicePercentage(100, 1000).toString());
}
@Test
public void getPercentageDivideBy0() {
// A zero denominator renders as "-%" rather than throwing.
Assert.assertEquals("-%", PepperLogHelper.getNicePercentage(100, 0).toString());
}
@Test
public void getSmallPercentage() {
Assert.assertEquals("0.3%", PepperLogHelper.getNicePercentage(3, 1000).toString());
}
@Test
public void getVerySmallPercentage() {
Assert.assertEquals("0.03%", PepperLogHelper.getNicePercentage(3, 10000).toString());
}
@Test
public void getMediumPercentage() {
Assert.assertEquals("2.66%", PepperLogHelper.getNicePercentage(6, 225).toString());
}
@Test
public void getProgressAboveMax() {
// Values above 100% are not clamped.
Assert.assertEquals("1000%", PepperLogHelper.getNicePercentage(1000, 100).toString());
}
// --- getNiceRate ---------------------------------------------------------
@Test
public void testBigTimeLowRate() {
Assert.assertEquals("1#/days", PepperLogHelper.getNiceRate(10, 10, TimeUnit.DAYS).toString());
}
@Test
public void testBigTimeVeryLowRate() {
Assert.assertEquals("10#/sec", PepperLogHelper.getNiceRate(1, 100, TimeUnit.MILLISECONDS).toString());
}
@Test
public void testBigTimeVeryLowRate1() {
Assert.assertEquals("30#/min", PepperLogHelper.getNiceRate(5, 10 * 1000, TimeUnit.MILLISECONDS).toString());
}
@Test
public void testBigTimeHIghRate() {
Assert.assertEquals("2#/ms", PepperLogHelper.getNiceRate(Integer.MAX_VALUE, 10, TimeUnit.DAYS).toString());
}
@Test
public void testLowTimeLowRate() {
Assert.assertEquals("1#/ms", PepperLogHelper.getNiceRate(10, 10, TimeUnit.MILLISECONDS).toString());
}
@Test
public void testLowTimeHighRate() {
Assert.assertEquals("214#/ns",
PepperLogHelper.getNiceRate(Integer.MAX_VALUE, 10, TimeUnit.MILLISECONDS).toString());
}
@Test
public void testRightUnderRatePerSecond() {
Assert.assertEquals("999#/sec", PepperLogHelper.getNiceRate(999, 1000, TimeUnit.MILLISECONDS).toString());
}
@Test
public void testZeroTime() {
// Documents current behavior: a zero duration yields the odd "0SECONDS" unit.
Assert.assertEquals("999#/0SECONDS", PepperLogHelper.getNiceRate(999, 0, TimeUnit.SECONDS).toString());
}
@Test
public void testPercentageNoDecimals() {
Assert.assertEquals("100370%", PepperLogHelper.getNicePercentage(123456, 123).toString());
}
@Test
public void testPercentage() {
Assert.assertEquals("100370%", PepperLogHelper.getNicePercentage(123456, 123).toString());
Assert.assertEquals("0.09%", PepperLogHelper.getNicePercentage(123, 123456).toString());
}
@Test
public void testPercentage2() {
Assert.assertEquals("9.80%", PepperLogHelper.getNicePercentage(98, 1000).toString());
}
@Test
public void testPercentage3() {
Assert.assertEquals("9.81%", PepperLogHelper.getNicePercentage(981, 10000).toString());
}
// --- getNiceTime ---------------------------------------------------------
@Test
public void testGetNiceTimeMillis() {
Assert.assertEquals("912ms", PepperLogHelper.getNiceTime(912).toString());
}
@Test
public void testGetNiceTimeSecondsAndMillis() {
Assert.assertEquals("9sec 600ms", PepperLogHelper.getNiceTime(9600).toString());
}
@Test
public void testGetNiceTimeSecondsAndMillis_NoHundredsInMillis() {
Assert.assertEquals("9sec 60ms", PepperLogHelper.getNiceTime(9060).toString());
}
@Test
public void testGetNiceTimeMinAndSeconds() {
Assert.assertEquals("2min 11sec", PepperLogHelper.getNiceTime(131, TimeUnit.SECONDS).toString());
}
@Test
public void testGetNiceTimeRoundMinutes() {
// Exact minutes render without a seconds part.
Assert.assertEquals("2min", PepperLogHelper.getNiceTime(120, TimeUnit.SECONDS).toString());
}
@Test
public void testGetNiceTimeHoursAndMinutes() {
Assert.assertEquals("2hours 11min", PepperLogHelper.getNiceTime(131, TimeUnit.MINUTES).toString());
}
@Test
public void testGetNiceDays() {
Assert.assertEquals("5days", PepperLogHelper.getNiceTime(5, TimeUnit.DAYS).toString());
}
@Test
public void testGetNiceDaysAndHours() {
Assert.assertEquals("4days 4hours", PepperLogHelper.getNiceTime(100, TimeUnit.HOURS).toString());
}
@Test
public void testGetNiceTimeFromNanos() {
Assert.assertEquals("1sec",
PepperLogHelper.getNiceTime(TimeUnit.SECONDS.toNanos(1), TimeUnit.NANOSECONDS).toString());
}
// --- getToStringWithLimit / getFirstChars --------------------------------
@Test
public void testCollectionLimit_under() {
Assert.assertEquals("[0, 1]", PepperLogHelper.getToStringWithLimit(Arrays.asList(0, 1), 3).toString());
}
@Test
public void testCollectionLimit_same() {
Assert.assertEquals("[0, 1, 2]", PepperLogHelper.getToStringWithLimit(Arrays.asList(0, 1, 2), 3).toString());
}
@Test
public void testCollectionLimit_above() {
Assert.assertEquals("[0, 1, (3 more elements)]",
PepperLogHelper.getToStringWithLimit(Arrays.asList(0, 1, 2, 3, 4), 2).toString());
}
@Test
public void testLimitChars() {
Assert.assertEquals("'12345...(4 more chars)'", PepperLogHelper.getFirstChars("123456789", 5).toString());
}
@Test
public void testLimitChars_underlimit() {
Assert.assertEquals("123456789", PepperLogHelper.getFirstChars("123456789", 15).toString());
}
@Test
public void testLimitChars_null() {
Assert.assertEquals("null", PepperLogHelper.getFirstChars(null, 5).toString());
}
// --- newline handling ----------------------------------------------------
@Test
public void testRemoveNewLines() {
Assert.assertEquals("a b", PepperLogHelper.removeNewLines("a\rb").toString());
Assert.assertEquals("a b", PepperLogHelper.removeNewLines("a\nb").toString());
Assert.assertEquals("a b", PepperLogHelper.removeNewLines("a\r\nb").toString());
// \n\r leads to 2 whitespaces
Assert.assertEquals("a b", PepperLogHelper.removeNewLines("a\n\rb").toString());
Assert.assertEquals(" a b c ", PepperLogHelper.removeNewLines("\na\rb\r\nc\r").toString());
}
@Test
public void testEscapeNewLines() {
Assert.assertEquals("a\\rb", PepperLogHelper.escapeNewLines("a\rb").toString());
Assert.assertEquals("a\\nb", PepperLogHelper.escapeNewLines("a\nb").toString());
Assert.assertEquals("a\\r\\nb", PepperLogHelper.escapeNewLines("a\r\nb").toString());
}
// --- getObjectAndClass ---------------------------------------------------
@Test
public void testObjectAndClass() {
Assert.assertEquals("{k=v(java.lang.String), k2=2(java.lang.Long)}",
PepperLogHelper.getObjectAndClass(ImmutableMap.of("k", "v", "k2", 2L)).toString());
}
@Test
public void testObjectAndClass_recursive() {
Map<Object, Object> map = new LinkedHashMap<>();
Assert.assertEquals("{}", PepperLogHelper.getObjectAndClass(map).toString());
// Add itself as value
map.put("k", map);
// Legimitate use-case as handle by AsbtractMap.toString()
Assert.assertEquals("{k=(this Map)}", map.toString());
Assert.assertEquals("{k=(this Map)}", PepperLogHelper.getObjectAndClass(map).toString());
// Add another value
map.put("k2", "v2");
Assert.assertEquals("{k=(this Map), k2=v2(java.lang.String)}",
PepperLogHelper.getObjectAndClass(map).toString());
}
// --- humanBytes / getNiceDouble ------------------------------------------
@Test
public void testHumanBytes() {
Assert.assertEquals("789B", PepperLogHelper.humanBytes(789L).toString());
Assert.assertEquals("607KB", PepperLogHelper.humanBytes(789L * 789).toString());
Assert.assertEquals("468MB", PepperLogHelper.humanBytes(789L * 789 * 789).toString());
Assert.assertEquals("360GB", PepperLogHelper.humanBytes(789L * 789 * 789 * 789).toString());
Assert.assertEquals("278TB", PepperLogHelper.humanBytes(789L * 789 * 789 * 789 * 789).toString());
Assert.assertEquals("214PB", PepperLogHelper.humanBytes(789L * 789 * 789 * 789 * 789 * 789).toString());
}
@Test
public void testGetNiceDouble_null() {
Assert.assertEquals("null", PepperLogHelper.getNiceDouble(null).toString());
}
@Test
public void testGetNiceDouble_NaN() {
Assert.assertEquals("NaN", PepperLogHelper.getNiceDouble(Double.NaN).toString());
}
@Test
public void testGetNiceDouble_Infinity() {
Assert.assertEquals("Infinity", PepperLogHelper.getNiceDouble(Double.POSITIVE_INFINITY).toString());
}
@Test
public void testGetNiceDouble_MediumAndVeryPrecise() {
Assert.assertEquals("12.35", PepperLogHelper.getNiceDouble(12.3456789123).toString());
}
@Test
public void testGetNiceDouble_MediumAndVeryPrecise_Negative() {
Assert.assertEquals("-12.35", PepperLogHelper.getNiceDouble(-12.3456789123).toString());
}
@Test
public void testGetNiceDouble_BigAndPrecise() {
Assert.assertEquals("123456789.12", PepperLogHelper.getNiceDouble(123456789.123456789D).toString());
}
@Test
public void testGetNiceDouble_BigAndNotPrecise() {
Assert.assertEquals("1230000000000.0", PepperLogHelper.getNiceDouble(123e10D).toString());
}
@Test
public void testGetNiceDouble_veryNearZero() {
Assert.assertEquals("0.00000000012", PepperLogHelper.getNiceDouble(0.000000000123456789D).toString());
}
@Test
public void testGetNiceDouble_Zero() {
Assert.assertEquals("0.0", PepperLogHelper.getNiceDouble(0D).toString());
}
@Test
public void testGetNiceDouble_NextToZero() {
Assert.assertEquals("0.0", PepperLogHelper.getNiceDouble(Double.MIN_NORMAL).toString());
}
@Test
public void testGetNiceDouble_NextToZero_Negative() {
Assert.assertEquals("-0.0", PepperLogHelper.getNiceDouble(-1 * Double.MIN_NORMAL).toString());
}
@Test
public void testGetNiceDouble_FrenchLocal() {
// France by default have a ',' as decimal separator
// NOTE(review): this mutates the JVM-wide default FORMAT locale
// without restoring it, which can leak into later tests -- consider a
// try/finally restore.
Locale.setDefault(Locale.Category.FORMAT, Locale.FRANCE);
Assert.assertEquals("123.46", PepperLogHelper.getNiceDouble(123.456D).toString());
}
}
|
tail -1|awk '
# Reads the whitespace-separated integers on the LAST line of stdin.
# If their sum is not evenly divisible by the count, prints -1; if the
# integer average is 0, prints 0. Otherwise it scans left to right,
# cutting the sequence every time the running sum equals the running
# multiple of the average, and for each balanced segment of length L it
# adds L-1 operations.
# NOTE(review): this appears to compute the minimum number of
# adjacent-merge operations needed to make every element equal to the
# average -- confirm against the original problem statement.
{
# First pass: total sum and integer average.
for(i=1;i<=NF;i++){
s+=$i
}
ave=int(s/NF)
if(s%NF!=0){
print -1
}else if(ave==0){
print 0
}else{
# Second pass: s is the running sum, k the target sum (ave per
# element seen); whenever they match, a balanced segment ends.
s=0
ans=0
k=0
for(i=1;i<=NF;i++){
s+=$i
k+=ave
if(s==k){
ans+=(k/ave-1)
k=0;s=0
}
}
# Defensive: a trailing unbalanced remainder is an error.
if(s!=0){
print -1
}else{
print ans
}
}
}
'
#!/bin/bash
# Copyright (c) 2019-2020 P3TERX <https://p3terx.com>
#
# Pre-build customization for an OpenWrt (Lean fork) source tree: each
# sed below patches a default before compilation.
# Change the default LuCI login address: edit 192.168.99.100 below to
# whatever address you want.
sed -i 's/192.168.1.1/192.168.99.100/g' package/base-files/files/bin/config_generate
# Change the default hostname: replace OpenWrt-Neo2 with any name you
# like (not purely numeric and no Chinese characters).
sed -i '/uci commit system/i\uci set system.@system[0].hostname='OpenWrt-Neo2'' package/lean/default-settings/files/zzz-default-settings
# Show a custom name in the version string ("Ryan build <date> @" is
# prepended to the stock "OpenWrt" banner).
sed -i "s/OpenWrt /Ryan build $(TZ=UTC-8 date "+%Y.%m.%d") @ OpenWrt /g" package/lean/default-settings/files/zzz-default-settings
# Make argon the default LuCI theme; substitute any other theme that
# does not force itself back to the default.
sed -i 's/luci-theme-bootstrap/luci-theme-argon/g' feeds/luci/collections/luci/Makefile
# Empty the root password (log in without a password after flashing,
# then set your own).
sed -i 's@.*CYXluq4wUazHjmCDBCqXF*@#&@g' package/lean/default-settings/files/zzz-default-settings
# Rename LuCI menu entries (renaming has not been fully verified
# against every plugin -- test after building).
sed -i 's/"BaiduPCS Web"/"百度网盘"/g' package/lean/luci-app-baidupcs-web/luasrc/controller/baidupcs-web.lua
sed -i 's/cbi("qbittorrent"),_("qBittorrent")/cbi("qbittorrent"),_("BT下载")/g' package/lean/luci-app-qbittorrent/luasrc/controller/qbittorrent.lua
sed -i 's/"aMule设置"/"电驴下载"/g' package/lean/luci-app-amule/po/zh-cn/amule.po
sed -i 's/"网络存储"/"存储"/g' package/lean/luci-app-amule/po/zh-cn/amule.po
sed -i 's/"网络存储"/"存储"/g' package/lean/luci-app-vsftpd/po/zh-cn/vsftpd.po
sed -i 's/"Turbo ACC 网络加速"/"网络加速"/g' package/lean/luci-app-flowoffload/po/zh-cn/flowoffload.po
sed -i 's/"Turbo ACC 网络加速"/"网络加速"/g' package/lean/luci-app-sfe/po/zh-cn/sfe.po
sed -i 's/"实时流量监测"/"流量"/g' package/lean/luci-app-wrtbwmon/po/zh-cn/wrtbwmon.po
sed -i 's/"KMS 服务器"/"KMS激活"/g' package/lean/luci-app-vlmcsd/po/zh-cn/vlmcsd.zh-cn.po
sed -i 's/"TTYD 终端"/"命令窗"/g' package/lean/luci-app-ttyd/po/zh-cn/terminal.po
sed -i 's/"USB 打印服务器"/"打印服务"/g' package/lean/luci-app-usb-printer/po/zh-cn/usb-printer.po
sed -i 's/"网络存储"/"存储"/g' package/lean/luci-app-usb-printer/po/zh-cn/usb-printer.po
sed -i 's/"Web 管理"/"Web"/g' package/lean/luci-app-webadmin/po/zh-cn/webadmin.po
sed -i 's/"管理权"/"改密码"/g' feeds/luci/modules/luci-base/po/zh-cn/base.po
sed -i 's/"带宽监控"/"监视"/g' feeds/luci/applications/luci-app-nlbwmon/po/zh-cn/nlbwmon.po
|
package operate
import (
	"sort"
	"time"

	"github.com/gogf/gf/container/gmap"
	"github.com/gogf/gf/database/gdb"
	"github.com/gogf/gf/frame/g"
	"github.com/gogf/gf/os/glog"
	"github.com/gogf/gf/os/gtime"
	"github.com/gogf/gf/util/gconv"
	"github.com/xinjiayu/SimServerUnicom/app/model/analyse"
	"github.com/xinjiayu/SimServerUnicom/app/model/datamodel"
	"github.com/xinjiayu/SimServerUnicom/app/model/unicommodel"
	"github.com/xinjiayu/SimServerUnicom/app/service/collect"
	"github.com/xinjiayu/SimServerUnicom/library/utils"
)
// Unicom "MON-FLEX" shared-pool rate-plan codes for the 1 GB, 2 GB and
// 3 GB pools respectively.
const plan01 = "831WLW016555_MON-FLEX_1024M_SP"
const plan02 = "831WLW016555_MON-FLEX_2048M_SP"
const plan03 = "831WLW016555_MON-FLEX_3072M_SP"
// AutoChangePlan holds the state used to rebalance SIM cards between
// the three flow pools.
type AutoChangePlan struct {
// Per-plan pool statistics, keyed by plan code (filled by CountPlanFlow).
PlanInfo map[string]analyse.PlanInfo
// Plan code -> map[iccid]datamodel.SimUnicom of the cards on that plan
// (filled by CountPlanFlow).
PlanListSimList *gmap.AnyAnyMap
}
// AutoSetupPlanInit moves every SIM card that is not already on the
// 1 GB plan (plan01) onto it, firing one asynchronous change request
// per card. It returns the listing error (if any) together with the
// number of cards for which a change was started.
func (ac *AutoChangePlan) AutoSetupPlanInit() (error, int) {
changed := 0
simCardList, err := datamodel.SimUnicom{}.GetUnicomSimInfoList()
for _, record := range simCardList {
info := new(unicommodel.SimInfo)
record.Struct(info)
// Cards already on plan01 need no change.
if info.RatePlan == plan01 {
continue
}
glog.Info(info.Iccid, plan01)
go startToChangePlan(info.Iccid, plan01)
changed++
}
return err, changed
}
// GetNot01PlanNum counts how many SIM cards are currently on a rate
// plan other than plan01 (listing errors are ignored).
func (ac *AutoChangePlan) GetNot01PlanNum() int {
count := 0
rows, _ := datamodel.SimUnicom{}.GetUnicomSimInfoList()
for _, row := range rows {
info := new(unicommodel.SimInfo)
row.Struct(info)
if info.RatePlan != plan01 {
count++
}
}
return count
}
// toSetupPlan selects, for the pool identified by planNum (1..3), the
// SIM cards that should be moved to another rate plan because the
// pool's capacity is exceeded. It returns the cards to change (keyed
// by ICCID, with RatePlan already set to the target plan) and the
// index of the next pool to rebalance (0 when no follow-up is needed).
// NOTE(review): cards are deleted from the local gconv.Map copy only;
// presumably ac.PlanListSimList is rebuilt by the next CountPlanFlow
// run -- confirm.
func (ac *AutoChangePlan) toSetupPlan(planNum int) (map[string]datamodel.SimUnicom, int) {
// Cards that need their plan changed.
var simPlan02List = make(map[string]datamodel.SimUnicom)
// Whether (and where) rebalancing should continue.
continuePlan := 0
aListData := ac.PlanListSimList.Get(getPlanName(planNum))
if aListData == nil {
return nil, 0
}
aList := gconv.Map(aListData)
planInfo := ac.PlanInfo[getPlanName(planNum)]
if planInfo.OutFlow > 0 {
var newListAllFlow int64 = 0
var newListAllNum int64 = 0
switch planNum {
case 1:
// Pool 1 over capacity: move every card that has used more than
// 3072 MB up to the 2 GB plan.
for k1, v1 := range aList {
simInfo := datamodel.SimUnicom{}
gconv.Struct(v1, &simInfo)
f1 := simInfo.CtdDataUsage / utils.MB1
if f1 > 3072 {
simInfo.RatePlan = plan02
simPlan02List[simInfo.Iccid] = simInfo
newListAllNum++
newListAllFlow = newListAllFlow + simInfo.CtdDataUsage
delete(aList, k1)
}
}
p := ac.PlanInfo[getPlanName(planNum)]
if p.OutFlow > 0 {
continuePlan = 2
}
glog.Info("计划一:", getPlanName(1), p)
case 2:
// Pool 2 over capacity: move every card that has used more than
// 4096 MB up to the 3 GB plan.
for k1, v1 := range aList {
simInfo := datamodel.SimUnicom{}
gconv.Struct(v1, &simInfo)
f1 := simInfo.CtdDataUsage / utils.MB1
if f1 > 4096 {
simInfo.RatePlan = plan03
simPlan02List[simInfo.Iccid] = simInfo
newListAllNum++
newListAllFlow = newListAllFlow + simInfo.CtdDataUsage
delete(aList, k1)
}
}
p := ac.PlanInfo[getPlanName(planNum)]
if p.OutFlow > 0 {
continuePlan = 3
}
glog.Info("计划二:", getPlanName(2), p)
case 3:
// Pool 3 over capacity: pull enough low-usage (< 500 MB) cards
// from pool 1 onto the 3 GB plan to absorb the overflow (one card
// per 3 GB of overflow, plus one).
var simnum1 int64 = 1
outFlowNumTmp1 := planInfo.OutFlow / utils.MB1
outFlowNum1 := outFlowNumTmp1 / 3
if outFlowNum1 > 0 {
simnum1 = outFlowNum1 + 1
}
var cnt int64 = 0
//a0, _ := ac.PlanListSimList[plan01]
a1 := ac.PlanListSimList.Get(plan01)
if a1 == nil {
return nil, 0
}
aList := gconv.Map(a1)
for k1, v1 := range aList {
simInfo := datamodel.SimUnicom{}
gconv.Struct(v1, &simInfo)
if simInfo.CtdDataUsage/utils.MB1 < 500 {
if cnt < simnum1 {
simInfo.RatePlan = plan03
simPlan02List[simInfo.Iccid] = simInfo
delete(aList, k1)
}
cnt++
}
}
p := ac.PlanInfo[getPlanName(planNum)]
if p.OutFlow > 0 {
continuePlan = 3
}
glog.Info("计划三有超出:", getPlanName(3), p)
}
}
return simPlan02List, continuePlan
}
// getPlanName maps a plan index (1..3) to its Unicom plan code; any
// other index yields the empty string.
func getPlanName(num int) string {
	// Zero value of a missing map key is "", matching the old default.
	names := map[int]string{
		1: plan01,
		2: plan02,
		3: plan03,
	}
	return names[num]
}
// AutoSetupPlan rebalances all SIM cards across the three flow pools.
// It refreshes the per-plan statistics via CountPlanFlow, then for each
// plan index 1..3 collects the cards toSetupPlan wants moved; when
// toSetupPlan signals a follow-up pool (c > 0) that pool is rebalanced
// immediately as well. One asynchronous plan-change request is fired
// per selected card.
// NOTE(review): the error return is always nil; only the count of
// changed cards is meaningful.
func (ac *AutoChangePlan) AutoSetupPlan() (error, int) {
simCardList, _ := datamodel.SimUnicom{}.GetUnicomSimInfoList()
ac.CountPlanFlow(simCardList)
glog.Info("全部卡数:", len(simCardList))
// iccid -> target plan; the map deduplicates repeated selections.
var changeSimcardList = make(map[string]string)
for a := 1; a < 4; a++ {
aList, c := ac.toSetupPlan(a)
for _, v := range aList {
changeSimcardList[v.Iccid] = v.RatePlan
}
if c > 0 {
bList, _ := ac.toSetupPlan(c)
for _, v1 := range bList {
changeSimcardList[v1.Iccid] = v1.RatePlan
}
}
}
for cid, plan := range changeSimcardList {
glog.Info(cid, plan)
go startToChangePlan(cid, plan)
}
glog.Info("sim卡计划变更数:", len(changeSimcardList))
return nil, len(changeSimcardList)
}
// toChangePlan issues a PUT to the Unicom devices API changing the rate
// plan of the card identified by iccid, and returns the parsed API
// response. The pre-request delay is handled by the caller
// (startToChangePlan).
// NOTE(review): the unchecked type assertion below panics when
// "unicom.api_url" is missing from the config or is not a string --
// confirm the key is always configured.
func toChangePlan(iccid, ratePlan string) *unicommodel.PutResultData {
apiurl := g.Config().Get("unicom.api_url")
APIURL := apiurl.(string)
getURL := APIURL + "devices/" + iccid
searchStr := "{\"ratePlan\":\"" + ratePlan + "\"}"
glog.Info("提交", searchStr)
dataModel := new(unicommodel.PutResultData)
collect.PutAPIData(getURL, searchStr, dataModel)
return dataModel
}
// startToChangePlan submits a rate-plan change for iccid after a one
// second delay, retrying for as long as the Unicom API reports error
// code 40000029 (the code the original retried on). Implemented as a
// loop rather than self-recursion so a long retry streak cannot grow
// the goroutine stack without bound.
func startToChangePlan(iccid, ratePlan string) {
	for {
		// Throttle: wait 1s before every attempt (time.Second == 1e9 ns).
		time.Sleep(time.Second)
		prd := toChangePlan(iccid, ratePlan)
		if prd.ErrorCode != "40000029" {
			return
		}
	}
}
// Sort orders the slice by Flow in ascending order and returns it.
// sort.SliceStable preserves the relative order of equal elements,
// matching the stable bubble sort this replaces, while running in
// O(n log n) instead of O(n^2).
func Sort(array []analyse.PlanSimCardInfo) []analyse.PlanSimCardInfo {
	sort.SliceStable(array, func(i, j int) bool {
		// Ascending: smaller Flow first.
		return array[i].Flow < array[j].Flow
	})
	return array
}
// CountPlanFlow computes the real capacity and usage of each flow pool
// from the given SIM list and fills ac.PlanInfo and ac.PlanListSimList.
// Cards activated inside the current billing cycle contribute pool
// capacity only from their activation date to the end of the cycle,
// accumulated per day.
// NOTE(review): the billing cycle is assumed to run from the 27th of
// one month to the 26th of the next -- confirm with the carrier
// contract.
func (ac *AutoChangePlan) CountPlanFlow(simList gdb.Result) {
ac.PlanInfo = make(map[string]analyse.PlanInfo)
//ac.PlanListSimList = make(map[string][]datamodel.SimUnicom)
var simCordList1 = make(map[string]datamodel.SimUnicom)
var simCordList2 = make(map[string]datamodel.SimUnicom)
var simCordList3 = make(map[string]datamodel.SimUnicom)
ac.PlanListSimList = gmap.NewAnyAnyMap()
var flowPoolSize = make(map[string]int64) // total pool capacity per plan
var useFlow = make(map[string]int64) // used volume per plan
var simNum = make(map[string]int) // number of SIM cards per plan
monthNum := gconv.Int64(gtime.Now().Format("n"))
yearNum := gconv.Int64(gtime.Now().Format("Y"))
toDayNum := gconv.Int64(gtime.Now().Format("j"))
startYear := yearNum // year the billing cycle starts in
startMonth := monthNum - 1 // month the billing cycle starts in
// NOTE(review): in January with toDayNum > 26 startMonth stays 0,
// which produces an invalid "<year>-0-27" date string below -- verify.
if monthNum == 1 {
if toDayNum <= 26 {
startYear = startYear - 1
startMonth = 12
}
}
endYear := yearNum // year the billing cycle ends in
endMonth := monthNum // month the billing cycle ends in
if toDayNum > 26 {
endMonth = endMonth + 1
if monthNum == 12 {
endYear = endYear + 1
endMonth = 1
}
}
// Billing-cycle boundary dates.
startDayStr := gconv.String(startYear) + "-" + gconv.String(startMonth) + "-27" // cycle start date
nowDayStr := gconv.String(gtime.Now().Format("Y-m-d")) // today's date
endDayStr := gconv.String(endYear) + "-" + gconv.String(endMonth) + "-26" // cycle end date
// Days remaining in the billing cycle.
t1, _ := time.Parse("2006-01-02", endDayStr)
t2, _ := time.Parse("2006-01-02", nowDayStr)
surplusFlowDayNumTmp := utils.TimeSub(t1, t2)
surplusDayNum := gconv.Int64(surplusFlowDayNumTmp)
// Days already elapsed in the cycle.
a, _ := time.Parse("2006-01-02", nowDayStr)
b, _ := time.Parse("2006-01-02", startDayStr)
useDayNumtmp := utils.TimeSub(a, b)
// NOTE(review): useDayNum is 0 on the first day of the cycle, which
// would make the AveDayFlow divisions below divide by zero -- confirm
// this is never run on the 27th.
useDayNum := gconv.Int64(useDayNumtmp)
// Daily pool contribution of one card on the 1/2/3 GB plan.
var G1DayFlow int64 = utils.G1 / 30
var G2DayFlow int64 = utils.G1 * 2 / 30
var G3DayFlow int64 = utils.G1 * 3 / 30
for _, v := range simList {
simInfo := datamodel.SimUnicom{}
v.Struct(&simInfo)
// Cards activated inside the current cycle only contribute capacity
// for the days from activation to the cycle end; all others count a
// full 30 days.
var chargingDayNum int64 = 30
tmpTime := gconv.Int64(simInfo.DateActivated)
simCardActivatedYear := gconv.Int64(gtime.NewFromTimeStamp(tmpTime).Format("Y"))
simCardActivatedMonth := gconv.Int64(gtime.NewFromTimeStamp(tmpTime).Format("n"))
if simCardActivatedYear == yearNum && simCardActivatedMonth == monthNum {
simCardActivatedDate := gtime.NewFromTimeStamp(tmpTime).Format("Y-m-d")
// Days between activation and the cycle end.
a2, _ := time.Parse("2006-01-02", endDayStr)
b2, _ := time.Parse("2006-01-02", simCardActivatedDate)
chargingDayNumTmp := utils.TimeSub(a2, b2)
chargingDayNum = gconv.Int64(chargingDayNumTmp)
}
// Accumulate capacity/usage/count and bucket the card by plan.
switch simInfo.RatePlan {
case plan01:
flowPoolSize[plan01] = flowPoolSize[plan01] + chargingDayNum*G1DayFlow
useFlow[plan01] = useFlow[plan01] + simInfo.CtdDataUsage
simCordList1[simInfo.Iccid] = simInfo
ac.PlanListSimList.Set(plan01, simCordList1)
simNum[plan01]++
case plan02:
flowPoolSize[plan02] = flowPoolSize[plan02] + chargingDayNum*G2DayFlow
useFlow[plan02] = useFlow[plan02] + simInfo.CtdDataUsage
simCordList2[simInfo.Iccid] = simInfo
ac.PlanListSimList.Set(plan02, simCordList2)
simNum[plan02]++
case plan03:
flowPoolSize[plan03] = flowPoolSize[plan03] + chargingDayNum*G3DayFlow
useFlow[plan03] = useFlow[plan03] + simInfo.CtdDataUsage
simCordList3[simInfo.Iccid] = simInfo
ac.PlanListSimList.Set(plan03, simCordList3)
simNum[plan03]++
}
}
// 1 GB plan statistics ===============================
planInfo1 := analyse.PlanInfo{}
planInfo1.PlanName = plan01
planInfo1.AllFlow = flowPoolSize[plan01] / utils.MB1 // pool capacity (MB)
planInfo1.UseFlow = useFlow[plan01] / utils.MB1 // used volume (MB)
// Remaining volume; negative remainder becomes overflow.
surplusFlow := flowPoolSize[plan01] - useFlow[plan01]
var outFlow int64 = 0
if surplusFlow < 0 {
outFlow = utils.Abs(surplusFlow)
surplusFlow = 0
}
planInfo1.SurplusFlow = surplusFlow / utils.MB1 // remaining (MB)
planInfo1.OutFlow = outFlow / utils.MB1 // overflow (MB)
// NOTE(review): UseFlow is already in MB, so the extra /utils.MB1
// below looks like a double conversion that makes AveDayFlow (and the
// derived AveSimUseFlow / ExpectFlow) far too small -- verify units.
planInfo1.AveDayFlow = planInfo1.UseFlow / useDayNum / utils.MB1 // average daily usage
planInfo1.Num = simNum[plan01] // number of SIM cards
planInfo1.AveSimUseFlow = 0
if simNum[plan01] > 0 {
planInfo1.AveSimUseFlow = planInfo1.AveDayFlow / gconv.Int64(simNum[plan01]) / utils.MB1 // average per card per day
}
planInfo1.SurplusDayNum = surplusDayNum // days remaining
planInfo1.ExpectFlow = planInfo1.AveDayFlow * surplusDayNum / utils.MB1
ac.PlanInfo[plan01] = planInfo1
// 2 GB plan statistics ===============================
planInfo2 := analyse.PlanInfo{}
planInfo2.PlanName = plan02
planInfo2.AllFlow = flowPoolSize[plan02] / utils.MB1 // pool capacity (MB)
planInfo2.UseFlow = useFlow[plan02] / utils.MB1 // used volume (MB)
// Remaining volume; negative remainder becomes overflow.
surplusFlow2 := flowPoolSize[plan02] - useFlow[plan02]
var outFlow2 int64 = 0
if surplusFlow2 < 0 {
outFlow2 = utils.Abs(surplusFlow2)
surplusFlow2 = 0
}
planInfo2.SurplusFlow = surplusFlow2 / utils.MB1 // remaining (MB)
planInfo2.OutFlow = outFlow2 / utils.MB1 // overflow (MB)
planInfo2.AveDayFlow = planInfo2.UseFlow / useDayNum / utils.MB1 // average daily usage (see unit note above)
planInfo2.Num = simNum[plan02] // number of SIM cards
planInfo2.AveSimUseFlow = 0
if simNum[plan02] > 0 {
planInfo2.AveSimUseFlow = planInfo2.AveDayFlow / gconv.Int64(simNum[plan02]) / utils.MB1 // average per card per day
}
planInfo2.SurplusDayNum = surplusDayNum // days remaining
planInfo2.ExpectFlow = planInfo2.AveDayFlow * surplusDayNum / utils.MB1
ac.PlanInfo[plan02] = planInfo2
// 3 GB plan statistics ===============================
planInfo3 := analyse.PlanInfo{}
planInfo3.PlanName = plan03
planInfo3.AllFlow = flowPoolSize[plan03] / utils.MB1 // pool capacity (MB)
planInfo3.UseFlow = useFlow[plan03] / utils.MB1 // used volume (MB)
// Remaining volume; negative remainder becomes overflow.
surplusFlow3 := flowPoolSize[plan03] - useFlow[plan03]
var outFlow3 int64 = 0
if surplusFlow3 < 0 {
outFlow3 = utils.Abs(surplusFlow3)
surplusFlow3 = 0
}
planInfo3.SurplusFlow = surplusFlow3 / utils.MB1 // remaining (MB)
planInfo3.OutFlow = outFlow3 / utils.MB1 // overflow (MB)
planInfo3.AveDayFlow = planInfo3.UseFlow / useDayNum / utils.MB1 // average daily usage (see unit note above)
planInfo3.Num = simNum[plan03] // number of SIM cards
planInfo3.AveSimUseFlow = 0
if simNum[plan03] > 0 {
planInfo3.AveSimUseFlow = planInfo3.AveDayFlow / gconv.Int64(simNum[plan03]) / utils.MB1 // average per card per day
}
planInfo3.SurplusDayNum = surplusDayNum // days remaining
planInfo3.ExpectFlow = planInfo3.AveDayFlow * surplusDayNum / utils.MB1
ac.PlanInfo[plan03] = planInfo3
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.