text stringlengths 1 1.05M |
|---|
#!/system/bin/sh
# Author: Matthew Stapleton (Capsicum Corporation) <matthew@capsicumcorp.com>
# Copyright: Capsicum Corporation 2016
# This file is part of Capsicum Web Server which is part of the iOmy project.
# iOmy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# iOmy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with iOmy. If not, see <http://www.gnu.org/licenses/>.
# Some code copied from DroidPHP: https://github.com/DroidPHP/DroidPHP
# DroidPHP is licensed under the <a href="http://www.apache.org/licenses/LICENSE-2.0">Apache License, Version 2.0 (the "License");</a>.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is furnished
# to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Arguments: <apppathname>
# Get the storage path from the first parameter
# Require the app storage path as the first argument.
# -z replaces the non-POSIX `==` operator: this runs under /system/bin/sh,
# which is not guaranteed to be bash.
if [ -z "$1" ] ; then
echo "Error: First parameter needs to be the storage path"
exit 1
fi
export app="$1"
shift
export sbin="${app}/components"
# Set app permissions so can execute the programs.
# Expansions are quoted so paths containing spaces survive word splitting;
# the trailing glob stays outside the quotes so it still expands.
chmod 0755 "$sbin"/bin/armeabi/*
chmod 0755 "$sbin"/bin/armeabi/pie/* 2>/dev/null
chmod 0755 "$app/scripts/manage_services.sh" 2> /dev/null
chmod 0755 "$app/scripts/run_ffmpeg.sh" 2> /dev/null
|
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
# Load the dataset. NOTE(review): '...' is a placeholder path — point this at
# the real CSV before running.
df = pd.read_csv('...')
# Features are every column except the 'winner' label column.
X = df.drop('winner', axis=1)
y = df['winner']
# Hold out 20% of the rows for evaluation. NOTE(review): no random_state is
# set, so the split (and therefore the predictions) differs between runs —
# confirm whether reproducibility matters here.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# Random forest with all-default hyperparameters, fit on the training split.
model = RandomForestClassifier()
model.fit(X_train, y_train)
predictions = model.predict(X_test) |
<reponame>UNIMIBInside/Business-Event-Exchange-Ontology
package it.disco.unimib.model;
import com.fasterxml.jackson.annotation.JsonSubTypes;
import com.fasterxml.jackson.annotation.JsonSubTypes.Type;
import com.fasterxml.jackson.annotation.JsonTypeInfo;
/**
 * OneOfEventLocation
 *
 * Marker interface for a polymorphic event-location payload. Jackson picks
 * the concrete class from the type-id property: "ews:Place" maps to
 * {@code Place}, "ews:PostalAddress" maps to {@code PostalAddress}, and any
 * unrecognized type name falls back to a plain {@code Object} (defaultImpl).
 */
//@JsonDeserialize(as = PostalAddress.class)
@JsonTypeInfo(use = JsonTypeInfo.Id.NAME, include = JsonTypeInfo.As.PROPERTY, defaultImpl = Object.class)
@JsonSubTypes({
@Type(value = Place.class, name = "ews:Place"),
@Type(value = PostalAddress.class, name = "ews:PostalAddress")
})
public interface OneOfEventLocation {
}
|
#!/usr/bin/env bash
# ANSI colour escape codes used by the menus and status messages below.
YW=`echo "\033[33m"`
BL=`echo "\033[36m"`
RD=`echo "\033[01;31m"`
# CM: UTF-8 check mark (✔) followed by ESC — rendered later via echo -e.
CM='\xE2\x9C\x94\033'
GN=`echo "\033[1;92m"`
CL=`echo "\033[m"`
# Ask for confirmation before doing anything; loop until y/n is answered.
while true; do
read -p "This will create a New Ubuntu 21.10 LXC. Proceed(y/n)?" yn
case $yn in
[Yy]* ) break;;
[Nn]* ) exit;;
* ) echo "Please answer yes or no.";;
esac
done
clear
function header_info {
# Print the "Ubuntu" ASCII-art banner in yellow, then reset the colour.
echo -e "${YW}
_ _ _ _
| | | | | | |
| | | | |__ _ _ _ __ | |_ _ _
| | | | _ \| | | | _ \| __| | | |
| |__| | |_) | |_| | | | | |_| |_| |
\____/|_.__/ \__,_|_| |_|\__|\__,_|
${CL}"
}
header_info
# Render the install-method menu and read the user's selection into $opt.
show_menu(){
printf " ${YW} 1)${YW} Privileged ${CL}\n"
printf " ${YW} 2)${GN} Unprivileged ${CL}\n"
printf "Please choose a Install Method and hit enter or ${RD}x${CL} to exit."
read opt
}
# Echo a highlighted status line; falls back to an error message when called
# with no arguments.
option_picked(){
message=${@:-"${CL}Error: No message passed"}
printf " ${YW}${message}${CL}\n"
}
show_menu
# Loop until a valid install method is chosen, setting IM=0 (privileged) or
# IM=1 (unprivileged). "$opt" is quoted in the tests below: the original
# unquoted `[ $opt != '' ]` expands to `[ != '' ]` when the user just presses
# Enter, which is a test(1) syntax error instead of a clean exit.
while [ "$opt" != '' ]
do
  if [ "$opt" = '' ]; then
    exit;
  else
    case $opt in
    1) clear;
      header_info;
      option_picked "Using Privileged Install";
      IM=0
      break;
      ;;
    2) clear;
      header_info;
      option_picked "Using Unprivileged Install";
      IM=1
      break;
      ;;
    x) exit;
      ;;
    \n) exit;
      ;;
    *) clear;
      option_picked "Please choose a Install Method from the menu";
      show_menu;
      ;;
    esac
  fi
done
# From here on run in strict mode: abort on any error (also inside functions
# via errtrace), treat unset variables as errors, and fail pipelines when any
# stage fails.
set -o errexit
set -o errtrace
set -o nounset
set -o pipefail
# Aliases normally only expand in interactive shells; needed for `die` and
# the `lxc-cmd` alias defined later.
shopt -s expand_aliases
# Capture the failing exit status and line number, then delegate to
# error_exit via the ERR trap; cleanup always runs on EXIT.
alias die='EXIT=$? LINE=$LINENO error_exit'
trap die ERR
trap cleanup EXIT
# Report a fatal error (with the exit status and line number captured by the
# `die` alias), tear down any half-created container, and exit.
# $1 - optional human-readable reason; a default is used when omitted.
function error_exit() {
  trap - ERR
  local DEFAULT='Unknown failure occurred.'  # fixed typo: was "occured"
  local REASON="\e[97m${1:-$DEFAULT}\e[39m"
  local FLAG="\e[91m[ERROR] \e[93m$EXIT@$LINE"
  msg "$FLAG $REASON"
  # -n replaces the non-idiomatic `! -z`; ${CTID-} stays safe under `set -u`.
  [ -n "${CTID-}" ] && cleanup_ctid
  exit "$EXIT"
}
# Print a yellow [WARNING] tag followed by the message in bright white.
# $1 - warning text.
function warn() {
  msg "\e[93m[WARNING]\e[39m \e[97m$1\e[39m"
}
# Print a cyan [INFO] tag followed by the message unchanged.
# $1 - informational text.
function info() {
  msg "\e[36m[INFO]\e[39m $1"
}
# Write one line to stdout, interpreting backslash escape sequences.
# printf '%b' expands escapes exactly like the original `echo -e`.
function msg() {
  printf '%b\n' "$1"
}
# Tear down a partially-created container: stop and destroy it if it exists,
# otherwise free any rootfs volume that was already allocated for its id.
# NOTE(review): STORAGE/ROOTFS are set by the sourced create_lxc.sh — confirm.
function cleanup_ctid() {
  # Call pct directly: the original wrapped the probe in `if $(...)`, which
  # executes the (empty) substitution output and only works by a bash quirk.
  if pct status "$CTID" >/dev/null 2>&1; then
    if [ "$(pct status "$CTID" | awk '{print $2}')" == "running" ]; then
      pct stop "$CTID"
    fi
    pct destroy "$CTID"
  elif [ "$(pvesm list "$STORAGE" --vmid "$CTID")" != "" ]; then
    pvesm free "$ROOTFS"
  fi
}
# EXIT trap: return from the temporary working directory and delete it.
function cleanup() {
popd >/dev/null
rm -rf $TEMP_DIR
}
# Unprivileged containers get extra features (keyctl/mknod) so common
# userspace works; privileged containers only need nesting.
if [ "$IM" == "1" ]; then
FEATURES="nesting=1,keyctl=1,mknod=1"
else
FEATURES="nesting=1"
fi
# Work in a throw-away directory (removed by the cleanup EXIT trap).
TEMP_DIR=$(mktemp -d)
pushd $TEMP_DIR >/dev/null
# Next free VM/CT id from the cluster, plus the PCT_* settings consumed by
# the downloaded create_lxc.sh.
export CTID=$(pvesh get /cluster/nextid)
export PCT_OSTYPE=ubuntu
export PCT_OSVERSION=21.10
export PCT_DISK_SIZE=2
export PCT_OPTIONS="
-features $FEATURES
-hostname ubuntu
-net0 name=eth0,bridge=vmbr0,ip=dhcp
-onboot 1
-cores 1
-memory 512
-unprivileged ${IM}
"
# NOTE(review): piping a remote script straight into bash executes unreviewed
# code from the network — consider pinning a commit or verifying a checksum.
bash -c "$(wget -qLO - https://raw.githubusercontent.com/tteck/Proxmox/main/ct/create_lxc.sh)" || exit
# Warn when the container landed on ZFS (fallocate limitations).
STORAGE_TYPE=$(pvesm status -storage $(pct config $CTID | grep rootfs | awk -F ":" '{print $2}') | awk 'NR>1 {print $2}')
if [ "$STORAGE_TYPE" == "zfspool" ]; then
warn "Some addons may not work due to ZFS not supporting 'fallocate'."
fi
echo -en "${GN} Starting LXC Container... "
pct start $CTID
echo -e "${CM}${CL} \r"
# Run the in-container Ubuntu setup script, then report the new CT id.
alias lxc-cmd="lxc-attach -n $CTID --"
lxc-cmd bash -c "$(wget -qLO - https://raw.githubusercontent.com/tteck/Proxmox/main/setup/ubuntu-install.sh)" || exit
IP=$(pct exec $CTID ip a s dev eth0 | sed -n '/inet / s/\// /p' | awk '{print $2}')
echo -e "${GN}Successfully created Ubuntu 21.10 LXC to${CL} ${BL}$CTID${CL}. \n"
|
#!/usr/bin/env bash
# Kube-OVN v1.9.0 installer: renders the CRDs and component manifests and
# applies them to the current kubectl context.
set -euo pipefail
# Feature toggles — each may be preset in the environment; the value after
# :- is the default.
IPV6=${IPV6:-false}
DUAL_STACK=${DUAL_STACK:-false}
ENABLE_SSL=${ENABLE_SSL:-false}
ENABLE_VLAN=${ENABLE_VLAN:-false}
CHECK_GATEWAY=${CHECK_GATEWAY:-true}
LOGICAL_GATEWAY=${LOGICAL_GATEWAY:-false}
ENABLE_MIRROR=${ENABLE_MIRROR:-false}
VLAN_NIC=${VLAN_NIC:-}
HW_OFFLOAD=${HW_OFFLOAD:-false}
ENABLE_LB=${ENABLE_LB:-true}
ENABLE_NP=${ENABLE_NP:-true}
ENABLE_EXTERNAL_VPC=${ENABLE_EXTERNAL_VPC:-true}
MULTICAST_PRIVILEGED=${MULTICAST_PRIVILEGED:-false}
# The nic to support container network can be a nic name or a group of regex
# separated by comma, if empty will use the nic that the default route use
IFACE=${IFACE:-}
# Fixed deployment constants: CNI paths, image registry/tag, pull policy.
CNI_CONF_DIR="/etc/cni/net.d"
CNI_BIN_DIR="/opt/cni/bin"
REGISTRY="kubeovn"
VERSION="v1.9.0"
IMAGE_PULL_POLICY="IfNotPresent"
# IPv4 defaults; overridden below when IPV6 or DUAL_STACK is requested.
POD_CIDR="10.16.0.0/16" # Do NOT overlap with NODE/SVC/JOIN CIDR
POD_GATEWAY="10.16.0.1"
SVC_CIDR="10.96.0.0/12" # Do NOT overlap with NODE/POD/JOIN CIDR
JOIN_CIDR="100.64.0.0/16" # Do NOT overlap with NODE/POD/SVC CIDR
PINGER_EXTERNAL_ADDRESS="114.114.114.114" # Pinger check external ip probe
PINGER_EXTERNAL_DOMAIN="alauda.cn" # Pinger check external domain probe
SVC_YAML_IPFAMILYPOLICY=""
# IPv6-only cluster: swap every CIDR and probe target for v6 equivalents.
if [ "$IPV6" = "true" ]; then
POD_CIDR="fd00:10:16::/64" # Do NOT overlap with NODE/SVC/JOIN CIDR
POD_GATEWAY="fd00:10:16::1"
SVC_CIDR="fd00:10:96::/112" # Do NOT overlap with NODE/POD/JOIN CIDR
JOIN_CIDR="fd00:100:64::/64" # Do NOT overlap with NODE/POD/SVC CIDR
PINGER_EXTERNAL_ADDRESS="2400:3200::1"
PINGER_EXTERNAL_DOMAIN="google.com"
fi
# Dual-stack cluster: comma-joined v4,v6 CIDRs; takes precedence over IPV6
# since it is evaluated after it.
if [ "$DUAL_STACK" = "true" ]; then
POD_CIDR="10.16.0.0/16,fd00:10:16::/64" # Do NOT overlap with NODE/SVC/JOIN CIDR
POD_GATEWAY="10.16.0.1,fd00:10:16::1"
SVC_CIDR="10.96.0.0/12" # Do NOT overlap with NODE/POD/JOIN CIDR
JOIN_CIDR="100.64.0.0/16,fd00:100:64::/64" # Do NOT overlap with NODE/POD/SVC CIDR
PINGER_EXTERNAL_ADDRESS="114.114.114.114,2400:3200::1"
PINGER_EXTERNAL_DOMAIN="google.com"
SVC_YAML_IPFAMILYPOLICY="ipFamilyPolicy: PreferDualStack"
fi
EXCLUDE_IPS="" # EXCLUDE_IPS for default subnet
LABEL="node-role.kubernetes.io/master" # The node label to deploy OVN DB
NETWORK_TYPE="geneve" # geneve or vlan
TUNNEL_TYPE="geneve" # geneve, vxlan or stt. ATTENTION: some networkpolicy cannot take effect when using vxlan and stt need custom compile ovs kernel module
POD_NIC_TYPE="veth-pair" # veth-pair or internal-port
# VLAN Config only take effect when NETWORK_TYPE is vlan
PROVIDER_NAME="provider"
VLAN_INTERFACE_NAME=""
VLAN_NAME="ovn-vlan"
VLAN_ID="100"
if [ "$ENABLE_VLAN" = "true" ]; then
NETWORK_TYPE="vlan"
if [ "$VLAN_NIC" != "" ]; then
VLAN_INTERFACE_NAME="$VLAN_NIC"
fi
fi
# DPDK
# Defaults used unless overridden by --with-dpdk / --dpdk-cpu / --dpdk-memory.
DPDK="false"
DPDK_SUPPORTED_VERSIONS=("19.11")
DPDK_VERSION=""
DPDK_CPU="1000m" # Default CPU configuration for if --dpdk-cpu flag is not included
DPDK_MEMORY="2Gi" # Default Memory configuration for it --dpdk-memory flag is not included
display_help() {
    # Print the usage summary (invoked by -h/--help) and terminate the
    # script. The unquoted heredoc delimiter lets $0 expand to the script
    # name, matching the original echo-based output exactly.
    cat <<EOF
Usage: $0 [option...]

 -h, --help Print Help (this message) and exit
 --with-dpdk=<version> Install Kube-OVN with OVS-DPDK instead of kernel OVS
 --dpdk-cpu=<amount>m Configure DPDK to use a specific amount of CPU
 --dpdk-memory=<amount>Gi Configure DPDK to use a specific amount of memory

EOF
    exit 0
}
# Parse command-line flags (-h/--help, --with-dpdk, --dpdk-cpu,
# --dpdk-memory). nounset is suspended while walking $1 so that shifting
# past the last argument does not abort the script.
if [ -n "${1-}" ]
then
  set +u
  while :; do
    case $1 in
      -h|--help)
        display_help
        ;;
      --with-dpdk=*)
        DPDK=true
        DPDK_VERSION="${1#*=}"
        # Real membership test: the original compared the *joined* array
        # "${DPDK_SUPPORTED_VERSIONS[@]}" against the version string, which
        # only works while the array has exactly one element.
        dpdk_version_ok=false
        for v in "${DPDK_SUPPORTED_VERSIONS[@]}"; do
          if [ "$v" = "$DPDK_VERSION" ]; then
            dpdk_version_ok=true
            break
          fi
        done
        if [ "$dpdk_version_ok" != "true" ] || [ -z "$DPDK_VERSION" ]; then
          echo "Unsupported DPDK version: ${DPDK_VERSION}"
          echo "Supported DPDK versions: ${DPDK_SUPPORTED_VERSIONS[*]}"
          exit 1
        fi
        ;;
      --dpdk-cpu=*)
        DPDK_CPU="${1#*=}"
        # CPU request must look like "<integer>m" (millicores).
        if [[ $DPDK_CPU =~ ^[0-9]+(m)$ ]]
        then
          echo "CPU $DPDK_CPU"
        else
          echo "$DPDK_CPU is not valid, please use the format --dpdk-cpu=<amount>m"
          exit 1
        fi
        ;;
      --dpdk-memory=*)
        DPDK_MEMORY="${1#*=}"
        # Memory request must look like "<integer>Gi".
        if [[ $DPDK_MEMORY =~ ^[0-9]+(Gi)$ ]]
        then
          echo "MEMORY $DPDK_MEMORY"
        else
          echo "$DPDK_MEMORY is not valid, please use the format --dpdk-memory=<amount>Gi"
          exit 1
        fi
        ;;
      -?*)
        echo "Unknown argument $1"
        exit 1
        ;;
      *) break
    esac
    shift
  done
  set -u
fi
# [Step 0/6] When SSL is enabled, generate the OVN TLS material once via the
# kube-ovn image and store it in the kube-ovn-tls secret (skipped when the
# secret already exists).
if [[ $ENABLE_SSL = "true" ]];then
echo "[Step 0/6] Generate SSL key and cert"
exist=$(kubectl get secret -n kube-system kube-ovn-tls --ignore-not-found)
if [[ $exist == "" ]];then
docker run --rm -v "$PWD":/etc/ovn $REGISTRY/kube-ovn:$VERSION bash generate-ssl.sh
kubectl create secret generic -n kube-system kube-ovn-tls --from-file=cacert=cacert.pem --from-file=cert=ovn-cert.pem --from-file=key=ovn-privkey.pem
rm -rf cacert.pem ovn-cert.pem ovn-privkey.pem ovn-req.pem
fi
echo "-------------------------------"
echo ""
fi
# [Step 1/6] Require at least one node carrying $LABEL; those nodes host the
# OVN database and get the kube-ovn/role=master label.
echo "[Step 1/6] Label kube-ovn-master node"
count=$(kubectl get no -l$LABEL --no-headers -o wide | wc -l | sed 's/ //g')
if [ "$count" = "0" ]; then
echo "ERROR: No node with label $LABEL"
exit 1
fi
kubectl label no -lbeta.kubernetes.io/os=linux kubernetes.io/os=linux --overwrite
kubectl label no -l$LABEL kube-ovn/role=master --overwrite
echo "-------------------------------"
echo ""
# [Step 2/6] Collect the master nodes' internal IPs (column 6 of -o wide) as
# a comma-separated list for the OVN DB endpoints.
echo "[Step 2/6] Install OVN components"
addresses=$(kubectl get no -lkube-ovn/role=master --no-headers -o wide | awk '{print $6}' | tr \\n ',')
echo "Install OVN DB in $addresses"
cat <<EOF > kube-ovn-crd.yaml
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: vpc-nat-gateways.kubeovn.io
spec:
group: kubeovn.io
names:
plural: vpc-nat-gateways
singular: vpc-nat-gateway
shortNames:
- vpc-nat-gw
kind: VpcNatGateway
listKind: VpcNatGatewayList
scope: Cluster
versions:
- additionalPrinterColumns:
- jsonPath: .spec.vpc
name: Vpc
type: string
- jsonPath: .spec.subnet
name: Subnet
type: string
- jsonPath: .spec.lanIp
name: LanIP
type: string
name: v1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
properties:
spec:
type: object
properties:
dnatRules:
type: array
items:
type: object
properties:
eip:
type: string
externalPort:
type: string
internalIp:
type: string
internalPort:
type: string
protocol:
type: string
eips:
type: array
items:
type: object
properties:
eipCIDR:
type: string
gateway:
type: string
floatingIpRules:
type: array
items:
type: object
properties:
eip:
type: string
internalIp:
type: string
lanIp:
type: string
snatRules:
type: array
items:
type: object
properties:
eip:
type: string
internalCIDR:
type: string
subnet:
type: string
vpc:
type: string
subresources:
status: {}
conversion:
strategy: None
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: vpcs.kubeovn.io
spec:
group: kubeovn.io
versions:
- additionalPrinterColumns:
- jsonPath: .status.standby
name: Standby
type: boolean
- jsonPath: .status.subnets
name: Subnets
type: string
- jsonPath: .spec.namespaces
name: Namespaces
type: string
name: v1
schema:
openAPIV3Schema:
properties:
spec:
properties:
namespaces:
items:
type: string
type: array
staticRoutes:
items:
properties:
policy:
type: string
cidr:
type: string
nextHopIP:
type: string
type: object
type: array
policyRoutes:
items:
properties:
priority:
type: integer
action:
type: string
match:
type: string
nextHopIP:
type: string
type: object
type: array
type: object
status:
properties:
conditions:
items:
properties:
lastTransitionTime:
type: string
lastUpdateTime:
type: string
message:
type: string
reason:
type: string
status:
type: string
type:
type: string
type: object
type: array
default:
type: boolean
defaultLogicalSwitch:
type: string
router:
type: string
standby:
type: boolean
subnets:
items:
type: string
type: array
tcpLoadBalancer:
type: string
tcpSessionLoadBalancer:
type: string
udpLoadBalancer:
type: string
udpSessionLoadBalancer:
type: string
type: object
type: object
served: true
storage: true
subresources:
status: {}
names:
kind: Vpc
listKind: VpcList
plural: vpcs
shortNames:
- vpc
singular: vpc
scope: Cluster
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: ips.kubeovn.io
spec:
group: kubeovn.io
versions:
- name: v1
served: true
storage: true
additionalPrinterColumns:
- name: V4IP
type: string
jsonPath: .spec.v4IpAddress
- name: V6IP
type: string
jsonPath: .spec.v6IpAddress
- name: Mac
type: string
jsonPath: .spec.macAddress
- name: Node
type: string
jsonPath: .spec.nodeName
- name: Subnet
type: string
jsonPath: .spec.subnet
schema:
openAPIV3Schema:
type: object
properties:
spec:
type: object
properties:
podName:
type: string
namespace:
type: string
subnet:
type: string
attachSubnets:
type: array
items:
type: string
nodeName:
type: string
ipAddress:
type: string
v4IpAddress:
type: string
v6IpAddress:
type: string
attachIps:
type: array
items:
type: string
macAddress:
type: string
attachMacs:
type: array
items:
type: string
containerID:
type: string
scope: Cluster
names:
plural: ips
singular: ip
kind: IP
shortNames:
- ip
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: subnets.kubeovn.io
spec:
group: kubeovn.io
versions:
- name: v1
served: true
storage: true
subresources:
status: {}
additionalPrinterColumns:
- name: Provider
type: string
jsonPath: .spec.provider
- name: Vpc
type: string
jsonPath: .spec.vpc
- name: Protocol
type: string
jsonPath: .spec.protocol
- name: CIDR
type: string
jsonPath: .spec.cidrBlock
- name: Private
type: boolean
jsonPath: .spec.private
- name: NAT
type: boolean
jsonPath: .spec.natOutgoing
- name: Default
type: boolean
jsonPath: .spec.default
- name: GatewayType
type: string
jsonPath: .spec.gatewayType
- name: V4Used
type: number
jsonPath: .status.v4usingIPs
- name: V4Available
type: number
jsonPath: .status.v4availableIPs
- name: V6Used
type: number
jsonPath: .status.v6usingIPs
- name: V6Available
type: number
jsonPath: .status.v6availableIPs
- name: ExcludeIPs
type: string
jsonPath: .spec.excludeIps
schema:
openAPIV3Schema:
type: object
properties:
status:
type: object
properties:
v4availableIPs:
type: number
v4usingIPs:
type: number
v6availableIPs:
type: number
v6usingIPs:
type: number
activateGateway:
type: string
conditions:
type: array
items:
type: object
properties:
type:
type: string
status:
type: string
reason:
type: string
message:
type: string
lastUpdateTime:
type: string
lastTransitionTime:
type: string
spec:
type: object
properties:
vpc:
type: string
default:
type: boolean
protocol:
type: string
cidrBlock:
type: string
namespaces:
type: array
items:
type: string
gateway:
type: string
provider:
type: string
excludeIps:
type: array
items:
type: string
gatewayType:
type: string
allowSubnets:
type: array
items:
type: string
gatewayNode:
type: string
natOutgoing:
type: boolean
externalEgressGateway:
type: string
policyRoutingPriority:
type: integer
minimum: 1
maximum: 32765
policyRoutingTableID:
type: integer
minimum: 1
maximum: 2147483647
not:
enum:
- 252 # compat
- 253 # default
- 254 # main
- 255 # local
private:
type: boolean
vlan:
type: string
logicalGateway:
type: boolean
disableGatewayCheck:
type: boolean
disableInterConnection:
type: boolean
htbqos:
type: string
scope: Cluster
names:
plural: subnets
singular: subnet
kind: Subnet
shortNames:
- subnet
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: vlans.kubeovn.io
spec:
group: kubeovn.io
versions:
- name: v1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
properties:
spec:
type: object
properties:
id:
type: integer
minimum: 0
maximum: 4095
provider:
type: string
vlanId:
type: integer
description: Deprecated in favor of id
providerInterfaceName:
type: string
description: Deprecated in favor of provider
required:
- provider
status:
type: object
properties:
subnets:
type: array
items:
type: string
additionalPrinterColumns:
- name: ID
type: string
jsonPath: .spec.id
- name: Provider
type: string
jsonPath: .spec.provider
scope: Cluster
names:
plural: vlans
singular: vlan
kind: Vlan
shortNames:
- vlan
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: provider-networks.kubeovn.io
spec:
group: kubeovn.io
versions:
- name: v1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
properties:
metadata:
type: object
properties:
name:
type: string
maxLength: 12
not:
enum:
- int
- external
spec:
type: object
properties:
defaultInterface:
type: string
maxLength: 15
pattern: '^[^/\s]+$'
customInterfaces:
type: array
items:
type: object
properties:
interface:
type: string
maxLength: 15
pattern: '^[^/\s]+$'
nodes:
type: array
items:
type: string
excludeNodes:
type: array
items:
type: string
required:
- defaultInterface
status:
type: object
properties:
ready:
type: boolean
readyNodes:
type: array
items:
type: string
vlans:
type: array
items:
type: string
conditions:
type: array
items:
type: object
properties:
node:
type: string
type:
type: string
status:
type: string
reason:
type: string
message:
type: string
lastUpdateTime:
type: string
lastTransitionTime:
type: string
additionalPrinterColumns:
- name: DefaultInterface
type: string
jsonPath: .spec.defaultInterface
- name: Ready
type: boolean
jsonPath: .status.ready
scope: Cluster
names:
plural: provider-networks
singular: provider-network
kind: ProviderNetwork
listKind: ProviderNetworkList
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: security-groups.kubeovn.io
spec:
group: kubeovn.io
names:
plural: security-groups
singular: security-group
shortNames:
- sg
kind: SecurityGroup
listKind: SecurityGroupList
scope: Cluster
versions:
- name: v1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
properties:
spec:
type: object
properties:
ingressRules:
type: array
items:
type: object
properties:
ipVersion:
type: string
protocol:
type: string
priority:
type: integer
remoteType:
type: string
remoteAddress:
type: string
remoteSecurityGroup:
type: string
portRangeMin:
type: integer
portRangeMax:
type: integer
policy:
type: string
egressRules:
type: array
items:
type: object
properties:
ipVersion:
type: string
protocol:
type: string
priority:
type: integer
remoteType:
type: string
remoteAddress:
type: string
remoteSecurityGroup:
type: string
portRangeMin:
type: integer
portRangeMax:
type: integer
policy:
type: string
allowSameGroupTraffic:
type: boolean
status:
type: object
properties:
portGroup:
type: string
allowSameGroupTraffic:
type: boolean
ingressMd5:
type: string
egressMd5:
type: string
ingressLastSyncSuccess:
type: boolean
egressLastSyncSuccess:
type: boolean
subresources:
status: {}
conversion:
strategy: None
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: htbqoses.kubeovn.io
spec:
group: kubeovn.io
versions:
- name: v1
served: true
storage: true
additionalPrinterColumns:
- name: PRIORITY
type: string
jsonPath: .spec.priority
schema:
openAPIV3Schema:
type: object
properties:
spec:
type: object
properties:
priority:
type: string # Value in range 0 to 4,294,967,295.
scope: Cluster
names:
plural: htbqoses
singular: htbqos
kind: HtbQos
shortNames:
- htbqos
EOF
if $DPDK; then
cat <<EOF > ovn.yaml
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: kube-ovn
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
spec:
privileged: true
allowPrivilegeEscalation: true
allowedCapabilities:
- '*'
volumes:
- '*'
hostNetwork: true
hostPorts:
- min: 0
max: 65535
hostIPC: true
hostPID: true
runAsUser:
rule: 'RunAsAny'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'RunAsAny'
fsGroup:
rule: 'RunAsAny'
---
apiVersion: v1
kind: ConfigMap
metadata:
name: ovn-config
namespace: kube-system
data:
defaultNetworkType: '$NETWORK_TYPE'
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: ovn
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
rbac.authorization.k8s.io/system-only: "true"
name: system:ovn
rules:
- apiGroups: ['policy']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames:
- kube-ovn
- apiGroups:
- "kubeovn.io"
resources:
- subnets
- subnets/status
- ips
- vlans
- provider-networks
- provider-networks/status
- security-groups
- security-groups/status
- htbqoses
verbs:
- "*"
- apiGroups:
- ""
resources:
- pods
- pods/exec
- namespaces
- nodes
- configmaps
verbs:
- create
- get
- list
- watch
- patch
- update
- apiGroups:
- "k8s.cni.cncf.io"
resources:
- network-attachment-definitions
verbs:
- create
- delete
- get
- list
- update
- apiGroups:
- ""
- networking.k8s.io
- apps
- extensions
resources:
- networkpolicies
- services
- endpoints
- statefulsets
- daemonsets
- deployments
verbs:
- create
- delete
- update
- patch
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: ovn
roleRef:
name: system:ovn
kind: ClusterRole
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: ovn
namespace: kube-system
---
kind: Service
apiVersion: v1
metadata:
name: ovn-nb
namespace: kube-system
spec:
ports:
- name: ovn-nb
protocol: TCP
port: 6641
targetPort: 6641
type: ClusterIP
${SVC_YAML_IPFAMILYPOLICY}
selector:
app: ovn-central
ovn-nb-leader: "true"
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: ovn-sb
namespace: kube-system
spec:
ports:
- name: ovn-sb
protocol: TCP
port: 6642
targetPort: 6642
type: ClusterIP
${SVC_YAML_IPFAMILYPOLICY}
selector:
app: ovn-central
ovn-sb-leader: "true"
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: ovn-northd
namespace: kube-system
spec:
ports:
- name: ovn-northd
protocol: TCP
port: 6643
targetPort: 6643
type: ClusterIP
${SVC_YAML_IPFAMILYPOLICY}
selector:
app: ovn-central
ovn-northd-leader: "true"
sessionAffinity: None
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: ovn-central
namespace: kube-system
annotations:
kubernetes.io/description: |
OVN components: northd, nb and sb.
spec:
replicas: $count
strategy:
rollingUpdate:
maxSurge: 0
maxUnavailable: 1
type: RollingUpdate
selector:
matchLabels:
app: ovn-central
template:
metadata:
labels:
app: ovn-central
component: network
type: infra
spec:
tolerations:
- operator: Exists
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app: ovn-central
topologyKey: kubernetes.io/hostname
priorityClassName: system-cluster-critical
serviceAccountName: ovn
hostNetwork: true
containers:
- name: ovn-central
image: "$REGISTRY/kube-ovn:$VERSION"
imagePullPolicy: $IMAGE_PULL_POLICY
command: ["/kube-ovn/start-db.sh"]
securityContext:
capabilities:
add: ["SYS_NICE"]
env:
- name: ENABLE_SSL
value: "$ENABLE_SSL"
- name: NODE_IPS
value: $addresses
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
resources:
requests:
cpu: 300m
memory: 300Mi
limits:
cpu: 3
memory: 3Gi
volumeMounts:
- mountPath: /var/run/openvswitch
name: host-run-ovs
- mountPath: /var/run/ovn
name: host-run-ovn
- mountPath: /sys
name: host-sys
readOnly: true
- mountPath: /etc/openvswitch
name: host-config-openvswitch
- mountPath: /etc/ovn
name: host-config-ovn
- mountPath: /var/log/openvswitch
name: host-log-ovs
- mountPath: /var/log/ovn
name: host-log-ovn
- mountPath: /etc/localtime
name: localtime
- mountPath: /var/run/tls
name: kube-ovn-tls
readinessProbe:
exec:
command:
- bash
- /kube-ovn/ovn-is-leader.sh
periodSeconds: 3
timeoutSeconds: 45
livenessProbe:
exec:
command:
- bash
- /kube-ovn/ovn-healthcheck.sh
initialDelaySeconds: 30
periodSeconds: 7
failureThreshold: 5
timeoutSeconds: 45
nodeSelector:
kubernetes.io/os: "linux"
kube-ovn/role: "master"
volumes:
- name: host-run-ovs
hostPath:
path: /run/openvswitch
- name: host-run-ovn
hostPath:
path: /run/ovn
- name: host-sys
hostPath:
path: /sys
- name: host-config-openvswitch
hostPath:
path: /etc/origin/openvswitch
- name: host-config-ovn
hostPath:
path: /etc/origin/ovn
- name: host-log-ovs
hostPath:
path: /var/log/openvswitch
- name: host-log-ovn
hostPath:
path: /var/log/ovn
- name: localtime
hostPath:
path: /etc/localtime
- name: kube-ovn-tls
secret:
optional: true
secretName: kube-ovn-tls
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: ovs-ovn
namespace: kube-system
annotations:
kubernetes.io/description: |
This daemon set launches the openvswitch daemon.
spec:
selector:
matchLabels:
app: ovs
updateStrategy:
type: OnDelete
template:
metadata:
labels:
app: ovs
component: network
type: infra
spec:
tolerations:
- operator: Exists
priorityClassName: system-cluster-critical
serviceAccountName: ovn
hostNetwork: true
hostPID: true
containers:
- name: openvswitch
image: "kubeovn/kube-ovn-dpdk:$DPDK_VERSION-$VERSION"
imagePullPolicy: $IMAGE_PULL_POLICY
command: ["/kube-ovn/start-ovs-dpdk.sh"]
securityContext:
runAsUser: 0
privileged: true
env:
- name: ENABLE_SSL
value: "$ENABLE_SSL"
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: OVN_DB_IPS
value: $addresses
volumeMounts:
- mountPath: /lib/modules
name: host-modules
readOnly: true
- mountPath: /var/run/openvswitch
name: host-run-ovs
- mountPath: /var/run/ovn
name: host-run-ovn
- mountPath: /sys
name: host-sys
readOnly: true
- mountPath: /etc/cni/net.d
name: cni-conf
- mountPath: /etc/openvswitch
name: host-config-openvswitch
- mountPath: /etc/ovn
name: host-config-ovn
- mountPath: /var/log/openvswitch
name: host-log-ovs
- mountPath: /var/log/ovn
name: host-log-ovn
- mountPath: /opt/ovs-config
name: host-config-ovs
- mountPath: /dev/hugepages
name: hugepage
- mountPath: /etc/localtime
name: localtime
- mountPath: /var/run/tls
name: kube-ovn-tls
readinessProbe:
exec:
command:
- bash
- /kube-ovn/ovs-dpdk-healthcheck.sh
periodSeconds: 5
timeoutSeconds: 45
livenessProbe:
exec:
command:
- bash
- /kube-ovn/ovs-dpdk-healthcheck.sh
initialDelaySeconds: 10
periodSeconds: 5
failureThreshold: 5
timeoutSeconds: 45
resources:
requests:
cpu: $DPDK_CPU
memory: $DPDK_MEMORY
limits:
cpu: $DPDK_CPU
memory: $DPDK_MEMORY
hugepages-1Gi: 1Gi
nodeSelector:
kubernetes.io/os: "linux"
volumes:
- name: host-modules
hostPath:
path: /lib/modules
- name: host-run-ovs
hostPath:
path: /run/openvswitch
- name: host-run-ovn
hostPath:
path: /run/ovn
- name: host-sys
hostPath:
path: /sys
- name: cni-conf
hostPath:
path: /etc/cni/net.d
- name: host-config-openvswitch
hostPath:
path: /etc/origin/openvswitch
- name: host-config-ovn
hostPath:
path: /etc/origin/ovn
- name: host-log-ovs
hostPath:
path: /var/log/openvswitch
- name: host-log-ovn
hostPath:
path: /var/log/ovn
- name: host-config-ovs
hostPath:
path: /opt/ovs-config
type: DirectoryOrCreate
- name: hugepage
emptyDir:
medium: HugePages
- name: localtime
hostPath:
path: /etc/localtime
- name: kube-ovn-tls
secret:
optional: true
secretName: kube-ovn-tls
EOF
else
cat <<EOF > ovn.yaml
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: kube-ovn
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
spec:
privileged: true
allowPrivilegeEscalation: true
allowedCapabilities:
- '*'
volumes:
- '*'
hostNetwork: true
hostPorts:
- min: 0
max: 65535
hostIPC: true
hostPID: true
runAsUser:
rule: 'RunAsAny'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'RunAsAny'
fsGroup:
rule: 'RunAsAny'
---
apiVersion: v1
kind: ConfigMap
metadata:
name: ovn-config
namespace: kube-system
data:
defaultNetworkType: '$NETWORK_TYPE'
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: ovn
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
rbac.authorization.k8s.io/system-only: "true"
name: system:ovn
rules:
- apiGroups: ['policy']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames:
- kube-ovn
- apiGroups:
- "kubeovn.io"
resources:
- vpcs
- vpcs/status
- vpc-nat-gateways
- subnets
- subnets/status
- ips
- vlans
- provider-networks
- provider-networks/status
- networks
- security-groups
- security-groups/status
- htbqoses
verbs:
- "*"
- apiGroups:
- ""
resources:
- pods
- pods/exec
- namespaces
- nodes
- configmaps
verbs:
- create
- get
- list
- watch
- patch
- update
- apiGroups:
- ""
- networking.k8s.io
- apps
- extensions
resources:
- networkpolicies
- services
- endpoints
- statefulsets
- daemonsets
- deployments
verbs:
- create
- delete
- update
- patch
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- update
- apiGroups:
- "k8s.cni.cncf.io"
resources:
- network-attachment-definitions
verbs:
- create
- delete
- get
- list
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: ovn
roleRef:
name: system:ovn
kind: ClusterRole
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: ovn
namespace: kube-system
---
kind: Service
apiVersion: v1
metadata:
name: ovn-nb
namespace: kube-system
spec:
ports:
- name: ovn-nb
protocol: TCP
port: 6641
targetPort: 6641
type: ClusterIP
${SVC_YAML_IPFAMILYPOLICY}
selector:
app: ovn-central
ovn-nb-leader: "true"
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: ovn-sb
namespace: kube-system
spec:
ports:
- name: ovn-sb
protocol: TCP
port: 6642
targetPort: 6642
type: ClusterIP
${SVC_YAML_IPFAMILYPOLICY}
selector:
app: ovn-central
ovn-sb-leader: "true"
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: ovn-northd
namespace: kube-system
spec:
ports:
- name: ovn-northd
protocol: TCP
port: 6643
targetPort: 6643
type: ClusterIP
${SVC_YAML_IPFAMILYPOLICY}
selector:
app: ovn-central
ovn-northd-leader: "true"
sessionAffinity: None
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: ovn-central
namespace: kube-system
annotations:
kubernetes.io/description: |
OVN components: northd, nb and sb.
spec:
replicas: $count
strategy:
rollingUpdate:
maxSurge: 0
maxUnavailable: 1
type: RollingUpdate
selector:
matchLabels:
app: ovn-central
template:
metadata:
labels:
app: ovn-central
component: network
type: infra
spec:
tolerations:
- operator: Exists
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app: ovn-central
topologyKey: kubernetes.io/hostname
priorityClassName: system-cluster-critical
serviceAccountName: ovn
hostNetwork: true
containers:
- name: ovn-central
image: "$REGISTRY/kube-ovn:$VERSION"
imagePullPolicy: $IMAGE_PULL_POLICY
command: ["/kube-ovn/start-db.sh"]
securityContext:
capabilities:
add: ["SYS_NICE"]
env:
- name: ENABLE_SSL
value: "$ENABLE_SSL"
- name: NODE_IPS
value: $addresses
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
resources:
requests:
cpu: 300m
memory: 200Mi
limits:
cpu: 3
memory: 3Gi
volumeMounts:
- mountPath: /var/run/openvswitch
name: host-run-ovs
- mountPath: /var/run/ovn
name: host-run-ovn
- mountPath: /sys
name: host-sys
readOnly: true
- mountPath: /etc/openvswitch
name: host-config-openvswitch
- mountPath: /etc/ovn
name: host-config-ovn
- mountPath: /var/log/openvswitch
name: host-log-ovs
- mountPath: /var/log/ovn
name: host-log-ovn
- mountPath: /etc/localtime
name: localtime
- mountPath: /var/run/tls
name: kube-ovn-tls
readinessProbe:
exec:
command:
- bash
- /kube-ovn/ovn-is-leader.sh
periodSeconds: 3
timeoutSeconds: 45
livenessProbe:
exec:
command:
- bash
- /kube-ovn/ovn-healthcheck.sh
initialDelaySeconds: 30
periodSeconds: 7
failureThreshold: 5
timeoutSeconds: 45
nodeSelector:
kubernetes.io/os: "linux"
kube-ovn/role: "master"
volumes:
- name: host-run-ovs
hostPath:
path: /run/openvswitch
- name: host-run-ovn
hostPath:
path: /run/ovn
- name: host-sys
hostPath:
path: /sys
- name: host-config-openvswitch
hostPath:
path: /etc/origin/openvswitch
- name: host-config-ovn
hostPath:
path: /etc/origin/ovn
- name: host-log-ovs
hostPath:
path: /var/log/openvswitch
- name: host-log-ovn
hostPath:
path: /var/log/ovn
- name: localtime
hostPath:
path: /etc/localtime
- name: kube-ovn-tls
secret:
optional: true
secretName: kube-ovn-tls
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: ovs-ovn
namespace: kube-system
annotations:
kubernetes.io/description: |
This daemon set launches the openvswitch daemon.
spec:
selector:
matchLabels:
app: ovs
updateStrategy:
type: OnDelete
template:
metadata:
labels:
app: ovs
component: network
type: infra
spec:
tolerations:
- operator: Exists
priorityClassName: system-cluster-critical
serviceAccountName: ovn
hostNetwork: true
hostPID: true
containers:
- name: openvswitch
image: "$REGISTRY/kube-ovn:$VERSION"
imagePullPolicy: $IMAGE_PULL_POLICY
command: ["/kube-ovn/start-ovs.sh"]
securityContext:
runAsUser: 0
privileged: true
env:
- name: ENABLE_SSL
value: "$ENABLE_SSL"
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: HW_OFFLOAD
value: "$HW_OFFLOAD"
- name: TUNNEL_TYPE
value: "$TUNNEL_TYPE"
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: OVN_DB_IPS
value: $addresses
volumeMounts:
- mountPath: /lib/modules
name: host-modules
readOnly: true
- mountPath: /var/run/openvswitch
name: host-run-ovs
- mountPath: /var/run/ovn
name: host-run-ovn
- mountPath: /sys
name: host-sys
readOnly: true
- mountPath: /etc/cni/net.d
name: cni-conf
- mountPath: /etc/openvswitch
name: host-config-openvswitch
- mountPath: /etc/ovn
name: host-config-ovn
- mountPath: /var/log/openvswitch
name: host-log-ovs
- mountPath: /var/log/ovn
name: host-log-ovn
- mountPath: /etc/localtime
name: localtime
- mountPath: /var/run/tls
name: kube-ovn-tls
readinessProbe:
exec:
command:
- bash
- -c
- LOG_ROTATE=true /kube-ovn/ovs-healthcheck.sh
periodSeconds: 5
timeoutSeconds: 45
livenessProbe:
exec:
command:
- bash
- /kube-ovn/ovs-healthcheck.sh
initialDelaySeconds: 10
periodSeconds: 5
failureThreshold: 5
timeoutSeconds: 45
resources:
requests:
cpu: 200m
memory: 200Mi
limits:
cpu: 1000m
memory: 800Mi
nodeSelector:
kubernetes.io/os: "linux"
volumes:
- name: host-modules
hostPath:
path: /lib/modules
- name: host-run-ovs
hostPath:
path: /run/openvswitch
- name: host-run-ovn
hostPath:
path: /run/ovn
- name: host-sys
hostPath:
path: /sys
- name: cni-conf
hostPath:
path: /etc/cni/net.d
- name: host-config-openvswitch
hostPath:
path: /etc/origin/openvswitch
- name: host-config-ovn
hostPath:
path: /etc/origin/ovn
- name: host-log-ovs
hostPath:
path: /var/log/openvswitch
- name: host-log-ovn
hostPath:
path: /var/log/ovn
- name: localtime
hostPath:
path: /etc/localtime
- name: kube-ovn-tls
secret:
optional: true
secretName: kube-ovn-tls
EOF
fi
# Apply the generated CRD and OVN manifests, then block until the ovn-central
# deployment has fully rolled out before installing the Kube-OVN components.
kubectl apply -f kube-ovn-crd.yaml
kubectl apply -f ovn.yaml
kubectl rollout status deployment/ovn-central -n kube-system
echo "-------------------------------"
echo ""
echo "[Step 3/6] Install Kube-OVN"
cat <<EOF > kube-ovn.yaml
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: kube-ovn-controller
namespace: kube-system
annotations:
kubernetes.io/description: |
kube-ovn controller
spec:
replicas: $count
selector:
matchLabels:
app: kube-ovn-controller
strategy:
rollingUpdate:
maxSurge: 0%
maxUnavailable: 100%
type: RollingUpdate
template:
metadata:
labels:
app: kube-ovn-controller
component: network
type: infra
spec:
tolerations:
- operator: Exists
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app: kube-ovn-controller
topologyKey: kubernetes.io/hostname
priorityClassName: system-cluster-critical
serviceAccountName: ovn
hostNetwork: true
containers:
- name: kube-ovn-controller
image: "$REGISTRY/kube-ovn:$VERSION"
imagePullPolicy: $IMAGE_PULL_POLICY
command:
- /kube-ovn/start-controller.sh
args:
- --default-cidr=$POD_CIDR
- --default-gateway=$POD_GATEWAY
- --default-gateway-check=$CHECK_GATEWAY
- --default-logical-gateway=$LOGICAL_GATEWAY
- --default-exclude-ips=$EXCLUDE_IPS
- --node-switch-cidr=$JOIN_CIDR
- --service-cluster-ip-range=$SVC_CIDR
- --network-type=$NETWORK_TYPE
- --default-interface-name=$VLAN_INTERFACE_NAME
- --default-vlan-id=$VLAN_ID
- --pod-nic-type=$POD_NIC_TYPE
- --enable-lb=$ENABLE_LB
- --enable-np=$ENABLE_NP
- --enable-external-vpc=$ENABLE_EXTERNAL_VPC
- --multicast-privileged=$MULTICAST_PRIVILEGED
- --logtostderr=false
- --alsologtostderr=true
- --log_file=/var/log/kube-ovn/kube-ovn-controller.log
env:
- name: ENABLE_SSL
value: "$ENABLE_SSL"
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: KUBE_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: OVN_DB_IPS
value: $addresses
volumeMounts:
- mountPath: /etc/localtime
name: localtime
- mountPath: /var/log/kube-ovn
name: kube-ovn-log
- mountPath: /var/run/tls
name: kube-ovn-tls
readinessProbe:
exec:
command:
- bash
- /kube-ovn/kube-ovn-controller-healthcheck.sh
periodSeconds: 3
timeoutSeconds: 45
livenessProbe:
exec:
command:
- bash
- /kube-ovn/kube-ovn-controller-healthcheck.sh
initialDelaySeconds: 300
periodSeconds: 7
failureThreshold: 5
timeoutSeconds: 45
resources:
requests:
cpu: 200m
memory: 200Mi
limits:
cpu: 1000m
memory: 1Gi
nodeSelector:
kubernetes.io/os: "linux"
volumes:
- name: localtime
hostPath:
path: /etc/localtime
- name: kube-ovn-log
hostPath:
path: /var/log/kube-ovn
- name: kube-ovn-tls
secret:
optional: true
secretName: kube-ovn-tls
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: kube-ovn-cni
namespace: kube-system
annotations:
kubernetes.io/description: |
This daemon set launches the kube-ovn cni daemon.
spec:
selector:
matchLabels:
app: kube-ovn-cni
template:
metadata:
labels:
app: kube-ovn-cni
component: network
type: infra
spec:
tolerations:
- operator: Exists
priorityClassName: system-cluster-critical
serviceAccountName: ovn
hostNetwork: true
hostPID: true
initContainers:
- name: install-cni
image: "$REGISTRY/kube-ovn:$VERSION"
imagePullPolicy: $IMAGE_PULL_POLICY
command: ["/kube-ovn/install-cni.sh"]
securityContext:
runAsUser: 0
privileged: true
volumeMounts:
- mountPath: /opt/cni/bin
name: cni-bin
containers:
- name: cni-server
image: "$REGISTRY/kube-ovn:$VERSION"
imagePullPolicy: $IMAGE_PULL_POLICY
command:
- bash
- /kube-ovn/start-cniserver.sh
args:
- --enable-mirror=$ENABLE_MIRROR
- --encap-checksum=true
- --service-cluster-ip-range=$SVC_CIDR
- --iface=${IFACE}
- --network-type=$NETWORK_TYPE
- --default-interface-name=$VLAN_INTERFACE_NAME
- --logtostderr=false
- --alsologtostderr=true
- --log_file=/var/log/kube-ovn/kube-ovn-cni.log
securityContext:
runAsUser: 0
privileged: true
env:
- name: ENABLE_SSL
value: "$ENABLE_SSL"
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- mountPath: /etc/openvswitch
name: systemid
- mountPath: /etc/cni/net.d
name: cni-conf
- mountPath: /run/openvswitch
name: host-run-ovs
- mountPath: /run/ovn
name: host-run-ovn
- mountPath: /var/run/netns
name: host-ns
mountPropagation: HostToContainer
- mountPath: /var/log/kube-ovn
name: kube-ovn-log
- mountPath: /etc/localtime
name: localtime
readinessProbe:
exec:
command:
- nc
- -z
- -w3
- 127.0.0.1
- "10665"
periodSeconds: 3
livenessProbe:
exec:
command:
- nc
- -z
- -w3
- 127.0.0.1
- "10665"
initialDelaySeconds: 30
periodSeconds: 7
failureThreshold: 5
resources:
requests:
cpu: 100m
memory: 100Mi
limits:
cpu: 1000m
memory: 1Gi
nodeSelector:
kubernetes.io/os: "linux"
volumes:
- name: systemid
hostPath:
path: /etc/origin/openvswitch
- name: host-run-ovs
hostPath:
path: /run/openvswitch
- name: host-run-ovn
hostPath:
path: /run/ovn
- name: cni-conf
hostPath:
path: $CNI_CONF_DIR
- name: cni-bin
hostPath:
path: $CNI_BIN_DIR
- name: host-ns
hostPath:
path: /var/run/netns
- name: kube-ovn-log
hostPath:
path: /var/log/kube-ovn
- name: localtime
hostPath:
path: /etc/localtime
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: kube-ovn-pinger
namespace: kube-system
annotations:
kubernetes.io/description: |
      This daemon set launches the kube-ovn pinger daemon.
spec:
selector:
matchLabels:
app: kube-ovn-pinger
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
app: kube-ovn-pinger
component: network
type: infra
spec:
serviceAccountName: ovn
hostPID: true
containers:
- name: pinger
image: "$REGISTRY/kube-ovn:$VERSION"
command:
- /kube-ovn/kube-ovn-pinger
args:
- --external-address=$PINGER_EXTERNAL_ADDRESS
- --external-dns=$PINGER_EXTERNAL_DOMAIN
- --logtostderr=false
- --alsologtostderr=true
- --log_file=/var/log/kube-ovn/kube-ovn-pinger.log
imagePullPolicy: $IMAGE_PULL_POLICY
securityContext:
runAsUser: 0
privileged: false
env:
- name: ENABLE_SSL
value: "$ENABLE_SSL"
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: POD_IPS
valueFrom:
fieldRef:
fieldPath: status.podIPs
- name: HOST_IP
valueFrom:
fieldRef:
fieldPath: status.hostIP
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- mountPath: /lib/modules
name: host-modules
readOnly: true
- mountPath: /run/openvswitch
name: host-run-ovs
- mountPath: /var/run/openvswitch
name: host-run-ovs
- mountPath: /var/run/ovn
name: host-run-ovn
- mountPath: /sys
name: host-sys
readOnly: true
- mountPath: /etc/openvswitch
name: host-config-openvswitch
- mountPath: /var/log/openvswitch
name: host-log-ovs
- mountPath: /var/log/ovn
name: host-log-ovn
- mountPath: /var/log/kube-ovn
name: kube-ovn-log
- mountPath: /etc/localtime
name: localtime
- mountPath: /var/run/tls
name: kube-ovn-tls
resources:
requests:
cpu: 100m
memory: 100Mi
limits:
cpu: 200m
memory: 400Mi
nodeSelector:
kubernetes.io/os: "linux"
volumes:
- name: host-modules
hostPath:
path: /lib/modules
- name: host-run-ovs
hostPath:
path: /run/openvswitch
- name: host-run-ovn
hostPath:
path: /run/ovn
- name: host-sys
hostPath:
path: /sys
- name: host-config-openvswitch
hostPath:
path: /etc/origin/openvswitch
- name: host-log-ovs
hostPath:
path: /var/log/openvswitch
- name: kube-ovn-log
hostPath:
path: /var/log/kube-ovn
- name: host-log-ovn
hostPath:
path: /var/log/ovn
- name: localtime
hostPath:
path: /etc/localtime
- name: kube-ovn-tls
secret:
optional: true
secretName: kube-ovn-tls
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: kube-ovn-monitor
namespace: kube-system
annotations:
kubernetes.io/description: |
Metrics for OVN components: northd, nb and sb.
spec:
replicas: $count
strategy:
rollingUpdate:
maxSurge: 0
maxUnavailable: 1
type: RollingUpdate
selector:
matchLabels:
app: kube-ovn-monitor
template:
metadata:
labels:
app: kube-ovn-monitor
component: network
type: infra
spec:
tolerations:
- operator: Exists
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app: kube-ovn-monitor
topologyKey: kubernetes.io/hostname
priorityClassName: system-cluster-critical
serviceAccountName: ovn
hostNetwork: true
containers:
- name: kube-ovn-monitor
image: "$REGISTRY/kube-ovn:$VERSION"
imagePullPolicy: $IMAGE_PULL_POLICY
command: ["/kube-ovn/start-ovn-monitor.sh"]
securityContext:
runAsUser: 0
privileged: false
env:
- name: ENABLE_SSL
value: "$ENABLE_SSL"
- name: NODE_IPS
value: $addresses
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
resources:
requests:
cpu: 200m
memory: 200Mi
limits:
cpu: 200m
memory: 200Mi
volumeMounts:
- mountPath: /var/run/openvswitch
name: host-run-ovs
- mountPath: /var/run/ovn
name: host-run-ovn
- mountPath: /sys
name: host-sys
readOnly: true
- mountPath: /etc/openvswitch
name: host-config-openvswitch
- mountPath: /etc/ovn
name: host-config-ovn
- mountPath: /var/log/openvswitch
name: host-log-ovs
- mountPath: /var/log/ovn
name: host-log-ovn
- mountPath: /etc/localtime
name: localtime
- mountPath: /var/run/tls
name: kube-ovn-tls
readinessProbe:
exec:
command:
- cat
- /var/run/ovn/ovnnb_db.pid
periodSeconds: 3
timeoutSeconds: 45
livenessProbe:
exec:
command:
- cat
- /var/run/ovn/ovn-nbctl.pid
initialDelaySeconds: 30
periodSeconds: 10
failureThreshold: 5
timeoutSeconds: 45
nodeSelector:
kubernetes.io/os: "linux"
kube-ovn/role: "master"
volumes:
- name: host-run-ovs
hostPath:
path: /run/openvswitch
- name: host-run-ovn
hostPath:
path: /run/ovn
- name: host-sys
hostPath:
path: /sys
- name: host-config-openvswitch
hostPath:
path: /etc/origin/openvswitch
- name: host-config-ovn
hostPath:
path: /etc/origin/ovn
- name: host-log-ovs
hostPath:
path: /var/log/openvswitch
- name: host-log-ovn
hostPath:
path: /var/log/ovn
- name: localtime
hostPath:
path: /etc/localtime
- name: kube-ovn-tls
secret:
optional: true
secretName: kube-ovn-tls
---
kind: Service
apiVersion: v1
metadata:
name: kube-ovn-monitor
namespace: kube-system
labels:
app: kube-ovn-monitor
spec:
ports:
- name: metrics
port: 10661
type: ClusterIP
${SVC_YAML_IPFAMILYPOLICY}
selector:
app: kube-ovn-monitor
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: kube-ovn-pinger
namespace: kube-system
labels:
app: kube-ovn-pinger
spec:
${SVC_YAML_IPFAMILYPOLICY}
selector:
app: kube-ovn-pinger
ports:
- port: 8080
name: metrics
---
kind: Service
apiVersion: v1
metadata:
name: kube-ovn-controller
namespace: kube-system
labels:
app: kube-ovn-controller
spec:
${SVC_YAML_IPFAMILYPOLICY}
selector:
app: kube-ovn-controller
ports:
- port: 10660
name: metrics
---
kind: Service
apiVersion: v1
metadata:
name: kube-ovn-cni
namespace: kube-system
labels:
app: kube-ovn-cni
spec:
${SVC_YAML_IPFAMILYPOLICY}
selector:
app: kube-ovn-cni
ports:
- port: 10665
name: metrics
EOF
# Deploy the Kube-OVN controller/CNI/pinger/monitor and wait for the
# controller deployment and CNI daemonset to become ready.
kubectl apply -f kube-ovn.yaml
kubectl rollout status deployment/kube-ovn-controller -n kube-system
kubectl rollout status daemonset/kube-ovn-cni -n kube-system
echo "-------------------------------"
echo ""
echo "[Step 4/6] Delete pod that not in host network mode"
# Recreate every restart-always pod that is NOT on the host network so it is
# rescheduled with a network namespace wired up by the new CNI; hostNetwork
# pods keep node networking and are left untouched.
for ns in $(kubectl get ns --no-headers -o custom-columns=NAME:.metadata.name); do
  for pod in $(kubectl get pod --no-headers -n "$ns" --field-selector spec.restartPolicy=Always -o custom-columns=NAME:.metadata.name,HOST:spec.hostNetwork | awk '{if ($2!="true") print $1}'); do
    kubectl delete pod "$pod" -n "$ns" --ignore-not-found
  done
done
# Pinger and CoreDNS come back up once the deleted pods are rescheduled.
kubectl rollout status daemonset/kube-ovn-pinger -n kube-system
kubectl rollout status deployment/coredns -n kube-system
echo "-------------------------------"
echo ""
echo "[Step 5/6] Install kubectl plugin"
mkdir -p /usr/local/bin
# Write the "kubectl ko" plugin to /usr/local/bin. The heredoc delimiter is
# escaped (\EOF) so nothing inside is expanded by the installer shell; the
# plugin body below is copied out verbatim.
cat <<\EOF > /usr/local/bin/kubectl-ko
#!/bin/bash
set -euo pipefail
# Namespace that all Kube-OVN components live in.
KUBE_OVN_NS=kube-system
# Filled in by getOvnCentralPod() with the current NB/SB database leader pods.
OVN_NB_POD=
OVN_SB_POD=
showHelp(){
  # Print the usage summary for every supported subcommand.
  cat <<'KO_USAGE'
kubectl ko {subcommand} [option...]
Available Subcommands:
  [nb|sb] [status|kick|backup]     ovn-db operations show cluster status, kick stale server or backup database
  nbctl [ovn-nbctl options ...]    invoke ovn-nbctl
  sbctl [ovn-sbctl options ...]    invoke ovn-sbctl
  vsctl {nodeName} [ovs-vsctl options ...]    invoke ovs-vsctl on the specified node
  ofctl {nodeName} [ovs-ofctl options ...]    invoke ovs-ofctl on the specified node
  dpctl {nodeName} [ovs-dpctl options ...]    invoke ovs-dpctl on the specified node
  appctl {nodeName} [ovs-appctl options ...]   invoke ovs-appctl on the specified node
  tcpdump {namespace/podname} [tcpdump options ...]     capture pod traffic
  trace {namespace/podname} {target ip address} {icmp|tcp|udp} [target tcp or udp port]    trace ovn microflow of specific packet
  diagnose {all|node} [nodename]    diagnose connectivity of all nodes or a specific node
  reload restart all kube-ovn components
KO_USAGE
}
tcpdump(){
  # Capture a pod's traffic: kubectl ko tcpdump {namespace/podname} [tcpdump options ...]
  namespacedPod="$1"; shift
  namespace=$(echo "$namespacedPod" | cut -d "/" -f1)
  podName=$(echo "$namespacedPod" | cut -d "/" -f2)
  # No "/" in the argument means the pod lives in the default namespace.
  if [ "$podName" = "$namespacedPod" ]; then
    namespace="default"
  fi
  nodeName=$(kubectl get pod "$podName" -n "$namespace" -o jsonpath={.spec.nodeName})
  hostNetwork=$(kubectl get pod "$podName" -n "$namespace" -o jsonpath={.spec.hostNetwork})
  if [ -z "$nodeName" ]; then
    echo "Pod $namespacedPod not exists on any node"
    exit 1
  fi
  # The capture is executed inside the kube-ovn-cni pod on the target pod's node.
  ovnCni=$(kubectl get pod -n $KUBE_OVN_NS -o wide| grep kube-ovn-cni| grep " $nodeName " | awk '{print $1}')
  if [ -z "$ovnCni" ]; then
    echo "kube-ovn-cni not exist on node $nodeName"
    exit 1
  fi
  if [ "$hostNetwork" = "true" ]; then
    # Host-network pod: capture directly in the node's network namespace.
    set -x
    kubectl exec "$ovnCni" -n $KUBE_OVN_NS -- tcpdump -nn "$@"
  else
    # Look up the OVS interface backing the pod, then nsenter its netns to capture.
    nicName=$(kubectl exec "$ovnCni" -n $KUBE_OVN_NS -- ovs-vsctl --data=bare --no-heading --columns=name find interface external-ids:iface-id="$podName"."$namespace" | tr -d '\r')
    if [ -z "$nicName" ]; then
      echo "nic doesn't exist on node $nodeName"
      exit 1
    fi
    podNicType=$(kubectl get pod "$podName" -n "$namespace" -o jsonpath={.metadata.annotations.ovn\\.kubernetes\\.io/pod_nic_type})
    podNetNs=$(kubectl exec "$ovnCni" -n $KUBE_OVN_NS -- ovs-vsctl --data=bare --no-heading get interface "$nicName" external-ids:pod_netns | tr -d '\r' | sed -e 's/^"//' -e 's/"$//')
    set -x
    # internal-port NICs keep their OVS name inside the pod; veth pairs appear as eth0.
    if [ "$podNicType" = "internal-port" ]; then
      kubectl exec "$ovnCni" -n $KUBE_OVN_NS -- nsenter --net="$podNetNs" tcpdump -nn -i "$nicName" "$@"
    else
      kubectl exec "$ovnCni" -n $KUBE_OVN_NS -- nsenter --net="$podNetNs" tcpdump -nn -i eth0 "$@"
    fi
  fi
}
trace(){
  # Trace a packet leaving a pod, first through the OVN logical pipeline
  # (ovn-trace) and then through the node's OVS datapath (ofproto/trace):
  #   kubectl ko trace {namespace/podname} {dst ip} {icmp|tcp|udp} [dst port]
  namespacedPod="$1"
  namespace=$(echo "$1" | cut -d "/" -f1)
  podName=$(echo "$1" | cut -d "/" -f2)
  # No "/" in the argument means the pod lives in the default namespace.
  if [ "$podName" = "$1" ]; then
    namespace="default"
  fi
  dst="$2"
  if [ -z "$dst" ]; then
    echo "need a target ip address"
    exit 1
  fi
  # Address-family selection: a ":" in the target address means IPv6.
  af="4"
  nw="nw"
  proto=""
  if [[ "$dst" =~ .*:.* ]]; then
    af="6"
    nw="ipv6"
    proto="6"
  fi
  podIPs=($(kubectl get pod "$podName" -n "$namespace" -o jsonpath="{.status.podIPs[*].ip}"))
  mac=$(kubectl get pod "$podName" -n "$namespace" -o jsonpath={.metadata.annotations.ovn\\.kubernetes\\.io/mac_address})
  ls=$(kubectl get pod "$podName" -n "$namespace" -o jsonpath={.metadata.annotations.ovn\\.kubernetes\\.io/logical_switch})
  hostNetwork=$(kubectl get pod "$podName" -n "$namespace" -o jsonpath={.spec.hostNetwork})
  nodeName=$(kubectl get pod "$podName" -n "$namespace" -o jsonpath={.spec.nodeName})
  # Host-network pods are not attached to OVN, so there is nothing to trace.
  if [ "$hostNetwork" = "true" ]; then
    echo "Can not trace host network pod"
    exit 1
  fi
  if [ -z "$ls" ]; then
    echo "pod address not ready"
    exit 1
  fi
  # Pick the pod IP that matches the target's address family.
  podIP=""
  for ip in ${podIPs[@]}; do
    if [ "$af" = "4" ]; then
      if [[ ! "$ip" =~ .*:.* ]]; then
        podIP=$ip
        break
      fi
    elif [[ "$ip" =~ .*:.* ]]; then
      podIP=$ip
      break
    fi
  done
  if [ -z "$podIP" ]; then
    echo "Pod has no IPv$af address"
    exit 1
  fi
  # Resolve the gateway MAC. For vlan (underlay) subnets without a logical
  # gateway it is learned live from inside the pod's netns (arping/ndisc6);
  # otherwise it is read from the logical router port in the OVN NB database.
  gwMac=""
  vlan=$(kubectl get subnet "$ls" -o jsonpath={.spec.vlan})
  logicalGateway=$(kubectl get subnet "$ls" -o jsonpath={.spec.logicalGateway})
  if [ ! -z "$vlan" -a "$logicalGateway" != "true" ]; then
    gateway=$(kubectl get subnet "$ls" -o jsonpath={.spec.gateway})
    # Dual-stack gateways are "v4,v6": keep the half matching the family.
    if [[ "$gateway" =~ .*,.* ]]; then
      if [ "$af" = "4" ]; then
        gateway=${gateway%%,*}
      else
        gateway=${gateway##*,}
      fi
    fi
    ovnCni=$(kubectl get pod -n $KUBE_OVN_NS -o wide | grep -w kube-ovn-cni | grep " $nodeName " | awk '{print $1}')
    if [ -z "$ovnCni" ]; then
      echo "No kube-ovn-cni Pod running on node $nodeName"
      exit 1
    fi
    nicName=$(kubectl exec "$ovnCni" -n $KUBE_OVN_NS -- ovs-vsctl --data=bare --no-heading --columns=name find interface external-ids:iface-id="$podName"."$namespace" | tr -d '\r')
    if [ -z "$nicName" ]; then
      echo "nic doesn't exist on node $nodeName"
      exit 1
    fi
    podNicType=$(kubectl get pod "$podName" -n "$namespace" -o jsonpath={.metadata.annotations.ovn\\.kubernetes\\.io/pod_nic_type})
    podNetNs=$(kubectl exec "$ovnCni" -n $KUBE_OVN_NS -- ovs-vsctl --data=bare --no-heading get interface "$nicName" external-ids:pod_netns | tr -d '\r' | sed -e 's/^"//' -e 's/"$//')
    if [ "$podNicType" != "internal-port" ]; then
      nicName="eth0"
    fi
    # IPv6 gateways are resolved with ndisc6, IPv4 ones with arping.
    if [[ "$gateway" =~ .*:.* ]]; then
      cmd="ndisc6 -q $gateway $nicName"
      output=$(kubectl exec "$ovnCni" -n $KUBE_OVN_NS -- nsenter --net="$podNetNs" ndisc6 -q "$gateway" "$nicName")
    else
      cmd="arping -c3 -C1 -i1 -I $nicName $gateway"
      output=$(kubectl exec "$ovnCni" -n $KUBE_OVN_NS -- nsenter --net="$podNetNs" arping -c3 -C1 -i1 -I "$nicName" "$gateway")
    fi
    if [ $? -ne 0 ]; then
      echo "failed to run '$cmd' in Pod's netns"
      exit 1
    fi
    gwMac=$(echo "$output" | grep -o -E '([[:xdigit:]]{1,2}:){5}[[:xdigit:]]{1,2}')
  else
    lr=$(kubectl get pod "$podName" -n "$namespace" -o jsonpath={.metadata.annotations.ovn\\.kubernetes\\.io/logical_router})
    if [ -z "$lr" ]; then
      lr=$(kubectl get subnet "$ls" -o jsonpath={.spec.vpc})
    fi
    gwMac=$(kubectl exec $OVN_NB_POD -n $KUBE_OVN_NS -c ovn-central -- ovn-nbctl --data=bare --no-heading --columns=mac find logical_router_port name="$lr"-"$ls" | tr -d '\r')
  fi
  if [ -z "$gwMac" ]; then
    echo "get gw mac failed"
    exit 1
  fi
  type="$3"
  # Phase 1: logical trace through the OVN pipeline on the SB leader pod.
  case $type in
    icmp)
      set -x
      kubectl exec "$OVN_SB_POD" -n $KUBE_OVN_NS -c ovn-central -- ovn-trace --ct=new "$ls" "inport == \"$podName.$namespace\" && ip.ttl == 64 && icmp && eth.src == $mac && ip$af.src == $podIP && eth.dst == $gwMac && ip$af.dst == $dst"
      ;;
    tcp|udp)
      set -x
      kubectl exec "$OVN_SB_POD" -n $KUBE_OVN_NS -c ovn-central -- ovn-trace --ct=new "$ls" "inport == \"$podName.$namespace\" && ip.ttl == 64 && eth.src == $mac && ip$af.src == $podIP && eth.dst == $gwMac && ip$af.dst == $dst && $type.src == 10000 && $type.dst == $4"
      ;;
    *)
      echo "type $type not supported"
      echo "kubectl ko trace {namespace/podname} {target ip address} {icmp|tcp|udp} [target tcp or udp port]"
      exit 1
      ;;
  esac
  set +x
  echo "--------"
  echo "Start OVS Tracing"
  echo ""
  echo ""
  # Phase 2: datapath trace on the node's ovs-ovn pod.
  ovsPod=$(kubectl get pod -n $KUBE_OVN_NS -o wide | grep " $nodeName " | grep ovs-ovn | awk '{print $1}')
  if [ -z "$ovsPod" ]; then
    echo "ovs pod doesn't exist on node $nodeName"
    exit 1
  fi
  inPort=$(kubectl exec "$ovsPod" -n $KUBE_OVN_NS -- ovs-vsctl --format=csv --data=bare --no-heading --columns=ofport find interface external_id:iface-id="$podName"."$namespace")
  case $type in
    icmp)
      set -x
      kubectl exec "$ovsPod" -n $KUBE_OVN_NS -- ovs-appctl ofproto/trace br-int "in_port=$inPort,icmp$proto,${nw}_src=$podIP,${nw}_dst=$dst,dl_src=$mac,dl_dst=$gwMac"
      ;;
    tcp|udp)
      set -x
      # NOTE(review): the OVN trace above uses source port 10000 while this OVS
      # trace uses 1000 -- looks inconsistent; confirm the intended value.
      kubectl exec "$ovsPod" -n $KUBE_OVN_NS -- ovs-appctl ofproto/trace br-int "in_port=$inPort,$type$proto,${nw}_src=$podIP,${nw}_dst=$dst,dl_src=$mac,dl_dst=$gwMac,${type}_src=1000,${type}_dst=$4"
      ;;
    *)
      echo "type $type not supported"
      echo "kubectl ko trace {namespace/podname} {target ip address} {icmp|tcp|udp} [target tcp or udp port]"
      exit 1
      ;;
  esac
}
xxctl(){
  # Run ovs-{vsctl|ofctl|dpctl|appctl} inside the ovs-ovn pod on a given node:
  #   kubectl ko {vsctl|ofctl|dpctl|appctl} {nodeName} [tool options ...]
  local ctl="$1"; shift
  local node="$1"; shift
  # Fail fast (set -e) if the node does not exist.
  kubectl get no "$node" > /dev/null
  local pod
  pod=$(kubectl get pod -n $KUBE_OVN_NS -o wide | grep " $node " | grep ovs-ovn | awk '{print $1}')
  if [ -z "$pod" ]; then
    echo "ovs pod doesn't exist on node $node"
    exit 1
  fi
  kubectl exec "$pod" -n $KUBE_OVN_NS -- ovs-$ctl "$@"
}
checkLeader(){
  # Assert that exactly one leader endpoint exists for ovn-{nb|sb|northd}.
  local role="$1"; shift
  local leaders
  # The leader service endpoint lists one "ip" line per leader pod.
  leaders=$(kubectl get ep ovn-$role -n $KUBE_OVN_NS -o yaml | grep ip | wc -l)
  if [ "$leaders" -gt 1 ]; then
    echo "ovn-$role has more than one leader !!"
    exit 1
  elif [ "$leaders" -eq 0 ]; then
    echo "no ovn-$role exists !!"
    exit 1
  fi
  echo "ovn-$role leader check ok"
}
diagnose(){
  # Cluster-wide health check: verify CRDs, core services, RBAC, the OVN
  # logical topology, and every Kube-OVN workload, then run the pinger
  # diagnostics on all nodes ("all") or one node ("node <name>").
  kubectl get crd vpcs.kubeovn.io
  kubectl get crd vpc-nat-gateways.kubeovn.io
  kubectl get crd subnets.kubeovn.io
  kubectl get crd ips.kubeovn.io
  kubectl get crd vlans.kubeovn.io
  kubectl get crd provider-networks.kubeovn.io
  kubectl get svc kube-dns -n kube-system
  kubectl get svc kubernetes -n default
  kubectl get sa -n kube-system ovn
  kubectl get clusterrole system:ovn
  kubectl get clusterrolebinding ovn
  kubectl get no -o wide
  kubectl ko nbctl show
  kubectl ko nbctl lr-policy-list ovn-cluster
  kubectl ko nbctl lr-route-list ovn-cluster
  kubectl ko nbctl ls-lb-list ovn-default
  kubectl ko nbctl list address_set
  kubectl ko nbctl list acl
  kubectl ko sbctl show
  checkKubeProxy
  checkDeployment ovn-central
  checkDeployment kube-ovn-controller
  checkDaemonSet kube-ovn-cni
  checkDaemonSet ovs-ovn
  checkDeployment coredns
  checkLeader nb
  checkLeader sb
  checkLeader northd
  type="$1"
  case $type in
    all)
      echo "### kube-ovn-controller recent log"
      # grep exits non-zero when nothing matches; relax -e around it.
      set +e
      # Only today's error lines (klog errors are prefixed "E<mmdd>").
      kubectl logs -n $KUBE_OVN_NS -l app=kube-ovn-controller --tail=100 | grep E$(date +%m%d)
      set -e
      echo ""
      pingers=$(kubectl -n $KUBE_OVN_NS get po --no-headers -o custom-columns=NAME:.metadata.name -l app=kube-ovn-pinger)
      for pinger in $pingers
      do
        nodeName=$(kubectl get pod "$pinger" -n "$KUBE_OVN_NS" -o jsonpath={.spec.nodeName})
        echo "### start to diagnose node $nodeName"
        echo "#### ovn-controller log:"
        kubectl exec -n $KUBE_OVN_NS "$pinger" -- tail /var/log/ovn/ovn-controller.log
        echo ""
        echo "#### ovs-vswitchd log:"
        kubectl exec -n $KUBE_OVN_NS "$pinger" -- tail /var/log/openvswitch/ovs-vswitchd.log
        echo ""
        echo "#### ovs-vsctl show results:"
        kubectl exec -n $KUBE_OVN_NS "$pinger" -- ovs-vsctl show
        echo ""
        echo "#### pinger diagnose results:"
        kubectl exec -n $KUBE_OVN_NS "$pinger" -- /kube-ovn/kube-ovn-pinger --mode=job
        echo "### finish diagnose node $nodeName"
        echo ""
      done
      ;;
    node)
      nodeName="$2"
      kubectl get no "$nodeName" > /dev/null
      # Find the pinger pod scheduled on the requested node.
      pinger=$(kubectl -n $KUBE_OVN_NS get po -l app=kube-ovn-pinger -o 'jsonpath={.items[?(@.spec.nodeName=="'$nodeName'")].metadata.name}')
      if [ ! -n "$pinger" ]; then
        echo "Error: No kube-ovn-pinger running on node $nodeName"
        exit 1
      fi
      echo "### start to diagnose node $nodeName"
      echo "#### ovn-controller log:"
      kubectl exec -n $KUBE_OVN_NS "$pinger" -- tail /var/log/ovn/ovn-controller.log
      echo ""
      echo "#### ovs-vswitchd log:"
      kubectl exec -n $KUBE_OVN_NS "$pinger" -- tail /var/log/openvswitch/ovs-vswitchd.log
      echo ""
      kubectl exec -n $KUBE_OVN_NS "$pinger" -- /kube-ovn/kube-ovn-pinger --mode=job
      echo "### finish diagnose node $nodeName"
      echo ""
      ;;
    *)
      echo "type $type not supported"
      echo "kubectl ko diagnose {all|node} [nodename]"
      ;;
  esac
}
getOvnCentralPod(){
  # Resolve the current ovn-central leader pods for the NB and SB databases
  # (labelled ovn-nb-leader / ovn-sb-leader by the leader-election sidecar)
  # and cache their names in the OVN_NB_POD / OVN_SB_POD globals.
  NB_POD=$(kubectl get pod -n $KUBE_OVN_NS -l ovn-nb-leader=true | grep ovn-central | head -n 1 | awk '{print $1}')
  if [ -z "$NB_POD" ]; then
    echo "nb leader not exists"
    exit 1
  fi
  OVN_NB_POD=$NB_POD
  SB_POD=$(kubectl get pod -n $KUBE_OVN_NS -l ovn-sb-leader=true | grep ovn-central | head -n 1 | awk '{print $1}')
  if [ -z "$SB_POD" ]; then
    # Fixed copy-paste bug: this branch previously reported "nb leader not
    # exists" even though it is the SB leader that is missing.
    echo "sb leader not exists"
    exit 1
  fi
  OVN_SB_POD=$SB_POD
}
checkDaemonSet(){
  # Verify that daemonset "$1" has every desired pod scheduled, available
  # and ready; exit non-zero otherwise.
  local ds="$1"
  local scheduled desired avail rdy
  scheduled=$(kubectl get ds -n $KUBE_OVN_NS "$ds" -o jsonpath={.status.currentNumberScheduled})
  desired=$(kubectl get ds -n $KUBE_OVN_NS "$ds" -o jsonpath={.status.desiredNumberScheduled})
  avail=$(kubectl get ds -n $KUBE_OVN_NS "$ds" -o jsonpath={.status.numberAvailable})
  rdy=$(kubectl get ds -n $KUBE_OVN_NS "$ds" -o jsonpath={.status.numberReady})
  # Not ready as soon as any of the four counters disagrees.
  if [ "$scheduled" != "$desired" ] || [ "$desired" != "$avail" ] || [ "$avail" != "$rdy" ]; then
    echo "Error ds $ds not ready"
    exit 1
  fi
  echo "ds $ds ready"
}
checkDeployment(){
  # Verify that deployment "$1" has all replicas updated, available and
  # ready; exit non-zero otherwise.
  local dep="$1"
  local rdy upd want avail
  rdy=$(kubectl get deployment -n $KUBE_OVN_NS "$dep" -o jsonpath={.status.readyReplicas})
  upd=$(kubectl get deployment -n $KUBE_OVN_NS "$dep" -o jsonpath={.status.updatedReplicas})
  want=$(kubectl get deployment -n $KUBE_OVN_NS "$dep" -o jsonpath={.status.replicas})
  avail=$(kubectl get deployment -n $KUBE_OVN_NS "$dep" -o jsonpath={.status.availableReplicas})
  # Not ready as soon as any of the four counters disagrees.
  if [ "$rdy" != "$upd" ] || [ "$upd" != "$want" ] || [ "$want" != "$avail" ]; then
    echo "Error deployment $dep not ready"
    exit 1
  fi
  echo "deployment $dep ready"
}
checkKubeProxy(){
  # Verify kube-proxy health. If it runs as a DaemonSet reuse the generic
  # DaemonSet readiness check; otherwise probe each node's healthz endpoint
  # (port 10256) directly.
  dsMode=`kubectl get ds -n kube-system | grep kube-proxy || true`
  if [ -z "$dsMode" ]; then
    nodeIps=`kubectl get node -o wide | grep -v "INTERNAL-IP" | awk '{print $6}'`
    for node in $nodeIps
    do
      # NOTE(review): curl is forced to IPv6 (-6) and the address is always
      # bracketed -- presumably for IPv6/dual-stack clusters; confirm this
      # also behaves as intended where node INTERNAL-IP is IPv4-only.
      healthResult=`curl -g -6 -sL -w %{http_code} http://[$node]:10256/healthz -o /dev/null | grep -v 200 || true`
      if [ -n "$healthResult" ]; then
        echo "$node kube-proxy's health check failed"
        exit 1
      fi
    done
  else
    checkDaemonSet kube-proxy
  fi
  echo "kube-proxy ready"
}
dbtool(){
  # ovsdb maintenance helper: kubectl ko {nb|sb} {status|kick|backup} [args].
  # All commands run inside the current NB/SB leader pod resolved earlier by
  # getOvnCentralPod().
  suffix=$(date +%m%d%H%M%s)
  component="$1"; shift
  action="$1"; shift
  case $component in
    nb)
      case $action in
        status)
          kubectl exec "$OVN_NB_POD" -n $KUBE_OVN_NS -c ovn-central -- ovs-appctl -t /var/run/ovn/ovnnb_db.ctl cluster/status OVN_Northbound
          kubectl exec "$OVN_NB_POD" -n $KUBE_OVN_NS -c ovn-central -- ovs-appctl -t /var/run/ovn/ovnnb_db.ctl ovsdb-server/get-db-storage-status OVN_Northbound
          ;;
        kick)
          # "$1" is the raft server ID to evict from the cluster.
          kubectl exec "$OVN_NB_POD" -n $KUBE_OVN_NS -c ovn-central -- ovs-appctl -t /var/run/ovn/ovnnb_db.ctl cluster/kick OVN_Northbound "$1"
          ;;
        backup)
          # Convert the clustered DB to standalone format inside the pod,
          # copy the snapshot to the current directory, then clean up.
          kubectl exec "$OVN_NB_POD" -n $KUBE_OVN_NS -c ovn-central -- ovsdb-tool cluster-to-standalone /etc/ovn/ovnnb_db.$suffix.backup /etc/ovn/ovnnb_db.db
          kubectl cp $KUBE_OVN_NS/$OVN_NB_POD:/etc/ovn/ovnnb_db.$suffix.backup $(pwd)/ovnnb_db.$suffix.backup
          kubectl exec "$OVN_NB_POD" -n $KUBE_OVN_NS -c ovn-central -- rm -f /etc/ovn/ovnnb_db.$suffix.backup
          echo "backup $component to $(pwd)/ovnnb_db.$suffix.backup"
          ;;
        *)
          echo "unknown action $action"
      esac
      ;;
    sb)
      case $action in
        status)
          kubectl exec "$OVN_SB_POD" -n $KUBE_OVN_NS -c ovn-central -- ovs-appctl -t /var/run/ovn/ovnsb_db.ctl cluster/status OVN_Southbound
          kubectl exec "$OVN_SB_POD" -n $KUBE_OVN_NS -c ovn-central -- ovs-appctl -t /var/run/ovn/ovnsb_db.ctl ovsdb-server/get-db-storage-status OVN_Southbound
          ;;
        kick)
          kubectl exec "$OVN_SB_POD" -n $KUBE_OVN_NS -c ovn-central -- ovs-appctl -t /var/run/ovn/ovnsb_db.ctl cluster/kick OVN_Southbound "$1"
          ;;
        backup)
          kubectl exec "$OVN_SB_POD" -n $KUBE_OVN_NS -c ovn-central -- ovsdb-tool cluster-to-standalone /etc/ovn/ovnsb_db.$suffix.backup /etc/ovn/ovnsb_db.db
          kubectl cp $KUBE_OVN_NS/$OVN_SB_POD:/etc/ovn/ovnsb_db.$suffix.backup $(pwd)/ovnsb_db.$suffix.backup
          kubectl exec "$OVN_SB_POD" -n $KUBE_OVN_NS -c ovn-central -- rm -f /etc/ovn/ovnsb_db.$suffix.backup
          echo "backup $component to $(pwd)/ovnsb_db.$suffix.backup"
          ;;
        *)
          echo "unknown action $action"
      esac
      ;;
    *)
      echo "unknown subcommand $component"
  esac
}
reload(){
  # Bounce every Kube-OVN component, waiting for each deployment/daemonset
  # to settle before moving on (ovs pods have no rollout to wait for).
  restartAndWait(){
    kubectl delete pod -n kube-system -l "app=$1"
    kubectl rollout status "$2" -n kube-system
  }
  restartAndWait ovn-central deployment/ovn-central
  kubectl delete pod -n kube-system -l app=ovs
  restartAndWait kube-ovn-controller deployment/kube-ovn-controller
  restartAndWait kube-ovn-cni daemonset/kube-ovn-cni
  restartAndWait kube-ovn-pinger daemonset/kube-ovn-pinger
  restartAndWait kube-ovn-monitor deployment/kube-ovn-monitor
}
# ---- entrypoint ----
# With no arguments show usage; otherwise the first argument selects the
# subcommand and the remaining arguments are forwarded to it.
if [ $# -lt 1 ]; then
  showHelp
  exit 0
else
  subcommand="$1"; shift
fi
# NOTE(review): the NB/SB leader pods are resolved before dispatch for every
# subcommand, so the plugin fails early when ovn-central has no leader --
# even for subcommands (e.g. reload) that may not need the leader pods.
getOvnCentralPod
case $subcommand in
  nbctl)
    kubectl exec "$OVN_NB_POD" -n $KUBE_OVN_NS -c ovn-central -- ovn-nbctl "$@"
    ;;
  sbctl)
    kubectl exec "$OVN_SB_POD" -n $KUBE_OVN_NS -c ovn-central -- ovn-sbctl "$@"
    ;;
  vsctl|ofctl|dpctl|appctl)
    xxctl "$subcommand" "$@"
    ;;
  nb|sb)
    dbtool "$subcommand" "$@"
    ;;
  tcpdump)
    tcpdump "$@"
    ;;
  trace)
    trace "$@"
    ;;
  diagnose)
    diagnose "$@"
    ;;
  reload)
    reload
    ;;
  *)
    showHelp
    ;;
esac
EOF
# Make the plugin executable and ensure /usr/local/bin is on PATH so that
# "kubectl ko" resolves for the diagnose step below.
chmod +x /usr/local/bin/kubectl-ko
echo "-------------------------------"
echo ""
if ! sh -c "echo \":$PATH:\" | grep -q \":/usr/local/bin:\""; then
  echo "Tips:Please join the /usr/local/bin to your PATH. Temporarily, we do it for this execution."
  export PATH=/usr/local/bin:$PATH
  echo "-------------------------------"
  echo ""
fi
echo "[Step 6/6] Run network diagnose"
kubectl ko diagnose all
echo "-------------------------------"
echo "
,,,,
,::,
,,::,,,,
,,,,,::::::::::::,,,,,
,,,::::::::::::::::::::::,,,
,,::::::::::::::::::::::::::::,,
,,::::::::::::::::::::::::::::::::,,
,::::::::::::::::::::::::::::::::::::,
,:::::::::::::,, ,,:::::,,,::::::::::,
,,:::::::::::::, ,::, ,:::::::::,
,:::::::::::::, :x, ,:: :, ,:::::::::,
,:::::::::::::::, ,,, ,::, ,, ,::::::::::,
,:::::::::::::::::,,,,,,:::::,,,,::::::::::::, ,:, ,:, ,xx, ,:::::, ,:, ,:: :::, ,x
,::::::::::::::::::::::::::::::::::::::::::::, :x: ,:xx: , :xx, :xxxxxxxxx, :xx, ,xx:,xxxx, :x
,::::::::::::::::::::::::::::::::::::::::::::, :xxxxx:, ,xx, :x: :xxx:x::, ::xxxx: :xx:, ,:xxx :xx, ,xx: ,xxxxx:, :x
,::::::::::::::::::::::::::::::::::::::::::::, :xxxxx, :xx, :x: :xxx,,:xx,:xx:,:xx, ,,,,,,,,,xxx, ,xx: :xx:xx: ,xxx,:xx::x
,::::::,,::::::::,,::::::::,,:::::::,,,::::::, :x:,xxx: ,xx, :xx :xx: ,xx,xxxxxx:, ,xxxxxxx:,xxx:, ,xxx, :xxx: ,xxx, :xxxx
,::::, ,::::, ,:::::, ,,::::, ,::::, :x: ,:xx,,:xx::xxxx,,xxx::xx: :xx::::x: ,,,,,, ,xxxxxxxxx, ,xx: ,xxx, :xxx
,::::, ,::::, ,::::, ,::::, ,::::, ,:, ,:, ,,::,,:, ,::::,, ,:::::, ,,:::::, ,, :x: ,::
,::::, ,::::, ,::::, ,::::, ,::::,
,,,,, ,::::, ,::::, ,::::, ,:::, ,,,,,,,,,,,,,
,::::, ,::::, ,::::, ,:::, ,,,:::::::::::::::,
,::::, ,::::, ,::::, ,::::, ,,,,:::::::::,,,,,,,:::,
,::::, ,::::, ,::::, ,::::::::::::,,,,,
,,,, ,::::, ,,,, ,,,::::,,,,
,::::,
,,::,
"
echo "Thanks for choosing Kube-OVN!
For more advanced features, please read https://github.com/kubeovn/kube-ovn#documents
If you have any question, please file an issue https://github.com/kubeovn/kube-ovn/issues/new/choose"
|
import React from 'react';
import { connect } from 'dva';
import { Tree } from 'antd';
import PropTypes from 'prop-types';
const TreeNode = Tree.TreeNode;
const ResTree = ({ restree, onDrop } ) => {
const loop = data => data.map((item) => {
if (item.children && item.children.length) {
return <TreeNode key={item.key} title={item.title}>{loop(item.children)}</TreeNode>;
}
return <TreeNode showIcon={true} key={item.key} title={item.title} />;
});
const onDragEnter = (info) => {
debugger;
console.log(info)
}
return (
<Tree
className="draggable-tree"
draggable
onDrop={onDrop}
onDragEnter={onDragEnter}
>
{loop(restree)}
</Tree>
);
}
ResTree.propTypes = {
restree: PropTypes.array.isRequired,
onDrop: PropTypes.func.isRequired
}
export default connect(({ restree }) => ({
restree
}))( ResTree );
|
using System;
using System.Collections.Generic;
public class BitmapPixelChanges
{
    // Maps a coordinate to its replacement color. NOTE(review): coordinates
    // appear to be single-int encodings of pixel positions — confirm the
    // encoding with the caller.
    private Dictionary<int, Color> pixelChanges;

    public BitmapPixelChanges(int[] coordinates, Color[] colors)
    {
        if (coordinates.Length != colors.Length)
        {
            throw new ArgumentException("Coordinate and color arrays must have the same length.");
        }

        pixelChanges = new Dictionary<int, Color>();
        int count = coordinates.Length;
        for (int index = 0; index < count; index++)
        {
            // Add (not indexer) so a duplicate coordinate surfaces as an exception.
            pixelChanges.Add(coordinates[index], colors[index]);
        }
    }

    // Returns the accumulated coordinate -> color mapping.
    public Dictionary<int, Color> ProcessChanges()
    {
        return pixelChanges;
    }
}
#!/bin/bash
# BUG FIX: the shebang must be the very first line of the file; in the original
# it sat below four comment lines and was therefore ignored by the kernel.
#
# This script deploys the yelb application on a single cloud instance.
# It is enough to open port 80 on this instance and connect to its IP/FQDN.
# Note some of these scripts require you to input the proper endpoints.
# However these scripts have a default to "localhost" should no variable be set, so by default they work on a single instance deployment.
curl https://raw.githubusercontent.com/mreferre/yelb/master/deployments/platformdeployment/Linux/redis-server.sh | bash
curl https://raw.githubusercontent.com/mreferre/yelb/master/deployments/platformdeployment/Linux/yelb-db.sh | bash
curl https://raw.githubusercontent.com/mreferre/yelb/master/deployments/platformdeployment/Linux/yelb-appserver.sh | bash
# 169.254.169.254 is the EC2 instance metadata service; this resolves the
# instance's public hostname so the UI can reach the app server.
export YELB_APPSERVER_ENDPOINT=$(curl http://169.254.169.254/latest/meta-data/public-hostname)
curl https://raw.githubusercontent.com/mreferre/yelb/master/deployments/platformdeployment/Linux/yelb-ui.sh | bash
|
<gh_stars>1-10
// 226. 翻转二叉树
// https://leetcode-cn.com/problems/invert-binary-tree/
package question226
import (
"testing"
)
// Test_invertTree builds a sample tree, inverts it, and logs it in pre-order
// so the mirrored layout can be inspected in verbose test output.
// NOTE(review): this test only logs values — it asserts nothing, so it cannot
// fail on an incorrect inversion.
func Test_invertTree(t *testing.T) {
	// Input tree:      4
	//                /   \
	//               2     7
	//              / \   / \
	//             1   3 6   9
	t1 := &TreeNode{
		Val: 4,
		Left: &TreeNode{
			Val: 2,
			Left: &TreeNode{
				Val: 1,
			},
			Right: &TreeNode{
				Val: 3,
			},
		},
		Right: &TreeNode{
			Val: 7,
			Left: &TreeNode{
				Val: 6,
			},
			Right: &TreeNode{
				Val: 9,
			},
		},
	}
	// Recursive pre-order printer (root, then left, then right).
	var printTree func(root *TreeNode)
	printTree = func(root *TreeNode) {
		if root == nil {
			return
		}
		t.Log(root.Val)
		printTree(root.Left)
		printTree(root.Right)
	}
	printTree(invertTree(t1))
}
// TreeNode is a node of a binary tree of ints.
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}

// invertTree mirrors the tree in place and returns its root.
func invertTree(root *TreeNode) *TreeNode {
	if root == nil {
		return nil
	}
	// Swap the children first, then descend into each (order is irrelevant
	// to the result; every subtree ends up mirrored either way).
	root.Left, root.Right = root.Right, root.Left
	invertTree(root.Left)
	invertTree(root.Right)
	return root
}
|
/**
* Created by <EMAIL> on 2019/3/14.
*/
import "./style.less";
import React,{PureComponent} from 'react';
export default class Steps extends PureComponent{
constructor(props){
super(props);
}
render(){
const {total,idx} = this.props;
return (
<div className="steps-wrapper">
{
[...Array(total)].map((item,key)=>{
return <div key={key} className={idx === key ? "active" : ""}/>
})
}
</div>
)
}
}
|
require 'rails_helper'
RSpec.describe "inventory_adjustments/show", type: :view do
  before(:each) do
    # Fixture rendered by the view. The three belongs_to associations are nil,
    # so only the numeric attributes can appear in the rendered output.
    @inventory_adjustment = assign(:inventory_adjustment, InventoryAdjustment.create!(
      :inventory_tally => nil,
      :purchase => nil,
      :box_item => nil,
      :total_cost => 2,
      :adjustment_quantity => 5
    ))
  end

  it "renders attributes in <p>" do
    render
    # NOTE(review): an empty regex // matches ANY string, so the next three
    # expectations can never fail — they are scaffold placeholders for the
    # nil associations and should be filled in or removed.
    expect(rendered).to match(//)
    expect(rendered).to match(//)
    expect(rendered).to match(//)
    expect(rendered).to match(/2/)
    expect(rendered).to match(/5/)
  end
end
|
#!/bin/sh
##
## Copyright (c) 2014 The WebM project authors. All Rights Reserved.
##
## Use of this source code is governed by a BSD-style license
## that can be found in the LICENSE file in the root of the source
## tree. An additional intellectual property rights grant can be found
## in the file PATENTS. All contributing project authors may
## be found in the AUTHORS file in the root of the source tree.
##
## This file tests the libvpx postproc example code. To add new tests to this
## file, do the following:
## 1. Write a shell function (this is your test).
## 2. Add the function to postproc_tests (on a new line).
##
. $(dirname $0)/tools_common.sh
# Environment check: both $VP8_IVF_FILE and $VP9_IVF_FILE must exist on disk;
# prints one message and fails as soon as either is missing.
postproc_verify_environment() {
  for required_input in "${VP8_IVF_FILE}" "${VP9_IVF_FILE}"; do
    if [ ! -e "${required_input}" ]; then
      echo "Libvpx test data must exist in LIBVPX_TEST_DATA_PATH."
      return 1
    fi
  done
}
# Runs postproc using $1 as input file. $2 is the codec name, and is used
# solely to name the output file.
postproc() {
  local decoder="${LIBVPX_BIN_PATH}/postproc${VPX_TEST_EXE_SUFFIX}"
  local input_file="$1"
  local codec="$2"
  local output_file="${VPX_TEST_OUTPUT_DIR}/postproc_${codec}.raw"

  if [ ! -x "${decoder}" ]; then
    elog "${decoder} does not exist or is not executable."
    return 1
  fi

  # eval so that ${devnull} (a redirection string set by tools_common.sh) is
  # re-parsed as shell syntax rather than passed as an argument.
  eval "${VPX_TEST_PREFIX}" "${decoder}" "${input_file}" "${output_file}" \
      ${devnull} || return 1

  # Success additionally requires that the decoder produced the output file.
  [ -e "${output_file}" ] || return 1
}
# Runs the postproc example on the VP8 input, but only when a VP8 decoder was
# built; otherwise a silent no-op (and an implicit pass).
postproc_vp8() {
  if [ "$(vp8_decode_available)" = "yes" ]; then
    postproc "${VP8_IVF_FILE}" vp8 || return 1
  fi
}

# Same for VP9; additionally requires the build to have enabled
# CONFIG_VP9_POSTPROC, since postprocessing is optional for VP9.
postproc_vp9() {
  if [ "$(vpx_config_option_enabled CONFIG_VP9_POSTPROC)" = "yes" ]; then
    if [ "$(vp9_decode_available)" = "yes" ]; then
      postproc "${VP9_IVF_FILE}" vp9 || return 1
    fi
  fi
}
postproc_tests="postproc_vp8
postproc_vp9"
run_tests postproc_verify_environment "${postproc_tests}"
|
# Multiples of 3 in [1, 100].
# BUG FIX: the original bound the result to the name `list`, shadowing the
# built-in list type for the rest of the module.
multiples_of_three = [x for x in range(1, 101) if x % 3 == 0]
print(multiples_of_three)
<gh_stars>1-10
/* Author: <NAME>
* Created: 01-01-2021 13:02:07
*/
#include <stdio.h>

/* Entry point: prints a greeting (with trailing newline) and exits 0. */
int main(void)
{
    puts("Hello World");
    return 0;
}
def to_palindrome(string):
    """Build a palindrome from ``string`` by mirroring its left half.

    The left half and the middle character are kept; the right half is
    replaced with the reversed left half, e.g. "hello" -> "heleh".

    Args:
        string: Input string (may be empty).

    Returns:
        A palindrome whose first ``len(string) // 2 + 1`` characters come
        from ``string``; the empty string maps to itself.
    """
    if len(string) == 0:
        return string
    mid = len(string) // 2
    left = string[:mid]
    # BUG FIX: the original mirrored the *right* half (right[::-1]) and
    # appended it, which simply reproduced the input instead of producing a
    # palindrome. Mirroring the left half guarantees symmetry.
    return left + string[mid] + left[::-1]
<gh_stars>1-10
import { Container } from "./container";
import { Measure } from "../measure";
/**
* Class used to create a 2D stack panel container
*/
/**
 * Class used to create a 2D stack panel container
 */
export declare class StackPanel extends Container {
    name?: string | undefined;
    private _isVertical;
    private _manualWidth;
    private _manualHeight;
    private _doNotTrackManualChanges;
    /**
     * Gets or sets a boolean indicating that layout warnings should be ignored
     */
    ignoreLayoutWarnings: boolean;
    /** Gets or sets a boolean indicating if the stack panel is vertical or horizontal*/
    get isVertical(): boolean;
    set isVertical(value: boolean);
    /**
     * Gets or sets panel width.
     * This value should not be set when in horizontal mode as it will be computed automatically
     */
    set width(value: string | number);
    get width(): string | number;
    /**
     * Gets or sets panel height.
     * This value should not be set when in vertical mode as it will be computed automatically
     */
    set height(value: string | number);
    get height(): string | number;
    /**
     * Creates a new StackPanel
     * @param name defines control name
     */
    constructor(name?: string | undefined);
    protected _getTypeName(): string;
    /** @hidden */
    protected _preMeasure(parentMeasure: Measure, context: CanvasRenderingContext2D): void;
    /** Measurement pass hooks inherited from Container (declarations only). */
    protected _additionalProcessing(parentMeasure: Measure, context: CanvasRenderingContext2D): void;
    protected _postMeasure(): void;
}
|
#!/bin/bash
#
# Copyright contributors to the ibm-storage-odf-operator project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
set -e

# Image names/tags and the opm binary location come from these helpers.
source hack/common.sh
source hack/ensure-opm.sh

echo "Creating an index image with the Operator bundle image injected..."
# -u/-p select docker as both the pull tool and the container build tool.
${OPM_BIN} -u docker -p docker index add --bundles "${BUNDLE_FULL_IMAGE_NAME}" --tag "${CATALOG_FULL_IMAGE_NAME}"

echo
echo "Pushing the index image to image registry..."
docker push "${CATALOG_FULL_IMAGE_NAME}"
|
#!/bin/bash
# Deletes the application buckets from a Couchbase host.
# Usage: script [host [user [password]]] — omitted arguments keep the
# defaults below.

# default values
hostName="localhost"
userName="Administrator"
password="password"
bucketName="cloudpass"
bucketPort="11211"
cbDefaultBucketName="cloudpass"
sessionBucketName="appsession"

# verify default and exit on missing values
if [ $# -eq 0 ] || [ "$1" == "" ]; then
    echo "No parameter passed, use default for all."
else
    # BUG FIX: shell assignments must not have spaces around '='.
    # 'hostName = $1' tried to run a command named 'hostName' and left the
    # defaults in place.
    hostName=$1
    if [ "$2" != "" ]; then
        userName=$2
    fi
    if [ "$3" != "" ]; then
        password=$3
    fi
fi

# echo the final decision on values
echo "==========================="
echo "Using the following"
echo ""
echo " host = $hostName"
echo " user = $userName"
echo " password = $password"
echo " bucketName = $bucketName"
echo " bucketPort = $bucketPort"

# process: listing the buckets doubles as a connectivity check.
echo "==========================="
echo "Buckets on Host : $hostName"
echo ""
couchbase-cli bucket-list -c $hostName:8091
RESULT=$?
if [ $RESULT -ne 0 ]; then
    echo "hostName ($hostName) is invalid."
    exit 1
fi

echo "==========================="
echo "Delete bucket : $cbDefaultBucketName"
echo ""
couchbase-cli bucket-delete \
    -c $hostName:8091 \
    --user=$userName \
    --password=$password \
    --bucket=$cbDefaultBucketName
RESULT=$?
if [ $RESULT -ne 0 ]; then
    echo "Delete bucket failed."
fi

echo "==========================="
echo "Delete bucket : $sessionBucketName"
echo ""
couchbase-cli bucket-delete \
    -c $hostName:8091 \
    --user=$userName \
    --password=$password \
    --bucket=$sessionBucketName
# BUG FIX: the original left a trailing '\' on the --bucket line above, which
# spliced 'RESULT=$?' into the bucket-delete command as an extra argument and
# left RESULT unset.
RESULT=$?
if [ $RESULT -ne 0 ]; then
    echo "Delete bucket failed."
fi

couchbase-cli bucket-list -c $hostName:8091
|
// BUG FIX: the program did not compile — `cout` was used with no
// <iostream> include / std qualification, and the output statement in
// main() was missing its terminating semicolon.
#include <iostream>

// Sorts arr[0..n) in place with insertion sort (stable, O(n^2) worst case).
void insertionSort(int arr[], int n)
{
    for (int i = 1; i < n; i++)
    {
        int key = arr[i];
        int j = i - 1;
        // Shift every element greater than key one slot right, then drop
        // key into the gap.
        while (j >= 0 && arr[j] > key)
        {
            arr[j + 1] = arr[j];
            j = j - 1;
        }
        arr[j + 1] = key;
    }
}

int main()
{
    int arr[] = {5, 2, 1, 8, 3};
    int n = sizeof(arr) / sizeof(arr[0]);
    insertionSort(arr, n);
    for (int i = 0; i < n; i++)
    {
        std::cout << arr[i] << " ";
    }
    return 0;
}
package ru.job4j.search;
import org.junit.Test;
import java.util.ArrayList;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.*;
/**
* PhoneDictionaryTest.
*
* @author <NAME> (<EMAIL>)
* @version $Id$
* @since 0.1
*/
public class PhoneDictionaryTest {

    @Test
    public void whenFindByName() {
        // Single entry, looked up by exact first name.
        var phones = new PhoneDictionary();
        phones.add(
                new Person("Petr", "Arsentev", "534872", "Bryansk")
        );
        var persons = phones.find("Petr");
        assertThat(persons.iterator().next().getSurname(), is("Arsentev"));
    }

    @Test
    public void whenFindBySequenceThen2Occurrences() {
        // "sk" occurs in the cities Bryansk and Omsk, so find() presumably
        // matches a substring of any field — confirm against
        // PhoneDictionary.find's contract.
        var phones = new PhoneDictionary();
        phones.add(
                new Person("Petr", "Arsentev", "534872", "Bryansk")
        );
        phones.add(
                new Person("Ivan", "Andreev", "3222233", "Omsk")
        );
        phones.add(
                new Person("Sergey", "Ivanov", "715471", "Moscow")
        );
        var persons = phones.find("sk");
        // Collect surnames into a String[] for the order-sensitive comparison.
        var founded = new ArrayList<String>();
        for (var it = persons.iterator(); it.hasNext();) {
            founded.add(it.next().getSurname());
        }
        var result = new String[founded.size()];
        result = founded.toArray(result);
        assertArrayEquals(new String[] {"Arsentev", "Andreev"}, result);
    }
}
<reponame>julianhyde/clapham
package net.hydromatic.clapham.parser;
import net.hydromatic.clapham.graph.Grammar;
import net.hydromatic.clapham.graph.Graph;
import net.hydromatic.clapham.graph.Node;
import net.hydromatic.clapham.graph.NodeType;
import net.hydromatic.clapham.graph.Symbol;
/**
* TODO:
*
* @author <NAME>
* @version $Id$
* @since Mar 5, 2010
*/
public class PredicateNode extends BaseEbnfNode {
public final String s;
public final EbnfNode node;
public PredicateNode(String s, EbnfNode node) {
this.s = s;
this.node = node;
}
public Graph toGraph(Grammar grammar) {
final Graph g2 = node.toGraph(grammar);
Symbol symbol = new Symbol(NodeType.PREDICATE, s);
// grammar.symbolMap.put(symbol.name, symbol);
Graph g1 = new Graph(new Node(grammar, symbol));
grammar.makePredicate(g1, g2);
return g1;
}
public void toString(StringBuilder buf) {
StringBuilder buf1 = new StringBuilder();;
node.toString(buf1 );
buf.append("PredicateNode(").append(s).append(", ").append(buf1).append(")");
}
public String toEbnf(EbnfDecorator decorator) {
return s + " " + node.toEbnf(decorator);
}
} |
from copy import deepcopy
from typing import Any
def clone_object(obj: Any) -> Any:
    """Return a deep copy of *obj* so callers can mutate the result freely.

    Args:
        obj: Any Python object (e.g., list, dictionary, class instance).

    Returns:
        A deep copy of ``obj``; the original is left untouched.
    """
    cloned = deepcopy(obj)
    return cloned
// Copyright 2017-2021, University of Colorado Boulder
/**
* Bounds2 tests
*
* @author <NAME> (PhET Interactive Simulations)
* @author <NAME> (PhET Interactive Simulations)
*/
import Bounds2 from './Bounds2.js';
import Matrix3 from './Matrix3.js';
import Rectangle from './Rectangle.js';
import Vector2 from './Vector2.js';
QUnit.module( 'Bounds2' );
const epsilon = 0.00000001;

// Asserts (via assert.ok) that two bounds agree on all four edges to within
// `epsilon`; `msg` prefixes each failure message.
function approximateBoundsEquals( assert, a, b, msg ) {
  const close = ( x, y ) => Math.abs( x - y ) < epsilon;
  assert.ok( close( a.minX, b.minX ), `${msg} minX: expected: ${b.minX}, result: ${a.minX}` );
  assert.ok( close( a.minY, b.minY ), `${msg} minY: expected: ${b.minY}, result: ${a.minY}` );
  assert.ok( close( a.maxX, b.maxX ), `${msg} maxX: expected: ${b.maxX}, result: ${a.maxX}` );
  assert.ok( close( a.maxY, b.maxY ), `${msg} maxY: expected: ${b.maxY}, result: ${a.maxY}` );
}
// A Bounds2(minX, minY, maxX, maxY) equals a Rectangle(x, y, width, height)
// describing the same region.
QUnit.test( 'Rectangle', assert => {
  assert.ok( new Bounds2( -2, -4, 2, 4 ).equals( new Rectangle( -2, -4, 4, 8 ) ), 'Bounds2-Rectangle equivalence' );
} );

// Smoke-tests the derived accessors of a fixed bounds instance.
QUnit.test( 'Basic', assert => {
  const bounds = new Bounds2( 1, 2, 3, 4 );
  assert.ok( bounds.minX === 1, 'minX' );
  assert.ok( bounds.minY === 2, 'minY' );
  assert.ok( bounds.maxX === 3, 'maxX' );
  assert.ok( bounds.maxY === 4, 'maxY' );
  assert.ok( bounds.width === 2, 'width' );
  assert.ok( bounds.height === 2, 'height' );
  assert.ok( bounds.x === 1, 'x' );
  assert.ok( bounds.y === 2, 'y' );
  assert.ok( bounds.centerX === 2, 'centerX' );
  assert.ok( bounds.centerY === 3, 'centerY' );
} );
// Point/bounds containment, intersection, and the NOTHING/EVERYTHING
// sentinel bounds.
QUnit.test( 'Coordinates', assert => {
  const bounds = new Bounds2( 1, 2, 3, 4 );
  assert.ok( !bounds.isEmpty(), 'isEmpty' );
  assert.ok( !bounds.containsCoordinates( 0, 0 ), 'coordinates #1' );
  assert.ok( !bounds.containsCoordinates( 2, 0 ), 'coordinates #2' );
  assert.ok( bounds.containsCoordinates( 2, 2 ), 'coordinates #3 (on boundary)' );
  assert.ok( !bounds.containsCoordinates( 4, 2 ), 'coordinates #4' );
  assert.ok( !Bounds2.NOTHING.containsBounds( bounds ), 'nothing.contains' );
  assert.ok( Bounds2.EVERYTHING.containsBounds( bounds ), 'everything.contains' );
  assert.ok( bounds.equals( bounds ), 'reflexive' );
  assert.ok( !bounds.equals( Bounds2.NOTHING ), 'reflexive' );
  assert.ok( !Bounds2.NOTHING.equals( bounds ), 'reflexive' );
  assert.ok( bounds.intersectsBounds( new Bounds2( 2, 3, 4, 5 ) ), 'intersect #1' );
  assert.ok( bounds.intersectsBounds( new Bounds2( 3, 4, 5, 6 ) ), 'intersect #2 (boundary point)' );
  assert.ok( !bounds.intersectsBounds( new Bounds2( 4, 5, 6, 7 ) ), 'intersect #3' );
  assert.ok( Bounds2.NOTHING.isEmpty(), 'Bounds2.NOTHING.isEmpty()' );
  assert.ok( !Bounds2.EVERYTHING.isEmpty(), '!Bounds2.EVERYTHING.isEmpty()' );
} );
// Shared fixtures. Each call returns a FRESH instance so tests that mutate
// one copy cannot leak state into the next assertion.
function A() { return new Bounds2( 0, 0, 2, 3 ); }
function B() { return new Bounds2( 1, 1, 5, 4 ); }
function C() { return new Bounds2( 1.5, 1.2, 5.7, 4.8 ); }
// Each pair below is an immutable operation (returns a new Bounds2) and its
// mutating counterpart; both must yield the same result.
QUnit.test( 'Mutable / immutable versions', assert => {
  approximateBoundsEquals( assert, A().union( B() ), A().includeBounds( B() ), 'union / includeBounds' );
  approximateBoundsEquals( assert, A().intersection( B() ), A().constrainBounds( B() ), 'intersection / constrainBounds' );
  approximateBoundsEquals( assert, A().withCoordinates( 10, 12 ), A().addCoordinates( 10, 12 ), 'withCoordinates / addCoordinates' );
  approximateBoundsEquals( assert, A().withPoint( new Vector2( 10, 12 ) ), A().addPoint( new Vector2( 10, 12 ) ), 'withPoint / addPoint' );
  approximateBoundsEquals( assert, A().withMinX( 1.5 ), A().setMinX( 1.5 ), 'withMinX / setMinX' );
  approximateBoundsEquals( assert, A().withMinY( 1.5 ), A().setMinY( 1.5 ), 'withMinY / setMinY' );
  approximateBoundsEquals( assert, A().withMaxX( 1.5 ), A().setMaxX( 1.5 ), 'withMaxX / setMaxX' );
  approximateBoundsEquals( assert, A().withMaxY( 1.5 ), A().setMaxY( 1.5 ), 'withMaxY / setMaxY' );
  approximateBoundsEquals( assert, C().roundedOut(), C().roundOut(), 'roundedOut / roundOut' );
  approximateBoundsEquals( assert, C().roundedIn(), C().roundIn(), 'roundedIn / roundIn' );
  // Composite transform (rotate, translate, scale) to exercise the general path.
  const matrix = Matrix3.rotation2( Math.PI / 4 ).timesMatrix( Matrix3.translation( 11, -13 ) ).timesMatrix( Matrix3.scale( 2, 3.5 ) );
  approximateBoundsEquals( assert, A().transformed( matrix ), A().transform( matrix ), 'transformed / transform' );
  approximateBoundsEquals( assert, A().dilated( 1.5 ), A().dilate( 1.5 ), 'dilated / dilate' );
  approximateBoundsEquals( assert, A().eroded( 1.5 ), A().erode( 1.5 ), 'eroded / erode' );
  approximateBoundsEquals( assert, A().shiftedX( 1.5 ), A().shiftX( 1.5 ), 'shiftedX / shiftX' );
  approximateBoundsEquals( assert, A().shiftedY( 1.5 ), A().shiftY( 1.5 ), 'shiftedY / shiftY' );
  approximateBoundsEquals( assert, A().shiftedXY( 1.5, 2 ), A().shiftXY( 1.5, 2 ), 'shifted / shift' );
} );
// Expected results for elementary transforms of A() = (0,0,2,3) against
// hand-computed bounds (rotation by 90° maps it into the second quadrant).
QUnit.test( 'Bounds transforms', assert => {
  approximateBoundsEquals( assert, A().transformed( Matrix3.translation( 10, 20 ) ), new Bounds2( 10, 20, 12, 23 ), 'translation' );
  approximateBoundsEquals( assert, A().transformed( Matrix3.rotation2( Math.PI / 2 ) ), new Bounds2( -3, 0, 0, 2 ), 'rotation' );
  approximateBoundsEquals( assert, A().transformed( Matrix3.scale( 3, 2 ) ), new Bounds2( 0, 0, 6, 6 ), 'scale' );
} );
// equals()/equalsEpsilon() over regular bounds, the NOTHING/EVERYTHING
// sentinels (infinite coordinates), and mixed finite/infinite cases.
QUnit.test( 'Equality', assert => {
  assert.ok( new Bounds2( 0, 1, 2, 3 ).equals( new Bounds2( 0, 1, 2, 3 ) ), 'Without epsilon: regular - reflexive' );
  assert.ok( new Bounds2( 0, 1, 2, 3 ).equalsEpsilon( new Bounds2( 0, 1, 2, 3 ), epsilon ), 'With epsilon: regular - reflexive' );
  assert.ok( !new Bounds2( 0, 1, 2, 3 ).equals( new Bounds2( 0, 1, 2, 5 ) ), 'Without epsilon: regular - different' );
  assert.ok( !new Bounds2( 0, 1, 2, 3 ).equalsEpsilon( new Bounds2( 0, 1, 2, 5 ), epsilon ), 'With epsilon: regular - different' );
  assert.ok( Bounds2.NOTHING.equals( Bounds2.NOTHING ), 'Without epsilon: Nothing - reflexive' );
  assert.ok( Bounds2.NOTHING.equalsEpsilon( Bounds2.NOTHING, epsilon ), 'With epsilon: Nothing - reflexive' );
  assert.ok( Bounds2.NOTHING.equals( Bounds2.NOTHING.copy() ), 'Without epsilon: Nothing - copy - reflexive' );
  assert.ok( Bounds2.NOTHING.equalsEpsilon( Bounds2.NOTHING.copy(), epsilon ), 'With epsilon: Nothing - copy - reflexive' );
  assert.ok( Bounds2.EVERYTHING.equals( Bounds2.EVERYTHING ), 'Without epsilon: Everything - reflexive' );
  assert.ok( Bounds2.EVERYTHING.equalsEpsilon( Bounds2.EVERYTHING, epsilon ), 'With epsilon: Everything - reflexive' );
  assert.ok( Bounds2.EVERYTHING.equals( Bounds2.EVERYTHING.copy() ), 'Without epsilon: Everything - copy - reflexive' );
  assert.ok( Bounds2.EVERYTHING.equalsEpsilon( Bounds2.EVERYTHING.copy(), epsilon ), 'With epsilon: Everything - copy - reflexive' );
  assert.ok( !Bounds2.NOTHING.equals( Bounds2.EVERYTHING ), 'Without epsilon: Nothing !== Everything' );
  assert.ok( !Bounds2.NOTHING.equalsEpsilon( Bounds2.EVERYTHING, epsilon ), 'With epsilon: Nothing !== Everything' );
  // Mixed finite/infinite coordinates: equality must compare infinities exactly.
  assert.ok( new Bounds2( 0, 0, 5, Number.POSITIVE_INFINITY ).equals( new Bounds2( 0, 0, 5, Number.POSITIVE_INFINITY ) ), 'Without epsilon: Mixed finite-ness - reflexive' );
  assert.ok( new Bounds2( 0, 0, 5, Number.POSITIVE_INFINITY ).equalsEpsilon( new Bounds2( 0, 0, 5, Number.POSITIVE_INFINITY ), epsilon ), 'With epsilon: Mixed finite-ness - reflexive' );
  assert.ok( !new Bounds2( 0, 0, 5, Number.POSITIVE_INFINITY ).equals( new Bounds2( 0, 0, 5, Number.NEGATIVE_INFINITY ) ), 'Without epsilon: Mixed finite-ness - swapped infinity' );
  assert.ok( !new Bounds2( 0, 0, 5, Number.POSITIVE_INFINITY ).equalsEpsilon( new Bounds2( 0, 0, 5, Number.NEGATIVE_INFINITY ), epsilon ), 'With epsilon: Mixed finite-ness - swapped infinity' );
  assert.ok( !new Bounds2( 0, 0, 5, Number.POSITIVE_INFINITY ).equals( new Bounds2( 0, 0, 6, Number.POSITIVE_INFINITY ) ), 'Without epsilon: Mixed finite-ness - different finite number' );
  assert.ok( !new Bounds2( 0, 0, 5, Number.POSITIVE_INFINITY ).equalsEpsilon( new Bounds2( 0, 0, 6, Number.POSITIVE_INFINITY ), epsilon ), 'With epsilon: Mixed finite-ness - different finite number' );
} );
def power(x, n):
    """Return ``x`` raised to the non-negative integer power ``n``.

    Args:
        x: Base (any number).
        n: Exponent; non-negative int (``n <= 0`` yields 1, matching the
           original loop behavior).

    Returns:
        ``x ** n`` computed by repeated multiplication.
    """
    result = 1
    # Multiply the number n times.
    for _ in range(n):
        result *= x
    return result


x = 5
n = 3
# BUG FIX: the original rebound the module name `power` to the numeric result,
# destroying the function after a single use; store the value separately.
value = power(x, n)
print("The value of {} raised to the power {} is {}.".format(x, n, value))
<reponame>proletarius101/git-config-user-profiles
import * as sgit from "simple-git/promise";
import { workspace, window } from "vscode";
import { getProfile } from "./../config";
import * as gitconfig from "gitconfiglocal";
import { Profile } from "../models";
import { Messages } from "../constants";
import { Logger } from "../util";
// True when `path` is inside a git repository. Any simple-git failure
// (missing folder, git not installed, etc.) is reported as `false` rather
// than propagated to the caller.
export async function isGitRepository(path: string): Promise<boolean> {
    try {
        return await sgit(path).checkIsRepo();
    } catch (error) {
        return false;
    }
}
// Validates that the current VS Code workspace is usable by this extension:
// exactly one root folder, and that folder is a git repository. Returns a
// status object (never throws) so callers can surface `message` to the user;
// `folder` is only set on success.
export async function isValidWorkspace(): Promise<{ isValid: boolean; message: string; folder?: string }> {
    if (workspace.workspaceFolders) {
        let foldersCount = workspace.workspaceFolders.length;
        // Multi-root workspaces are explicitly unsupported.
        if (foldersCount > 1) {
            return {
                message: Messages.DOES_NOT_SUPPORT_MULTI_ROOT,
                isValid: false
            };
        }
        if (foldersCount === 0) {
            return {
                message: Messages.OPEN_REPO_FIRST,
                isValid: false
            };
        }
        if (foldersCount === 1) {
            let folderPath = workspace.workspaceFolders[0].uri.fsPath;
            let validGitRepo = await isGitRepository(folderPath);
            if (!validGitRepo) {
                return {
                    message: Messages.NOT_A_VALID_REPO,
                    isValid: false
                };
            }
            return {
                message: "",
                isValid: true,
                folder: folderPath
            };
        }
    }
    // No workspace open at all.
    return {
        message: Messages.NOT_A_VALID_REPO,
        isValid: false
    };
}
// True for undefined, null, or the empty string.
export function isEmpty(str: string | undefined | null) {
    return str == null || str.length === 0;
}
// Reads user.name / user.email / user.signingkey from the repo-local git
// config. Resolves with empty strings when the details are absent; never
// rejects. NOTE(review): all three fields (including signingkey) must be
// present for any of them to be reported — confirm that is intended.
export async function getCurrentConfig(gitFolder: string): Promise<{ userName: string; email: string, signingKey: string }> {
    Logger.instance.logInfo(`Getting details from config file in ${gitFolder}`);
    return await new Promise((resolve, reject) => {
        gitconfig(gitFolder, (error, config) => {
            // BUG FIX: when gitconfig reports an error, `config` is undefined
            // and the original dereferenced `config.user`, crashing with a
            // TypeError instead of resolving with empty details.
            if (!error && config.user && config.user.name && config.user.email && config.user.signingkey) {
                let currentConfig = {
                    userName: config.user.name,
                    email: config.user.email,
                    signingKey: config.user.signingkey
                };
                Logger.instance.logInfo(`Config details found: ${JSON.stringify(currentConfig)}`);
                resolve(currentConfig);
            } else {
                Logger.instance.logInfo(`No config details found.`);
                resolve({ userName: "", email: "", signingKey: ""});
            }
        });
    });
}
// Strips the first "$(check)" icon marker from a UI label and trims
// surrounding whitespace; falsy input is returned unchanged.
export function trimLabelIcons(str: string) {
    if (!str) {
        return str;
    }
    return str.replace("$(check)", "").trim();
}
// True when the string is missing or consists only of whitespace.
export function isBlank(str: string) {
    const whitespaceOnly = /^\s*$/;
    return !str || whitespaceOnly.test(str);
}
// Input-box validator for profile names: returns an error string to display,
// or undefined when the name is acceptable. `checkForDuplicates` is disabled
// by callers that are editing an existing profile.
export function validateProfileName(input: string, checkForDuplicates: boolean = true) {
    if (isEmpty(input) || isBlank(input)) {
        return Messages.ENTER_A_VALID_STRING;
    }
    if (checkForDuplicates) {
        let existingProfile = getProfile(input);
        if (existingProfile) {
            return `Oops! Profile with the same name '${input}' already exists!`;
        }
    }
    return undefined;
}
// Input-box validator for the git user name: any non-blank string is valid.
// Returns the error message to display, or undefined when valid.
export function validateUserName(input: string) {
    return isEmpty(input) || isBlank(input) ? Messages.ENTER_A_VALID_STRING : undefined;
}
// Input-box validator for the git email: minimal something@host.tld check
// with no whitespace. Returns an error message, or undefined when valid.
export function validateEmail(input: string) {
    const validEmail = /^[^\s@]+@[^\s@]+\.[^\s@]+$/;
    return validEmail.test(input) ? undefined : Messages.NOT_A_VALID_EMAIL;
}
// Input-box validator for the signing key: empty is allowed (no signing),
// otherwise the value must be an email address or a 16- or 8-character
// GPG key id (uppercase hex-style). Returns an error message or undefined.
export function validateSigningKey(input: string) {
    // BUG FIX: the original pattern's key-id alternatives were unanchored
    // ( (^email$)|[A-Z0-9]{16}|[A-Z0-9]{8} ), so ANY string merely containing
    // 8 consecutive uppercase letters/digits passed validation. Grouping the
    // alternation inside ^...$ makes each alternative match the whole input.
    let validSigningKey = /^([^\s@]+@[^\s@]+\.[^\s@]+|[A-Z0-9]{16}|[A-Z0-9]{8})$/;
    if (input !== "" && !validSigningKey.test(input)) {
        return Messages.NOT_A_VALID_SIGNING_KEY;
    }
    return undefined;
}
export function trimProperties(profile: Profile): Profile {
return <Profile>{
label: profile.label.replace("$(check)", "").trim(),
email: profile.email.trim(),
userName: profile.userName.trim(),
signingKey: profile.signingKey.trim(),
selected: profile.selected,
detail: undefined
};
}
|
<reponame>pulsar-chem/BPModule
import pulsar as psr
def load_ref_system():
    """ Returns beta-l-lyxopyranose as found in the IQMol fragment library.
        All credit to https://github.com/nutjunkie/IQmol
    """
    # XYZ-style geometry: element symbol followed by three Cartesian
    # coordinates per line (units presumably Angstroms — confirm against
    # psr.make_system's parser).
    return psr.make_system("""
        C     1.4299      0.2461      0.7896
        O     0.4230      1.0739      1.3901
        C    -0.9416      0.6206      1.4010
        C    -1.3780      0.0314      0.0512
        C    -0.4016     -1.0958     -0.3576
        O    -0.7829     -1.6098     -1.6359
        C     1.0038     -0.4852     -0.5172
        O     0.9231      0.4645     -1.5809
        O    -2.6663     -0.5389      0.3219
        O     2.4097      1.2044      0.4043
        H    -1.0615     -0.1113      2.2208
        H    -1.4941      1.5476      1.6502
        H     1.8808     -0.4028      1.5639
        H     1.7537     -1.2585     -0.8006
        H    -0.3995     -1.9182      0.3913
        H    -1.4467      0.7936     -0.7574
        H     2.0470      2.1205      0.5463
        H     1.7591      1.0051     -1.5910
        H    -1.3277     -2.4163     -1.5204
        H    -3.1571     -0.6576     -0.5199
        """)
const express = require('express')
const path = require('path')

// Port is hard-coded; no environment override is read here.
const port = 3000
const app = express()

// serve static js file from dist dir
app.use(express.static('dist'))

// if not a static file from dist, serve default index.html file for any request
// (SPA fallback so client-side routes still load the app shell).
app.get('*', (req, res) => {
  res.sendFile(path.resolve(__dirname, 'index.html'), err => {
    // Errors are only logged; the response is left to express's default handling.
    if (err) {
      console.log(err)
    }
  })
})

app.listen(port, () => {
  console.log(`Listening on port ${port}`)
})
import requests

# Get the GitHub user data from the repo
# NOTE(review): for a FILE path the contents API returns a JSON object (so the
# loop below prints its keys, e.g. 'name', 'content'); for a directory it
# returns a list of entries. Confirm which shape is intended here.
url = 'https://api.github.com/repos/user/example-repo/contents/data.json'
r = requests.get(url)
data = r.json()

# Print data to console
for d in data:
    print(d)
<gh_stars>0
declare const _default: (req: any, res: any) => Promise<void>;
/**
 * @oas [delete] /regions/{id}/countries/{country_code}
 * operationId: "PostRegionsRegionCountriesCountry"
 * summary: "Remove Country"
 * x-authenticated: true
 * description: "Removes a Country from the list of Countries in a Region"
 * parameters:
 *   - (path) id=* {string} The id of the Region.
 *   - (path) country_code=* {string} The 2 character ISO code for the Country.
 * tags:
 *   - Region
 * responses:
 *   200:
 *     description: OK
 *     content:
 *       application/json:
 *         schema:
 *           properties:
 *             region:
 *               $ref: "#/components/schemas/region"
 */
// NOTE(review): the route is documented as [delete] but the operationId says
// "Post..." — likely it should be "DeleteRegionsRegionCountriesCountry".
// Left unchanged here because generated API clients key off the operationId;
// confirm before renaming.
export default _default;
|
package os.failsafe.executor;
import org.junit.jupiter.api.Test;
import java.time.LocalDateTime;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
class TaskShould {

    @Test
    void return_false_on_cancel_if_task_is_not_cancelable_because_it_is_locked() {
        // The third LocalDateTime argument is presumably the lock time — a
        // locked task must not be cancelable. TODO confirm the constructor's
        // parameter order against Task.
        Task task = new Task("id", "name", "parameter", LocalDateTime.now(), LocalDateTime.now(), LocalDateTime.now(), null, 0, 0L);
        assertFalse(task.isCancelable());
    }

    @Test
    void return_false_on_retry_if_task_is_not_retryable() {
        // No ExecutionFailure recorded (null), so there is nothing to retry.
        Task task = new Task("id", "name", "parameter", LocalDateTime.now(), LocalDateTime.now(), null, null, 0, 0L);
        assertFalse(task.isRetryable());
    }

    @Test
    void print_its_internal_state() {
        // A fixed timestamp keeps the expected toString() deterministic.
        LocalDateTime dateTime = LocalDateTime.of(2020, 5, 5, 10, 30);
        Task task = new Task("id", "name", "parameter", dateTime, dateTime, dateTime, new ExecutionFailure(dateTime, "exceptionMsg", "stackTrace"), 0, 0L);
        assertEquals("Task{id='id', parameter='parameter', name='name', creationTime=2020-05-05T10:30, plannedExecutionTime=2020-05-05T10:30, lockTime=2020-05-05T10:30, executionFailure=ExecutionFailure{failTime=2020-05-05T10:30, exceptionMessage='exceptionMsg', stackTrace='stackTrace'}, version=0}",
                task.toString());
    }
}
|
def print_items(items):
    """Print each item in the list.

    Only items whose exact type is ``int`` or ``str`` are printed (subclasses
    such as ``bool`` are deliberately rejected, matching the original exact-
    type check).

    Args:
        items: Iterable of values to print.

    Raises:
        TypeError: If an element is neither an int nor a str.
    """
    for item in items:
        if type(item) == int or type(item) == str:
            print(item)
        else:
            # BUG FIX: the original message lacked the f-string prefix, so it
            # printed the literal text "{type(item)}" instead of the type.
            raise TypeError(f"Input cannot be of type {type(item)}")
#!/bin/bash

# set the environment to be fully automated
export DEBIAN_FRONTEND="noninteractive"

# update system
apt-get update
apt-get upgrade -y
# FIX: removed duplicate package names ('unzip' and 'wget' were each listed twice).
apt-get install -y wget curl unzip daemon python-setuptools \
    software-properties-common git-core ca-certificates

# Install OpenJDK 8
# Sets language to UTF8 : this works in pretty much all cases
locale-gen en_US.UTF-8

# add repo, update, install
add-apt-repository -y ppa:openjdk-r/ppa 2>&1
apt-get update
apt-get install -y openjdk-8-jre-headless
export JAVA_HOME="/usr/lib/jvm/java-8-openjdk-amd64/"

# Install Jenkins
# @see https://wiki.jenkins-ci.org/display/JENKINS/Installing+Jenkins+on+Ubuntu
wget -q -O - http://pkg.jenkins-ci.org/debian-stable/jenkins-ci.org.key | \
    apt-key add -
echo "deb http://pkg.jenkins-ci.org/debian-stable binary/" | \
    sudo tee /etc/apt/sources.list.d/jenkins.list
# FIX: dropped a second, redundant 'add-apt-repository ppa:openjdk-r/ppa' that
# was copy-pasted here; the apt-get update below is still required so apt
# picks up the newly added Jenkins repo.
apt-get update
apt-get install -y jenkins

# hackish way to install a specific version of Jenkins
# dpkg --install /vagrant/jenkins_1.642_all.deb
# service jenkins restart

# Install Jenkins plugins
# @see http://updates.jenkins-ci.org/download/plugins/
# @see /var/lib/jenkins/plugins/
# @see https://github.com/jenkinsci/workflow-aggregator-plugin/blob/master/demo/plugins.txt

# install the Jenkins plugins
echo "INFO: Installing Jenkins plugins..."
mkdir -p /var/lib/jenkins/plugins/
chmod -R 0777 /var/lib/jenkins/plugins
/vagrant/jenkins_install_plugins.sh /vagrant/jenkins_plugins.txt

# clear the logs, set folder permissions, restart
chmod -R 0777 /var/lib/jenkins/plugins
rm -f /var/log/jenkins/jenkins.log
echo "INFO: Done installing Jenkins plugins."
#! /bin/bash
# This script updates the code repos on Raspbian for Robots.
################################################
######## Parsing Command Line Arguments ########
################################################
# definitions needed for standalone call
PIHOME=/home/pi                                    # home directory of the "pi" user
DEXTER=Dexter                                      # name of the Dexter folder
DEXTER_PATH=$PIHOME/$DEXTER                        # /home/pi/Dexter — install root
RASPBIAN=$PIHOME/di_update/Raspbian_For_Robots     # updater checkout (NOTE(review): not referenced below — confirm it is used elsewhere)
GROVEPI_DIR=$DEXTER_PATH/GrovePi                   # where the GrovePi repo is cloned
DEXTERSCRIPT=$DEXTER_PATH/lib/Dexter/script_tools  # shared helpers (functions_library.sh)
# the top-level module name of grovepi package
# used for detecting whether it's installed or not
REPO_PACKAGE=grovepi
# called way down below
check_if_run_with_pi() {
  ## Abort (exit code 6) unless the script runs as the "pi" user.
  local current_uid pi_uid
  current_uid=$(id -ur)
  pi_uid=$(id -ur pi)
  if [ "$current_uid" -ne "$pi_uid" ]; then
    echo "GrovePi installer script must be run with \"pi\" user. Exiting."
    exit 6
  fi
}
# called way down below
parse_cmdline_arguments() {
  ## Parse the installer's flags and branch selector from "$@", print a
  ## summary banner, and build the global rfrtools_options array that is
  ## later passed to the RFR_Tools install script.
  # whether to install the dependencies or not (avrdude, apt-get, wiringpi, and so on)
  installdependencies=true
  updaterepo=true
  install_rfrtools=true
  install_pkg_rfrtools=true
  install_rfrtools_gui=true
  # the following 3 options are mutually exclusive
  systemwide=true
  userlocal=false
  envlocal=false
  usepython3exec=true
  # the following option tells which branch has to be used
  selectedbranch="master"
  # -g makes the array global so install_rfrtools_repo can read it later
  declare -ga rfrtools_options=("--system-wide")
  # iterate through bash arguments
  for i; do
    case "$i" in
      --no-dependencies)
        installdependencies=false
        ;;
      --no-update-aptget)
        updaterepo=false
        ;;
      --bypass-rfrtools)
        install_rfrtools=false
        ;;
      --bypass-python-rfrtools)
        install_pkg_rfrtools=false
        ;;
      --bypass-gui-installation)
        install_rfrtools_gui=false
        ;;
      --user-local)
        userlocal=true
        systemwide=false
        declare -ga rfrtools_options=("--user-local")
        ;;
      --env-local)
        envlocal=true
        systemwide=false
        declare -ga rfrtools_options=("--env-local")
        ;;
      --system-wide)
        # already the default; nothing to change
        ;;
      develop|feature/*|hotfix/*|fix/*|DexterOS*|v*)
        # anything that looks like a branch name selects that branch
        selectedbranch="$i"
        ;;
    esac
  done
  # show some feedback on the console
  if [ -f $DEXTERSCRIPT/functions_library.sh ]; then
    source $DEXTERSCRIPT/functions_library.sh
    # show some feedback for the GrovePi
    # NOTE(review): bare "quiet_mode" is evaluated arithmetically by -eq, so
    # an unset variable counts as 0 — confirm "$quiet_mode" wasn't intended.
    if [[ quiet_mode -eq 0 ]]; then
      echo " _____ _ ";
      echo " | __ \ | | ";
      echo " | | | | _____ _| |_ ___ _ __ ";
      echo " | | | |/ _ \ \/ / __/ _ \ '__| ";
      echo " | |__| | __/> <| || __/ | ";
      echo " |_____/ \___/_/\_\\\__\___|_| _ ";
      echo " |_ _| | | | | (_) ";
      echo " | | _ __ __| |_ _ ___| |_ _ __ _ ___ ___ ";
      echo " | | | '_ \ / _\ | | | / __| __| '__| |/ _ \/ __| ";
      echo " _| |_| | | | (_| | |_| \__ \ |_| | | | __/\__ \ ";
      echo " |_____|_| |_|\__,_|\__,_|___/\__|_| |_|\___||___/ ";
      echo " ";
      echo " ";
      echo " _____ _____ _ "
      echo " / ____| | __ (_) "
      echo "| | __ _ __ _____ _____| |__) | "
      echo "| | |_ | '__/ _ \ \ / / _ \ ___/ | "
      echo "| |__| | | | (_) \ V / __/ | | | "
      echo " \_____|_| \___/ \_/ \___|_| |_| "
      echo " "
    fi
    feedback "Welcome to GrovePi Installer."
  else
    echo "Welcome to GrovePi Installer."
  fi
  echo "Updating GrovePi for $selectedbranch branch with the following options:"
  # each line echoes the flag in its negated "--no-*/--bypass-*" form
  ([[ $installdependencies = "true" ]] && echo " --no-dependencies=false") || echo " --no-dependencies=true"
  ([[ $updaterepo = "true" ]] && echo " --no-update-aptget=false") || echo " --no-update-aptget=true"
  ([[ $install_rfrtools = "true" ]] && echo " --bypass-rfrtools=false") || echo " --bypass-rfrtools=true"
  ([[ $install_pkg_rfrtools = "true" ]] && echo " --bypass-python-rfrtools=false") || echo " --bypass-python-rfrtools=true"
  ([[ $install_rfrtools_gui = "true" ]] && echo " --bypass-gui-installation=false") || echo " --bypass-gui-installation=true"
  echo " --user-local=$userlocal"
  echo " --env-local=$envlocal"
  echo " --system-wide=$systemwide"
  # create rest of list of arguments for rfrtools call
  rfrtools_options+=("$selectedbranch")
  [[ $usepython3exec = "true" ]] && rfrtools_options+=("--use-python3-exe-too")
  [[ $updaterepo = "true" ]] && rfrtools_options+=("--update-aptget")
  [[ $installdependencies = "true" ]] && rfrtools_options+=("--install-deb-deps")
  [[ $install_pkg_rfrtools = "true" ]] && rfrtools_options+=("--install-python-package")
  [[ $install_rfrtools_gui = "true" ]] && rfrtools_options+=("--install-gui")
  echo "Using \"$selectedbranch\" branch"
  echo "Options used for RFR_Tools script: \"${rfrtools_options[@]}\""
}
################################################
######## Cloning GrovePi & RFR_Tools ##########
################################################
# called in <<install_rfrtools_repo>>
check_dependencies() {
  ## Verify that git, python(3) and pip(3) are available and that
  ## script_tools was installed; exit with a distinct code otherwise.
  command -v git >/dev/null 2>&1 || { echo "This script requires \"git\" but it's not installed. Error occurred with RFR_Tools installation." >&2; exit 1; }
  command -v python >/dev/null 2>&1 || { echo "Executable \"python\" couldn't be found. Error occurred with RFR_Tools installation." >&2; exit 2; }
  command -v pip >/dev/null 2>&1 || { echo "Executable \"pip\" couldn't be found. Error occurred with RFR_Tools installation." >&2; exit 3; }
  if [[ $usepython3exec = "true" ]]; then
    command -v python3 >/dev/null 2>&1 || { echo "Executable \"python3\" couldn't be found. Error occurred with RFR_Tools installation." >&2; exit 4; }
    command -v pip3 >/dev/null 2>&1 || { echo "Executable \"pip3\" couldn't be found. Error occurred with RFR_Tools installation." >&2; exit 5; }
  fi
  if [[ ! -f $DEXTERSCRIPT/functions_library.sh ]]; then
    # bug fix: the original message contained a literal backslash ("didn\'t")
    # and a stray trailing single quote inside the double-quoted string
    echo "script_tools didn't get installed. Enable the installation of dependencies with RFR_Tools."
    exit 8
  fi
}
# called way down below
install_rfrtools_repo() {
  ## Download and run the RFR_Tools installer for the selected branch
  ## (unless --bypass-rfrtools was given), then verify all dependencies.
  # if rfrtools is not bypassed then install it
  if [[ $install_rfrtools = "true" ]]; then
    curl --silent -kL https://raw.githubusercontent.com/DexterInd/RFR_Tools/$selectedbranch/scripts/install_tools.sh > $PIHOME/.tmp_rfrtools.sh
    echo "Installing RFR_Tools. This might take a while.."
    # bug fix: the array expansion is now quoted so options containing
    # whitespace are passed as single arguments instead of being word-split
    bash $PIHOME/.tmp_rfrtools.sh "${rfrtools_options[@]}" # > /dev/null
    ret_val=$?
    rm $PIHOME/.tmp_rfrtools.sh
    if [[ $ret_val -ne 0 ]]; then
      echo "RFR_Tools failed installing with exit code $ret_val. Exiting."
      exit 7
    fi
    # (typo fix: message previously read "RFR_Tool")
    echo "Done installing RFR_Tools"
  fi
  # check if all deb packages have been installed with RFR_Tools
  check_dependencies
  source $DEXTERSCRIPT/functions_library.sh
}
# called way down below
clone_grovepi() {
  ## Replace any existing GrovePi checkout under $DEXTER_PATH with a fresh,
  ## shallow clone of the selected branch, and cd into it.
  # $DEXTER_PATH is still only available for the pi user
  # shortly after this, we'll make it work for any user
  sudo mkdir -p $DEXTER_PATH
  sudo chown pi:pi -R $DEXTER_PATH
  # NOTE(review): both cd calls are unchecked — if the clone fails we remain
  # in $DEXTER_PATH and later steps would run in the wrong directory.
  cd $DEXTER_PATH
  # it's simpler and more reliable (for now) to just delete the repo and clone a new one
  # otherwise, we'd have to deal with all the intricacies of git
  sudo rm -rf $GROVEPI_DIR
  # NOTE(review): upstream GrovePi lives under the DexterInd organisation;
  # confirm the "poipoi" fork below is intentional.
  git clone --quiet --depth=1 -b $selectedbranch https://github.com/poipoi/GrovePi.git
  cd $GROVEPI_DIR
}
################################################
######## Install Python Packages & Deps ########
################################################
# called by <<install_python_pkgs_and_dependencies>>
install_python_packages() {
  ## Install the package (setup.py in the current directory) for python 2,
  ## and for python 3 when $usepython3exec is "true", honoring the selected
  ## scope: system-wide (sudo), user-local (--user) or env-local.
  ## Bug fix: the original &&-chained one-liners silently skipped the
  ## python3 install whenever the python2 install exited non-zero, and the
  ## function's exit status depended on whichever flag test ran last.
  if [[ $systemwide = "true" ]]; then
    sudo python setup.py install
    if [[ $usepython3exec = "true" ]]; then
      sudo python3 setup.py install
    fi
  elif [[ $userlocal = "true" ]]; then
    python setup.py install --user
    if [[ $usepython3exec = "true" ]]; then
      python3 setup.py install --user
    fi
  elif [[ $envlocal = "true" ]]; then
    # inside a virtualenv: plain install, no sudo and no --user
    python setup.py install
    if [[ $usepython3exec = "true" ]]; then
      python3 setup.py install
    fi
  fi
}
# called by <<install_python_pkgs_and_dependencies>>
remove_python_packages() {
  ## Remove every installed copy of the python package named by $1 —
  ## python2 and python3, both current-user and root installs — by locating
  ## each install's directory via pkgutil and deleting it.
  # the 1st and only argument
  # takes the name of the package that needs to removed
  rm -f $PIHOME/.pypaths
  # get absolute path to python package
  # saves output to file because we want to have the syntax highlight working
  # does this for both root and the current user because packages can be either system-wide or local
  # later on the strings used with the python command can be put in just one string that gets used repeatedly
  python -c "import pkgutil; import os; \
eggs_loader = pkgutil.find_loader('$1'); found = eggs_loader is not None; \
output = os.path.dirname(os.path.realpath(eggs_loader.get_filename('$1'))) if found else ''; print(output);" >> $PIHOME/.pypaths
  sudo python -c "import pkgutil; import os; \
eggs_loader = pkgutil.find_loader('$1'); found = eggs_loader is not None; \
output = os.path.dirname(os.path.realpath(eggs_loader.get_filename('$1'))) if found else ''; print(output);" >> $PIHOME/.pypaths
  if [[ $usepython3exec = "true" ]]; then
    python3 -c "import pkgutil; import os; \
eggs_loader = pkgutil.find_loader('$1'); found = eggs_loader is not None; \
output = os.path.dirname(os.path.realpath(eggs_loader.get_filename('$1'))) if found else ''; print(output);" >> $PIHOME/.pypaths
    sudo python3 -c "import pkgutil; import os; \
eggs_loader = pkgutil.find_loader('$1'); found = eggs_loader is not None; \
output = os.path.dirname(os.path.realpath(eggs_loader.get_filename('$1'))) if found else ''; print(output);" >> $PIHOME/.pypaths
  fi
  # removing eggs for $1 python package
  # ideally, easy-install.pth needs to be adjusted too
  # but pip seems to know how to handle missing packages, which is okay
  while read path;
  do
    # blank lines mean "not installed for that interpreter" — skip them
    if [ ! -z "${path}" -a "${path}" != " " ]; then
      echo "Removing ${path} egg"
      sudo rm -f "${path}"
    fi
  done < $PIHOME/.pypaths
}
# called by <<install_python_pkgs_and_dependencies>>
install_deb_dependencies() {
  ## Install the apt packages GrovePi needs (nodejs, i2c tooling, and the
  ## python2/python3 build + GPIO/numeric stacks).
  feedback "Installing dependencies for the GrovePi"
  # in order for nodejs to be installed, the repo for it
  # needs to be in; this is all done in script_tools while doing an apt-get update
  # (bug fix: "nodejs\" had no space before the line continuation — with an
  # unindented continuation line the two package names would fuse)
  sudo apt-get install --no-install-recommends -y nodejs \
    git libi2c-dev i2c-tools \
    python-setuptools python-pip python-smbus python-dev python-serial python-rpi.gpio python-numpy python-scipy \
    python3-setuptools python3-pip python3-smbus python3-dev python3-serial python3-rpi.gpio python3-numpy python3-scipy
  feedback "Dependencies for the GrovePi installed"
}
# called way down below
install_python_pkgs_and_dependencies() {
  ## Optionally install the deb dependencies plus the firmware script, then
  ## replace the installed grovepi python package with the freshly cloned one.
  # installing dependencies if required
  if [[ $installdependencies = "true" ]]; then
    feedback "Installing GrovePi dependencies. This might take a while.."
    install_deb_dependencies
    # robustness fix: subshell with "cd &&" instead of unchecked pushd/popd,
    # so a missing directory can no longer run install.sh in the wrong place
    ( cd $GROVEPI_DIR/Script && sudo bash ./install.sh )
  fi
  # feedback "Removing \"$REPO_PACKAGE\" and \"$DHT_PACKAGE\" to make space for new ones"
  feedback "Removing \"$REPO_PACKAGE\" to make space for the new one"
  remove_python_packages "$REPO_PACKAGE"
  # remove_python_packages "$DHT_PACKAGE"
  # installing the package itself
  ( cd $GROVEPI_DIR/Software/Python && install_python_packages )
}
################################################
######## Aggregating all function calls ########
################################################
# Entry point: the functions defined above run in this fixed order.
check_if_run_with_pi                  # 1. refuse to run as any user but "pi"
parse_cmdline_arguments "$@"          # 2. parse flags/branch, build rfrtools_options
install_rfrtools_repo                 # 3. install RFR_Tools (unless bypassed)
clone_grovepi                         # 4. fresh shallow clone of the GrovePi repo
install_python_pkgs_and_dependencies  # 5. deb deps + python package (re)install
exit 0
|
#!/usr/bin/env zsh
#
# Script for bootstraping your shell environment.
#
# Author:
# Larry Gordon
#
# License:
# The MIT License (MIT) <http://psyrendust.mit-license.org/2014/license.html>
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# GET THE LOCATION OF THIS SCRIPT RELATIVE TO THE CWD
# ------------------------------------------------------------------------------
# Resolve this script's real location (following symlink chains) so that
# alf_migrate_cwd / alf_migrate_root point at its physical directory.
alf_migrate="$0"
# While the filename in $alf_migrate is a symlink
while [ -L "$alf_migrate" ]; do
  # similar to above, but -P forces a change to the physical not symbolic directory
  alf_migrate_cwd="$( cd -P "$( dirname "$alf_migrate" )" && pwd )"
  # Get the value of symbolic link
  # If $alf_migrate is relative (doesn't begin with /), resolve relative
  # path where symlink lives
  # NOTE(review): "readlink -f" is GNU-specific (absent on stock macOS) and
  # already yields an absolute path, so prefixing $alf_migrate_cwd/ here
  # looks redundant (plain "readlink" is the usual pattern) — confirm.
  alf_migrate="$(readlink -f "$alf_migrate")" && alf_migrate="$alf_migrate_cwd/$alf_migrate"
done
alf_migrate_cwd="$( cd -P "$( dirname "$alf_migrate" )" && pwd )"
# Parent of the script's directory — the framework root.
alf_migrate_root="${alf_migrate_cwd%/*}"
# ------------------------------------------------------------------------------
# Helper functions
# ------------------------------------------------------------------------------
# Check if a formula is installed in homebrew
_brew-is-installed() {
  # Emits the formula name when installed, an empty line otherwise;
  # callers test the command substitution with [[ -n ... ]].
  local formula="$1"
  echo $(brew list 2>/dev/null | grep "^${formula}$")
}
# Check if a cask application is installed
_cask-is-installed() {
  # Emits the cask name when installed, an empty line otherwise;
  # callers test the command substitution with [[ -n ... ]].
  local cask_name="$1"
  echo $(brew cask list 2>/dev/null | grep "^${cask_name}$")
}
# Check if a formula is tapped in homebrew
_brew-is-tapped() {
  # Emits the tap name when present, an empty line otherwise;
  # callers test the command substitution with [[ -n ... ]].
  local tap_name="$1"
  echo $(brew tap 2>/dev/null | grep "^${tap_name}$")
}
# Print pretty colors to stdout in Cyan.
# -i: inline — print without a trailing newline.
ppinfo() {
  # Bug fix: has_option (and opt) were globals, so after one "-i" call every
  # later call without -i still took the inline branch and shifted away its
  # first argument. Declaring them local resets them on each invocation.
  local opt has_option
  while getopts ":i" opt; do
    [[ $opt == "i" ]] && has_option=1
  done
  if [[ -n $has_option ]]; then
    shift && printf '\033[0;36m%s\033[0m' "$@"
  else
    printf '\033[0;36m%s\033[0m\n' "$@"
  fi
}
# Print pretty colors to stdout in Green.
# -i: inline — print without a trailing newline.
ppsuccess() {
  # Bug fix: has_option (and opt) were globals, so one "-i" call poisoned
  # every subsequent plain call. Locals reset per invocation.
  local opt has_option
  while getopts ":i" opt; do
    [[ $opt == "i" ]] && has_option=1
  done
  if [[ -n $has_option ]]; then
    shift && printf '\033[0;32m%s\033[0m' "$@"
  else
    printf '\033[0;32m%s\033[0m\n' "$@"
  fi
}
# Print pretty colors to stdout in Brown.
# -i: inline — print without a trailing newline.
ppwarning() {
  # Bug fix: has_option (and opt) were globals, so one "-i" call poisoned
  # every subsequent plain call. Locals reset per invocation.
  local opt has_option
  while getopts ":i" opt; do
    [[ $opt == "i" ]] && has_option=1
  done
  if [[ -n $has_option ]]; then
    shift && printf '\033[0;33m%s\033[0m' "$@"
  else
    printf '\033[0;33m%s\033[0m\n' "$@"
  fi
}
# Print pretty colors to stdout in Red.
# -i: inline — print without a trailing newline.
ppdanger() {
  # Bug fix: has_option (and opt) were globals, so one "-i" call poisoned
  # every subsequent plain call. Locals reset per invocation.
  local opt has_option
  while getopts ":i" opt; do
    [[ $opt == "i" ]] && has_option=1
  done
  if [[ -n $has_option ]]; then
    shift && printf '\033[0;31m%s\033[0m' "$@"
  else
    printf '\033[0;31m%s\033[0m\n' "$@"
  fi
}
# # ------------------------------------------------------------------------------
# # SETUP DEFAULT ZSH FILES, PATHS, AND VARS
# # ------------------------------------------------------------------------------
# cp -aR "$alf_migrate_root/templates/home/." "$HOME/"
# # Source .zshenv to get global paths and vars
# source $HOME/.zshenv 2>/dev/null
# Make a backup folder if it doesn't exist
# Bug fix: the path was "$.alf/backup/..." — "$." is not a shell parameter,
# so a literal relative directory named '$.alf' was created. The backup root
# presumably belongs under $HOME — TODO confirm against the framework's
# other ALF_* paths (e.g. the commented $ALF_CONFIG usage below).
export ALF_BACKUP_FOLDER="$HOME/.alf/backup/$(date '+%Y%m%d')"
[[ -d "$ALF_BACKUP_FOLDER" ]] || mkdir -p -m 775 "$ALF_BACKUP_FOLDER"
# # ------------------------------------------------------------------------------
# # vars for boostrap app installs
# # ------------------------------------------------------------------------------
# export alf_fn_config_bootstrap_apps_install_free="$ALF_SRC_TOOLS/bootstrap/apps/install-free.zsh"
# export alf_fn_config_bootstrap_apps_install_paid="$ALF_SRC_TOOLS/bootstrap/apps/install-paid.zsh"
# export alf_fn_config_bootstrap_apps_install_required_free="$ALF_SRC_TOOLS/bootstrap/apps/install-required-free.zsh"
# export alf_fn_config_bootstrap_apps_install_required_paid="$ALF_SRC_TOOLS/bootstrap/apps/install-required-paid.zsh"
# # ------------------------------------------------------------------------------
# # Let's get started
# # ------------------------------------------------------------------------------
# # ------------------------------------------------------------------------------
# # BACKUP
# # ------------------------------------------------------------------------------
# # Backup your current configuration stuff in
# # "$ALF_FRAMEWORKS_USER/backup/".
# # ------------------------------------------------------------------------------
# _alf-fn-backup-configs() {
# ppinfo 'Backup your current configuration stuff'
# [[ -s $HOME/.gemrc ]] && cp -a $HOME/.gemrc $ALF_BACKUP_FOLDER/.gemrc
# [[ -s $HOME/.gitconfig ]] && cp -a $HOME/.gitconfig $ALF_BACKUP_FOLDER/.gitconfig
# [[ -s $HOME/.gitignore_global ]] && cp -a $HOME/.gitignore_global $ALF_BACKUP_FOLDER/.gitignore_global
# [[ -d $HOME/.gitconfig-includes ]] && cp -a $HOME/.gitconfig-includes $ALF_BACKUP_FOLDER/.gitconfig-includes
# [[ -s $HOME/.zlogin ]] && cp -a $HOME/.zlogin $ALF_BACKUP_FOLDER/.zlogin
# [[ -s $HOME/.zprofile ]] && cp -a $HOME/.zprofile $ALF_BACKUP_FOLDER/.zprofile
# [[ -s $HOME/.zshenv ]] && cp -a $HOME/.zshenv $ALF_BACKUP_FOLDER/.zshenv
# [[ -s $HOME/.zshrc ]] && cp -a $HOME/.zshrc $ALF_BACKUP_FOLDER/.zshrc
# [[ -d $ALF_CONFIG ]] && cp -aR $ALF_CONFIG $ALF_BACKUP_FOLDER/.alf
# [[ -s /etc/hosts ]] && cp -a /etc/hosts $ALF_BACKUP_FOLDER/hosts
# [[ -s /etc/auto_master ]] && cp -a /etc/auto_master $ALF_BACKUP_FOLDER/auto_master
# [[ -s /etc/auto_smb ]] && cp -a /etc/auto_smb $ALF_BACKUP_FOLDER/auto_smb
# # a little cleanup
# [[ -s $HOME/.zsh-update ]] && mv $HOME/.zsh-update $ALF_BACKUP_FOLDER/.zsh-update
# [[ -s $HOME/.zsh_history ]] && mv $HOME/.zsh_history $ALF_BACKUP_FOLDER/.zsh_history
# rm $HOME/.zcompdump*
# rm $HOME/NUL
# }
# # ------------------------------------------------------------------------------
# # INIT VM SYMLINKS
# # ------------------------------------------------------------------------------
# # Symlink some folders to get us started in Virtualized Windows
# _alf-fn-init-vm() {
# if [[ -n $PLATFORM_IS_VM ]]; then
# # Remove any previous symlinks
# [[ -d "$ALF_CONFIG" ]] && rm -rf "$ALF_CONFIG"
# [[ -d "$ALF_REPOS" ]] && rm -rf "$ALF_REPOS"
# [[ -d "$HOME/.ssh" ]] && rm -rf "$HOME/.ssh"
# # Create symlinks
# ln -sf "$PLATFORM_VM_HOST/config" "$ZSH_CONFIG"
# ln -sf "$PLATFORM_VM_HOST/repos" "$ZSH_REPOS"
# ln -sf "$PLATFORM_VM_HOST/.ssh" "$HOME/.ssh"
# fi
# }
# # ------------------------------------------------------------------------------
# # COPY SOME INITIAL FILES TO THEIR NEW HOME
# # ------------------------------------------------------------------------------
# # Copy over template files
# _alf-fn-copy-templates() {
# ppinfo "Copy over template files"
# if [[ -n $PLATFORM_IS_CYGWIN ]]; then
# local platform_os="win"
# else
# local platform_os="mac"
# fi
# cp -aR "$ALF_SRC_TEMPLATES/home/." "$HOME/"
# cp -aR "$ALF_SRC_TEMPLATES/home-${platform_os}/." "$HOME/"
# cp -aR "$ALF_SRC_TEMPLATES_CONFIG/win/." "$ALF_CONFIG_WIN/"
# cp -aR "$ALF_SRC_TEMPLATES_CONFIG/git/." "$ALF_CONFIG_GIT/"
# cp -an "$ALF_SRC_TEMPLATES_CONFIG/blank/custom-"{mac,win}.gitconfig "$ALF_CONFIG_GIT/"
# }
# # ------------------------------------------------------------------------------
# # ASK THE USER FOR SOME INFORMATION
# # ------------------------------------------------------------------------------
# # See if we already have some user data
# _alf-fn-load-user-data() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# [[ -f "$alf_fn_config_user_info" ]] && source "$alf_fn_config_user_info"
# }
# # Would you like to replace your hosts file [y/n]?
# _alf-fn-ask-replace-hosts-file() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# [[ -f "$alf_fn_config_user_info" ]] && source "$alf_fn_config_user_info"
# if [[ -n "$alf_answer_replace_hosts_file" ]]; then
# ppinfo "Would you like to replace your hosts file [y/n]? "
# read alf_answer_replace_hosts_file
# echo "alf_answer_replace_hosts_file=$alf_answer_replace_hosts_file" >> $alf_fn_config_user_info
# fi
# }
# # Would you like to replace your /etc/auto_smb file with a new one [y/n]:
# _alf-fn-ask-automount-sugar-for-parallels() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# [[ -n $PLATFORM_IS_LINUX ]] && return # Exit if we are in Linux
# [[ -f "$alf_fn_config_user_info" ]] && source "$alf_fn_config_user_info"
# if [[ -n "$alf_answer_replace_auto_smb_file" ]]; then
# ppquestion "Would you like to replace your /etc/auto_smb file with a new one [y/n]: "
# read alf_answer_replace_auto_smb_file
# echo "alf_answer_replace_auto_smb_file=$alf_answer_replace_auto_smb_file" >> $alf_fn_config_user_info
# fi
# }
# # Check to see if a Git global user.name has been set
# _alf-fn-ask-git-user-name() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# [[ -f "$alf_fn_config_user_info" ]] && source "$alf_fn_config_user_info"
# if [[ -n "$alf_answer_git_user_name_first" ]]; then
# echo
# ppinfo -i "We need to configure your " && pplightpurple "Git Global user.name"
# ppinfo -i "Please enter your first and last name ["
# pplightpurple -i "Firstname Lastname"
# ppinfo -i "]: "
# read alf_answer_git_user_name_first alf_answer_git_user_name_last
# echo "alf_answer_git_user_name_first=\"${alf_answer_git_user_name_first}\"" >> $alf_fn_config_user_info
# echo "alf_answer_git_user_name_last=\"${alf_answer_git_user_name_last}\"" >> $alf_fn_config_user_info
# unset alf_answer_git_user_name_first
# unset alf_answer_git_user_name_last
# echo
# fi
# }
# # Check to see if a Git global user.email has been set
# _alf-fn-ask-git-user-email() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# [[ -f "$alf_fn_config_user_info" ]] && source "$alf_fn_config_user_info"
# if [[ -n "$alf_answer_git_user_email" ]]; then
# echo
# ppinfo -i "We need to configure your "
# pplightpurple "Git Global user.email"
# ppinfo -i "Please enter your work email address ["
# pplightpurple -i "first.last@domain.com"
# ppinfo -i "]: "
# read alf_answer_git_user_email
# echo "alf_answer_git_user_email=\"${alf_answer_git_user_email}\"" >> $alf_fn_config_user_info
# unset alf_answer_git_user_email
# echo
# fi
# }
# # ------------------------------------------------------------------------------
# # WHAT OS X APPLICATIONS WOULD YOU LIKE TO INSTALL?
# # ------------------------------------------------------------------------------
# # Would you like install require free apps [y/n]?
# _alf-fn-ask-install-require-free() {
# [[ -z $PLATFORM_IS_MAC ]] && return # Exit if we are not in OS X
# [[ -f "$alf_fn_config_user_info" ]] && source "$alf_fn_config_user_info"
# source "$alf_fn_config_bootstrap_apps_install_required_free"
# zstyle -a ':alf:install:available:free' install ''
# zstyle -a ':alf:install:available:paid:tier1' install ''
# zstyle -a ':alf:install:available:paid:tier2' install ''
# zstyle -a ':alf:install:available:required:free' install ''
# zstyle -a ':alf:install:available:required:paid' install ''
# if [[ -n "$alf_answer_install_required_free" ]]; then
# ppinfo -i "Would you like to install the required free apps ( "
# pplightpurple "$(_apps-print ':alf:install:available:free')"
# ppinfo "): [y/n]? "
# read alf_answer_install_a_better_finder_rename
# echo "alf_answer_install_a_better_finder_rename=$alf_answer_install_a_better_finder_rename" >> $alf_fn_config_user_info
# fi
# }
# # Would you like to a-better-finder-rename [y/n]?
# _alf-fn-ask-install-a-better-finder-rename() {
# [[ -z $PLATFORM_IS_MAC ]] && return # Exit if we are not in OS X
# [[ -f "$alf_fn_config_user_info" ]] && source "$alf_fn_config_user_info"
# if [[ -n "$alf_answer_install_a-better-finder-rename" ]]; then
# ppinfo "Would you like to install a-better-finder-rename [y/n]? "
# read alf_answer_install_a_better_finder_rename
# echo "alf_answer_install_a_better_finder_rename=$alf_answer_install_a_better_finder_rename" >> $alf_fn_config_user_info
# fi
# }
# # Would you like to a-better-finder-rename [y/n]?
# _alf-fn-ask-install-a-better-finder-rename() {
# [[ -z $PLATFORM_IS_MAC ]] && return # Exit if we are not in OS X
# [[ -f "$alf_fn_config_user_info" ]] && source "$alf_fn_config_user_info"
# if [[ -n "$alf_answer_install_a-better-finder-rename" ]]; then
# ppinfo "Would you like to install a-better-finder-rename [y/n]? "
# read alf_answer_install_a_better_finder_rename
# echo "alf_answer_install_a_better_finder_rename=$alf_answer_install_a_better_finder_rename" >> $alf_fn_config_user_info
# fi
# }
# # Would you like to adobe-creative-cloud [y/n]?
# _alf-fn-ask-install-adobe-creative-cloud() {
# [[ -z $PLATFORM_IS_MAC ]] && return # Exit if we are not in OS X
# [[ -f "$alf_fn_config_user_info" ]] && source "$alf_fn_config_user_info"
# if [[ -n "$alf_answer_install_adobe_creative_cloud" ]]; then
# ppinfo "Would you like to install adobe-creative-cloud [y/n]? "
# read alf_answer_install_adobe_creative_cloud
# echo "alf_answer_install_adobe_creative_cloud=$alf_answer_install_adobe_creative_cloud" >> $alf_fn_config_user_info
# fi
# }
# # Would you like to airmail [y/n]?
# _alf-fn-ask-install-airmail() {
# [[ -z $PLATFORM_IS_MAC ]] && return # Exit if we are not in OS X
# [[ -f "$alf_fn_config_user_info" ]] && source "$alf_fn_config_user_info"
# if [[ -n "$alf_answer_install_airmail" ]]; then
# ppinfo "Would you like to install airmail [y/n]? "
# read alf_answer_install_airmail
# echo "alf_answer_install_airmail=$alf_answer_install_airmail" >> $alf_fn_config_user_info
# fi
# }
# # Would you like to airmail [y/n]?
# _alf-fn-ask-install-airmail() {
# [[ -z $PLATFORM_IS_MAC ]] && return # Exit if we are not in OS X
# [[ -f "$alf_fn_config_user_info" ]] && source "$alf_fn_config_user_info"
# if [[ -n "$alf_answer_install_airmail" ]]; then
# ppinfo "Would you like to install airmail [y/n]? "
# read alf_answer_install_airmail
# echo "alf_answer_install_airmail=$alf_answer_install_airmail" >> $alf_fn_config_user_info
# fi
# }
# # Would you like to alfred [y/n]?
# _alf-fn-ask-install-alfred() {
# [[ -z $PLATFORM_IS_MAC ]] && return # Exit if we are not in OS X
# [[ -f "$alf_fn_config_user_info" ]] && source "$alf_fn_config_user_info"
# if [[ -n "$alf_answer_install-alfred" ]]; then
# ppinfo "Would you like to install alfred [y/n]? "
# read alf_answer_install-alfred
# echo "alf_answer_install-alfred=$alf_answer_install-alfred" >> $alf_fn_config_user_info
# fi
# }
# # Would you like to flux [y/n]?
# _alf-fn-ask-install-flux() {
# [[ -z $PLATFORM_IS_MAC ]] && return # Exit if we are not in OS X
# [[ -f "$alf_fn_config_user_info" ]] && source "$alf_fn_config_user_info"
# if [[ -n "$alf_answer_install_flux" ]]; then
# ppinfo "Would you like to install flux [y/n]? "
# read alf_answer_install_flux
# echo "alf_answer_install_flux=$alf_answer_install_flux" >> $alf_fn_config_user_info
# fi
# }
# # Would you like to sizeup [y/n]?
# _alf-fn-ask-install-sizeup() {
# [[ -z $PLATFORM_IS_MAC ]] && return # Exit if we are not in OS X
# [[ -f "$alf_fn_config_user_info" ]] && source "$alf_fn_config_user_info"
# if [[ -n "$alf_answer_install_sizeup" ]]; then
# ppinfo "Would you like to install sizeup [y/n]? "
# read alf_answer_install_sizeup
# echo "alf_answer_install_sizeup=$alf_answer_install_sizeup" >> $alf_fn_config_user_info
# fi
# }
# # Would you like to GOOGLE [y/n]?
# _alf-fn-ask-install-GOOGLE() {
# [[ -z $PLATFORM_IS_MAC ]] && return # Exit if we are not in OS X
# [[ -f "$alf_fn_config_user_info" ]] && source "$alf_fn_config_user_info"
# if [[ -n "$alf_answer_install_GOOGLE" ]]; then
# ppinfo "Would you like to install GOOGLE [y/n]? "
# read alf_answer_install_GOOGLE
# echo "alf_answer_install_GOOGLE=$alf_answer_install_GOOGLE" >> $alf_fn_config_user_info
# fi
# }
# # Would you like to GOOGLE [y/n]?
# _alf-fn-ask-install-GOOGLE() {
# [[ -z $PLATFORM_IS_MAC ]] && return # Exit if we are not in OS X
# [[ -f "$alf_fn_config_user_info" ]] && source "$alf_fn_config_user_info"
# if [[ -n "$alf_answer_install_GOOGLE" ]]; then
# ppinfo "Would you like to install GOOGLE [y/n]? "
# read alf_answer_install_GOOGLE
# echo "alf_answer_install_GOOGLE=$alf_answer_install_GOOGLE" >> $alf_fn_config_user_info
# fi
# }
# # Would you like to Google Chrome [y/n]?
# _alf-fn-ask-install-google-chrome() {
# [[ -z $PLATFORM_IS_MAC ]] && return # Exit if we are not in OS X
# [[ -f "$alf_fn_config_user_info" ]] && source "$alf_fn_config_user_info"
# if [[ -n "$alf_answer_install_google_chrome" ]]; then
# ppinfo "Would you like to install Google Chrome [y/n]? "
# read alf_answer_install_google_chrome
# echo "alf_answer_install_google_chrome=$alf_answer_install_google_chrome" >> $alf_fn_config_user_info
# fi
# }
# # ------------------------------------------------------------------------------
# # SETUP GIT CONFIGURATION
# # ------------------------------------------------------------------------------
# # Check to see if config/git/user has been created
# _alf-fn-config-git-user() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# cp -an "$ALF_SRC_TEMPLATES_CONFIG/blank/user.gitconfig" "$ALF_CONFIG_GIT/user.gitconfig"
# }
# # Set Git global user.name
# _alf-fn-git-user-name() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# [[ -f "$alf_fn_config_user_info" ]] && source "$alf_fn_config_user_info"
# if [[ $(git config user.name) == "" ]]; then
# echo " name = ${alf_answer_git_user_name_first} ${alf_answer_git_user_name_last}" >> "$ALF_CONFIG_GIT/user.gitconfig"
# ppinfo -i "Git config user.name saved to: "
# pplightcyan "$ALF_CONFIG_GIT/user.gitconfig"
# unset alf_answer_git_user_name_first
# unset alf_answer_git_user_name_last
# echo
# fi
# }
# # Set Git global user.email
# _alf-fn-git-user-email() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# [[ -f "$alf_fn_config_user_info" ]] && source "$alf_fn_config_user_info"
# if [[ $(git config user.email) == "" ]]; then
# echo " email = ${alf_answer_git_user_email}" >> "$ALF_CONFIG_GIT/user.gitconfig"
# ppinfo -i "Git config user.email saved to: "
# pplightcyan "$ALF_CONFIG_GIT/user.gitconfig"
# unset alf_answer_git_user_email
# echo
# fi
# }
# # ------------------------------------------------------------------------------
# # SETUP HOMEBREW AND BREW CASK
# # ------------------------------------------------------------------------------
# # Install Homebrew
# _alf-fn-install-homebrew() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# ppinfo "Checking for homebrew..."
# if [[ -z $(which -s brew 2>/dev/null) ]]; then
# ppdanger "Homebrew missing. Installing Homebrew..."
# # https://github.com/mxcl/homebrew/wiki/installation
# ruby -e "$(curl -fsSL https://raw.github.com/Homebrew/homebrew/go/install)"
# else
# ppsuccess "Homebrew already installed!"
# fi
# }
# # Check with brew doctor
# _alf-fn-brew-doctor() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# ppinfo "Check with brew doctor"
# brew doctor
# }
# # Make sure we’re using the latest Homebrew
# _alf-fn-latest-homebrew() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# ppinfo "Make sure we’re using the latest Homebrew"
# brew update
# }
# # Upgrade any already-installed formulae
# _alf-fn-brew-upgrade() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# ppinfo "Upgrade any already-installed formulae"
# brew upgrade
# }
# # Install homebrew-cask
# _alf-fn-brew-install-homebrew-cask() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# if [[ -n $(_brew-is-installed "brew-cask") ]]; then
# ppinfo "Install homebrew-cask"
# brew install phinze/cask/brew-cask
# fi
# }
# # ------------------------------------------------------------------------------
# # INSTALL OS X APPLICATIONS
# # ------------------------------------------------------------------------------
# # Install google-chrome
# _alf-fn-brew-install-google-chrome() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# if [[ -n $(_brew-is-installed "findutils") ]]; then
# ppinfo "Install google-chrome"
# brew cask install google-chrome
# fi
# }
# # Install google-chrome
# _alf-fn-brew-install-google-chrome() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# if [[ -n $(_brew-is-installed "findutils") ]]; then
# ppinfo "Install google-chrome"
# brew cask install google-chrome
# fi
# }
# # ------------------------------------------------------------------------------
# # INSTALL SHELL RELATED TOOLS AND APPLICATIONS
# # ------------------------------------------------------------------------------
# # Install GNU core utilities (those that come with OS X are outdated)
# _alf-fn-brew-install-coreutils() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# if [[ -n $(_brew-is-installed "coreutils") ]]; then
# ppinfo "Install GNU core utilities (those that come with OS X are outdated)"
# brew install coreutils
# ppemphasis "Don’t forget to add \$(brew --prefix coreutils)/libexec/gnubin to \$PATH"
# fi
# }
# # Install GNU find, locate, updatedb, and xargs, g-prefixed
# _alf-fn-brew-install-findutils() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# if [[ -n $(_brew-is-installed "findutils") ]]; then
# ppinfo "Install GNU find, locate, updatedb, and xargs, g-prefixed"
# brew install findutils
# fi
# }
# # Install the latest Bash
# _alf-fn-brew-install-bash() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# if [[ -n $(_brew-is-installed "bash") ]]; then
# ppinfo "Install the latest Bash"
# brew install bash
# fi
# }
# # Install the latest Zsh
# _alf-fn-brew-install-zsh() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# if [[ -n $(_brew-is-installed "zsh") ]]; then
# ppinfo "Install the latest Zsh"
# brew install zsh
# fi
# }
# # Add bash to the allowed shells list if it's not already there
# _alf-fn-bash-shells() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# ppinfo "Add bash to the allowed shells list if it's not already there"
# if [[ -z $(cat /private/etc/shells | grep "/usr/local/bin/bash") ]]; then
# sudo bash -c "echo /usr/local/bin/bash >> /private/etc/shells"
# fi
# }
# # Add zsh to the allowed shells list if it's not already there
# _alf-fn-zsh-shells() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# ppinfo "Add zsh to the allowed shells list if it's not already there"
# if [[ -z $(cat /private/etc/shells | grep "/usr/local/bin/zsh") ]]; then
# sudo bash -c "echo /usr/local/bin/zsh >> /private/etc/shells"
# fi
# }
# # Change root shell to the new zsh
# _alf-fn-sudo-chsh-zsh() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# ppinfo "Change root shell to the new zsh"
# sudo chsh -s /usr/local/bin/zsh
# }
# # Change local shell to the new zsh
# _alf-fn-chsh-zsh() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# ppinfo "Change local shell to the new zsh"
# chsh -s /usr/local/bin/zsh
# }
# # Make sure that everything went well
# _alf-fn-check-shell() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# ppinfo "Making sure that everything went well"
# ppinfo "Checking \$SHELL"
# if [[ "$SHELL" == "/usr/local/bin/zsh" ]]; then
# ppinfo "Great! Running $(zsh --version)"
# else
# ppdanger "\$SHELL is not /usr/local/bin/zsh"
# exit
# fi
# }
# # Install wget with IRI support
# _alf-fn-brew-install-wget() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# if [[ -n $(_brew-is-installed "wget") ]]; then
# ppinfo "Install wget with IRI support"
# brew install wget --enable-iri
# fi
# }
# # Tap homebrew/dupes
# _alf-fn-brew-tap-homebrew-dupes() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# if [[ -n $(_brew-is-tapped "homebrew/dupes") ]]; then
# ppinfo "Tap homebrew/dupes"
# ppinfo "brew tap homebrew/dupes"
# brew tap homebrew/dupes
# fi
# }
# # Install more recent versions of some OS X tools
# _alf-fn-brew-install-grep() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# if [[ -n $(_brew-is-installed "grep") ]]; then
# ppinfo "brew install homebrew/dupes/grep --default-names"
# brew install homebrew/dupes/grep --default-names
# fi
# }
# # brew install ack
# _alf-fn-brew-install-ack() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# if [[ -n $(_brew-is-installed "ack") ]]; then
# ppinfo "brew install ack"
# brew install ack
# fi
# }
# # brew install automake
# _alf-fn-brew-install-automake() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# if [[ -n $(_brew-is-installed "automake") ]]; then
# ppinfo "brew install automake"
# brew install automake
# fi
# }
# # brew install curl-ca-bundle
# _alf-fn-curl-ca-bundle() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# if [[ -n $(_brew-is-installed "curl-ca-bundle") ]]; then
# ppinfo "brew install curl-ca-bundle"
# brew install curl-ca-bundle
# fi
# }
# # brew install fasd
# _alf-fn-brew-install-fasd() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# if [[ -n $(_brew-is-installed "fasd") ]]; then
# ppinfo "brew install fasd"
# brew install fasd
# fi
# }
# # brew install git
# _alf-fn-brew-install-git() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# if [[ -n $(_brew-is-installed "git") ]]; then
# ppinfo "brew install git"
# brew install git
# fi
# }
# # Windows clone git
# _alf-fn-windows-clone-git() {
# [[ -z $PLATFORM_IS_VM ]] && return # Exit if we are not in a VM
# ppinfo "Windows clone git"
# git clone git://git.kernel.org/pub/scm/git/git.git $ZSH_DEV/git
# }
# # Windows make
# _alf-fn-windows-make() {
# [[ -z $PLATFORM_IS_VM ]] && return # Exit if we are not in a VM
# ppinfo "Windows: make prefix=/usr/local all"
# cd $ZSH_DEV/git
# make prefix=/usr/local all
# }
# # Windows make install
# _alf-fn-windows-make-install() {
# [[ -z $PLATFORM_IS_VM ]] && return # Exit if we are not in a VM
# ppinfo "Windows: make prefix=/usr/local install"
# cd $ZSH_DEV/git
# make prefix=/usr/local install
# }
# # brew install optipng
# _alf-fn-brew-install-optipng() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# if [[ -n $(_brew-is-installed "optipng") ]]; then
# ppinfo "brew install optipng"
# brew install optipng
# fi
# }
# # brew install phantomjs
# _alf-fn-brew-install-phantomjs() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# if [[ -n $(_brew-is-installed "phantomjs") ]]; then
# ppinfo "brew install phantomjs"
# brew install phantomjs
# fi
# }
# # brew install rename
# _alf-fn-brew-install-rename() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# if [[ -n $(_brew-is-installed "rename") ]]; then
# ppinfo "brew install rename"
# brew install rename
# fi
# }
# # brew install tree
# _alf-fn-brew-install-tree() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# if [[ -n $(_brew-is-installed "tree") ]]; then
# ppinfo "brew install tree"
# brew install tree
# fi
# }
# # Remove node if it's not installed by brew
# # ------------------------------------------------------------------------------
# _alf-fn-remove-node() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# # Is node installed by brew and if node is installed
# if [[ -n $(_brew-is-installed "node") ]] && [[ -z $(which node | grep "not found") ]]; then
# ppinfo "Remove node because it's not installed by brew"
# lsbom -f -l -s -pf /var/db/receipts/org.nodejs.pkg.bom | while read f; do [[ -f /usr/local/${f} ]] && sudo rm -rf /usr/local/${f}; done
# [[ -f /usr/local/lib/node ]] && sudo rm -rf /usr/local/lib/node /usr/local/lib/node_modules /var/db/receipts/org.nodejs.*
# [[ -d /usr/local/lib/node_modules ]] && sudo rm -rf /usr/local/lib/node_modules /var/db/receipts/org.nodejs.*
# [[ -f /var/db/receipts/org.nodejs.* ]] && sudo rm -rf /var/db/receipts/org.nodejs.*
# fi
# }
# # Remove npm
# # ------------------------------------------------------------------------------
# _alf-fn-remove-npm() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# # Remove npm
# if [[ -z $(which -s npm 2>/dev/null) ]]; then
# ppinfo "Remove npm: npm uninstall npm -g"
# npm uninstall npm -g
# fi
# if [[ -f "/usr/local/lib/npm" ]]; then
# ppinfo "Remove npm: rm /usr/local/lib/npm"
# rm -rf "/usr/local/lib/npm"
# fi
# }
# # brew install node
# # ------------------------------------------------------------------------------
# _alf-fn-brew-install-node() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# if [[ -n $(_brew-is-installed "node") ]]; then
# ppinfo "brew install node"
# brew install node
# fi
# }
# # brew install node
# # ------------------------------------------------------------------------------
# _alf-fn-brew-install-link-node() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# ppinfo "brew link node"
# brew link --overwrite node
# }
# # brew install haskell-platform
# # ------------------------------------------------------------------------------
# _alf-fn-brew-install-haskell() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# ppinfo "brew install haskell-platform"
# brew install haskell-platform
# }
# # cabal update
# # ------------------------------------------------------------------------------
# _alf-fn-cabal-update() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# ppinfo "cabal update"
# cabal update
# }
# # cabal install cabal-install
# # ------------------------------------------------------------------------------
# _alf-fn-cabal-install-cabal() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# ppinfo "cabal install cabal-install"
# cabal install cabal-install
# }
# # cabal install pandoc
# # Notes: useful for converting docs
# # pandoc -s -w man plog.1.md -o plog.1
# # ------------------------------------------------------------------------------
# _alf-fn-cabal-install-pandoc() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# ppinfo "cabal install pandoc"
# cabal install pandoc
# }
# # Remove outdated versions from the cellar
# # ------------------------------------------------------------------------------
# _alf-fn-brew-cleanup() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# ppinfo "Remove outdated versions from the cellar"
# brew cleanup
# }
# # # Install NVM
# # # ------------------------------------------------------------------------------
# # _alf-fn-install-nvm() {
# # if [[ -n $(_brew-is-installed "node") ]]; then
# # ppinfo "Install NVM"
# # curl https://raw.github.com/creationix/nvm/master/install.sh | sh
# # fi
# # }
# # # nvm install v0.10.25
# # # ------------------------------------------------------------------------------
# # _alf-fn-nvm-install() {
# # if [[ -s "$HOME/.nvm/nvm.sh" ]]; then
# # ppinfo "nvm install v0.10.25"
# # nvm install v0.10.25
# # fi
# # }
# # # nvm alias default 0.10.25
# # # ------------------------------------------------------------------------------
# # _alf-fn-nvm-default() {
# # if [[ -s "$HOME/.nvm/nvm.sh" ]]; then
# # ppinfo "nvm alias default 0.10.25"
# # nvm alias default 0.10.25
# # fi
# # }
# # # nvm use v0.10.25
# # # ------------------------------------------------------------------------------
# # _alf-fn-nvm-use() {
# # if [[ -s "$HOME/.nvm/nvm.sh" ]]; then
# # ppinfo "nvm use v0.10.25"
# # nvm use v0.10.25
# # fi
# # }
# # # Install npm
# # # ------------------------------------------------------------------------------
# # _alf-fn-install-npm() {
# # if [[ -s "$HOME/.nvm/nvm.sh" ]]; then
# # ppinfo "Install npm"
# # curl https://npmjs.org/install.sh | sh
# # fi
# # }
# # Cleanup old zsh dotfiles
# # ------------------------------------------------------------------------------
# _alf-fn-cleanup-old-dotfiles() {
# ppinfo "Cleanup old zsh dotfiles"
# rm "$HOME/.zcompdump*"
# rm "$HOME/.zsh-update"
# rm "$HOME/.zsh_history"
# }
# # Install oh-my-zsh
# # ------------------------------------------------------------------------------
# _alf-fn-install-oh-my-zsh() {
# ppinfo "Install oh-my-zsh"
# git clone https://github.com/robbyrussell/oh-my-zsh.git "$ZSH"
# }
# # # Clone alf if it's not already there
# # # ------------------------------------------------------------------------------
# # _alf-fn-install-alf() {
# # [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# # ppinfo "Clone alf if it's not already there"
# # git clone https://github.com/psyrendust/alf.git "$HOME/.tmp-alf"
# # }
# # # Swap out our curled version of alf with the git version
# # # ------------------------------------------------------------------------------
# # _alf-fn-swap-alf() {
# # [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# # ppinfo "Swap out our curled version of alf with the git version"
# # mv "$HOME/.alf" "$ALF_BACKUP_FOLDER/.alf"
# # mv "$HOME/.tmp-alf" "$HOME/.alf"
# # }
# # Install fonts DroidSansMono and Inconsolata
# # ------------------------------------------------------------------------------
# _alf-fn-install-mac-fonts() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# ppinfo "Install fonts DroidSansMono and Inconsolata"
# [[ -d "$HOME/Library/Fonts" ]] || mkdir -p "$HOME/Library/Fonts"
# cp -a "$ALF_SRC_FONTS/mac/DroidSansMono.ttf" "$HOME/Library/Fonts/DroidSansMono.ttf"
# cp -a "$ALF_SRC_FONTS/mac/Inconsolata.otf" "$HOME/Library/Fonts/Inconsolata.otf"
# }
# # Install fonts DroidSansMono and ErlerDingbats
# # ------------------------------------------------------------------------------
# _alf-fn-install-win-fonts() {
# if [[ -n $PLATFORM_IS_CYGWIN ]]; then
# ppinfo "Install fonts DroidSansMono and ErlerDingbats"
# [[ -d "/cygdrive/c/Windows/Fonts" ]] || mkdir -p "/cygdrive/c/Windows/Fonts"
# cp -a "$ALF_SRC_FONTS/win/DROIDSAM.TTF" "/cygdrive/c/Windows/Fonts/DROIDSAM.TTF"
# cp -a "$ALF_SRC_FONTS/win/ErlerDingbats.ttf" "/cygdrive/c/Windows/Fonts/ErlerDingbats.ttf"
# fi
# }
# # # Clone zshrc-work
# # # ------------------------------------------------------------------------------
# # _alf-fn-install-zsh-work() {
# # [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# # ppinfo "Clone zshrc-work"
# # git clone https://github.dev.xero.com/dev-larryg/zshrc-xero.git "$ALF_FRAMEWORKS_WORK"
# # }
# # # Install zshrc-user starter template
# # # ------------------------------------------------------------------------------
# # _alf-fn-zshrc-user-starter() {
# # [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# # ppinfo "Install zshrc-user starter template"
# # [[ -d "$ALF_FRAMEWORKS_USER" ]] || mkdir -p "$ALF_FRAMEWORKS_USER"
# # cp -aR "$ALF_SRC_TEMPLATES/user/." "$ALF_FRAMEWORKS_USER/"
# # }
# # # Create post-updates
# # # ------------------------------------------------------------------------------
# # _alf-fn-create-post-update() {
# # ppinfo "Create post-updates"
# # [[ -f "$ALF_SRC_TOOLS/post-update.zsh" ]] && cp -a "$ALF_SRC_TOOLS/post-update.zsh" "$ALF_RUN_ONCE/post-update-alf.zsh"
# # [[ -f "$ALF_FRAMEWORKS_USER/tools/post-update.zsh" ]] && cp -a "$ALF_FRAMEWORKS_USER/tools/post-update.zsh" "$ALF_RUN_ONCE/post-update-zshrc-personal.zsh"
# # [[ -f "$ALF_FRAMEWORKS_WORK/tools/post-update.zsh" ]] && cp -a "$ALF_FRAMEWORKS_WORK/tools/post-update.zsh" "$ALF_RUN_ONCE/post-update-zshrc-work.zsh"
# # }
# # Install iTerm2
# # ------------------------------------------------------------------------------
# _alf-fn-install-iterm2() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# if [[ ! -d "/Applications/iTerm.app" ]]; then
# ppinfo "Install iTerm2"
# local url="http://www.iterm2.com/downloads/stable/iTerm2_v1_0_0.zip"
# local zip="${url##http*/}"
# local download_dir="$HOME/Downloads/iterm2-$$"
# mkdir -p "$download_dir"
# curl -L "$url" -o "${download_dir}/${zip}"
# unzip -q "${download_dir}/${zip}" -d /Applications/
# rm -rf "$download_dir"
# fi
# }
# # Install default settings for iTerm2
# # Opening Terminal.app to install iTerm.app preferences
# # ------------------------------------------------------------------------------
# _alf-fn-switch-to-terminal() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# if [[ "$TERM_PROGRAM" == "iTerm.app" ]]; then
# ppwarning "You seem to be running this script from iTerm.app."
# ppwarning "Opening Terminal.app to install iTerm.app preferences."
# sleep 4
# osascript "$ALF_SRC_TOOLS/bootstrap-shell-to-term.zsh"
# exit 1
# fi
# }
# # Assume we are in Terminal app and install iTerm2 preferences
# # ------------------------------------------------------------------------------
# _alf-fn-install-iterm2-preferences() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# if [[ "$TERM_PROGRAM" == "Apple_Terminal" ]]; then
# if [[ -f "$ALF_SRC_TEMPLATES_CONFIG/iterm/com.googlecode.iterm2.plist" ]]; then
# ppinfo "Installing iTerm2 default preference and theme"
# if [[ ! -d "${HOME}/Library/Preferences" ]]; then
# mkdir -p "${HOME}/Library/Preferences"
# fi
# cp -a "$ALF_SRC_TEMPLATES_CONFIG/iterm/com.googlecode.iterm2.plist" "$HOME/Library/Preferences/com.googlecode.iterm2.plist"
# fi
# fi
# }
# # Open iTerm2 to pick up where we left off
# # ------------------------------------------------------------------------------
# _alf-fn-switch-to-iterm2() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# if [[ "$TERM_PROGRAM" == "Apple_Terminal" ]]; then
# ppwarning "You seem to be running this script from Terminal.app."
# ppwarning "Opening iTerm.app to pick up where we left off."
# sleep 4
# osascript "$ALF_SRC_TOOLS/bootstrap-shell-to-iterm.zsh"
# exit 1
# fi
# }
# # Install a default hosts file
# # ------------------------------------------------------------------------------
# _alf-fn-install-hosts-file() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# [[ -f "$alf_fn_config_user_info" ]] && source "$alf_fn_config_user_info"
# if [[ $alf_answer_replace_hosts_file = [Yy] ]]; then
# ppinfo 'install a default hosts file'
# sudo cp -a "$ALF_FRAMEWORKS_WORK/templates/hosts" "/etc/hosts"
# fi
# }
# # add some automount sugar for Parallels
# # ------------------------------------------------------------------------------
# _alf-fn-automount-sugar-for-parallels() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# [[ -n $PLATFORM_IS_LINUX ]] && return # Exit if we are in Linux
# [[ -f "$alf_fn_config_user_info" ]] && source "$alf_fn_config_user_info"
# if [[ $alf_answer_replace_auto_smb_file = [Yy] ]]; then
# ppinfo 'add some automount sugar for Parallels'
# sudo cp -a "$ALF_FRAMEWORKS_WORK/templates/auto_master" "/private/etc/auto_master"
# sudo cp -a "$ALF_FRAMEWORKS_WORK/templates/auto_smb" "/private/etc/auto_smb"
# fi
# }
# # let's do some admin type stuff
# # add myself to wheel group
# # ------------------------------------------------------------------------------
# _alf-fn-add-user-to-wheel() {
# if [[ -n $PLATFORM_IS_MAC ]]; then
# ppinfo "add myself to wheel group"
# sudo dseditgroup -o edit -a $(echo $USER) -t user wheel
# fi
# }
# # add myself to staff group
# # ------------------------------------------------------------------------------
# _alf-fn-add-user-to-staff() {
# if [[ -n $PLATFORM_IS_MAC ]]; then
# ppinfo "add myself to wheel group"
# sudo dseditgroup -o edit -a $(echo $USER) -t user staff
# fi
# }
# # Change ownership of /usr/local to root:wheel
# # ------------------------------------------------------------------------------
# _alf-fn-change-ownership-of-usr-local() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# ppinfo "Change ownership of /usr/local to wheel"
# sudo chown -R root:wheel /usr/local
# }
# # Change ownership of /Library/Caches/Homebrew to root:wheel
# # ------------------------------------------------------------------------------
# _alf-fn-give-ownership-group-write-permissions-library-caches-homebrew() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# ppinfo "Change ownership of /Library/Caches/Homebrew to root:wheel"
# sudo chown -R root:wheel /Library/Caches/Homebrew
# }
# # Give wheel group write permissions to /usr/local
# # ------------------------------------------------------------------------------
# _alf-fn-give-wheel-group-write-permissions-usr-local() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# ppinfo "Give wheel group write permissions to /usr/local"
# sudo chmod -R g+w /usr/local
# }
# # Give wheel group write permissions to /Library/Caches/Homebrew
# # ------------------------------------------------------------------------------
# _alf-fn-give-wheel-group-write-permissions-library-caches-homebrew() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# ppinfo "Give wheel group write permissions to /Library/Caches/Homebrew"
# sudo chmod -R g+w /Library/Caches/Homebrew
# }
# # https://rvm.io
# # Install rvm, latest stable ruby, and rails
# # ------------------------------------------------------------------------------
# _alf-fn-install-rvm() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# if [[ -z $ALF_HAS_RVM ]]; then
# ppinfo "Install rvm, latest stable ruby, and rails"
# curl -sSL https://get.rvm.io | bash -s stable --rails
# fi
# }
# # To start using RVM you need to run `source "/Users/$USER/.rvm/scripts/rvm"`
# # in all your open shell windows, in rare cases you need to reopen all shell windows.
# # sourcing rvm
# # ------------------------------------------------------------------------------
# _alf-fn-sourcing-rvm() {
# [[ -n $PLATFORM_IS_CYGWIN ]] && return # Exit if we are in Cygwin
# if [[ -f "$HOME/.rvm/scripts/rvm" ]]; then
# ppinfo "sourcing rvm"
# source "$HOME/.rvm/scripts/rvm"
# fi
# }
# # Update rvm
# # ------------------------------------------------------------------------------
# _alf-fn-rvm-get-stable() {
# [[ -n $PLATFORM_IS_CYGWIN ]] && return # Exit if we are in Cygwin
# ppinfo 'Update rvm'
# rvm get stable
# }
# _alf-fn-rvm-reload() {
# [[ -n $PLATFORM_IS_CYGWIN ]] && return # Exit if we are in Cygwin
# ppinfo 'Reload the updated version of rvm'
# rvm reload
# }
# _alf-fn-rvm-install-ruby() {
# [[ -n $PLATFORM_IS_CYGWIN ]] && return # Exit if we are in Cygwin
# if [[ -n $ALF_HAS_RVM ]]; then
# ppinfo 'rvm install 2.1.1'
# rvm install 2.1.1
# fi
# }
# _alf-fn-rvm-default() {
# [[ -n $PLATFORM_IS_CYGWIN ]] && return # Exit if we are in Cygwin
# if [[ -n $ALF_HAS_RVM ]]; then
# ppinfo 'rvm --default 2.1.1'
# rvm --default 2.1.1
# fi
# }
# _alf-fn-rvm-cleanup() {
# [[ -n $PLATFORM_IS_CYGWIN ]] && return # Exit if we are in Cygwin
# ppinfo 'rvm cleanup all'
# rvm cleanup all
# }
# # Check ruby version
# # ------------------------------------------------------------------------------
# _alf-fn-check-ruby-version() {
# ppinfo 'which ruby and version'
# ruby -v
# which ruby
# }
# # Load up gem helper function
# # ------------------------------------------------------------------------------
# _alf-fn-gem-update() {
# [[ -z $PLATFORM_IS_CYGWIN ]] && return # Exit if we are not in Cygwin
# source "$ALF_SRC_TOOLS/init-post-settings.zsh"
# }
# # Update gems
# # ------------------------------------------------------------------------------
# _alf-fn-gem-update() {
# ppinfo 'gem update --system'
# gem update --system
# }
# # Install latest gems for sass and compass dev
# # ------------------------------------------------------------------------------
# _alf-fn-gem-install-rails() {
# ppinfo 'gem install rails'
# gem install rails
# }
# _alf-fn-gem-install-bundler() {
# ppinfo 'gem install bundler'
# gem install bundler
# }
# _alf-fn-gem-install-compass() {
# ppinfo 'gem install compass --pre'
# gem install compass --pre
# }
# _alf-fn-gem-install-sass() {
# ppinfo 'gem install sass'
# gem install sass
# }
# # Install latest gem for man file generator
# # ------------------------------------------------------------------------------
# _alf-fn-gem-install-ronn() {
# ppinfo 'gem install ronn'
# gem install ronn
# }
# # Install latest gems for jekyll and markdown development
# # ------------------------------------------------------------------------------
# _alf-fn-gem-install-jekyll() {
# ppinfo 'gem install jekyll'
# gem install jekyll
# }
# _alf-fn-gem-install-rdiscount() {
# ppinfo 'gem install rdiscount'
# gem install rdiscount
# }
# _alf-fn-gem-install-redcarpet() {
# ppinfo 'gem install redcarpet'
# gem install redcarpet
# }
# _alf-fn-gem-install-RedCloth() {
# ppinfo 'gem install RedCloth'
# gem install RedCloth
# }
# _alf-fn-gem-install-rdoc() {
# ppinfo 'gem install rdoc'
# gem install rdoc -v 3.6.1
# }
# _alf-fn-gem-install-org-ruby() {
# ppinfo 'gem install org-ruby'
# gem install org-ruby
# }
# _alf-fn-gem-install-creole() {
# ppinfo 'gem install creole'
# gem install creole
# }
# _alf-fn-gem-install-wikicloth() {
# ppinfo 'gem install wikicloth'
# gem install wikicloth
# }
# _alf-fn-gem-install-asciidoctor() {
# ppinfo 'gem install asciidoctor'
# gem install asciidoctor
# }
# _alf-fn-gem-install-rake() {
# ppinfo 'gem install rake'
# gem install rake
# }
# # Install bower
# # ------------------------------------------------------------------------------
# _alf-fn-npm-install-bower() {
# ppinfo "Install bower"
# npm install -g bower
# }
# # Install jshint
# # ------------------------------------------------------------------------------
# _alf-fn-npm-install-jshint() {
# ppinfo "Install jshint"
# npm install -g jshint
# }
# # Install grunt-init
# # ------------------------------------------------------------------------------
# _alf-fn-npm-install-grunt-init() {
# ppinfo "Install grunt-init"
# npm install -g grunt-init
# }
# # Install grunt-cli
# # ------------------------------------------------------------------------------
# _alf-fn-npm-install-grunt-cli() {
# ppinfo "Install grunt-cli"
# npm install -g grunt-cli
# }
# # Remove all grunt-init plugins and start over
# # ------------------------------------------------------------------------------
# _alf-fn-remove-grunt-init-plugins() {
# ppinfo "Remove all grunt-init plugins and start over"
# if [[ -d "$ALF_GRUNT_INIT" ]]; then
# gruntinitplugins=$(ls "$ALF_GRUNT_INIT")
# for i in ${gruntinitplugins[@]}
# do
# rm -rf "$ALF_GRUNT_INIT/$i"
# done
# else
# mkdir "$ALF_GRUNT_INIT"
# fi
# }
# # Add gruntfile plugin for grunt-init
# # ------------------------------------------------------------------------------
# _alf-fn-add-grunt-init-gruntfile() {
# ppinfo "Add gruntfile plugin for grunt-init"
# git clone https://github.com/gruntjs/grunt-init-gruntfile.git "$ALF_GRUNT_INIT/gruntfile"
# }
# # Add commonjs plugin for grunt-init
# # ------------------------------------------------------------------------------
# _alf-fn-add-grunt-init-commonjs() {
# ppinfo "Add commonjs plugin for grunt-init"
# git clone https://github.com/gruntjs/grunt-init-commonjs.git "$ALF_GRUNT_INIT/commonjs"
# }
# # Add gruntplugin plugin for grunt-init
# # ------------------------------------------------------------------------------
# _alf-fn-add-grunt-init-gruntplugin() {
# ppinfo "Add gruntplugin plugin for grunt-init"
# git clone https://github.com/gruntjs/grunt-init-gruntplugin.git "$ALF_GRUNT_INIT/gruntplugin"
# }
# # Add jquery plugin for grunt-init
# # ------------------------------------------------------------------------------
# _alf-fn-add-grunt-init-jquery() {
# ppinfo "Add jquery plugin for grunt-init"
# git clone https://github.com/gruntjs/grunt-init-jquery.git "$ALF_GRUNT_INIT/jquery"
# }
# # Add node plugin for grunt-init
# # ------------------------------------------------------------------------------
# _alf-fn-add-grunt-init-node() {
# ppinfo "Add node plugin for grunt-init"
# git clone https://github.com/gruntjs/grunt-init-node.git "$ALF_GRUNT_INIT/node"
# }
# # Install easy_install
# # ------------------------------------------------------------------------------
# _alf-fn-install-easy-install() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# if [[ -n $(which easy_install 2>&1 | grep "not found") ]]; then
# ppinfo 'Install easy_install'
# curl http://peak.telecommunity.com/dist/ez_setup.py | python
# fi
# }
# # Installing Pygments for the c alias (syntax highlighted cat)
# # ------------------------------------------------------------------------------
# _alf-fn-install-pygments() {
# ppinfo 'Installing Pygments for the c alias (syntax highlighted cat)'
# if [[ -n $PLATFORM_IS_VM ]]; then
# easy_install Pygments
# else
# sudo easy_install Pygments
# fi
# }
# # Installing Docutils: Documentation Utilities for jekyll and markdown development
# # ------------------------------------------------------------------------------
# _alf-fn-install-docutils() {
# ppinfo 'Installing Docutils: Documentation Utilities'
# if [[ -n $PLATFORM_IS_VM ]]; then
# easy_install docutils
# else
# sudo easy_install docutils
# fi
# }
# # Installing pip
# # ------------------------------------------------------------------------------
# _alf-fn-install-pip() {
# if [[ -n $PLATFORM_IS_VM ]]; then
# if [[ ! -s "/usr/bin/pip" ]]; then
# ppinfo "Installing pip"
# easy_install pip
# fi
# else
# if [[ ! -s "/usr/local/bin/pip" ]]; then
# ppinfo "Installing pip"
# sudo easy_install pip
# fi
# fi
# }
# # Installing asciinema https://asciinema.org/
# # ------------------------------------------------------------------------------
# _alf-fn-install-asciinema() {
# [[ -n $PLATFORM_IS_VM ]] && return # Exit if we are in a VM
# if [[ ! -s "/usr/local/bin/asciinema" ]]; then
# ppinfo 'Installing asciinema https://asciinema.org/'
# sudo pip install --upgrade asciinema
# fi
# }
# # All done
# # ------------------------------------------------------------------------------
# _alf-fn-all-done() {
# /usr/bin/env zsh
# ppsuccess "We are all done!"
# ppemphasis ""
# ppemphasis "**************************************************"
# ppemphasis "**************** Don't forget to: ****************"
# ppemphasis "1. Setup your Parallels VM to autostart on login."
# ppemphasis "2. Set Parallels Shared Network DHCP Settings."
# ppemphasis " Start Address: 1.2.3.1"
# ppemphasis " End Address : 1.2.3.254"
# ppemphasis " Subnet Mask : 255.255.255.0"
# ppemphasis "**************************************************"
# ppemphasis ""
# ppemphasis "**************************************************"
# ppemphasis "***** You should restart your computer now. ******"
# ppemphasis "**************************************************"
# }
# # ------------------------------------------------------------------------------
# # Get down to business
# # ------------------------------------------------------------------------------
# # Ask for the administrator password upfront
# # ------------------------------------------------------------------------------
# if [[ -n $PLATFORM_IS_MAC ]]; then
# ppinfo "Ask for the administrator password upfront"
# sudo -v
# # Keep-alive: update existing `sudo` time stamp until
# # `bootstrap-shell.zsh` has finished
# # ----------------------------------------------------------------------------
# ppinfo "Keep-alive: update existing `sudo` time stamp until `bootstrap-shell.zsh` has finished"
# while true; do sudo -n true; sleep 60; kill -0 "$$" || exit; done 2>/dev/null &
# # Exporting /usr/local/bin to path
# # ----------------------------------------------------------------------------
# if [[ "$(echo $PATH)" != */usr/local/bin* ]]; then
# ppinfo "Adding /usr/local/bin to path"
# export PATH="/usr/local/bin:${PATH}"
# fi
# fi
# # See if RVM is installed
# # ------------------------------------------------------------------------------
# if [[ -f "$HOME/.rvm/scripts/rvm" ]]; then
# export ALF_HAS_RVM=1
# fi
# # Sourcing helper script to call all procedure functions in this script
# # ------------------------------------------------------------------------------
# if [[ -s "$ALF_SRC_TOOLS/alf-fn-init.zsh" ]]; then
# source "$ALF_SRC_TOOLS/alf-fn-init.zsh" $0
# fi
|
/**
 * Base shape shared by user and developer invitation records.
 * All fields are optional; concrete invite models extend this with
 * entity-specific identifiers (see InviteUserModel / InviteDeveloperModel).
 */
export interface AbstractInviteModel {
/** Email address the invitation is addressed to. */
email?: string;
/** Absolute expiry time — presumably an epoch timestamp; TODO confirm units (ms vs s) with the producer. */
expireDate?: number;
/** Invite lifetime in seconds. */
expireSeconds?: number;
/** Creation time — presumably an epoch timestamp; TODO confirm units with the producer. */
createdDate?: number;
/** Subject line of the invitation message. */
subject?: string;
/** Body text of the invitation message. */
body?: string;
/** Display name of the invitee. */
name?: string;
/** Invite type discriminator (semantics defined by the producer). */
type?: string;
/** Arbitrary extra payload attached to the invite. */
customData?: any;
/** Opaque token used to redeem the invitation. */
token?: string;
/** Time the invite was last (re)sent — presumably an epoch timestamp. */
lastSent?: number;
/** Role names associated with the invite. */
roles?: string[];
/** Permission names associated with the invite. */
permissions?: string[];
}
/** Invitation targeting a user; adds user-specific identifiers to the base invite shape. */
export interface InviteUserModel extends AbstractInviteModel {
/** Identifier of this user invite record. */
userInviteId?: string;
/** Identifier of the template this invite was created from. */
userInviteTemplateId?: string;
/** Identifier of the invited user. */
userId?: string;
/** Identifier of the account the user is invited into. */
userAccountId?: string;
}
/** Invitation targeting a developer; adds developer-specific identifiers to the base invite shape. */
export interface InviteDeveloperModel extends AbstractInviteModel {
/** Identifier of this developer invite record. */
developerInviteId?: string;
/** Identifier of the template this invite was created from. */
developerInviteTemplateId?: string;
/** Identifier of the invited developer. */
developerId?: string;
/** Identifier of the account the developer is invited into. */
developerAccountId?: string;
}
|
package db;
import java.util.ArrayList;
import java.util.Arrays;
import model.Category;
public class CategoryManager extends DBManager{
/**
 * Fetches all category rows whose big-category id matches the given id.
 *
 * @param bigCategoryId id of the parent (big) category to filter by
 * @return list of matching Category models, or null when nothing matched
 *         (legacy contract of getModelList)
 * @throws Exception propagated from the underlying DBConnector.select call
 */
public ArrayList<Category> findByBigCategoryId(String bigCategoryId) throws Exception{
	ArrayList<String> tableColumns = getTableColumnsAll();
	// Escape embedded single quotes so a quote inside the id cannot break out
	// of the SQL string literal (SQL injection / malformed query). The
	// DBConnector API only accepts raw query text, so a prepared statement
	// is not available here; doubling quotes is the standard SQL escape.
	String safeId = (bigCategoryId == null) ? "" : bigCategoryId.replace("'", "''");
	// Build the SELECT filtered on the big-category id column.
	String query = "SELECT * FROM `" +
			DBInfo.DB_NAME.toString() + "`.`" + DBInfo.TABLE_CATEGORY.toString() + "` WHERE `" +
			DBInfo.TABLE_CATEGORY_COLUMN_BIGCATEGORYID.toString() + "` = '" + safeId + "'";
	DBConnector dc = new DBConnector();
	ArrayList<ArrayList<String>> received = dc.select(query, tableColumns);
	// Convert the 2-D string rows into a 1-D list of Category models.
	return getModelList(received);
}
@Override
protected ArrayList<String> getTableColumnsAll() {
return new ArrayList<>(Arrays.asList(
DBInfo.TABLE_CATEGORY_COLUMN_ID.toString(),
DBInfo.TABLE_CATEGORY_COLUMN_NAME.toString(),
DBInfo.TABLE_CATEGORY_COLUMN_BIGCATEGORYID.toString()
));
}
@Override
protected String getSelectQueryByKeys(ArrayList<String> keyValues) {
return "SELECT * FROM `" +
DBInfo.DB_NAME.toString() + "`.`" + DBInfo.TABLE_CATEGORY.toString() + "` WHERE `" +
DBInfo.TABLE_CATEGORY_COLUMN_ID.toString() + "` = '" + keyValues.get(0) + "'";
}
@Override
protected ArrayList<Category> getModelList(ArrayList<ArrayList<String>> received) {
ArrayList<Category> result = new ArrayList<Category>();
for(ArrayList<String> row : received) {
result.add(new Category(row.get(0), row.get(1), row.get(2)));
}
return result.size() > 0 ? result : null;
}
@Override
protected ArrayList<String> getValuesFromObject(Object object){
Category category = (Category) object;
return new ArrayList<>(Arrays.asList(
category.getId(),
category.getName(),
category.getBigCategoryId()
));
}
@Override
protected String getTableName() {
return DBInfo.TABLE_CATEGORY.toString();
}
@Override
protected ArrayList<String> getKeyValuesFromObject(Object object) {
Category category = (Category) object;
return new ArrayList<>(Arrays.asList(
category.getId()
));
}
@Override
protected ArrayList<String> getKeyColumns() {
return new ArrayList<>(Arrays.asList(
DBInfo.TABLE_CATEGORY_COLUMN_ID.toString()
));
}
}
|
# Build documentation with Sphinx.
# Usage: build-docs.sh [builder]   — builder defaults to "html".
SOURCEDIR=.
BUILDDIR=../docs
# Default the builder with parameter expansion instead of a test/assign pair.
BUILDTYPE="${1:-html}"
CACHEDIR=_cache
# Fixed: CACHEDIR was defined but the sphinx-build call hard-coded "_cache";
# all expansions are now quoted so paths with spaces cannot word-split.
# -b selects the builder, -d the doctree cache, -a forces a full rebuild.
poetry run sphinx-build -b "$BUILDTYPE" -d "$CACHEDIR" -a "$SOURCEDIR" "$BUILDDIR"
|
// Main class
public class Discount {
// Method to apply 10 % discount to the shopping cart
public static void applyDiscount(List<Item> items) {
for (Item item : items) {
item.price = item.price * 0.9;
}
}
public static void main(String[] args) {
// Initialize array of items
List<Item> items = new ArrayList<>();
items.add(new Item("Chair", 50.00));
items.add(new Item("Table", 100.00));
items.add(new Item("Desk", 150.00));
// Print the original price
System.out.println("Original prices:");
for (Item item : items) {
System.out.println(item.name + ": " + item.price);
}
// Apply 10 % discount
applyDiscount(items);
//print discounted prices
System.out.println("Discounted prices:");
for (Item item : items) {
System.out.println(item.name + ": " + item.price);
}
}
}
// Item class
class Item {
public String name;
public double price;
public Item(String name, double price) {
this.name = name;
this.price = price;
}
} |
<!DOCTYPE HTML>
<html>
<head>
    <title>User Form</title>
</head>
<body>
    <form action="" method="post">
        <h2>User Form</h2>
        <!-- Labels are bound to their controls via for/id for accessibility;
             the email field uses type="email" for built-in validation. -->
        <label for="name">Name:</label><input type="text" name="name" id="name" /><br/>
        <label for="email">Email:</label><input type="email" name="email" id="email"/><br/>
        <label for="color">Favorite color:</label>
        <select name="color" id="color">
            <option value="red">Red</option>
            <option value="blue">Blue</option>
            <option value="green">Green</option>
            <option value="yellow">Yellow</option>
        </select><br/><br/>
        <input type="submit" value="Submit" />
    </form>
</body>
</html>
import QueryError from "~utils/errors/QueryError";
import { Fail, Success } from "~helpers/response";
import { USER_NOT_FOUND } from "~helpers/constants/responseCodes";
import { ACCOUNT_STATUS } from "~helpers/constants/models";
/**
 * Shared lookup for the single-user queries: fetch a user by id, wrap it in a
 * Success payload, translate a QueryError into a Fail payload, and rethrow
 * anything unexpected. Deduplicates the previously identical bodies of
 * `me` and `getUserById`.
 */
const findUserById = async ({ id, dataSources, t, info }) => {
  try {
    const user = await dataSources.users.findOne({
      where: { id },
      info,
      path: "user",
    });
    if (!user) {
      throw new QueryError(USER_NOT_FOUND);
    }
    return Success({ user });
  } catch (e) {
    if (e instanceof QueryError) {
      return Fail({
        message: t(e.message),
        code: e.code,
      });
    }
    throw e;
  }
};

export default {
  User: {
    // Lazily load the avatar association unless it was eager-loaded.
    async avatar(user) {
      if (user.avatar === undefined) {
        return user.getAvatar();
      }
      return user.avatar;
    },
    isOwner(user, _args, { currentUser }) {
      return user.id === currentUser?.id;
    },
    isLocked(user) {
      return [ACCOUNT_STATUS.BLOCKED, ACCOUNT_STATUS.LOCKED].includes(
        user.status
      );
    },
    // A user is "logged in" when any JWT audience has a live cached session.
    async isLoggedIn(user, _args, { cache, jwt }) {
      const sessions = await Promise.all(
        jwt.audience.map((aud) => cache.get(`${aud}:${user.id}`))
      );
      return sessions.some((session) => !!session);
    },
    // Lazily load roles unless they were eager-loaded.
    roles(user) {
      if (user.roles === undefined) {
        return user.getRoles();
      }
      return user.roles;
    },
  },
  Query: {
    // The currently authenticated user.
    async me(_parent, _args, { t, currentUser, dataSources }, info) {
      return findUserById({ id: currentUser.id, dataSources, t, info });
    },
    async getUserById(_parent, { id }, { dataSources, t }, info) {
      return findUserById({ id, dataSources, t, info });
    },
    users(_parent, { page, filter }, { dataSources }, info) {
      return dataSources.users.paginate({ page, filter, info });
    },
  },
};
|
# Make ripgrep search hidden files and use smart-case matching by default.
alias rg="rg --hidden --smart-case"
|
#!/bin/bash
# Integration test for `oc edit`.
set -o errexit
set -o nounset
set -o pipefail
OS_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${OS_ROOT}/hack/util.sh"
source "${OS_ROOT}/hack/cmd_util.sh"
os::log::install_errexit
# Cleanup cluster resources created by this test
(
  set +e
  oc delete all,templates --all
  exit 0
) &>/dev/null
# This test validates the edit command
os::cmd::expect_success 'oc create -f examples/hello-openshift/hello-pod.json'
os::cmd::expect_success_and_text 'OC_EDITOR=cat oc edit pod/hello-openshift' 'Edit cancelled'
os::cmd::expect_success_and_text 'OC_EDITOR=cat oc edit pod/hello-openshift' 'name: hello-openshift'
os::cmd::expect_success_and_text 'OC_EDITOR=cat oc edit --windows-line-endings pod/hello-openshift | file -' 'CRLF'
# Fixed: this negative check previously grepped for the misspelling 'CRFL',
# which made the assertion vacuously true; it must assert the absence of CRLF.
os::cmd::expect_success_and_not_text 'OC_EDITOR=cat oc edit --windows-line-endings=false pod/hello-openshift | file -' 'CRLF'
echo "edit: ok"
|
<filename>assets/js/generator.js
/* Elements */
/* Cached references to the generator's form controls and output panes. */
const backgroundColorSelect = document.getElementById("background-color-select");
const fontColorSelect = document.getElementById("font-color-select");
const hoverColorSelect = document.getElementById("hover-color-select");
const idSelect = document.getElementById("id-select");
const titleSelect = document.getElementById("title-select");
const chevronsSelect = document.getElementById("chevrons-select");
const chevronsColorSelect = document.getElementById("chevrons-color-select");
const numberColumnsSelect = document.getElementById("number-columns-select");
let optionsSelect = document.getElementById("options-select");
const optionsCenterSelect = document.getElementById("options-center-select");
/* <code> panes that receive the generated snippets (see writeSnippet). */
let codeHTML = document.getElementById("code-HTML");
let codeCSS = document.getElementById("code-CSS");
let codeJS = document.getElementById("code-JS");
/* Generator */
/*
 * Build the live select-menu preview from the form controls and generate the
 * HTML/CSS/JS snippets shown to the user. Relies on globals declared
 * elsewhere in this file (previewNo, previewYes, btnSelect, the snippet and
 * button elements, writePreview, writeSnippet) — confirm they exist before
 * reusing this in isolation.
 */
function generate() {
    /* Show preview */
    previewNo.style.display = "none";
    previewYes.style.display = "block";
    /* Values */
    const BCSValue = backgroundColorSelect.value;
    const FCSValue = fontColorSelect.value;
    const HCSvalue = hoverColorSelect.value;
    let ISValue = idSelect.value;
    const TSValue = titleSelect.value;
    const CSValue = chevronsSelect.checked;
    const CCSValue = chevronsColorSelect.value;
    let NCSValue = numberColumnsSelect.value;
    const OSValue = optionsSelect.value.split('\n');
    var OCSValue = optionsCenterSelect.checked;
    /* Create the element */
    previewYes.innerHTML
    // NOTE(review): the expression above reads previewYes.innerHTML without
    // assigning anything — it has no effect and looks like a leftover.
    /* Background */
    btnSelect.forEach(btn => {
        btn.style.backgroundColor = BCSValue;
        btn.style.color = FCSValue;
        btn.addEventListener('mouseover', function(){
            btn.style.color = HCSvalue;
        });
        // NOTE(review): mouseleave restores the chevron colour (CCSValue),
        // not the font colour (FCSValue) — confirm this is intended.
        btn.addEventListener('mouseleave', function(){
            btn.style.color = CCSValue;
        });
    });
    document.getElementById("opened-select").style.backgroundColor = BCSValue;
    document.getElementById("opened-select").style.color = FCSValue;
    /* Title */
    document.getElementById("btn-open-select").innerHTML = TSValue;
    document.getElementById("btn-close-select").innerHTML = TSValue;
    /* Chevrons */
    let TCOpen = "";
    let TCClose = "";
    if(CSValue === true) {
        TCOpen = "<i class='arrow arrow-down'></i>";
        TCClose = "<i class='arrow arrow-up'></i>";
        if(TSValue === ""){
            document.getElementById(`btn-open-select`).innerHTML = TSValue+"<i class='arrow arrow-down'></i>";
            document.getElementById(`btn-close-select`).innerHTML = TSValue+"<i class='arrow arrow-up'></i>";
        } else {
            document.getElementById(`btn-open-select`).innerHTML = TSValue+"<i class='arrow arrow-down' style='margin-left: 15px';></i>";
            document.getElementById(`btn-close-select`).innerHTML = TSValue+"<i class='arrow arrow-up' style='margin-left: 15px';></i>";
        }
        const arrowSelect = document.querySelectorAll('.arrow');
        arrowSelect.forEach(arrow => {
            arrow.style.borderColor = CCSValue;
            arrow.addEventListener('mouseover', function(){
                arrow.style.borderColor = HCSvalue;
            })
            arrow.addEventListener('mouseleave', function(){
                arrow.style.borderColor = CCSValue;
            })
        });
    } else {
        document.getElementById("btn-open-select").innerHTML = TSValue;
        document.getElementById("btn-close-select").innerHTML = TSValue;
    }
    /* Columns */
    // Clamp the column count to the supported range 1..4.
    NCSValue = parseInt(NCSValue);
    switch(true) {
        case NCSValue < 1:
            NCSValue = 1;
            break;
        case NCSValue > 4:
            NCSValue = 4;
            break;
        default:
            break;
    }
    document.getElementById("list-select").style.columns = NCSValue;
    /* Options */
    let Options = [];
    OSValue.forEach(function(option) {
        option = option.replace(',', '');
        Options.push(option);
    })
    /* Options center */
    // OptionsCenter is declared with `var` inside this if, so it hoists to
    // function scope; the CSS template below reads it (undefined when the
    // checkbox is off, which makes the `||` fallback kick in).
    if(OCSValue === true) {
        var OptionsCenter = `display: block;
        margin-bottom: 15px;
        text-align: center;`;
    }
    /* Check ID */
    // NOTE(review): ISValueSHY is assigned without declaration and becomes an
    // implicit global (would throw in strict mode).
    if(ISValue === "") {
        ISValue = "";
        ISValueSHY = "";
    } else {
        ISValueSHY = "-"+ISValue;
    }
    /* Snippets */
    // These locals shadow the <code> element globals of the same names;
    // writeSnippet still targets the elements because it runs in outer scope.
    let codeHTML = "";
    codeHTML =
    `<button class="btn-select" id="btn-open-select${ISValueSHY}" onclick="openSelect('${ISValue}')">${TSValue || ''}${TCOpen || ''}</button>
    <div class="opened-select opened-select-hidden" id="opened-select${ISValueSHY}">
    <button class="btn-select btn-close-select" id="btn-close-select${ISValueSHY}" onclick="closeSelect('${ISValue}')">${TSValue || ''}${TCClose || ''}</button>`;
    if(Options[0] !== "") {
        codeHTML +=
        `
    <ul class="list-select" id="list-select${ISValueSHY}">
        ${Options.map(option =>
            `<li class="name-select">${option}</li>`
        ).join("\n    ")}
    </ul>`
    } else {
        codeHTML +=
        `
    <ul class="list-select" id="list-select${ISValueSHY}">
        <li class="name-select">Your option</li>
        <li class="name-select">Your option</li>
        <li class="name-select">Your option</li>
        <li class="name-select">Your option</li>
        <li class="name-select">Your option</li>
    </ul>`;
    }
    codeHTML += `
    </div>`;
    let codeCSS =
    `.btn-select {
        background-color: ${BCSValue || '#000000'};
        border-color: transparent;
        border-radius: 10px;
        color: ${FCSValue || '#ffffff'};
        font-size: 18px;
        min-height: 50px;
        padding: 5px 10px;
        position: relative;
        width: 100%;
    }
    .btn-select:hover {
        color: ${HCSvalue || '#0091b5'};
        cursor: pointer;
    }
    .btn-select-hidden {
        display: none;
    }
    .btn-select-show {
        display: block;
    }
    .btn-close-select {
        border-top-left-radius: 10px;
        border-top-right-radius: 10px;
        margin: auto;
    }
    .arrow {
        border: solid ${CCSValue || '#ffffff'};
        border-width: 0 3px 3px 0;
        display: inline-block;
        ${TSValue ? 'margin-left: 15px;' : 'margin-left: 0px;'}
        padding: 5px;
        position: absolute;
        right: 15px;
    }
    .arrow:hover {
        color: ${HCSvalue || '#0091b5'};
    }
    .arrow-up {
        top: 50%;
        transform: rotate(-135deg);
    }
    .arrow-down {
        top: 35%;
        transform: rotate(45deg);
    }
    .opened-select {
        background-color: ${BCSValue || '#000000'};
        border-radius: 10px;
        width: 100%;
    }
    .opened-select-hidden {
        display: none;
    }
    .opened-select-show {
        display: block;
    }
    .list-select {
        columns: ${NCSValue || 1};
        list-style: none;
        margin: auto;
        padding: 2.5px 10px 5px 10px;
    }
    .name-select {
        color: ${FCSValue || '#ffffff'};
        ${OptionsCenter || "margin-bottom: 15px;"}
    }
    .name-select:hover {
        color: ${HCSvalue || '#0091b5'};
        cursor: pointer;
    }
    `;
    // Two JS variants: without an ID the generated code targets the fixed
    // element ids; with an ID it appends "-"+idSelect to every lookup.
    if(ISValue === "") {
        var codeJS =
        `/* Open the select */
    function openSelect(idSelect) {
        document.getElementById("btn-open-select").classList.remove("btn-select-show");
        document.getElementById("btn-close-select").classList.remove("btn-select-hidden");
        document.getElementById("opened-select").classList.remove("opened-select-hidden");
        document.getElementById("btn-open-select").classList.add("btn-select-hidden");
        document.getElementById("btn-close-select").classList.add("btn-select-show");
        document.getElementById("opened-select").classList.add("opened-select-show");
    }
    /* Close the select */
    function closeSelect(idSelect) {
        document.getElementById("btn-open-select").classList.remove("btn-select-hidden");
        document.getElementById("btn-close-select").classList.remove("btn-select-show");
        document.getElementById("opened-select").classList.remove("opened-select-show");
        document.getElementById("btn-open-select").classList.add("btn-select-show");
        document.getElementById("btn-close-select").classList.add("btn-select-hidden");
        document.getElementById("opened-select").classList.add("opened-select-hidden");
    }
    `;
    } else {
        var codeJS =
        `/* Open the select */
    function openSelect(idSelect) {
        document.getElementById("btn-open-select-"+idSelect).classList.remove("btn-select-show");
        document.getElementById("btn-close-select-"+idSelect).classList.remove("btn-select-hidden");
        document.getElementById("opened-select-"+idSelect).classList.remove("opened-select-hidden");
        document.getElementById("btn-open-select-"+idSelect).classList.add("btn-select-hidden");
        document.getElementById("btn-close-select-"+idSelect).classList.add("btn-select-show");
        document.getElementById("opened-select-"+idSelect).classList.add("opened-select-show");
    }
    /* Close the select */
    function closeSelect(idSelect) {
        document.getElementById("btn-open-select-"+idSelect).classList.remove("btn-select-hidden");
        document.getElementById("btn-close-select-"+idSelect).classList.remove("btn-select-show");
        document.getElementById("opened-select-"+idSelect).classList.remove("opened-select-show");
        document.getElementById("btn-open-select-"+idSelect).classList.add("btn-select-show");
        document.getElementById("btn-close-select-"+idSelect).classList.add("btn-select-hidden");
        document.getElementById("opened-select-"+idSelect).classList.add("opened-select-hidden");
    }
    `;
    }
    /* Write preview */
    writePreview(HCSvalue, CCSValue);
    /* Show snippets */
    snippetNo.style.display = "none";
    snippetYes.style.display = "block";
    btnHTML.style.backgroundColor = "#0091b5";
    btnCSS.style.backgroundColor = "#00adb5";
    btnJS.style.backgroundColor = "#00adb5";
    snippetHTML.style.display = "block";
    snippetCSS.style.display = "none";
    snippetJS.style.display = "none";
    /* Write snippets */
    writeSnippet("HTML", codeHTML);
    writeSnippet("CSS", codeCSS);
    writeSnippet("JS", codeJS);
}
/* Write preview */
/* Rebuild the live preview's option list from the textarea and wire the
 * hover colours (HCSvalue on enter, CCSValue on leave). */
function writePreview(HCSvalue, CCSValue) {
    const listElement = document.getElementById("list-select");
    const rawLines = document.getElementById("options-select").value.split('\n');
    /* Populate the list: user-supplied options, or six placeholder rows. */
    listElement.innerHTML = "";
    if (rawLines[0] !== "") {
        for (const rawLine of rawLines) {
            const cleaned = rawLine.replace(',', '');
            listElement.innerHTML += `<li class="name-select">${cleaned}</li>`;
        }
    } else {
        const baseOptions = `
        <li class="name-select">Your option</li>
        <li class="name-select">Your option</li>
        <li class="name-select">Your option</li>
        <li class="name-select">Your option</li>
        <li class="name-select">Your option</li>
        <li class="name-select">Your option</li>`;
        listElement.innerHTML = baseOptions;
    }
    /* Optionally centre every option entry. */
    if (optionsCenterSelect.checked === true) {
        for (const entry of document.querySelectorAll(".name-select")) {
            entry.style.display = "block";
            entry.style.textAlign = "center";
        }
    }
    /* Hover colour on enter, chevron colour back on leave. */
    for (const entry of document.querySelectorAll(".name-select")) {
        entry.addEventListener('mouseover', () => {
            entry.style.color = HCSvalue;
        });
        entry.addEventListener('mouseleave', () => {
            entry.style.color = CCSValue;
        });
    }
}
/* Write snippet */
function writeSnippet(type, code) {
switch(type) {
case "HTML":
codeHTML.innerText = code;
break;
case "CSS":
codeCSS.innerText = code;
break;
case "JS":
codeJS.innerText = code;
break;
}
} |
/*
* Copyright (c) 1991, 1992, 1993 Silicon Graphics, Inc.
*
* Permission to use, copy, modify, distribute, and sell this software and
* its documentation for any purpose is hereby granted without fee, provided
* that (i) the above copyright notices and this permission notice appear in
* all copies of the software and related documentation, and (ii) the name of
* Silicon Graphics may not be used in any advertising or
* publicity relating to the software without the specific, prior written
* permission of Silicon Graphics.
*
* THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF
* ANY KIND,
* EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY
* WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
*
* IN NO EVENT SHALL SILICON GRAPHICS BE LIABLE FOR
* ANY SPECIAL, INCIDENTAL, INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND,
* OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
* WHETHER OR NOT ADVISED OF THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY OF
* LIABILITY, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THIS SOFTWARE.
*/
#include <math.h>
#include "basis.h"
#ifndef PI
#define PI 3.141592657
#endif
/* Render mode: NORMAL flies straight; WEIRD adds random per-star drift and
 * spin (toggled with the space bar, see Key). */
enum {
    NORMAL = 0,
    WEIRD = 1
};
/* Star shape: STREAK draws a motion line, CIRCLE an eight-sided disc. */
enum {
    STREAK = 0,
    CIRCLE = 1
};
#define MAXSTARS 400
#define MAXPOS 10000
#define MAXWARP 10
#define MAXANGLES 6000
/* One star: index 0 holds the current position, index 1 the previous one
 * (used for streak drawing), plus per-frame drift and rotation state. */
typedef struct _starRec {
    GLint type;
    float x[2], y[2], z[2];
    float offsetX, offsetY, offsetR, rotation;
} starRec;
GLenum doubleBuffer;            /* GL_TRUE when -db was given (see Args) */
GLint windW, windH;             /* current window size in pixels */
GLenum flag = NORMAL;           /* NORMAL or WEIRD render mode */
GLint starCount = MAXSTARS / 2; /* number of active stars */
float speed = 1.0;              /* warp speed, clamped to MAXWARP */
GLint nitro = 0;                /* warp ramp state machine; see Draw */
starRec stars[MAXSTARS];
float sinTable[MAXANGLES];      /* one full turn of sine samples (see Init) */
float Sin(float angle)
{
return (sinTable[(GLint)angle]);
}
float Cos(float angle)
{
return (sinTable[((GLint)angle+(MAXANGLES/4))%MAXANGLES]);
}
/* (Re)spawn star slot n at a random position at least d units deep.
 * rand() call order matches the original exactly: one draw for the shape,
 * three for the position, one for the drift check, and (in WEIRD mode only)
 * three more for the drift offsets. */
void NewStar(GLint n, GLint d)
{
    starRec *s = &stars[n];

    /* Roughly one star in four becomes a circle; the rest are streaks. */
    s->type = (rand() % 4 == 0) ? CIRCLE : STREAK;

    s->x[0] = (float)(rand() % MAXPOS - MAXPOS / 2);
    s->y[0] = (float)(rand() % MAXPOS - MAXPOS / 2);
    s->z[0] = (float)(rand() % MAXPOS + d);

    /* In WEIRD mode, a quarter of the stars get random drift and spin. */
    if (rand() % 4 == 0 && flag == WEIRD) {
        s->offsetX = (float)(rand() % 100 - 100 / 2);
        s->offsetY = (float)(rand() % 100 - 100 / 2);
        s->offsetR = (float)(rand() % 25 - 25 / 2);
    } else {
        s->offsetX = 0.0;
        s->offsetY = 0.0;
        s->offsetR = 0.0;
    }
}
/* Rotate the point (*x, *y) about the origin by `rotation` (sine-table
 * angle units). The table lookups are pure, so hoisting them into locals
 * is observationally identical to the original's repeated calls. */
void RotatePoint(float *x, float *y, float rotation)
{
    const float c = Cos(rotation);
    const float s = Sin(rotation);
    const float rotatedX = *x * c - *y * s;
    const float rotatedY = *y * c + *x * s;

    *x = rotatedX;
    *y = rotatedY;
}
/* Advance every star one frame: remember the previous position (used for
 * streak drawing), apply drift offsets, move toward the viewer, and keep
 * the rotation inside the sine table's [0, MAXANGLES) range.
 * Fixed: the negative-wrap branch added 360.0 — a degree constant — even
 * though the table angle unit is MAXANGLES (6000) per full turn, so wrapped
 * angles landed on the wrong table entries. Both branches now wrap by
 * MAXANGLES. */
void MoveStars(void)
{
    float offset;
    GLint n;

    offset = speed * 60.0;
    for (n = 0; n < starCount; n++) {
        /* Save the current position as "previous" before moving. */
        stars[n].x[1] = stars[n].x[0];
        stars[n].y[1] = stars[n].y[0];
        stars[n].z[1] = stars[n].z[0];
        stars[n].x[0] += stars[n].offsetX;
        stars[n].y[0] += stars[n].offsetY;
        stars[n].z[0] -= offset;
        stars[n].rotation += stars[n].offsetR;
        /* offsetR is bounded (|offsetR| <= 12.5), so one adjustment in
         * each direction is enough to stay within a single turn. */
        if (stars[n].rotation > MAXANGLES) {
            stars[n].rotation -= MAXANGLES;
        }
        if (stars[n].rotation < 0) {
            stars[n].rotation += MAXANGLES;
        }
    }
}
/* Project star n into window coordinates and draw it with immediate-mode
 * GL; returns GL_FALSE when the projected point falls outside the window
 * so the caller (ShowStars) can respawn the star. */
GLenum StarPoint(GLint n)
{
    float x0, y0, x1, y1, width;
    GLint i;
    /* Perspective-style projection: screen offset shrinks with depth z. */
    x0 = stars[n].x[0] * windW / stars[n].z[0];
    y0 = stars[n].y[0] * windH / stars[n].z[0];
    RotatePoint(&x0, &y0, stars[n].rotation);
    x0 += windW / 2.0;
    y0 += windH / 2.0;
    if (x0 >= 0.0 && x0 < windW && y0 >= 0.0 && y0 < windH) {
        if (stars[n].type == STREAK) {
            /* Project the previous-frame position too and draw the motion
             * streak between the two points. */
            x1 = stars[n].x[1] * windW / stars[n].z[1];
            y1 = stars[n].y[1] * windH / stars[n].z[1];
            RotatePoint(&x1, &y1, stars[n].rotation);
            x1 += windW / 2.0;
            y1 += windH / 2.0;
            /* Closer stars get thicker lines. */
            glLineWidth(MAXPOS/100.0/stars[n].z[0]+1.0);
            // glColor3f(1.0, (MAXWARP-speed)/MAXWARP, (MAXWARP-speed)/MAXWARP);
            glColor3f(rand() %100/100., (MAXWARP-speed)/MAXWARP, (MAXWARP-speed)/MAXWARP);
            if (fabs(x0-x1) < 1.0 && fabs(y0-y1) < 1.0) {
                /* Degenerate streak (barely moved): draw a single point. */
                glBegin(GL_POINTS);
                glVertex2f(x0, y0);
                glEnd();
            } else {
                glBegin(GL_LINES);
                glVertex2f(x0, y0);
                glVertex2f(x1, y1);
                glEnd();
            }
        } else {
            /* CIRCLE: eight-sided polygon whose radius grows as the star
             * approaches the viewer. */
            width = MAXPOS / 10.0 / stars[n].z[0] + 1.0;
            glColor3f(1.0, 0.1, 0.1);
            glBegin(GL_POLYGON);
            for (i = 0; i < 8; i++) {
                float x = x0 + width * Cos((float)i*MAXANGLES/8.0);
                float y = y0 + width * Sin((float)i*MAXANGLES/8.0);
                glVertex2f(x, y);
            };
            glEnd();
        }
        return GL_TRUE;
    } else {
        return GL_FALSE;
    }
}
/* Clear the frame and draw every active star; stars that fell behind the
 * viewer or left the window are respawned at full depth. */
void ShowStars(void)
{
    GLint n;

    glClear(GL_COLOR_BUFFER_BIT);
    for (n = 0; n < starCount; n++) {
        /* A star is drawable while it is in front of the viewer (with a
         * small speed-dependent margin at full warp). */
        const int drawable =
            stars[n].z[0] > speed ||
            (stars[n].z[0] > 0.0 && speed < MAXWARP);
        /* Short-circuit keeps StarPoint from running for non-drawable
         * stars, exactly like the original nested if/else. */
        if (!drawable || StarPoint(n) == GL_FALSE) {
            NewStar(n, MAXPOS);
        }
    }
}
/* One-time setup: seed the RNG, spawn the initial star field and build the
 * full-turn sine lookup table, then set the GL clear state. */
static void Init(void)
{
    GLint n;
    float angle = 0.0;
    const float step = PI / (MAXANGLES / 2.0); /* one table entry in radians */

    srand((unsigned int) glutGet(GLUT_ELAPSED_TIME) );

    for (n = 0; n < MAXSTARS; n++) {
        NewStar(n, 100);
    }

    for (n = 0; n < MAXANGLES ; n++) {
        sinTable[n] = sin(angle);
        angle += step;
    }

    glClearColor(0.0, 0.0, 0.0, 0.0);
    glDisable(GL_DITHER);
}
/* GLUT reshape callback: track the new window size and set up a
 * pixel-aligned 2-D orthographic projection over the whole window. */
void GLUTCALLBACK Reshape(int width, int height)
{
    windW = (GLint)width;
    windH = (GLint)height;
    glViewport(0, 0, windW, windH);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    /* Half-pixel offsets center integer coordinates on pixel centers. */
    gluOrtho2D(-0.5, windW+0.5, -0.5, windH+0.5);
    glMatrixMode(GL_MODELVIEW);
}
/* GLUT keyboard callback: Esc (27) quits, Space (32) toggles weird mode,
 * 't' fires the nitro warp ramp. All other keys are ignored. */
static void GLUTCALLBACK Key(unsigned char key, int x, int y)
{
    if (key == 27) {
        exit(1);
    } else if (key == 32) {
        flag = (flag == NORMAL) ? WEIRD : NORMAL;
        if (flag == NORMAL) {
            printf("Normal\n");
        } else {
            printf("Weird\n");
        }
    } else if (key == 't') {
        nitro = 1;
    }
}
/* GLUT display callback: advance the simulation, render it, then run the
 * "nitro" warp ramp (a small state machine: nitro > 0 accelerates,
 * nitro < 0 decays back to cruising speed; see Key for the trigger). */
void GLUTCALLBACK Draw(void)
{
    MoveStars();
    ShowStars();
    if (nitro > 0) {
        /* Accelerating phase: speed climbs as nitro counts up. */
        speed = (float)(nitro / 10) + 1.0;
        if (speed > MAXWARP) {
            speed = MAXWARP;
        }
        /* When the ramp tops out, flip the sign to start the decay. */
        if (++nitro > MAXWARP*10) {
            nitro = -nitro;
        }
    } else if (nitro < 0) {
        /* Decay phase: nitro counts back toward zero, slowing down. */
        nitro++;
        speed = (float)(-nitro / 10) + 1.0;
        if (speed > MAXWARP) {
            speed = MAXWARP;
        }
    }
    glFlush();
    if (doubleBuffer) {
        glutSwapBuffers();
    }
}
/* Parse command-line options: -sb selects single buffering (the default),
 * -db double buffering, -? prints usage. Returns GL_FALSE on the first
 * unrecognized option, GL_TRUE otherwise. */
static GLenum Args(int argc, char **argv)
{
    GLint i;

    doubleBuffer = GL_FALSE;

    for (i = 1; i < argc; i++) {
        if (strcmp(argv[i], "-sb") == 0) {
            doubleBuffer = GL_FALSE;
            continue;
        }
        if (strcmp(argv[i], "-db") == 0) {
            doubleBuffer = GL_TRUE;
            continue;
        }
        if (strcmp(argv[i], "-?") == 0) {
            printf("Usage: star.exe [-sb|-db] [-?]\n");
            continue;
        }
        printf("%s (Bad option).\n", argv[i]);
        return GL_FALSE;
    }
    return GL_TRUE;
}
/* Idle callback: throttle redraws to roughly 100 Hz.
 * NOTE(review): DosSleep is an OS/2 API — this build appears to target
 * OS/2; replace the sleep call before porting to other platforms. */
void GLUTCALLBACK glut_post_redisplay_p(void)
{
    DosSleep(10);
    glutPostRedisplay();
}
/* Program setup: parse arguments, create a 300x300 GLUT window, install
 * the callbacks and enter the main loop. glutMainLoop never returns, so
 * the trailing `return 0` is effectively unreachable. */
int maininit(int argc, char **argv)
{
    GLenum type;
    glutInit(&argc, argv);
    if (Args(argc, argv) == GL_FALSE) {
        exit(1);
    }
#if USE_MESA
    printf("Press t, Space or Esc\n"); fflush(stdout);
#endif
    windW = 300;
    windH = 300;
    glutInitWindowPosition(0, 0); glutInitWindowSize( 300, 300);
    /* RGB mode, plus double buffering when -db was given. */
    type = GLUT_RGB;
    type |= (doubleBuffer) ? GLUT_DOUBLE : GLUT_SINGLE;
    glutInitDisplayMode(type);
    if (glutCreateWindow("Stars") == GL_FALSE) {
        exit(1);
    }
    Init();
    glutReshapeFunc(Reshape);
    glutKeyboardFunc(Key);
    glutDisplayFunc(Draw);
    glutIdleFunc(glut_post_redisplay_p);
    glutMainLoop();
    return 0;
}
|
/**
* Copyright 2012 <NAME>. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. <NAME> licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.bah.applefox.main.plugins.fulltextindex;
import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Reader;
import java.io.UnsupportedEncodingException;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.URLConnection;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.client.TableExistsException;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat;
import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat;
import org.apache.accumulo.core.client.mapreduce.InputFormatBase;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.tika.exception.TikaException;
import org.apache.tika.metadata.Metadata;
import org.apache.tika.parser.AutoDetectParser;
import org.apache.tika.parser.ParseContext;
import org.apache.tika.parser.Parser;
import org.apache.tika.sax.BodyContentHandler;
import org.xml.sax.ContentHandler;
import org.xml.sax.SAXException;
import com.bah.applefox.main.Loader;
import com.bah.applefox.main.plugins.utilities.AccumuloUtils;
import com.bah.applefox.main.plugins.utilities.DivsFilter;
import com.bah.applefox.main.plugins.utilities.IngestUtils;
import com.bah.applefox.main.plugins.webcrawler.utilities.PageCrawlException;
import com.bah.applefox.main.plugins.webcrawler.utilities.WebPageCrawl;
/**
* A MapReduce job that loads the NGrams from the pages indicated by the URLs in
* the URLs table into the Data Table. The data contained follows the format:
* Row ID: word + timestamp | Column Family: parent URL | Column Qualifier:
* another word on the page | timestamp | Value: number of times the word occurs
* on the page
*
*/
public class FTLoader extends Loader {
private static String dTable, urlCheckedTable, articleFile, divsFile;
private static int maxNGrams;
private static long longSuffix;
private static HashSet<String> stopWords;
private static Log log = LogFactory.getLog(FTLoader.class);
private static HashSet<String> exDivs;
/**
* MapperClass extends the Mapper class. It performs the map functionality
* of MapReduce.
*
*/
    public static class MapperClass extends Mapper<Key, Value, Key, Value> {
        /**
         * Gets a URL from the URLs table in Accumulo, feeds that data into
         * addToDataBaseTable
         *
         */
        @Override
        public void map(Key key, Value value, Context context) {
            // Get the row of the key (the url) and append it to text
            Text currentURL = new Text();
            ByteSequence row = key.getRowData();
            currentURL
                    .append(row.getBackingArray(), row.offset(), row.length());
            // Placeholder value; only the emitted key (the URL) matters.
            Value val = new Value("0".getBytes());
            try {
                // scan the table to ensure the url has not been checked
                Scanner scan = AccumuloUtils.connectRead(urlCheckedTable);
                scan.setRange(new Range(currentURL.toString()));
                if (!scan.iterator().hasNext()) {
                    // If yet unchecked, check the url by passing it to the
                    // reduce job
                    context.write(new Key(currentURL.toString()), val);
                }
            // NOTE(review): the catch blocks below are identical — Java 7
            // multi-catch would collapse them if the build level allows.
            } catch (NullPointerException e) {
                if (e.getMessage() != null) {
                    log.error(e.getMessage());
                } else {
                    log.error(e.getStackTrace());
                }
            } catch (AccumuloException e) {
                if (e.getMessage() != null) {
                    log.error(e.getMessage());
                } else {
                    log.error(e.getStackTrace());
                }
            } catch (AccumuloSecurityException e) {
                if (e.getMessage() != null) {
                    log.error(e.getMessage());
                } else {
                    log.error(e.getStackTrace());
                }
            } catch (TableNotFoundException e) {
                if (e.getMessage() != null) {
                    log.error(e.getMessage());
                } else {
                    log.error(e.getStackTrace());
                }
            } catch (IOException e) {
                if (e.getMessage() != null) {
                    log.error(e.getMessage());
                } else {
                    log.error(e.getStackTrace());
                }
            } catch (InterruptedException e) {
                if (e.getMessage() != null) {
                    log.error(e.getMessage());
                } else {
                    log.error(e.getStackTrace());
                }
            }
        }
    }
/**
* ReducerClass extends Reducer and would perform the Reduce functionality
* of MapReduce, but in this case it is only a place holder.
*
*/
    public static class ReducerClass extends Reducer<Key, Value, Key, Value> {
        // For each unchecked URL key: ingest the page into the data table,
        // then record the URL in the checked table so later runs skip it.
        public void reduce(Key key, Iterable<Value> values, Context context)
                throws IOException, InterruptedException {
            try {
                // Add a consistent suffix to data in the table to ensure even
                // splits
                // NOTE(review): longSuffix is a mutable static reset on every
                // reduce() call — concurrent reducers in one JVM would race.
                longSuffix = new Date().getTime();
                // Add the data to the table with this method
                if (addToDataBaseTable(key.getRow().toString())) {
                    // Write off the url as having been checked
                    BatchWriter w = AccumuloUtils
                            .connectBatchWrite(urlCheckedTable);
                    Mutation m = new Mutation(key.getRow().toString());
                    m.put("0", "0", new Value("0".getBytes()));
                    w.addMutation(m);
                    w.close();
                }
            // NOTE(review): identical catch bodies; consider multi-catch.
            } catch (AccumuloException e) {
                if (e.getMessage() != null) {
                    log.error(e.getMessage());
                } else {
                    log.error(e.getStackTrace());
                }
            } catch (AccumuloSecurityException e) {
                if (e.getMessage() != null) {
                    log.error(e.getMessage());
                } else {
                    log.error(e.getStackTrace());
                }
            } catch (TableNotFoundException e) {
                if (e.getMessage() != null) {
                    log.error(e.getMessage());
                } else {
                    log.error(e.getStackTrace());
                }
            } catch (TableExistsException e) {
                if (e.getMessage() != null) {
                    log.error(e.getMessage());
                } else {
                    log.error(e.getStackTrace());
                }
            } catch (Exception e) {
                if (e.getMessage() != null) {
                    log.error(e.getMessage());
                } else {
                    log.error(e.getStackTrace());
                }
            }
        }
    }
/**
* run takes the comandline args as arguments (in this case from a
* configuration file), creates a new job, configures it, initiates it,
* waits for completion, and returns 0 if it is successful (1 if it is not)
*
* @param args
* the commandline arguments (in this case from a configuration
* file)
*
* @return 0 if the job ran successfully and 1 if it isn't
*/
    public int run(String[] args) throws Exception {
        try {
            // Initialize variables
            // NOTE(review): args indices are positional values from the
            // loader's configuration file; confirm the layout against
            // Loader before reordering anything here.
            FTLoader.articleFile = args[8];
            FTLoader.maxNGrams = Integer.parseInt(args[9]);
            FTLoader.stopWords = getStopWords();
            FTLoader.dTable = args[10];
            FTLoader.urlCheckedTable = args[11];
            FTLoader.divsFile = args[20];
            FTLoader.exDivs = getExDivs();
            // Give the job a name
            String jobName = this.getClass().getSimpleName() + "_"
                    + System.currentTimeMillis();
            // Create job and set the jar
            Job job = new Job(getConf(), jobName);
            job.setJarByClass(this.getClass());
            String urlTable = args[5];
            // Input: the URLs table in Accumulo, read via AccumuloInputFormat
            // (args[0]/args[1] = instance/zookeepers, args[2]/args[3] =
            // user/password).
            job.setInputFormatClass(AccumuloInputFormat.class);
            InputFormatBase.setZooKeeperInstance(job.getConfiguration(),
                    args[0], args[1]);
            InputFormatBase.setInputInfo(job.getConfiguration(), args[2],
                    args[3].getBytes(), urlTable, new Authorizations());
            job.setMapperClass(MapperClass.class);
            job.setMapOutputKeyClass(Key.class);
            job.setMapOutputValueClass(Value.class);
            job.setReducerClass(ReducerClass.class);
            job.setNumReduceTasks(Integer.parseInt(args[4]));
            // Output: written back to Accumulo via AccumuloOutputFormat.
            job.setOutputFormatClass(AccumuloOutputFormat.class);
            job.setOutputKeyClass(Key.class);
            job.setOutputValueClass(Value.class);
            AccumuloOutputFormat.setZooKeeperInstance(job.getConfiguration(),
                    args[0], args[1]);
            AccumuloOutputFormat.setOutputInfo(job.getConfiguration(), args[2],
                    args[3].getBytes(), true, urlTable);
            job.waitForCompletion(true);
            return job.isSuccessful() ? 0 : 1;
        } catch (IOException e) {
            if (e.getMessage() != null) {
                log.error(e.getMessage());
            } else {
                log.error(e.getStackTrace());
            }
        } catch (InterruptedException e) {
            if (e.getMessage() != null) {
                log.error(e.getMessage());
            } else {
                log.error(e.getStackTrace());
            }
        } catch (ClassNotFoundException e) {
            if (e.getMessage() != null) {
                log.error(e.getMessage());
            } else {
                log.error(e.getStackTrace());
            }
        }
        // Any logged failure above falls through to a non-zero exit status.
        return 1;
    }
/**
 * Fetches the page at the given URL, parses it with Tika, builds NGram
 * frequency counts for its text, and writes one Key-Value pair per
 * (row NGram, column NGram) combination into the Accumulo data table
 * ({@code dTable}).
 *
 * @param url
 *            - the URL of the page to fetch, parse and index
 * @return true when the page was indexed successfully; false when a
 *         recoverable error occurred (the error is logged)
 */
private static boolean addToDataBaseTable(String url) {
try {
// Connect to the data table
BatchWriter writer = AccumuloUtils.connectBatchWrite(dTable);
// Let the user know the url is being added
System.out.println("Adding " + url + " with prefix " + longSuffix);
// Get the input stream (in case it is not an html document
InputStream urlInput = new URL(url).openStream();
// Set the page contents (used for filtering if it is an html
// document)
String pageContents = getPageContents(new URL(url));
// If the document is HTML
if (exDivs.size() != 0
&& pageContents.toLowerCase().contains("<html>")) {
// Filter out some divs (especially generic headers/footers,
// etc.)
pageContents = DivsFilter.filterDivs(pageContents, exDivs);
urlInput = new ByteArrayInputStream(pageContents.getBytes());
}
// Parse with tika; BodyContentHandler collects the plain body text.
Parser parser = new AutoDetectParser();
Metadata metadata = new Metadata();
ParseContext context = new ParseContext();
ContentHandler handler = new BodyContentHandler();
parser.parse(urlInput, handler, metadata, context);
// Get the keywords of the page and its title
String keywords = metadata.get("keywords");
String title = metadata.get("title");
// Fall back to crawling the page itself when Tika found no title.
if (title == null) {
WebPageCrawl p;
try {
p = new WebPageCrawl(url, "", Collections.<String>emptySet());
} catch (PageCrawlException e) {
log.info(e);
return false;
}
title = p.getTitle();
}
// If there are keywords, delimit the commas, otherwise make it a
// blank screen (not null)
if (keywords != null) {
keywords = keywords.replaceAll(",", "[ ]");
} else {
keywords = "";
}
// Make everything lower case for ease of search
String plainText = handler.toString().toLowerCase();
// Split it into <Key,Value> pairs of NGrams, with the Value being
// the count of the NGram on the page
HashMap<String, Integer> tikaParsed = IngestUtils
.collectTerms(IngestUtils
.createNGrams(plainText, maxNGrams));
// A counter for the final number of words
Integer totalWords = 0;
// A HashMap for the final NGrams
HashMap<String, Integer> finalParsed = new HashMap<String, Integer>();
// Drop stop words; when no stop-word list is configured keep every term.
for (String i : tikaParsed.keySet()) {
int freq = tikaParsed.get(i);
totalWords += freq;
// erase stop words
if (stopWords != null && !stopWords.contains(i)) {
finalParsed.put(i, tikaParsed.get(i));
} else if (stopWords == null) {
finalParsed.put(i, tikaParsed.get(i));
}
}
System.out.println("Tika Parsed: " + finalParsed.keySet().size());
System.out.println("Starting");
int counter = 0;
String namedURL = url + "[ ]" + title + "[ ]" + keywords;
// NOTE(review): O(n^2) — for every row NGram a mutation is written for
// every column NGram, so a page with n distinct terms writes n^2 pairs.
// Presumably intentional for the search schema; confirm before changing.
for (String row : finalParsed.keySet()) {
row = row + " " + longSuffix;
for (String CQ : finalParsed.keySet()) {
String groupedVal = new String();
// Cell value is "count,relative-frequency" for the column term.
Integer wc = finalParsed.get(CQ);
double freq = wc.doubleValue() / totalWords.doubleValue();
groupedVal = wc + "," + freq;
Value val = new Value(IngestUtils.serialize(groupedVal));
Mutation m = new Mutation(row);
m.put(namedURL, CQ, new Date().getTime(), val);
writer.addMutation(m);
counter++;
}
}
System.out.println("Wrote " + counter
+ " Key-Value pairs to Accumulo.");
// NOTE(review): writer and urlInput are not closed on the exception
// paths above — possible resource leak; consider try/finally.
writer.close();
System.out.println("Stopped writing");
} catch (AccumuloException e) {
if (e.getMessage() != null) {
log.error(e.getMessage());
} else {
// NOTE(review): logs the array object, not a readable trace.
log.error(e.getStackTrace());
}
return false;
} catch (AccumuloSecurityException e) {
if (e.getMessage() != null) {
log.error(e.getMessage());
} else {
log.error(e.getStackTrace());
}
return false;
} catch (TableNotFoundException e) {
if (e.getMessage() != null) {
log.error(e.getMessage());
} else {
log.error(e.getStackTrace());
}
return false;
} catch (TableExistsException e) {
if (e.getMessage() != null) {
log.error(e.getMessage());
} else {
log.error(e.getStackTrace());
}
return false;
} catch (MalformedURLException e) {
if (e.getMessage() != null) {
log.error(e.getMessage());
} else {
log.error(e.getStackTrace());
}
// NOTE(review): unlike every other handler this one falls through and
// the method returns true — TODO confirm whether that is intended.
} catch (IOException e) {
if (e.getMessage() != null) {
log.error(e.getMessage());
} else {
log.error(e.getStackTrace());
}
return false;
} catch (SAXException e) {
if (e.getMessage() != null) {
log.error(e.getMessage());
} else {
log.error(e.getStackTrace());
}
return false;
} catch (TikaException e) {
if (e.getMessage() != null) {
log.error(e.getMessage());
} else {
log.error(e.getStackTrace());
}
return false;
}
return true;
}
/**
 * Gets the words that are supposed to be removed from the article file
 * (words such as the, a, an, etc. that are unimportant to the search
 * engine). Reads one stop word per line from {@code articleFile}.
 *
 * @return articles - all the words that shouldn't be included in the data;
 *         empty (never null) when the file cannot be read — the error is
 *         logged instead of propagated
 */
private HashSet<String> getStopWords() {
    HashSet<String> articles = new HashSet<String>();
    System.out.println("getting stop words");
    BufferedReader reader = null;
    try {
        // Read the file, one stop word per line
        reader = new BufferedReader(new FileReader(new File(articleFile)));
        String temp;
        while ((temp = reader.readLine()) != null) {
            articles.add(temp);
        }
    } catch (FileNotFoundException e) {
        if (e.getMessage() != null) {
            log.error(e.getMessage());
        } else {
            log.error(e.getStackTrace());
        }
    } catch (IOException e) {
        if (e.getMessage() != null) {
            log.error(e.getMessage());
        } else {
            log.error(e.getStackTrace());
        }
    } finally {
        // Fix: the reader used to leak when readLine() threw; always close it.
        if (reader != null) {
            try {
                reader.close();
            } catch (IOException e) {
                log.error(e.getMessage());
            }
        }
    }
    return articles;
}
/**
 * Gets the ids of the div tags that should be excluded from the data on the
 * page. This is useful for pages with repetitive generic headers and
 * footers, allowing for more accurate results. Reads one id per line from
 * {@code divsFile}.
 *
 * @return - HashSet of ids; empty (never null) when the file cannot be
 *         read — the error is logged instead of propagated
 */
private HashSet<String> getExDivs() {
    HashSet<String> divs = new HashSet<String>();
    System.out.println("Getting stop divs");
    BufferedReader reader = null;
    try {
        // Read in the file, one div id per line
        reader = new BufferedReader(new FileReader(new File(divsFile)));
        String temp;
        while ((temp = reader.readLine()) != null) {
            divs.add(temp);
        }
    } catch (FileNotFoundException e) {
        if (e.getMessage() != null) {
            log.error(e.getMessage());
        } else {
            log.error(e.getStackTrace());
        }
    } catch (IOException e) {
        if (e.getMessage() != null) {
            log.error(e.getMessage());
        } else {
            log.error(e.getStackTrace());
        }
    } finally {
        // Fix: the reader used to leak when readLine() threw; always close it.
        if (reader != null) {
            try {
                reader.close();
            } catch (IOException e) {
                log.error(e.getMessage());
            }
        }
    }
    return divs;
}
/** This method is used to get the page source from the given URL.
 * Unreadable formats (.pdf, .doc, .jsp, rss, .css) are skipped entirely.
 * @param url - the url from which to get the contents
 * @return - the page contents; "" when the URL is skipped or unreachable
 */
private static String getPageContents(URL url) {
    String pageContents = "";
    Reader r = null;
    try {
        // Open the URL Connection
        URLConnection con = url.openConnection();
        // Get the file path, and eliminate unreadable documents
        String filePath = url.toString();
        // Reads content only if it is a valid format
        if (!(filePath.endsWith(".pdf") || filePath.endsWith(".doc")
                || filePath.endsWith(".jsp") || filePath.endsWith("rss")
                || filePath.endsWith(".css"))) {
            // Sets the connection timeout (in milliseconds)
            con.setConnectTimeout(1000);
            // Tries to match the character set of the Web Page
            String charset = "utf-8";
            try {
                Matcher m = Pattern.compile("\\s+charset=([^\\s]+)\\s*")
                        .matcher(con.getContentType());
                // Fix: use find() instead of matches(). The charset attribute
                // is only a substring of the Content-Type header (e.g.
                // "text/html; charset=utf-8"), so a whole-string match can
                // never succeed and the detection always fell back to utf-8.
                charset = m.find() ? m.group(1) : "utf-8";
            } catch (Exception e) {
                // getContentType() may be null (NPE caught here), or no
                // charset attribute was present
                log.error("Page had no specified charset");
            }
            // Reader derived from the URL Connection's input stream, with
            // the given character set
            r = new InputStreamReader(con.getInputStream(), charset);
            // Builder used to append each chunk of Web Page data
            StringBuilder buf = new StringBuilder();
            // Tries to get an estimate of bytes available
            int bufferSize = con.getInputStream().available();
            // If the estimate is too small, increases the size
            if (bufferSize <= 1000) {
                bufferSize = 1000;
            }
            // Character array to hold each chunk of Web Page data
            char[] ch = new char[bufferSize];
            // Read the first chunk of Web Page data
            int len = r.read(ch);
            // Loops until end of the Web Page is reached
            while (len != -1) {
                // Appends the data chunk to the buffer and gets the next one
                buf.append(ch, 0, len);
                len = r.read(ch, 0, bufferSize);
            }
            // Sets the pageContents to the newly created string
            pageContents = buf.toString();
        }
    } catch (UnsupportedEncodingException e) {
        if (e.getMessage() != null) {
            log.error(e.getMessage());
        } else {
            log.error(e.getStackTrace());
        }
        // Assume the body contents are blank if the character encoding is
        // not supported
        pageContents = "";
    } catch (IOException e) {
        if (e.getMessage() != null) {
            log.error(e.getMessage());
        } else {
            log.error(e.getStackTrace());
        }
        // Assume the body contents are blank if the Web Page could not be
        // accessed
        pageContents = "";
    } finally {
        // Fix: the reader (and its underlying connection stream) used to
        // leak; always close it.
        if (r != null) {
            try {
                r.close();
            } catch (IOException e) {
                log.error(e.getMessage());
            }
        }
    }
    return pageContents;
}
}
|
#!/bin/bash
# ------------------------------------------------------------------------
# Copyright 2018 WSO2, Inc. (http://wso2.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
# ------------------------------------------------------------------------
ECHO=`which echo`
KUBECTL=`which kubectl`
# methods
# Print the given message in bold, interpreting backslash escapes (\n, \t)
# exactly like `echo -e` did.
function echoBold () {
    printf '\e[1m%b\e[0m\n' "${1}"
}
# Print the help text describing every accepted command-line flag.
# Fix: corrected the "Distribured" typo in the user-facing banner.
function usage () {
    echoBold "This script automates the installation of Stream Processor Fully Distributed Deployment Kubernetes resources\n"
    echoBold "Allowed arguments:\n"
    echoBold "-h | --help"
    echoBold "--wu | --wso2-username\t\tYour WSO2 username"
    echoBold "--wp | --wso2-password\t\tYour WSO2 password"
    echoBold "--cap | --cluster-admin-password\tKubernetes cluster admin password\n\n"
}
# Credentials captured from the command line.
# NOTE(review): these three variables are parsed below but never referenced
# again in this script — TODO confirm whether they should be wired into the
# kubectl/registry commands.
WSO2_SUBSCRIPTION_USERNAME=''
WSO2_SUBSCRIPTION_PASSWORD=''
ADMIN_PASSWORD=''
# capture named arguments of the form --flag=value
while [ "$1" != "" ]; do
PARAM=`echo $1 | awk -F= '{print $1}'`
VALUE=`echo $1 | awk -F= '{print $2}'`
case ${PARAM} in
-h | --help)
usage
exit 1
;;
--wu | --wso2-username)
WSO2_SUBSCRIPTION_USERNAME=${VALUE}
;;
--wp | --wso2-password)
WSO2_SUBSCRIPTION_PASSWORD=${VALUE}
;;
--cap | --cluster-admin-password)
ADMIN_PASSWORD=${VALUE}
;;
*)
echoBold "ERROR: unknown parameter \"${PARAM}\""
usage
exit 1
;;
esac
shift
done
echo 'Starting to deploy'
# create a new Kubernetes Namespace
kubectl create namespace wso2
# create a new service account in 'wso2' Kubernetes Namespace
kubectl create serviceaccount wso2svcacct -n wso2
# set namespace
kubectl config set-context $(kubectl config current-context) --namespace=wso2
#kubectl create secret docker-registry wso2creds --docker-server=docker.wso2.com --docker-username=chulanilakmalikarandana@gmail.com --docker-password=chul@P292 --docker-email=chulanilakmalikarandana@gmail.com
# SECURITY(review): the commented-out line above and the --password flag
# below embed plaintext credentials in the script; move them to a secret
# store / environment variables before sharing this file.
kubectl create secret docker-registry gcr-json-key --docker-server=https://gcr.io --docker-username=_json_key --docker-password="$(cat /home/user123/Applications/Mavericks_Kubernetes_Client/pattern-distributed/wso2-sp-distributed-179caa93fb3d.json)" --docker-email=madushi95lakshini@gmail.com
# create Kubernetes Role and Role Binding necessary for the Kubernetes API requests made from Kubernetes membership scheme
kubectl create --username=admin --password=S8AvQu4BU790bUSs -f /home/user123/Applications/Mavericks_Kubernetes_Client/rbac/rbac.yaml
# volumes
echo 'deploying persistence volumes ...'
kubectl create -f /home/user123/Applications/Mavericks_Kubernetes_Client/pattern-distributed/volumes/persistent-volumes.yaml
kubectl create -f /home/user123/Applications/Mavericks_Kubernetes_Client/pattern-distributed/extras/rdbms/volumes/persistent-volumes.yaml
# Configuration Maps for the manager/worker/dashboard profiles and DB scripts
echo 'deploying config maps ...'
kubectl create configmap sp-manager-bin --from-file=/home/user123/Applications/Mavericks_Kubernetes_Client/pattern-distributed/confs/sp-manager/bin/
kubectl create configmap sp-manager-conf --from-file=/home/user123/Applications/Mavericks_Kubernetes_Client/pattern-distributed/confs/sp-manager/conf/
kubectl create configmap sp-worker-bin --from-file=/home/user123/Applications/Mavericks_Kubernetes_Client/pattern-distributed/confs/sp-worker/bin/
kubectl create configmap sp-worker-conf --from-file=/home/user123/Applications/Mavericks_Kubernetes_Client/pattern-distributed/confs/sp-worker/conf/
kubectl create configmap sp-dashboard-conf --from-file=/home/user123/Applications/Mavericks_Kubernetes_Client/pattern-distributed/confs/status-dashboard/conf/
kubectl create configmap mysql-dbscripts --from-file=/home/user123/Applications/Mavericks_Kubernetes_Client/pattern-distributed/extras/confs/mysql/dbscripts/
sleep 30s
# databases
echo 'deploying databases ...'
kubectl create -f /home/user123/Applications/Mavericks_Kubernetes_Client/pattern-distributed/extras/rdbms/mysql/rdbms-persistent-volume-claim.yaml
kubectl create -f /home/user123/Applications/Mavericks_Kubernetes_Client/pattern-distributed/extras/rdbms/mysql/rdbms-service.yaml
kubectl create -f /home/user123/Applications/Mavericks_Kubernetes_Client/pattern-distributed/extras/rdbms/mysql/rdbms-deployment.yaml
#zookeeper
echo 'deploying Zookeeper ...'
kubectl create -f /home/user123/Applications/Mavericks_Kubernetes_Client/pattern-distributed/extras/zookeeper/zookeeper-deployment.yaml
kubectl create -f /home/user123/Applications/Mavericks_Kubernetes_Client/pattern-distributed/extras/zookeeper/zookeeper-service.yaml
#kafka
echo 'deploying Kafka ...'
kubectl create -f /home/user123/Applications/Mavericks_Kubernetes_Client/pattern-distributed/extras/kafka/kafka-deployment.yaml
kubectl create -f /home/user123/Applications/Mavericks_Kubernetes_Client/pattern-distributed/extras/kafka/kafka-service.yaml
echo 'deploying volume claims...'
kubectl create -f /home/user123/Applications/Mavericks_Kubernetes_Client/pattern-distributed/sp/wso2sp-mgt-volume-claim.yaml
echo 'deploying Stream Processor manager profile and services...'
kubectl create -f /home/user123/Applications/Mavericks_Kubernetes_Client/pattern-distributed/sp/wso2sp-manager-1-service.yaml
kubectl create -f /home/user123/Applications/Mavericks_Kubernetes_Client/pattern-distributed/sp/wso2sp-manager-2-service.yaml
kubectl create -f /home/user123/Applications/Mavericks_Kubernetes_Client/pattern-distributed/sp/wso2sp-manager-1-deployment.yaml
kubectl create -f /home/user123/Applications/Mavericks_Kubernetes_Client/pattern-distributed/sp/wso2sp-manager-2-deployment.yaml
sleep 30s
echo 'deploying Stream Processor worker profile and services...'
kubectl create -f /home/user123/Applications/Mavericks_Kubernetes_Client/pattern-distributed/sp/wso2sp-worker-1-service-1.yaml
kubectl create -f /home/user123/Applications/Mavericks_Kubernetes_Client/pattern-distributed/sp/wso2sp-worker-2-service-1.yaml
kubectl create -f /home/user123/Applications/Mavericks_Kubernetes_Client/pattern-distributed/sp/wso2sp-worker-3-service-1.yaml
kubectl create -f /home/user123/Applications/Mavericks_Kubernetes_Client/pattern-distributed/sp/wso2sp-worker-4-service-1.yaml
kubectl create -f /home/user123/Applications/Mavericks_Kubernetes_Client/pattern-distributed/sp/wso2sp-worker-5-service-1.yaml
kubectl create -f /home/user123/Applications/Mavericks_Kubernetes_Client/pattern-distributed/sp/wso2sp-worker-6-service-1.yaml
#kubectl create -f ../sp/wso2sp-worker-7-service.yaml
#kubectl create -f ../sp/wso2sp-worker-8-service.yaml
#kubectl create -f ../sp/wso2sp-worker-9-service.yaml
#kubectl create -f ../sp/wso2sp-worker-10-service.yaml
#kubectl create -f ../sp/wso2sp-worker-11-service.yaml
#kubectl create -f ../sp/wso2sp-worker-12-service.yaml
kubectl create -f /home/user123/Applications/Mavericks_Kubernetes_Client/pattern-distributed/sp/wso2sp-worker-1-deployment.yaml
kubectl create -f /home/user123/Applications/Mavericks_Kubernetes_Client/pattern-distributed/sp/wso2sp-worker-2-deployment.yaml
kubectl create -f /home/user123/Applications/Mavericks_Kubernetes_Client/pattern-distributed/sp/wso2sp-worker-3-deployment.yaml
kubectl create -f /home/user123/Applications/Mavericks_Kubernetes_Client/pattern-distributed/sp/wso2sp-worker-4-deployment.yaml
kubectl create -f /home/user123/Applications/Mavericks_Kubernetes_Client/pattern-distributed/sp/wso2sp-worker-5-deployment.yaml
kubectl create -f /home/user123/Applications/Mavericks_Kubernetes_Client/pattern-distributed/sp/wso2sp-worker-6-deployment.yaml
#kubectl create -f ../sp/wso2sp-worker-7-deployment.yaml
#kubectl create -f ../sp/wso2sp-worker-8-deployment.yaml
#kubectl create -f ../sp/wso2sp-worker-9-deployment.yaml
#kubectl create -f ../sp/wso2sp-worker-10-deployment.yaml
#kubectl create -f ../sp/wso2sp-worker-11-deployment.yaml
#kubectl create -f ../sp/wso2sp-worker-12-deployment.yaml
echo 'deploying Stream Processor producer profile and services...'
kubectl create -f /home/user123/Applications/Mavericks_Kubernetes_Client/pattern-distributed/sp/wso2sp-producer-service.yaml
kubectl create -f /home/user123/Applications/Mavericks_Kubernetes_Client/pattern-distributed/sp/wso2sp-producer-deployment.yaml
# deploying the ingress resource
echo 'Deploying Ingress...'
kubectl create -f /home/user123/Applications/Mavericks_Kubernetes_Client/pattern-distributed/ingresses/wso2sp-manager-1-ingress.yaml
kubectl create -f /home/user123/Applications/Mavericks_Kubernetes_Client/pattern-distributed/ingresses/wso2sp-manager-2-ingress.yaml
kubectl create -f /home/user123/Applications/Mavericks_Kubernetes_Client/pattern-distributed/ingresses/wso2sp-dashboard-ingress.yaml
sleep 20s
# Expose each worker deployment through a NodePort service and open the
# assigned port in the GCP firewall so it is reachable externally.
kubectl expose deployment wso2sp-worker-1 --type=NodePort --name=wso2sp-worker-1-service-1
NODEPORT1=$(kubectl get -o jsonpath="{.spec.ports[0].nodePort}" services wso2sp-worker-1)
gcloud compute firewall-rules create rule-1 --allow=tcp:$NODEPORT1
kubectl expose deployment wso2sp-worker-2 --type=NodePort --name=wso2sp-worker-2-service-1
NODEPORT2=$(kubectl get -o jsonpath="{.spec.ports[0].nodePort}" services wso2sp-worker-2)
gcloud compute firewall-rules create rule-2 --allow=tcp:$NODEPORT2
kubectl expose deployment wso2sp-worker-3 --type=NodePort --name=wso2sp-worker-3-service-1
NODEPORT3=$(kubectl get -o jsonpath="{.spec.ports[0].nodePort}" services wso2sp-worker-3)
gcloud compute firewall-rules create rule-3 --allow=tcp:$NODEPORT3
kubectl expose deployment wso2sp-worker-4 --type=NodePort --name=wso2sp-worker-4-service-1
NODEPORT4=$(kubectl get -o jsonpath="{.spec.ports[0].nodePort}" services wso2sp-worker-4)
gcloud compute firewall-rules create rule-4 --allow=tcp:$NODEPORT4
kubectl expose deployment wso2sp-worker-5 --type=NodePort --name=wso2sp-worker-5-service-1
NODEPORT5=$(kubectl get -o jsonpath="{.spec.ports[0].nodePort}" services wso2sp-worker-5)
gcloud compute firewall-rules create rule-5 --allow=tcp:$NODEPORT5
kubectl expose deployment wso2sp-worker-6 --type=NodePort --name=wso2sp-worker-6-service-1
NODEPORT6=$(kubectl get -o jsonpath="{.spec.ports[0].nodePort}" services wso2sp-worker-6)
gcloud compute firewall-rules create rule-6 --allow=tcp:$NODEPORT6
echo 'Finished'
|
# require './example'
require './calc'
# require './chuck_norris'
# Flush $stdout after every write so log lines appear immediately instead of
# being buffered by the platform.
$stdout.sync = true
# Warm the Rack app before it starts serving traffic by issuing one request
# to '/' through a mock client.
warmup do |app|
client = Rack::MockRequest.new(app)
client.get('/')
end
# Hand the Sinatra application to the Rack server.
run Sinatra::Application
|
#!/bin/sh
# Run headers_$1 command for all suitable architectures
# Stop on error
set -e
# Run "make headers_$1" for architecture $2 when it has an asm Kbuild file,
# otherwise report that the architecture is skipped.
do_command()
{
	if [ -f ${srctree}/arch/$2/include/asm/Kbuild ]; then
		make ARCH=$2 KBUILD_HEADERS=$1 headers_$1
	else
		# Fix: report the architecture actually passed to this function
		# ($2) instead of relying on the caller's global ${arch} variable.
		printf "Ignoring arch: %s\n" $2
	fi
}
# Enumerate every architecture directory under ${srctree}/arch and run the
# requested headers_* target for each one, skipping known-broken exports.
archs=$(ls ${srctree}/arch)
for arch in ${archs}; do
case ${arch} in
um) # no userspace export
;;
cris) # headers export are known broken
;;
*)
if [ -d ${srctree}/arch/${arch} ]; then
do_command $1 ${arch}
fi
;;
esac
done
|
<filename>src/aui-form-builder/js/aui-form-builder-field-types.js
/**
* The Form Builder Field Types Component
*
* @module aui-form-builder
* @submodule aui-form-builder-field-types
*/
/**
* `A.FormBuilder` extension, which is responsible for all the logic related
* to field types.
*
* @class A.FormBuilderFieldTypes
* @param {Object} config Object literal specifying layout builder configuration
* properties.
* @constructor
*/
// Extension host constructor: all behavior lives on the prototype and ATTRS
// defined below.
A.FormBuilderFieldTypes = function() {};
A.FormBuilderFieldTypes.prototype = {
/**
* Construction logic executed during the `A.FormBuilderFieldTypes`
* instantiation. Lifecycle.
*
* @method initializer
* @protected
*/
initializer: function() {
// Keep the selection modal's field type list in sync with this builder's.
this.after('fieldTypesChange', this._afterFieldTypesChange);
// React when the user picks a field type in the modal.
this.after('form-builder-field-types-modal:selectFieldType', this._afterSelectFieldType);
},
/**
* Destructor lifecycle implementation for the `A.FormBuilderFieldTypes` class.
* Lifecycle.
*
* @method destructor
* @protected
*/
destructor: function() {
A.Array.each(this.get('fieldTypes'), function(field) {
field.destroy();
});
this.get('fieldTypesModal').destroy();
},
/**
* Disables unique fields for the field class that the given field is an
* instance of.
*
* @method disableUniqueFieldType
* @param {A.FormField} field
*/
disableUniqueFieldType: function(field) {
var fieldType = this.findTypeOfField(field);
if (fieldType.get('unique')) {
fieldType.set('disabled', true);
}
},
/**
* Finds the type of the given field.
*
* @method findTypeOfField
* @param {A.FormBuilderFieldBase} field
*/
findTypeOfField: function(field) {
var fieldTypes = this.get('fieldTypes'),
i;
for (i = 0; i < fieldTypes.length; i++) {
if (field.constructor === fieldTypes[i].get('fieldClass')) {
return fieldTypes[i];
}
}
},
/**
* Hides the fields modal.
*
* @method hideFieldsPanel
*/
hideFieldsPanel: function() {
var fieldTypesModal = this.get('fieldTypesModal');
fieldTypesModal.hide();
},
/**
* Adds a the given field types to this form builder.
*
* @method registerFieldTypes
* @param {Array | Object | A.FormBuilderFieldType} typesToAdd This can be
* either an array of items or a single item. Each item should be either
* an instance of `A.FormBuilderFieldType`, or the configuration object
* to be used when instantiating one.
*/
registerFieldTypes: function(typesToAdd) {
var fieldTypes = this.get('fieldTypes');
typesToAdd = A.Lang.isArray(typesToAdd) ? typesToAdd : [typesToAdd];
A.Array.each(typesToAdd, function(type) {
fieldTypes.push(type);
});
this.set('fieldTypes', fieldTypes);
},
/**
* Shows the fields modal.
*
* @method showFieldsPanel
*/
showFieldsPanel: function() {
var fieldTypesModal = this.get('fieldTypesModal');
if (!fieldTypesModal.get('rendered')) {
fieldTypesModal.render();
}
fieldTypesModal.show();
},
/**
* Removes the given field types from this form builder.
*
* @method unregisterFieldTypes
* @param {Array | String | A.FormBuilderFieldType} typesToRemove This can be
* either an array of items, or a single one. For each item, if it's a
* string, the form builder will remove all registered field types with
* a field class that matches it. For items that are instances of
* `A.FormBuilderFieldType`, only the same instances will be removed.
*/
unregisterFieldTypes: function(typesToRemove) {
var instance = this;
typesToRemove = A.Lang.isArray(typesToRemove) ? typesToRemove : [typesToRemove];
A.Array.each(typesToRemove, function(type) {
instance._unregisterFieldType(type);
});
this.set('fieldTypes', this.get('fieldTypes'));
},
/**
* Fired after the `fieldTypes` attribute is set.
*
* @method _afterFieldTypesChange
* @param {EventFacade} event
* @protected
*/
_afterFieldTypesChange: function(event) {
this.get('fieldTypesModal').set('fieldTypes', event.newVal);
},
/**
* Fired after a field type is selected by the user.
*
* @method _afterSelectFieldType
* @param {EventFacade} event
* @protected
*/
_afterSelectFieldType: function(event) {
var field,
fieldType = event.fieldType;
// Disabled types (unique types already present on the form) are ignored.
if (!fieldType.get('disabled')) {
// Instantiate the type's field class with its default configuration.
field = new(fieldType.get('fieldClass'))(fieldType.get('defaultConfig'));
this.showFieldSettingsPanel(field, fieldType.get('label'));
}
},
/**
* Check on all created fields if there is one of the same type
* of the parameter.
*
* @method _checkActiveLayoutHasFieldType
* @param {Object} fieldType
* @return {Boolean}
* @protected
*/
_checkActiveLayoutHasFieldType: function(fieldType) {
var col,
cols,
fieldList,
row,
rows = this.getActiveLayout().get('rows');
for (row = 0; row < rows.length; row++) {
cols = rows[row].get('cols');
for (col = 0; col < cols.length; col++) {
fieldList = cols[col].get('value');
if (fieldList && this._checkListHasFieldType(fieldList, fieldType)) {
return true;
}
}
}
return false;
},
/**
* Checks on all fields of a field list if there is one of the
* same type of the parameter.
*
* @method _checkListHasFieldType
* @param {A.FormBuilderFIeldList} fieldList
* @param {Object} fieldType
* @return {Boolean}
* @protected
*/
_checkListHasFieldType: function(fieldList, fieldType) {
var fields = fieldList.get('fields'),
i;
for (i = 0; i < fields.length; i++) {
if (this._hasFieldType(fieldType, fields[i])) {
return true;
}
}
return false;
},
/**
* Checks if the given field is of the given field type.
*
* @method _hasFieldType
* @param {A.FormBuilderFieldType} fieldType
* @param {A.FormField} field
* @return {Boolean}
* @protected
*/
_hasFieldType: function(fieldType, field) {
var i,
nestedFields = field.get('nestedFields');
if (field.constructor === fieldType.get('fieldClass')) {
return true;
}
for (i = 0; i < nestedFields.length; i++) {
if (this._hasFieldType(fieldType, nestedFields[i])) {
return true;
}
}
return false;
},
/**
* Sets the `fieldTypes` attribute.
*
* @method _setFieldTypes
* @param {Object | A.FormBuilderFieldType} val
* @return {A.FormBuilderFieldType}
* @protected
*/
_setFieldTypes: function(val) {
for (var i = 0; i < val.length; i++) {
if (!A.instanceOf(val[i], A.FormBuilderFieldType)) {
val[i] = new A.FormBuilderFieldType(val[i]);
}
}
return val;
},
/**
* Removes a single given field type from this form builder.
*
* @method _unregisterFieldType
* @param {String | A.FormBuilderFieldType} fieldType
* @protected
*/
_unregisterFieldType: function(fieldType) {
var fieldTypes = this.get('fieldTypes'),
i;
if (A.Lang.isFunction(fieldType)) {
for (i = fieldTypes.length - 1; i >= 0; i--) {
if (fieldTypes[i].get('fieldClass') === fieldType) {
this._unregisterFieldTypeByIndex(i);
}
}
}
else {
this._unregisterFieldTypeByIndex(fieldTypes.indexOf(fieldType));
}
},
/**
* Unregisters the field type at the given index.
*
* @method _unregisterFieldTypeByIndex
* @param {Number} index
* @protected
*/
_unregisterFieldTypeByIndex: function(index) {
var fieldTypes = this.get('fieldTypes');
if (index !== -1) {
fieldTypes[index].destroy();
fieldTypes.splice(index, 1);
}
},
/**
* Enable or disable unique FieldTypes based on created Fields.
*
* @method _updateUniqueFieldType
* @protected
*/
_updateUniqueFieldType: function() {
var instance = this;
A.Array.each(instance.get('fieldTypes'), function(fieldType) {
if (fieldType.get('unique')) {
fieldType.set('disabled', instance._checkActiveLayoutHasFieldType(fieldType));
}
});
},
/**
* Default value for the modal displayed to select a field.
*
* @method _valueFieldTypesModal
* @return {A.FormBuilderFieldTypesModal}
* @protected
*/
_valueFieldTypesModal: function() {
// Centered, non-draggable, non-resizable modal seeded with the current
// field type list; starts hidden until showFieldsPanel() is called.
var fieldTypesModal = new A.FormBuilderFieldTypesModal({
centered: true,
cssClass: 'form-builder-modal',
draggable: false,
fieldTypes: this.get('fieldTypes'),
modal: true,
resizable: false,
visible: false,
zIndex: 4
});
// Bubble the modal's events (e.g. selectFieldType) up to this builder.
fieldTypesModal.addTarget(this);
return fieldTypesModal;
}
};
/**
* Static property used to define the default attribute
* configuration for the `A.FormBuilderFieldTypes`.
*
* @property ATTRS
* @type Object
* @static
*/
A.FormBuilderFieldTypes.ATTRS = {
/**
 * The collection of field types that can be selected as fields for
 * this form builder. The setter normalizes plain config objects into
 * `A.FormBuilderFieldType` instances.
 *
 * @attribute fieldTypes
 * @default []
 * @type Array
 */
fieldTypes: {
setter: '_setFieldTypes',
validator: A.Lang.isArray,
value: []
},
/**
 * The modal that will be used to select a field type. Built lazily by
 * `_valueFieldTypesModal`.
 *
 * @attribute fieldTypesModal
 * @type `A.FormBuilderFieldTypesModal`
 */
fieldTypesModal: {
valueFn: '_valueFieldTypesModal'
}
};
|
#!/bin/bash
# Bioconda post-link script: download the annotation tarball from the first
# mirror whose md5 checksum matches, then install it into the conda R library.
FN="IlluminaHumanMethylationEPICanno.ilm10b2.hg19_0.6.0.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.10/data/annotation/src/contrib/IlluminaHumanMethylationEPICanno.ilm10b2.hg19_0.6.0.tar.gz"
"https://bioarchive.galaxyproject.org/IlluminaHumanMethylationEPICanno.ilm10b2.hg19_0.6.0.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-illuminahumanmethylationepicanno.ilm10b2.hg19/bioconductor-illuminahumanmethylationepicanno.ilm10b2.hg19_0.6.0_src_all.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-illuminahumanmethylationepicanno.ilm10b2.hg19/bioconductor-illuminahumanmethylationepicanno.ilm10b2.hg19_0.6.0_src_all.tar.gz"
)
MD5="6a8e2398a0f930aa7828a1412fb21e88"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
curl $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
# Fix: GNU md5sum -c requires TWO spaces between the checksum and the
# filename; with a single space the line is rejected as "improperly
# formatted" and every download would be treated as failed.
if md5sum -c <<<"$MD5  $TARBALL"; then
SUCCESS=1
break
fi
# Idiom fix: 'elif' replaces the previous nested 'else if ... fi fi'.
elif [[ $(uname -s) == "Darwin" ]]; then
if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
SUCCESS=1
break
fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
|
import Foundation

/// Schedules `closure` on the main queue after `delay` seconds.
///
/// - Parameters:
///   - delay: seconds to wait before executing.
///   - closure: work to run on the main queue. Marked `@escaping` (fix):
///     `asyncAfter` stores the closure beyond this function's return, so a
///     non-escaping parameter does not compile under Swift 3+.
func waitThenRunOnMain(delay: Double, closure: @escaping () -> Void) {
    let dispatchTime = DispatchTime.now() + delay
    DispatchQueue.main.asyncAfter(deadline: dispatchTime, execute: closure)
}
<reponame>pick-stars/flinkx<filename>flinkx-connectors/flinkx-connector-elasticsearch6/src/main/java/com/dtstack/flinkx/connector/elasticsearch6/options/DtElasticsearch6Options.java
package com.dtstack.flinkx.connector.elasticsearch6.options;
import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ConfigOptions;
/**
 * @description: ConfigOption definitions for the Elasticsearch 6 connector:
 *               the bulk-flush action threshold and the sink parallelism.
 * @program: flinkx-all
 * @author: lany
 * @create: 2021/06/29 21:40
 */
public class DtElasticsearch6Options {
// Upper bound of buffered actions before a bulk request is flushed
// (defaults to 1000).
public static final ConfigOption<Integer> DT_BULK_FLUSH_MAX_ACTIONS_OPTION =
ConfigOptions.key("bulk-flush.max-actions")
.intType()
.defaultValue(1000)
.withDescription("Maximum number of actions to buffer for each bulk request.");
// Parallelism used when running the connector (defaults to 1).
public static final ConfigOption<Integer> DT_PARALLELISM_OPTION =
ConfigOptions.key("parallelism")
.intType()
.defaultValue(1)
.withDescription("Parallelism for connector running.");
}
|
// NOTE(review): heavy eval/Function.toString metaprogramming — the exact
// strings and regexes below are load-bearing; do not reformat them.
this.x.$require("../array/index.js").then(function () {
// Injection: rewrites a function's source so that, when invoked, it first
// installs get/set proxies bound to globalThis._data_ and then runs the
// original body with globalThis swapped for the proxied data object.
Function.prototype._X_CLOUD_INJECT = function (globalThis) {
// Injection target defaults to a fresh object with an empty data store.
globalThis = globalThis || {};
globalThis._data_ = {};
// Build the rewritten source: the regex splits the stringified function
// into its parameter list (arguments[2]) and body (arguments[6]), then a
// proxy-installing prelude is spliced in front of the body.
var runStr =
"(" +
(function (fun) {
return fun.toString().replace(/((?!\().)*(((?!(\{|\=\>)).)*)(\{|\=\>)([\s\S]*)/, function () {
return (
"function"+arguments[2]+"{(" +
function () {
globalThis.proxy({
// Getter proxy: evaluates the requested expression against
// the bound _data_ object when it is non-blank.
get: function (str, $) {
$ = $ || 0;
if (/[^(\s|\t)]/.test(str)) {
str && eval("str =" + str);
return str;
}
}.bind(globalThis._data_),
// Setter proxy: assigns into _data_ via eval when both the
// target expression and the value are provided.
set: function (str, val) {
if (/[^(\s|\t)]/.test(str)) {
str && typeof val != "undefined" && eval(str + "=" + val);
}
}.bind(globalThis._data_),
});
}.toString() +
")();globalThis=globalThis.data" +
arguments[6]
);
});
})(this) +
").bind(globalThis._data_);";
// The returned wrapper forwards its runtime arguments into the rewritten
// function by name ("arguments[0], arguments[1], ...") and executes it
// through globalThis.run. Relies on the Array helpers installed by
// ../array/index.js (_X_CLOUD_ARGTOARR / _X_CLOUD_ARRFILL) — presumably
// arguments-to-array and indexed fill; TODO confirm in that module.
return function () {
return globalThis.run(
eval(
"globalThis.eval(runStr,globalThis)(" +
new Array(Array._X_CLOUD_ARGTOARR(arguments).length)
._X_CLOUD_ARRFILL(function (i) {
this[i] = "arguments[" + i + "]";
})
.join(",") +
")"
)
);
}.bind(this);
};
});
|
#!/bin/sh
# Print the CPU architecture component of a target triple passed as $1,
# e.g. "i686-pc-linux-gnu" -> "i386", "x86_64-linux-gnu" -> "x86_64".
if echo "$1" | grep -Eq 'i[[:digit:]]86-'; then
  # Any ix86 variant (i386-/i486-/i586-/i686-, matched anywhere in the
  # string, not anchored) is normalized to plain "i386".
  echo i386
else
  # Otherwise emit the leading run of alphanumeric/underscore characters.
  echo "$1" | grep -Eo '^[[:alnum:]_]*'
fi
#!/usr/bin/env bash
# Requires notify-send.py (pip)
# Persist the captured image. When no filename was supplied on the command
# line, ask for one via a zenity save dialog; abort quietly on cancel.
save_file() {
  [ -n "$FILENAME" ] || FILENAME="$(zenity --file-selection --save --confirm-overwrite --filename="screenshot$(date +%Y%m%d%H%M%S).png")" || return 1
  cp "$TMPFILE" "$FILENAME"
  echo "Saved"
}
# Copy the captured image onto the X clipboard as image/png.
clipboard() {
  xclip -selection clipboard -t image/png "$TMPFILE"
  echo "Copied"
}
# --- Argument handling -----------------------------------------------------
case "$1" in
  -h|--help)
    echo "Takes a screenshot using maim and displays a notification with possible actions."
    echo -e "Usage:\n\t${0##*/} [filename] [maim options]"
    exit
    ;;
esac

FILENAME=""
case "$1" in
  -*) ;;                      # first arg is a maim option; no filename given
  *) FILENAME="$1"; shift ;;  # first arg (possibly empty) is the filename
esac

# --- Capture ---------------------------------------------------------------
TMPFILE="$(mktemp /dev/shm/screenshot_sh.XXXXXX)"
maim "$@" > "$TMPFILE" || exit 1
# Remove the temp file on normal exit and on INT/HUP/TERM.
trap "rm -f '$TMPFILE'" 0 # EXIT
trap "rm -f '$TMPFILE'; exit 1" 2 # INT
trap "rm -f '$TMPFILE'; exit 1" 1 15 # HUP TERM

# --- Notification + chosen action ------------------------------------------
ANS="$(notify-send.py -i "$TMPFILE" -t 0 test --action save:Save "copy:Copy to clipboard" both:Both discard:Discard | head -n 1)"
case "$ANS" in
  save) save_file ;;
  copy) clipboard ;;
  both) clipboard; save_file ;;
  discard) ;;
esac
|
#!/usr/bin/python
# This script has been updated with the Study drills.
# Demonstrates %-style string formatting (a string inside a string).
# NOTE: print is written in call form with a single expression, which runs
# identically under both Python 2 and Python 3.

# A number formatted into a string.
x = "There are %d types of people." % 10
binary = "binary"
do_not = "don't"
# Two strings formatted into another string.
y = "Those who know %s and those who %s." % (binary, do_not)

# Now print them out.
print(x)
print(y)

# Print them again, embedded in new format strings:
# %r shows the repr() (with quotes), %s shows the plain text.
print("I said: %r." % x)
print("I also said: '%s'." % y)

hilarious = True
# A format string applied later, at print time.
joke_evaluation = "Isn't that joke so funny?! %r"
print(joke_evaluation % hilarious)

w = "This is left side of..."
e = "a string with a right side."
# Concatenate the two strings with +.
print(w + e)
<filename>src/main.ts<gh_stars>0
import { NestFactory } from '@nestjs/core';
import {
SwaggerModule,
DocumentBuilder,
SwaggerCustomOptions,
} from '@nestjs/swagger';
import { AppModule } from './app.module';
import helmet from 'helmet';
import { logger } from './common/middlewares/logger.middlewares';
/**
 * Application entry point: creates the Nest app, wires security and logging
 * middleware, publishes Swagger docs at /docs and starts the HTTP server.
 */
async function bootstrap() {
  const app = await NestFactory.create(AppModule, {
    cors: true,
    logger: ['error', 'warn', 'debug', 'verbose', 'log'],
  });
  // Helmet can help protect your app from some well-known web vulnerabilities
  // by setting HTTP headers appropriately.
  app.use(helmet());
  app.use(logger);
  // npm injects these variables only when started via npm scripts; fall back
  // to safe defaults so DocumentBuilder always receives defined strings
  // (setVersion/setContact reject undefined under strict typing).
  const APP_NAME = process.env.npm_package_name ?? 'app';
  const APP_VERSION = process.env.npm_package_version ?? '0.0.0';
  const AUTHOR = process.env.npm_package_author_name ?? '';
  const config = new DocumentBuilder()
    .setTitle('Cats example')
    .setDescription(`The ${APP_NAME} API description`)
    .setVersion(APP_VERSION)
    .setContact(AUTHOR, '', '')
    .addTag('cats')
    .build();
  const customOptions: SwaggerCustomOptions = {
    customSiteTitle: 'Cats API Docs',
  };
  const document = SwaggerModule.createDocument(app, config);
  SwaggerModule.setup('docs', app, document, customOptions);
  // Allow the listen port to be overridden via the environment (default 3000).
  await app.listen(process.env.PORT ?? 3000);
}
bootstrap();
|
<reponame>jvm-odoo/jvm<filename>src/url/url.service.ts
import { InjectRepository } from '@nestjs/typeorm'
import { Repository } from 'typeorm'
import { UrlEntity } from './url.entity'
/**
 * Service handling persistence and generation of shortened URLs.
 */
export class UrlService {
  constructor(
    @InjectRepository(UrlEntity)
    private readonly urlRepository: Repository<UrlEntity>
  ) {}

  /** Look up a single URL entity by its shortcut code. */
  async find(shortcut: string): Promise<UrlEntity> {
    return this.urlRepository.findOne({
      where: { shortcut },
    })
  }

  /** Return every stored URL entity. */
  async findAll(): Promise<UrlEntity[]> {
    return this.urlRepository.find()
  }

  /**
   * Create and persist a shortcut for `original`.
   * @throws Error when the URL is missing or shorter than 25 characters.
   */
  async create(original?: string): Promise<UrlEntity> {
    if (!original || original.length < 25) {
      throw new Error('URL too short')
    }
    const shortcut = this.generateShortcutUrl()
    const urlEntity = this.urlRepository.create({
      original,
      shortcut,
    })
    return this.urlRepository.save(urlEntity)
  }

  /** Generate a random 5-character alphanumeric shortcut code. */
  generateShortcutUrl(): string {
    const characters =
      'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
    let result = ''
    // NOTE(review): collisions are possible (62^5 space, no uniqueness
    // check here) — confirm the DB enforces uniqueness on `shortcut`.
    for (let i = 0; i < 5; i++) {
      result += characters.charAt(
        Math.floor(Math.random() * characters.length)
      )
    }
    return result
  }
}
|
-- Drop any existing auth/RBAC tables so the schema can be recreated from scratch.
drop table if exists `sys_user`;
drop table if exists `sys_resource`;
drop table if exists `sys_permission`;
drop table if exists `sys_role`;
drop table if exists `sys_role_resource_permission`;
drop table if exists `sys_group`;
drop table if exists `sys_user_group`;
drop table if exists `sys_auth`;
-- Users: login identities with credentials (password + salt) and status flags.
create table `sys_user`(
`id` bigint not null auto_increment,
`username` varchar(100),
`email` varchar(100),
`mobile_phone_number` varchar(20),
`password` varchar(100),
`salt` varchar(10),
`create_date` timestamp default 0,
`status` varchar(50),
`deleted` bool,
`admin` bool,
`update_timestamp` datetime NULL DEFAULT NULL ,
`create_timestamp` datetime NULL DEFAULT NULL ,
constraint `pk_sys_user` primary key(`id`),
constraint `unique_sys_user_username` unique(`username`),
constraint `unique_sys_user_email` unique(`email`),
constraint `unique_sys_user_mobile_phone_number` unique(`mobile_phone_number`),
index `idx_sys_user_status` (`status`)
) charset=utf8 ENGINE=InnoDB;
-- Resources: hierarchical menu/URL entries (parent_ids stores the ancestor path).
create table `sys_resource`(
`id` bigint not null auto_increment,
`name` varchar(100),
`identity` varchar(100),
`url` varchar(200),
`parent_id` bigint,
`parent_ids` varchar(200) default '',
`icon` varchar(200),
`weight` int,
`is_show` bool,
`update_timestamp` datetime NULL DEFAULT NULL ,
`create_timestamp` datetime NULL DEFAULT NULL ,
constraint `pk_sys_resource` primary key(`id`),
index `idx_sys_resource_name` (`name`),
index `idx_sys_resource_identity` (`identity`),
index `idx_sys_resource_user` (`url`),
index `idx_sys_resource_parent_id` (`parent_id`),
index `idx_sys_resource_parent_ids_weight` (`parent_ids`, `weight`)
) charset=utf8 ENGINE=InnoDB;
-- Permissions: named operations (save/update/delete/view/...) granted on resources.
create table `sys_permission`(
`id` bigint not null auto_increment,
`name` varchar(100),
`permission` varchar(100),
`description` varchar(200),
`is_show` bool,
`update_timestamp` datetime NULL DEFAULT NULL ,
`create_timestamp` datetime NULL DEFAULT NULL ,
constraint `pk_sys_permission` primary key(`id`),
index idx_sys_permission_name (`name`),
index idx_sys_permission_permission (`permission`),
index idx_sys_permission_show (`is_show`)
) charset=utf8 ENGINE=InnoDB;
-- Roles: named bundles of resource/permission grants.
create table `sys_role`(
`id` bigint not null auto_increment,
`name` varchar(100),
`role` varchar(100),
`description` varchar(200),
`is_show` bool,
`update_timestamp` datetime NULL DEFAULT NULL ,
`create_timestamp` datetime NULL DEFAULT NULL ,
constraint `pk_sys_role` primary key(`id`),
index `idx_sys_role_name` (`name`),
index `idx_sys_role_role` (`role`),
index `idx_sys_role_show` (`is_show`)
) charset=utf8 ENGINE=InnoDB;
-- Role -> resource mapping; permission_ids is a comma-separated id list.
create table `sys_role_resource_permission`(
`id` bigint not null auto_increment,
`role_id` bigint,
`resource_id` bigint,
`permission_ids` varchar(500),
`update_timestamp` datetime NULL DEFAULT NULL ,
`create_timestamp` datetime NULL DEFAULT NULL ,
constraint `pk_sys_role_resource_permission` primary key(`id`),
constraint `unique_sys_role_resource_permission` unique(`role_id`, `resource_id`)
) charset=utf8 ENGINE=InnoDB;
-- Groups: user collections that can be authorized as a unit.
create table `sys_group`(
`id` bigint not null auto_increment,
`name` varchar(100),
`type` varchar(50),
`is_show` bool,
`update_timestamp` datetime NULL DEFAULT NULL ,
`create_timestamp` datetime NULL DEFAULT NULL ,
constraint `pk_sys_group` primary key(`id`),
index `idx_sys_group_type` (`type`),
index `idx_sys_group_show` (`is_show`)
) charset=utf8 ENGINE=InnoDB;
-- User <-> group membership join table.
create table `sys_user_group`(
`id` bigint not null auto_increment,
`user_id` bigint,
`group_id` bigint,
`update_timestamp` datetime NULL DEFAULT NULL ,
`create_timestamp` datetime NULL DEFAULT NULL ,
constraint `pk_sys_user_group` primary key(`id`),
index `idx_sys_user_group_user` (`user_id`),
index `idx_sys_user_group_group` (`group_id`)
) charset=utf8 ENGINE=InnoDB;
-- Authorizations: role_ids (comma-separated) granted to a user or a group.
create table `sys_auth`(
`id` bigint not null auto_increment,
`user_id` bigint,
`group_id` bigint,
`role_ids` varchar(500),
`type` varchar(50),
constraint `pk_sys_auth` primary key(`id`),
index `idx_sys_auth_user` (`user_id`),
index `idx_sys_auth_group` (`group_id`),
index `idx_sys_auth_type` (`type`)
) charset=utf8 ENGINE=InnoDB;
-- Permissions
insert into `sys_permission` values (1, '所有', 'all', '所有数据操作的权限', 1, NOW(), NOW());
insert into `sys_permission` values (2, '新增', 'save', '新增数据操作的权限', 1, NOW(), NOW());
insert into `sys_permission` values (3, '修改', 'update', '修改数据操作的权限', 1, NOW(), NOW());
insert into `sys_permission` values (4, '删除', 'delete', '删除数据操作的权限', 1, NOW(), NOW());
insert into `sys_permission` values (5, '查看', 'view', '查看数据操作的权限', 1, NOW(), NOW());
insert into `sys_permission` values (6, '不显示的权限', 'none', '不显示的权限', 0, NOW(), NOW());
-- Roles
insert into `sys_role` values (1, '管理员', 'admin', '拥有所有权限', 1, NOW(), NOW());
insert into `sys_role` values (2, '测试人员', 'test', '测试人员', 1, NOW(), NOW());
insert into `sys_role` values (3, '不显示的角色', 'none', '测试人员', 0, NOW(), NOW());
-- Resources
insert into `sys_resource`(`id`, `parent_id`, `parent_ids`, weight, `name`, `identity`, `url`, `is_show`, `update_timestamp`, `create_timestamp`)
values (1, 0, '0/', 1, '示例列表', 'example:example', '/showcase/sample', true, NOW(), NOW());
insert into `sys_resource`(`id`, `parent_id`, `parent_ids`, weight, `name`, `identity`, `url`, `is_show`, `update_timestamp`, `create_timestamp`)
values (2, 0, '0/', 2, '逻辑删除列表', 'example:deleted', '/showcase/deleted', false, NOW(), NOW());
insert into `sys_resource`(`id`, `parent_id`, `parent_ids`, weight, `name`, `identity`, `url`, `is_show`, `update_timestamp`, `create_timestamp`)
values (3, 0, '0/', 4, '文件上传列表', 'example:upload', '/showcase/upload', true, NOW(), NOW());
-- Role--resource--permission assignments
insert into `sys_role_resource_permission` (`id`, `role_id`, `resource_id`, `permission_ids`, `update_timestamp`, `create_timestamp`)
values(1, 1, 1, '1,2,6', NOW(), NOW());
insert into `sys_role_resource_permission` (`id`, `role_id`, `resource_id`, `permission_ids`, `update_timestamp`, `create_timestamp`)
values(2, 1, 2, '1,3,5', NOW(), NOW());
insert into `sys_role_resource_permission` (`id`, `role_id`, `resource_id`, `permission_ids`, `update_timestamp`, `create_timestamp`)
values(3, 2, 3, '1,3,6', NOW(), NOW());
insert into `sys_role_resource_permission` (`id`, `role_id`, `resource_id`, `permission_ids`, `update_timestamp`, `create_timestamp`)
values(4, 3, 1, '1,4,6', NOW(), NOW());
-- Groups
insert into `sys_group` (`id`, `name`, `type`, `is_show`, `update_timestamp`, `create_timestamp`)
values(1, '管理员', 'admin', true, NOW(), NOW());
insert into `sys_group` (`id`, `name`, `type`, `is_show`, `update_timestamp`, `create_timestamp`)
values(2, '用户组', 'user', true, NOW(), NOW());
-- User--group membership
insert into `sys_user_group` (`id`, `group_id`, `user_id`, `update_timestamp`, `create_timestamp`)
values(1, 2, 1, NOW(), NOW());
-- Group role grants (type 'user_group': roles granted via group membership)
insert into sys_auth (`id`, `user_id`, `group_id`, `role_ids`, `type`)
values(1, 0, 1, '1,3', 'user_group');
insert into sys_auth (`id`, `user_id`, `group_id`, `role_ids`, `type`)
values(2, 0, 2, '2,3', 'user_group');
<reponame>dbulaja98/ISA-2020-TEAM19<filename>backend/src/main/java/com/pharmacySystem/mappers/GradeMapper.java
package com.pharmacySystem.mappers;
import com.pharmacySystem.DTOs.CreateGradeDTO;
import com.pharmacySystem.DTOs.EmployeeGradeDTO;
import com.pharmacySystem.DTOs.MedicineGradeDTO;
import com.pharmacySystem.DTOs.PharmacyGradeDTO;
import com.pharmacySystem.model.grade.Grade;
import com.pharmacySystem.model.medicine.Medicine;
import com.pharmacySystem.model.pharmacy.Pharmacy;
import com.pharmacySystem.model.user.Dermatologist;
import com.pharmacySystem.model.user.Pharmacist;
/**
 * Static helpers mapping between grade-related DTOs and domain entities.
 */
public class GradeMapper {
    /** Wrap a pharmacist as an employee-grade DTO. */
    public static EmployeeGradeDTO createEmployeeGradeDTOFromPharmacist(Pharmacist pharmacist) {
        return new EmployeeGradeDTO(pharmacist);
    }

    /** Wrap a dermatologist as an employee-grade DTO. */
    public static EmployeeGradeDTO createEmployeeGradeDTOFromDermatologist(Dermatologist dermatologist) {
        return new EmployeeGradeDTO(dermatologist);
    }

    /** Wrap a medicine as a medicine-grade DTO. */
    public static MedicineGradeDTO createMedicineGradeDTOFromMedicine(Medicine medicine) {
        return new MedicineGradeDTO(medicine);
    }

    /** Wrap a pharmacy as a pharmacy-grade DTO. */
    public static PharmacyGradeDTO createPharmacyGradeDTOFromPharmacy(Pharmacy pharmacy) {
        return new PharmacyGradeDTO(pharmacy);
    }

    /** Copy grade value, graded-entity id and grade type from the DTO onto the entity. */
    public static void createGradeFromCreateGradeDTO(Grade grade, CreateGradeDTO createGradeDTO) {
        grade.setGrade(createGradeDTO.getGrade());
        grade.setGradedId(createGradeDTO.getGradedId());
        grade.setType(createGradeDTO.getGradeType());
    }
}
<filename>gmall-wms-interface/src/main/java/com/atguigu/gmall/wms/api/GmallWmsApi.java
package com.atguigu.gmall.wms.api;
import com.atguigu.gmall.common.bean.ResponseVo;
import com.atguigu.gmall.wms.entity.WareSkuEntity;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PathVariable;
import java.util.List;
/**
 * HTTP API contract for the warehouse (WMS) service.
 */
public interface GmallWmsApi {
    /** Query warehouse stock records for the given SKU id (GET wms/waresku/sku/{skuId}). */
    @GetMapping("wms/waresku/sku/{skuId}")
    public ResponseVo<List<WareSkuEntity>> querySkuWareBySkuId(@PathVariable Long skuId);
}
|
package com.comp.admin.utils;
/**
*
*/
/**
 * Shared constant keys for session attributes and authentication defaults.
 */
public class ConstantUtil {
    // Default password for new users.
    // NOTE(review): "<PASSWORD>" looks like a scrubbed placeholder — confirm
    // the intended default before use.
    public static final String DEFAULT_PASSWORD = "<PASSWORD>";
    /** Session key for the user's menus. */
    public static final String SESS_MENU= "menus";
    /** Session key for the current module. */
    public static final String SESS_MODULE= "module";
    /** Session key for the logged-in user. */
    public static final String SESS_USER = "currUser";
    /** Key for the login name. */
    public static final String LOGIN_NAME = "loginName";
}
|
-- Fetch all students whose age is between 25 and 35 (BETWEEN is inclusive).
SELECT *
FROM Student
WHERE age BETWEEN 25 AND 35;
# Choose a target
target_name="Zapp-App"
# Get project directory path
current_pwd="$PWD"
echo "Current pwd dir is $current_pwd"
pods_dir="$(cd "Pods/"; pwd)"
echo "Current Pods dir is $pods_dir"
#project_dir=`cd "../../"; pwd`
#cd "$current_pwd"
# Get .xcodeproj file path (first match at the top level of the project dir)
project_file="$(find "$current_pwd" -maxdepth 1 -name "*.xcodeproj" | tail -1)"
if [ -z "$project_file" ]; then
  echo "Can't find the .xcodeproj file, going to skip this script. It's properly a debug build anyway..:)"
else
  # add script to project — arguments are quoted because macOS project
  # paths frequently contain spaces.
  ruby add_scripts_to_project.rb "$target_name" "$project_file"
fi
|
<gh_stars>10-100
// Package vagrantutil is a high level wrapper around Vagrant which provides an
// idiomatic go API.
package vagrantutil
import (
"bufio"
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/koding/logging"
)
//go:generate stringer -type=Status -output=stringer.go

// Status describes the lifecycle state of a Vagrant box.
type Status int

const (
	// Some possible states:
	// https://github.com/mitchellh/vagrant/blob/master/templates/locales/en.yml#L1504
	Unknown Status = iota
	NotCreated
	Running
	Saved
	PowerOff
	Aborted
	Preparing
)

// Box represents a single line of `vagrant box list` output.
type Box struct {
	Name     string
	Provider string
	Version  string
}

// CommandOutput is the streaming output of a command.
type CommandOutput struct {
	Line  string
	Error error
}

// Vagrant drives the vagrant CLI for a single Vagrantfile directory.
type Vagrant struct {
	// VagrantfilePath is the directory where the Vagrantfile is stored.
	VagrantfilePath string
	// ProviderName overwrites the default provider used for the Vagrantfile.
	ProviderName string
	// ID is the unique ID of the given box.
	ID string
	// State is populated/updated if the Status() or List() method is called.
	State string
	// Log is used for logging output of vagrant commands in debug mode.
	Log logging.Logger
}
// NewVagrant returns a new Vagrant instance for the given path. The path
// should be unique. If the path already exists in the system it'll be used, if
// not a new setup will be created.
func NewVagrant(path string) (*Vagrant, error) {
	if path == "" {
		return nil, errors.New("vagrant: path is empty")
	}
	// Ensure the working directory exists before any vagrant command runs.
	if err := os.MkdirAll(path, 0755); err != nil {
		return nil, err
	}
	return &Vagrant{
		VagrantfilePath: path,
	}, nil
}
// Create creates the vagrantFile in the pre initialized vagrant path.
func (v *Vagrant) Create(vagrantFile string) error {
	// Recreate the directory in case it was removed between
	// call to NewVagrant and Create.
	if err := os.MkdirAll(v.VagrantfilePath, 0755); err != nil {
		return v.error(err)
	}
	v.debugf("create:\n%s", vagrantFile)
	// 0644: owner-writable, world-readable Vagrantfile.
	return v.error(ioutil.WriteFile(v.vagrantfile(), []byte(vagrantFile), 0644))
}
// Version returns the current installed vagrant version.
func (v *Vagrant) Version() (string, error) {
	out, err := v.vagrantCommand().run("version", "--machine-readable")
	if err != nil {
		return "", err
	}
	records, err := parseRecords(out)
	if err != nil {
		return "", v.error(err)
	}
	// "version-installed" is the machine-readable record key vagrant
	// emits for the locally installed version.
	versionInstalled, err := parseData(records, "version-installed")
	if err != nil {
		return "", v.error(err)
	}
	return versionInstalled, nil
}
// Status returns the state of the box, such as "Running", "NotCreated", etc...
func (v *Vagrant) Status() (s Status, err error) {
	// Mirror the final status into the cached State field on every
	// return path.
	defer func() {
		v.State = s.String()
	}()
	var notCreated bool
	cmd := v.vagrantCommand()
	// Treat vagrant's "not created" error as a recognized state rather
	// than a command failure.
	cmd.ignoreErr = func(err error) bool {
		if isNotCreated(err) {
			notCreated = true
			return true
		}
		return false
	}
	out, err := cmd.run("status", "--machine-readable")
	if err != nil {
		return Unknown, err
	}
	if notCreated {
		return NotCreated, nil
	}
	records, err := parseRecords(out)
	if err != nil {
		return Unknown, v.error(err)
	}
	status, err := parseData(records, "state")
	if err != nil {
		return Unknown, v.error(err)
	}
	s, err = toStatus(status)
	if err != nil {
		return Unknown, err
	}
	return s, nil
}
// Provider returns the provider name reported by "vagrant status" for this
// box (e.g. "virtualbox").
func (v *Vagrant) Provider() (string, error) {
	out, err := v.vagrantCommand().run("status", "--machine-readable")
	if err != nil {
		return "", err
	}
	records, err := parseRecords(out)
	if err != nil {
		return "", v.error(err)
	}
	provider, err := parseData(records, "provider-name")
	if err != nil {
		return "", v.error(err)
	}
	return provider, nil
}
// List returns all available boxes on the system. Under the hood it calls
// "global-status" and parses the output.
func (v *Vagrant) List() ([]*Vagrant, error) {
	// Refresh box status cache. So it does not report that aborted
	// box is running etc.
	_, err := v.vagrantCommand().run("global-status", "--prune")
	if err != nil {
		return nil, err
	}
	out, err := v.vagrantCommand().run("global-status")
	if err != nil {
		return nil, err
	}
	output := make([][]string, 0)
	scanner := bufio.NewScanner(strings.NewReader(out))
	collectStarted := false
	for scanner.Scan() {
		if strings.HasPrefix(scanner.Text(), "--") {
			scanner.Scan() // advance to next line
			collectStarted = true
		}
		if !collectStarted {
			continue
		}
		trimmedLine := strings.TrimSpace(scanner.Text())
		if trimmedLine == "" {
			break // we are finished with collecting the boxes
		}
		fields := strings.Fields(trimmedLine)
		// example line: 0c269f6 default virtualbox aborted /Users/fatih/path
		// Require at least id, name, provider, state and directory; skip
		// malformed lines instead of indexing out of range.
		if len(fields) < 5 {
			continue
		}
		output = append(output, fields)
	}
	if err := scanner.Err(); err != nil {
		return nil, v.error(err)
	}
	boxes := make([]*Vagrant, len(output))
	for i, box := range output {
		boxes[i] = &Vagrant{
			ID:    box[0],
			State: box[3],
			// Re-join the trailing fields so directories containing
			// spaces are reconstructed instead of truncated to the
			// last space-separated token (assumes single spaces in
			// the path — confirm against vagrant output).
			VagrantfilePath: strings.Join(box[4:], " "),
		}
	}
	return boxes, nil
}
// Up executes "vagrant up" for the given vagrantfile. The returned channel
// contains the output stream. At the end of the output, the error is put into
// the Error field if there is any.
func (v *Vagrant) Up() (<-chan *CommandOutput, error) {
	args := []string{"up"}
	if v.ProviderName != "" {
		// Pass an explicit provider only when one was configured.
		args = append(args, "--provider", v.ProviderName)
	}
	return v.vagrantCommand().start(args...)
}
// Halt executes "vagrant halt". The returned channel streams the command
// output; at the end of the stream any failure is reported via the Error
// field of CommandOutput.
func (v *Vagrant) Halt() (<-chan *CommandOutput, error) {
	return v.vagrantCommand().start("halt")
}
// Destroy executes "vagrant destroy". The returned channel streams the
// command output; at the end of the stream any failure is reported via the
// Error field of CommandOutput.
func (v *Vagrant) Destroy() (<-chan *CommandOutput, error) {
	if _, err := os.Stat(v.VagrantfilePath); os.IsNotExist(err) {
		// Makes Destroy idempotent if called consecutively multiple times on
		// the same path.
		//
		// Returning closed channel to not make existing code like the one
		// below hang:
		//
		//	ch, err := vg.Destroy()
		//	if err != nil {
		//		...
		//	}
		//	for line := range ch {
		//		...
		//	}
		//
		ch := make(chan *CommandOutput)
		close(ch)
		return ch, nil
	}
	cmd := v.vagrantCommand()
	cmd.onSuccess = func() {
		// cleanup vagrant directory on success, as it's no longer needed;
		// after destroy it should be not possible to call vagrant up
		// again, call to Create is required first
		if err := os.RemoveAll(v.VagrantfilePath); err != nil {
			v.debugf("failed to cleanup %q after destroy: %s", v.VagrantfilePath, err)
		}
		// We leave empty directory to not make other commands fail
		// due to missing cwd.
		//
		// TODO(rjeczalik): rework lookup to use box id instead
		if err := os.MkdirAll(v.VagrantfilePath, 0755); err != nil {
			v.debugf("failed to create empty dir %q after destroy: %s", v.VagrantfilePath, err)
		}
	}
	// if vagrant box is not created, return success - the destroy
	// should be effectively a nop
	cmd.ignoreErr = isNotCreated
	return cmd.start("destroy", "--force")
}
// stripFmt removes the "(", ")" and "," decoration vagrant prints around
// the provider/version columns of `vagrant box list` output.
var stripFmt = strings.NewReplacer("(", "", ",", "", ")", "")

// BoxList executes "vagrant box list", parses the output and returns all
// available base boxes.
func (v *Vagrant) BoxList() ([]*Box, error) {
	out, err := v.vagrantCommand().run("box", "list")
	if err != nil {
		return nil, err
	}
	var boxes []*Box
	scanner := bufio.NewScanner(strings.NewReader(out))
	for scanner.Scan() {
		// After stripping decoration each line is three plain
		// space-separated columns: name, provider, version.
		line := strings.TrimSpace(stripFmt.Replace(scanner.Text()))
		if line == "" {
			continue
		}
		var box Box
		n, err := fmt.Sscanf(line, "%s %s %s", &box.Name, &box.Provider, &box.Version)
		if err != nil {
			return nil, v.errorf("%s for line: %s", err, line)
		}
		if n != 3 {
			return nil, v.errorf("unable to parse output line: %s", line)
		}
		boxes = append(boxes, &box)
	}
	if err := scanner.Err(); err != nil {
		return nil, v.error(err)
	}
	return boxes, nil
}
// BoxAdd executes "vagrant box add" for the given box. The returned channel
// contains the output stream. At the end of the output, the error is put into
// the Error field if there is any.
//
// TODO(rjeczalik): BoxAdd does not currently support adding boxes directly
// from files.
func (v *Vagrant) BoxAdd(box *Box) (<-chan *CommandOutput, error) {
	args := append([]string{"box", "add"}, toArgs(box)...)
	return v.vagrantCommand().start(args...)
}
// BoxRemove executes "vagrant box remove" for the given box. The returned
// channel contains the output stream. At the end of the output, the error is
// put into the Error field if there is any.
func (v *Vagrant) BoxRemove(box *Box) (<-chan *CommandOutput, error) {
	return v.vagrantCommand().start(append([]string{"box", "remove"}, toArgs(box)...)...)
}
// SSH executes "vagrant ssh" for the given vagrantfile. The returned channel
// contains the output stream. At the end of the output, the error is put into
// the Error field if there is any.
func (v *Vagrant) SSH(command string) (<-chan *CommandOutput, error) {
	return v.vagrantCommand().start("ssh", "-c", command)
}
// vagrantfile returns the Vagrantfile path.
func (v *Vagrant) vagrantfile() string {
	return filepath.Join(v.VagrantfilePath, "Vagrantfile")
}

// vagrantfileExists checks if a Vagrantfile exists in the given path. It
// returns a nil error if exists.
// NOTE(review): stat errors other than "not exist" (e.g. permission denied)
// also yield nil — confirm this is intended.
func (v *Vagrant) vagrantfileExists() error {
	if _, err := os.Stat(v.vagrantfile()); os.IsNotExist(err) {
		return err
	}
	return nil
}

// vagrantCommand creates a command which is setup to be run next to
// Vagrantfile.
func (v *Vagrant) vagrantCommand() *command {
	return newCommand(v.VagrantfilePath, v.Log)
}

// debugf logs via v.Log (tagged with the Vagrantfile path) when a logger is
// configured; otherwise it is a no-op.
func (v *Vagrant) debugf(format string, args ...interface{}) {
	if v.Log != nil {
		v.Log.New(v.VagrantfilePath).Debug(format, args...)
	}
}

// errorf formats a new error, logs it in debug mode and returns it.
func (v *Vagrant) errorf(format string, args ...interface{}) error {
	err := fmt.Errorf(format, args...)
	v.debugf("%s", err)
	return err
}

// error logs a non-nil error in debug mode and passes it through unchanged.
func (v *Vagrant) error(err error) error {
	if err != nil {
		v.debugf("%s", err)
	}
	return err
}
// SSHConfig runs "vagrant ssh-config" and returns its raw output.
func (v *Vagrant) SSHConfig() (string, error) {
	out, err := v.vagrantCommand().run("ssh-config")
	if err != nil {
		return "", err
	}
	return out, nil
}
// toArgs converts the given box to the argument list for the
// `vagrant box add/remove` commands.
func toArgs(box *Box) []string {
	var args []string
	if box.Provider != "" {
		args = append(args, "--provider", box.Provider)
	}
	if box.Version != "" {
		args = append(args, "--box-version", box.Version)
	}
	// The box name is always the final positional argument.
	args = append(args, box.Name)
	return args
}
// toStatus converts the given vagrant state string to a Status value.
func toStatus(state string) (Status, error) {
	// Known machine-readable states and their Status equivalents.
	states := map[string]Status{
		"running":     Running,
		"not_created": NotCreated,
		"saved":       Saved,
		"poweroff":    PowerOff,
		"aborted":     Aborted,
		"preparing":   Preparing,
	}
	if s, ok := states[state]; ok {
		return s, nil
	}
	return Unknown, fmt.Errorf("Unknown state: %s", state)
}
|
#include <gtest/gtest.h>
#include <plog/Log.h>
#include <plog/Appenders/ConsoleAppender.h>
#include <plog/Formatters/MessageOnlyFormatter.h>
// Test-runner entry point: routes plog output to the console (message-only
// format, verbose level) before handing control to GoogleTest.
int main(int argc, char **argv) {
  plog::ConsoleAppender<plog::MessageOnlyFormatter> appender;
  plog::init(plog::verbose, &appender);
  testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
|
# Install wget, fetch the AdoptOpenJDK 16 (build 16+36) Linux x64 hotspot
# tarball, and unpack it so the JDK lives at /opt/jdk.
apt-get update && apt-get install -y wget
wget https://github.com/AdoptOpenJDK/openjdk16-binaries/releases/download/jdk-16%2B36/OpenJDK16-jdk_x64_linux_hotspot_16_36.tar.gz -O jdk.tar.gz
tar -xzf jdk.tar.gz -C /opt/
# The archive extracts to /opt/jdk-16+36; normalize the path.
mv /opt/jdk-16+36 /opt/jdk
<gh_stars>1-10
/*
* Copyright 2021 <NAME>.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.getlime.security.powerauth.app.nextstep.controller;
import io.getlime.core.rest.model.base.request.ObjectRequest;
import io.getlime.core.rest.model.base.response.ObjectResponse;
import io.getlime.security.powerauth.app.nextstep.service.OtpDefinitionService;
import io.getlime.security.powerauth.lib.nextstep.model.exception.ApplicationNotFoundException;
import io.getlime.security.powerauth.lib.nextstep.model.exception.OtpDefinitionAlreadyExistsException;
import io.getlime.security.powerauth.lib.nextstep.model.exception.OtpDefinitionNotFoundException;
import io.getlime.security.powerauth.lib.nextstep.model.exception.OtpPolicyNotFoundException;
import io.getlime.security.powerauth.lib.nextstep.model.request.CreateOtpDefinitionRequest;
import io.getlime.security.powerauth.lib.nextstep.model.request.DeleteOtpDefinitionRequest;
import io.getlime.security.powerauth.lib.nextstep.model.request.GetOtpDefinitionListRequest;
import io.getlime.security.powerauth.lib.nextstep.model.request.UpdateOtpDefinitionRequest;
import io.getlime.security.powerauth.lib.nextstep.model.response.CreateOtpDefinitionResponse;
import io.getlime.security.powerauth.lib.nextstep.model.response.DeleteOtpDefinitionResponse;
import io.getlime.security.powerauth.lib.nextstep.model.response.GetOtpDefinitionListResponse;
import io.getlime.security.powerauth.lib.nextstep.model.response.UpdateOtpDefinitionResponse;
import io.swagger.v3.oas.annotations.Operation;
import io.swagger.v3.oas.annotations.responses.ApiResponse;
import io.swagger.v3.oas.annotations.responses.ApiResponses;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.validation.annotation.Validated;
import org.springframework.web.bind.annotation.*;
import javax.validation.Valid;
/**
* REST controller for OTP definitions.
*
* @author <NAME>, <EMAIL>
*/
@RestController
@RequestMapping("otp/definition")
@Validated
public class OtpDefinitionController {
private static final Logger logger = LoggerFactory.getLogger(OtpDefinitionController.class);
private final OtpDefinitionService otpDefinitionService;
/**
 * REST controller constructor.
 * @param otpDefinitionService OTP definition service handling the business logic.
 */
@Autowired
public OtpDefinitionController(OtpDefinitionService otpDefinitionService) {
    this.otpDefinitionService = otpDefinitionService;
}
/**
 * Create an OTP definition.
 * @param request Create OTP definition request.
 * @return Create OTP definition response.
 * @throws OtpDefinitionAlreadyExistsException Thrown when OTP definition already exists.
 * @throws ApplicationNotFoundException Thrown when application is not found.
 * @throws OtpPolicyNotFoundException Thrown when OTP policy is not found.
 */
@Operation(summary = "Create an OTP definition")
@ApiResponses(value = {
        @ApiResponse(responseCode = "200", description = "OTP definition was created"),
        @ApiResponse(responseCode = "400", description = "Invalid request, error codes: REQUEST_VALIDATION_FAILED, OTP_DEFINITION_ALREADY_EXISTS, APPLICATION_NOT_FOUND, OTP_POLICY_NOT_FOUND"),
        @ApiResponse(responseCode = "500", description = "Unexpected error")
})
// @PostMapping is the composed shortcut for @RequestMapping(method = RequestMethod.POST).
@PostMapping
public ObjectResponse<CreateOtpDefinitionResponse> createOtpDefinition(@Valid @RequestBody ObjectRequest<CreateOtpDefinitionRequest> request) throws OtpDefinitionAlreadyExistsException, ApplicationNotFoundException, OtpPolicyNotFoundException {
    logger.info("Received createOtpDefinition request, OTP definition name: {}", request.getRequestObject().getOtpDefinitionName());
    final CreateOtpDefinitionResponse response = otpDefinitionService.createOtpDefinition(request.getRequestObject());
    logger.info("The createOtpDefinition request succeeded, OTP definition name: {}", request.getRequestObject().getOtpDefinitionName());
    return new ObjectResponse<>(response);
}
/**
 * Update an OTP definition via PUT method.
 * @param request Update OTP definition request.
 * @return Update OTP definition response.
 * @throws OtpDefinitionNotFoundException Thrown when OTP definition is not found.
 * @throws ApplicationNotFoundException Thrown when application is not found.
 * @throws OtpPolicyNotFoundException Thrown when OTP policy is not found.
 */
@Operation(summary = "Update an OTP definition")
@ApiResponses(value = {
        @ApiResponse(responseCode = "200", description = "OTP definition was updated"),
        @ApiResponse(responseCode = "400", description = "Invalid request, error codes: REQUEST_VALIDATION_FAILED, OTP_DEFINITION_NOT_FOUND, APPLICATION_NOT_FOUND, OTP_POLICY_NOT_FOUND"),
        @ApiResponse(responseCode = "500", description = "Unexpected error")
})
// @PutMapping is the composed shortcut for @RequestMapping(method = RequestMethod.PUT).
@PutMapping
public ObjectResponse<UpdateOtpDefinitionResponse> updateOtpDefinition(@Valid @RequestBody ObjectRequest<UpdateOtpDefinitionRequest> request) throws OtpDefinitionNotFoundException, ApplicationNotFoundException, OtpPolicyNotFoundException {
    logger.info("Received updateOtpDefinition request, OTP definition name: {}", request.getRequestObject().getOtpDefinitionName());
    final UpdateOtpDefinitionResponse response = otpDefinitionService.updateOtpDefinition(request.getRequestObject());
    logger.info("The updateOtpDefinition request succeeded, OTP definition name: {}", request.getRequestObject().getOtpDefinitionName());
    return new ObjectResponse<>(response);
}
/**
 * Update an OTP definition via POST method.
 * @param request Update OTP definition request.
 * @return Update OTP definition response.
 * @throws OtpDefinitionNotFoundException Thrown when OTP definition is not found.
 * @throws ApplicationNotFoundException Thrown when application is not found.
 * @throws OtpPolicyNotFoundException Thrown when OTP policy is not found.
 */
@Operation(summary = "Update an OTP definition")
@ApiResponses(value = {
        @ApiResponse(responseCode = "200", description = "OTP definition was updated"),
        @ApiResponse(responseCode = "400", description = "Invalid request, error codes: REQUEST_VALIDATION_FAILED, OTP_DEFINITION_NOT_FOUND, APPLICATION_NOT_FOUND, OTP_POLICY_NOT_FOUND"),
        @ApiResponse(responseCode = "500", description = "Unexpected error")
})
// @PostMapping("update") is the composed shortcut for
// @RequestMapping(value = "update", method = RequestMethod.POST).
@PostMapping("update")
public ObjectResponse<UpdateOtpDefinitionResponse> updateOtpDefinitionPost(@Valid @RequestBody ObjectRequest<UpdateOtpDefinitionRequest> request) throws OtpDefinitionNotFoundException, ApplicationNotFoundException, OtpPolicyNotFoundException {
    logger.info("Received updateOtpDefinitionPost request, OTP definition name: {}", request.getRequestObject().getOtpDefinitionName());
    final UpdateOtpDefinitionResponse response = otpDefinitionService.updateOtpDefinition(request.getRequestObject());
    logger.info("The updateOtpDefinitionPost request succeeded, OTP definition name: {}", request.getRequestObject().getOtpDefinitionName());
    return new ObjectResponse<>(response);
}
/**
 * Get OTP definition list.
 * @param includeRemoved Whether removed OTP definitions should be included.
 * @return Get OTP definition list response.
 */
@Operation(summary = "Get OTP definition list")
@ApiResponses(value = {
        @ApiResponse(responseCode = "200", description = "OTP definition list sent in response"),
        @ApiResponse(responseCode = "400", description = "Invalid request"),
        @ApiResponse(responseCode = "500", description = "Unexpected error")
})
@RequestMapping(method = RequestMethod.GET)
public ObjectResponse<GetOtpDefinitionListResponse> getOtpDefinitionList(@RequestParam boolean includeRemoved) {
    logger.info("Received getOtpDefinitionList request");
    // Wrap the query parameter into the request object the service layer expects.
    final GetOtpDefinitionListRequest request = new GetOtpDefinitionListRequest();
    request.setIncludeRemoved(includeRemoved);
    final GetOtpDefinitionListResponse response = otpDefinitionService.getOtpDefinitionList(request);
    logger.info("The getOtpDefinitionList request succeeded");
    return new ObjectResponse<>(response);
}
/**
 * Get OTP definition list using POST method.
 * @param request Get OTP definition list request.
 * @return Get OTP definition list response.
 */
@Operation(summary = "Get OTP definition list")
@ApiResponses(value = {
        @ApiResponse(responseCode = "200", description = "OTP definition list sent in response"),
        @ApiResponse(responseCode = "400", description = "Invalid request, error codes: REQUEST_VALIDATION_FAILED"),
        @ApiResponse(responseCode = "500", description = "Unexpected error")
})
@RequestMapping(value = "list", method = RequestMethod.POST)
public ObjectResponse<GetOtpDefinitionListResponse> getOtpDefinitionListPost(@Valid @RequestBody ObjectRequest<GetOtpDefinitionListRequest> request) {
    logger.info("Received getOtpDefinitionListPost request");
    final GetOtpDefinitionListResponse response = otpDefinitionService.getOtpDefinitionList(request.getRequestObject());
    logger.info("The getOtpDefinitionListPost request succeeded");
    return new ObjectResponse<>(response);
}
/**
 * Delete an OTP definition.
 * @param request Delete an OTP definition request.
 * @return Delete an OTP definition response.
 * @throws OtpDefinitionNotFoundException Thrown when OTP definition is not found.
 */
@Operation(summary = "Delete an OTP definition")
@ApiResponses(value = {
        @ApiResponse(responseCode = "200", description = "OTP definition was deleted"),
        @ApiResponse(responseCode = "400", description = "Invalid request, error codes: REQUEST_VALIDATION_FAILED, OTP_DEFINITION_NOT_FOUND"),
        @ApiResponse(responseCode = "500", description = "Unexpected error")
})
@RequestMapping(value = "delete", method = RequestMethod.POST)
public ObjectResponse<DeleteOtpDefinitionResponse> deleteOtpDefinition(@Valid @RequestBody ObjectRequest<DeleteOtpDefinitionRequest> request) throws OtpDefinitionNotFoundException {
    logger.info("Received deleteOtpDefinition request, OTP definition name: {}", request.getRequestObject().getOtpDefinitionName());
    final DeleteOtpDefinitionResponse response = otpDefinitionService.deleteOtpDefinition(request.getRequestObject());
    logger.info("The deleteOtpDefinition request succeeded, OTP definition name: {}", request.getRequestObject().getOtpDefinitionName());
    return new ObjectResponse<>(response);
}
}
|
import { useContext } from 'react';
import ToolboxContext from '../context';

/**
 * Hook that reads the current language from the ToolboxContext.
 */
function useLanguage() {
  return useContext(ToolboxContext).language;
}

export default useLanguage;
|
#!/bin/bash
#
# Copyright IBM Corp. All Rights Reserved.
#
# SPDX-License-Identifier: Apache-2.0
#

# Re-enroll a previously enrolled fabric-ca client identity using an
# inline CSR, and exit with the fabric-ca client's return code.
# Fix: the shebang previously read "# !/bin/bash" (a comment, not a
# shebang), so the script ran under whatever shell invoked it.
FABRIC_CA="$GOPATH/src/github.com/hyperledger/fabric-ca"
FABRIC_CAEXEC="$FABRIC_CA/bin/fabric-ca"
TESTDATA="$FABRIC_CA/testdata"
SCRIPTDIR="$FABRIC_CA/scripts/fvt"
CSR="$TESTDATA/csr.json"
HOST="http://localhost:8888"
RUNCONFIG="$TESTDATA/postgres.json"
INITCONFIG="$TESTDATA/csr_ecdsa256.json"
RC=0
# Switch to the TLS endpoint when $FABRIC_TLS evaluates to true.
$($FABRIC_TLS) && HOST="https://localhost:8888"
. $SCRIPTDIR/fabric-ca_utils
: ${FABRIC_CA_DEBUG="false"}
# Options: -x config path, -k key type, -l key length.
while getopts "k:l:x:" option; do
case "$option" in
x) CA_CFG_PATH="$OPTARG" ;;
k) KEYTYPE="$OPTARG" ;;
l) KEYLEN="$OPTARG" ;;
esac
done
: ${KEYTYPE="ecdsa"}
: ${KEYLEN="256"}
test -z "$CA_CFG_PATH" && CA_CFG_PATH=$HOME/fabric-ca
CLIENTCERT="$CA_CFG_PATH/cert.pem"
CLIENTKEY="$CA_CFG_PATH/key.pem"
export CA_CFG_PATH
genClientConfig "$CA_CFG_PATH/client-config.json"
# Re-enroll with an inline CSR; the duplicated DN keys (O/OU/L) below are
# intentional test data passed through verbatim.
$FABRIC_CAEXEC client reenroll $HOST <(echo "{
\"hosts\": [
\"admin@fab-client.raleigh.ibm.com\",
\"fab-client.raleigh.ibm.com\",
\"127.0.0.2\"
],
\"key\": {
\"algo\": \"$KEYTYPE\",
\"size\": $KEYLEN
},
\"names\": [
{
\"O\": \"Hyperledger\",
\"O\": \"Fabric\",
\"OU\": \"FABRIC_CA\",
\"OU\": \"FVT\",
\"STREET\": \"Miami Blvd.\",
\"DC\": \"peer\",
\"UID\": \"admin\",
\"L\": \"Raleigh\",
\"L\": \"RTP\",
\"ST\": \"North Carolina\",
\"C\": \"US\"
}
]
}")
RC=$?
$($FABRIC_CA_DEBUG) && printAuth $CLIENTCERT $CLIENTKEY
exit $RC
|
<filename>main.py
# -*- coding: utf-8 -*-
import tornado.web
import tornado.ioloop
import tornado.gen
from twython import Twython
import tornadoredis
import redis
import os, sys, json
import argparse
import ConfigParser
import logging
class Status(tornado.web.RequestHandler):
    """Render the status page for a logged-in user, or bounce to the homepage."""

    def get(self):
        # The "birdhouse" cookie caches the user's Twitter identity.
        user_key = self.get_secure_cookie('birdhouse')
        if not user_key:
            self.redirect("/")
            return
        try:
            cookie = json.loads(user_key)
            screen_name = cookie["screen_name"]
            id_str = cookie["id_str"]
        except (ValueError, KeyError, TypeError):
            # The cookie is hosed (not JSON, or missing fields): kill it and
            # have the user come back through the OAuth process.
            # Narrowed from a bare "except:" so real bugs are not swallowed.
            self.clear_cookie('birdhouse')
            self.redirect("/")
            return
        creds = rserver.get("credentials:"+id_str)
        if creds is None:
            # User has revoked access previously; redirect them to the homepage to oauth again.
            # Note: in most cases screen_name will not be in the list here, but just in case.
            rserver.lrem("users", id_str, 0)
            rserver.delete("since_id:"+id_str)
            self.clear_cookie('birdhouse')
            self.redirect("/")
        else:
            # TODO: call Twitter here to get latest screen_name, name, etc.
            # (it may change after user oauths)
            self.render("status.html", screen_name=screen_name)
class LoginSuccess(tornado.web.RequestHandler):
    # Handles the OAuth callback from Twitter: exchanges the request token for
    # access credentials, persists them in redis, and sets the login cookie.
    @tornado.web.asynchronous
    @tornado.gen.engine
    def get(self):
        oauth_verifier = self.get_argument('oauth_verifier')
        oauth_token = self.get_argument('oauth_token')
        # Temporary request-token secret stashed by TwitterLoginHandler.
        secret = self.get_secure_cookie("bh_auth_secret")
        logger.debug("trying to finish auth with "+ secret)
        twitter = Twython(CONSUMER_KEY, CONSUMER_SECRET, oauth_token, secret)
        # Final OAuth step: trade the verifier for permanent access tokens.
        final_step = twitter.get_authorized_tokens(oauth_verifier)
        logger.debug(json.dumps(final_step))
        screen_name = final_step['screen_name']
        id_str = final_step['user_id']
        credentials = {}
        credentials['token'] = final_step['oauth_token']
        credentials['secret'] = final_step['oauth_token_secret']
        # save login info (async via tornadoredis; gen.engine resumes on yield)
        yield tornado.gen.Task(conn.set, "credentials:"+id_str, json.dumps(credentials))
        yield tornado.gen.Task(conn.lpush, "users", id_str)
        # set cookie with the full identity, then drop the temporary auth cookies
        cookie_data = {"token": final_step['oauth_token'], "secret": final_step['oauth_token_secret'], "screen_name": screen_name, "id_str": id_str}
        self.set_secure_cookie("birdhouse", json.dumps(cookie_data))
        self.clear_cookie("bh_auth_token")
        self.clear_cookie("bh_auth_secret")
        self.redirect("/status")
class TwitterLoginHandler(tornado.web.RequestHandler):
    """Starts the Twitter OAuth dance for users who are not yet logged in."""

    def get(self):
        if self.get_secure_cookie('birdhouse'):
            # Already authenticated; go straight to the status page.
            self.redirect('/status')
            return
        client = Twython(CONSUMER_KEY, CONSUMER_SECRET)
        auth = client.get_authentication_tokens(callback_url= BHHOST+":"+BHPORT+'/success')
        logger.debug("Back in python from twitter.")
        # Keep the request-token pair in secure cookies until the callback.
        self.set_secure_cookie("bh_auth_token", auth['oauth_token'])
        self.set_secure_cookie("bh_auth_secret", auth['oauth_token_secret'])
        self.redirect(auth['auth_url'])
class Settings(tornado.web.RequestHandler):
    """Render the static settings page."""

    def get(self):
        page = "settings.html"
        self.render(page)
class About(tornado.web.RequestHandler):
    """Render the static about page."""

    def get(self):
        page = "about.html"
        self.render(page)
class Intro(tornado.web.RequestHandler):
    """Landing page: show the intro unless the user is already logged in."""

    def get(self):
        if self.get_secure_cookie('birdhouse'):
            self.redirect('/status')
        else:
            self.render("intro.html")
###################################################
# setup logging
logger = logging.getLogger("birdhouse")
logger.setLevel(logging.DEBUG)
FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(format=FORMAT)

# parse arguments
argparser = argparse.ArgumentParser()
argparser.add_argument('config', help='path to config file')
args = argparser.parse_args()

# read application config
cfg = ConfigParser.ConfigParser()
cfg.read(args.config)
try:
    CONSUMER_KEY = cfg.get('twitter', 'app_key')
    CONSUMER_SECRET = cfg.get('twitter', 'app_secret')
    BHHOST = cfg.get('birdhouse', 'host')
    BHPORT = cfg.get('birdhouse', 'port')
    REDIS_HOST = cfg.get('redis', 'host')
    REDIS_PORT = int(cfg.get('redis', 'port'))
    COOKIE_SECRET = cfg.get('birdhouse', 'cookie_secret')
except (ConfigParser.Error, ValueError):
    # Missing sections/options or a non-numeric redis port: fail early with a
    # clear message. (Narrowed from a bare "except:", which also swallowed
    # KeyboardInterrupt/SystemExit.)
    logger.critical("Please set your config variables properly in %s before running main.py." % args.config)
    sys.exit(2)

# async redis client used inside handlers; sync client for simple lookups
conn = tornadoredis.Client(host=REDIS_HOST, port=REDIS_PORT)
conn.connect()
rserver = redis.Redis(REDIS_HOST, REDIS_PORT)

settings = dict(
    template_path=os.path.join(os.path.dirname(__file__), "templates"),
    static_path=os.path.join(os.path.dirname(__file__), "static"),
    cookie_secret=COOKIE_SECRET,
    twitter_consumer_key=CONSUMER_KEY,
    twitter_consumer_secret=CONSUMER_SECRET,
    debug=True
)
application = tornado.web.Application([
    (r"/", Intro),
    (r"/login", TwitterLoginHandler),
    (r"/success", LoginSuccess),
    (r"/status", Status),
    (r"/settings", Settings)
], **settings)

if __name__ == "__main__":
    #schedule the listener - it will be responsible for setting queues up and starting schedulers
    #start tornado listener for web interface
    application.listen(BHPORT)
    logger.info("Birdhouse Logger Inner starting on port %s" %BHPORT)
    tornado.ioloop.IOLoop.instance().start()
|
#!/bin/sh
# Strip symbols from the given binary and report its size before and after.

# Exact byte count of a file.
file_size() {
  du --apparent-size --block-size=1 "$1" | cut -f1
}

if [ "$#" -ne 1 ] || ! [ -f "$1" ]; then
  echo "Usage: $0 <path-to-binary>"
  exit 1
fi

EXE="$(basename "$1")"
# Absolute path so the result is independent of the caller's cwd.
TARGET="$(cd "$(dirname "$1")"; pwd)/$EXE"
BEFORE="$(file_size "$TARGET")"
strip -s "$TARGET" && echo "$EXE stripped: $BEFORE -> $(file_size "$TARGET")"
|
from pkgbuilder.pkgsource import PkgSource
from pkgbuilder.command import command_exec
class PkgSourceGit(PkgSource):
    """Package source backed by a git repository."""

    type = "git"

    def init(self):
        """Clone the repository into the source path."""
        super().init()
        command_exec(["git", "clone", self.url, self.src_path])

    def update(self):
        """Pull upstream changes into the working copy."""
        super().update()
        command_exec(["git", "pull"])

    def get_tag(self):
        """Return the current HEAD commit hash."""
        super().get_tag()
        return command_exec(["git", "rev-parse", "HEAD"], True)

    def get_changelog(self, old, new):
        """Return the commit log between two revisions."""
        super().get_changelog(old, new)
        return command_exec(["git", "log", old + ".." + new], True)
|
#include "unity.h"

/*
 * ESP-IDF test application entry point: hands control to Unity's
 * interactive test menu on the serial console.
 */
void app_main()
{
    unity_run_menu();
}
|
# Common test suite configuration.
# Sourced by test case scripts (through lib.sh),
# and mock programs (through lib-init-mock.bash).

# Fail fast (also inside functions/subshells via -E), treat unset variables
# as errors, and make `cmd | read var`-style pipelines work (lastpipe).
set -eEuo pipefail
shopt -s lastpipe
# Split only on newlines; stable sort order for comm/compgen below.
IFS=$'\n'
export LC_COLLATE=C

# Snapshot of shell variables that are NOT environment variables, taken
# before the test defines anything of its own (used for leak detection).
test_globals_initial=$(comm -13 <(compgen -e | sort) <(compgen -v | sort))

# Derive the test name from the script name unless a parent already set it.
if [[ -n ${ACONFMGR_CURRENT_TEST+x} ]]
then
test_name=$ACONFMGR_CURRENT_TEST
else
test_name=$(basename "$0" .sh)
fi
export ACONFMGR_CURRENT_TEST=$test_name

# Per-test scratch directories.
config_dir=../tmp/test/"$test_name"/config
tmp_dir=../tmp/test/"$test_name"/tmp
test_data_dir=../tmp/test/"$test_name"/testdata
|
<filename>moduliths-events/moduliths-events-jpa/src/main/java/org/moduliths/events/jpa/JpaEventPublicationRegistry.java
/*
* Copyright 2017-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.moduliths.events.jpa;
import lombok.EqualsAndHashCode;
import lombok.NonNull;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import java.time.Instant;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.moduliths.events.CompletableEventPublication;
import org.moduliths.events.EventPublication;
import org.moduliths.events.EventPublicationRegistry;
import org.moduliths.events.EventSerializer;
import org.moduliths.events.PublicationTargetIdentifier;
import org.springframework.beans.factory.DisposableBean;
import org.springframework.transaction.annotation.Propagation;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.util.Assert;
/**
 * JPA based {@link EventPublicationRegistry}.
 *
 * @author <NAME>
 */
@Slf4j
@RequiredArgsConstructor
class JpaEventPublicationRegistry implements EventPublicationRegistry, DisposableBean {

	private final @NonNull JpaEventPublicationRepository events;
	private final @NonNull EventSerializer serializer;

	/*
	 * (non-Javadoc)
	 * @see org.springframework.events.EventPublicationRegistry#store(java.lang.Object, java.util.Collection)
	 */
	@Override
	public void store(Object event, Stream<PublicationTargetIdentifier> listeners) {

		// One publication entry per listener that will receive the event.
		listeners.map(it -> CompletableEventPublication.of(event, it)) //
				.map(this::map) //
				.forEach(it -> events.save(it));
	}

	/*
	 * (non-Javadoc)
	 * @see org.springframework.events.EventPublicationRegistry#findIncompletePublications()
	 */
	@Override
	public Iterable<EventPublication> findIncompletePublications() {

		return events.findByCompletionDateIsNull().stream() //
				.map(it -> JpaEventPublicationAdapter.of(it, serializer)) //
				.collect(Collectors.toList());
	}

	/*
	 * (non-Javadoc)
	 * @see org.springframework.events.EventPublicationRegistry#markCompleted(java.lang.Object, org.springframework.events.ListenerId)
	 */
	@Override
	@Transactional(propagation = Propagation.REQUIRES_NEW)
	public void markCompleted(Object event, PublicationTargetIdentifier listener) {

		Assert.notNull(event, "Domain event must not be null!");
		Assert.notNull(listener, "Listener identifier must not be null!");

		// Look the publication up by its serialized form and listener id.
		// (Renamed LOGCompleted -> logCompleted to follow Java naming conventions.)
		events.findBySerializedEventAndListenerId(serializer.serialize(event), listener.toString()) //
				.map(JpaEventPublicationRegistry::logCompleted) //
				.ifPresent(it -> events.saveAndFlush(it.markCompleted()));
	}

	/*
	 * (non-Javadoc)
	 * @see org.springframework.beans.factory.DisposableBean#destroy()
	 */
	@Override
	public void destroy() throws Exception {

		List<JpaEventPublication> outstandingPublications = events.findByCompletionDateIsNull();

		if (outstandingPublications.isEmpty()) {
			LOG.info("No publications outstanding!");
			return;
		}

		LOG.info("Shutting down with the following publications left unfinished:");

		outstandingPublications
				.forEach(it -> LOG.info("\t{} - {} - {}", it.getId(), it.getEventType().getName(), it.getListenerId()));
	}

	/**
	 * Maps a domain {@link EventPublication} to its JPA entity representation.
	 */
	private JpaEventPublication map(EventPublication publication) {

		JpaEventPublication result = JpaEventPublication.builder() //
				.eventType(publication.getEvent().getClass()) //
				.publicationDate(publication.getPublicationDate()) //
				.listenerId(publication.getTargetIdentifier().toString()) //
				.serializedEvent(serializer.serialize(publication.getEvent()).toString()) //
				.build();

		LOG.debug("Registering publication of {} with id {} for {}.", //
				result.getEventType(), result.getId(), result.getListenerId());

		return result;
	}

	/**
	 * Logs completion of the given publication and passes it through unchanged.
	 */
	private static JpaEventPublication logCompleted(JpaEventPublication publication) {

		LOG.debug("Marking publication of event {} with id {} to listener {} completed.", //
				publication.getEventType(), publication.getId(), publication.getListenerId());

		return publication;
	}

	/**
	 * Adapter exposing a {@link JpaEventPublication} entity as a domain
	 * {@link EventPublication}, deserializing the event lazily on access.
	 */
	@EqualsAndHashCode
	@RequiredArgsConstructor(staticName = "of")
	static class JpaEventPublicationAdapter implements EventPublication {

		private final JpaEventPublication publication;
		private final EventSerializer serializer;

		/*
		 * (non-Javadoc)
		 * @see org.springframework.events.EventPublication#getEvent()
		 */
		@Override
		public Object getEvent() {
			return serializer.deserialize(publication.getSerializedEvent(), publication.getEventType());
		}

		/*
		 * (non-Javadoc)
		 * @see org.springframework.events.EventPublication#getListenerId()
		 */
		@Override
		public PublicationTargetIdentifier getTargetIdentifier() {
			return PublicationTargetIdentifier.of(publication.getListenerId());
		}

		/*
		 * (non-Javadoc)
		 * @see org.springframework.events.EventPublication#getPublicationDate()
		 */
		@Override
		public Instant getPublicationDate() {
			return publication.getPublicationDate();
		}
	}
}
|
/*
* Copyright (C) 2013 salesforce.com, inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Construct a new HtmlComponent.
 *
 * @public
 * @class
 * @constructor
 *
 * @param {Object}
 *            config - component configuration
 * @param {Boolean}
 *            [localCreation] - local creation
 * @export
 */
function HtmlComponent(config, localCreation) {
    var context = $A.getContext();
    // setup some basic things
    this.concreteComponentId = config["concreteComponentId"];
    this.containerComponentId = config["containerComponentId"];
    this.shouldAutoDestroy=true;
    this.rendered = false;
    this.inUnrender = false;
    this.localId = config["localId"];
    this.valueProviders = {};
    //this.eventValueProvider = undefined;
    this.docLevelHandlers = undefined;
    this.references={};
    this.handlers = {};
    this.localIndex = {};
    this.destroyed=0;
    this.version = config["version"];
    this.owner = $A.clientService.getCurrentAccessGlobalId();
    this.name='';
    // allows components to skip creation path checks if it's doing something weird
    // such as wrapping server created components in client created one
    var act = config["skipCreationPath"] ? null : context.getCurrentAction();
    var forcedPath = false;
    if (act) {
        var currentPath = act.topPath();
        if (config["creationPath"]) {
            //
            // This is a server side config, so we need to sync ourselves with it.
            // The use case here is that the caller has gotten a returned array of
            // components, and is instantiating them independently. We can warn the
            // user when they do the wrong thing, but we'd actually like it to work
            // for most cases.
            //
            this.creationPath = act.forceCreationPath(config["creationPath"]);
            forcedPath = true;
        } else if (!context.containsComponentConfig(currentPath) && !!localCreation) {
            // skip creation path if the current top path is not in server returned
            // componentConfigs and localCreation
            this.creationPath = "client created";
        } else {
            this.creationPath = act.getCurrentPath();
        }
        //$A.log("l: [" + this.creationPath + "]");
    }
    // create the globally unique id for this component
    this.setupGlobalId(config["globalId"], localCreation);
    var partialConfig;
    if (this.creationPath && this.creationPath !== "client created") {
        partialConfig = context.getComponentConfig(this.creationPath);
        // Done with it in the context, it's now safe to remove so we don't process it again later.
        context.removeComponentConfig(this.creationPath);
    }
    if (partialConfig) {
        this.validatePartialConfig(config,partialConfig);
        this.partialConfig = partialConfig;
    }
    // get server rendering if there was one
    if (config["rendering"]) {
        this.rendering = config["rendering"];
    } else if (partialConfig && partialConfig["rendering"]) {
        this.rendering = this.partialConfig["rendering"];
    }
    // add this component to the global index
    $A.componentService.indexComponent(this);
    // sets this components definition, preferring partialconfig if it exists
    this.setupComponentDef(this.partialConfig || config);
    // Saves a flag to indicate whether the component implements the root marker interface.
    this.isRootComponent = true;
    // join attributes from partial config and config, preferring partial when overlapping
    var configAttributes = { "values": {} };
    if (config["attributes"]) {
        //$A.util.apply(configAttributes["values"], config["attributes"]["values"], true);
        for(var key in config["attributes"]["values"]) {
            configAttributes["values"][key] = config["attributes"]["values"][key];
        }
        configAttributes["valueProvider"] = config["attributes"]["valueProvider"] || config["valueProvider"];
    }
    if (partialConfig && partialConfig["attributes"]) {
        $A.util.apply(configAttributes["values"], partialConfig["attributes"]["values"], true);
        // NOTE: IT USED TO BE SOME LOGIC HERE TO OVERRIDE THE VALUE PROVIDER BECAUSE OF PARTIAL CONFIGS
        // IF WE RUN INTO ISSUES AT SOME POINT AFTER HALO, LOOK HERE FIRST!
    }
    if (!configAttributes["facetValueProvider"]) {
        configAttributes["facetValueProvider"] = this;
    }
    //JBUCH: HALO: FIXME: THIS IS A DIRTY FILTHY HACK AND I HAVE BROUGHT SHAME ON MY FAMILY
    this.attributeValueProvider = configAttributes["valueProvider"];
    this.facetValueProvider = configAttributes["facetValueProvider"];
    // create all value providers for this component m/v/c etc.
    this.setupValueProviders(config["valueProviders"]);
    // initialize attributes
    this.setupAttributes(configAttributes);
    // index this component with its value provider (if it has a localid)
    this.doIndex(this);
    // setup flavors
    this.setupFlavors(config, configAttributes);
    // clean up refs to partial config
    this.partialConfig = undefined;
    if (forcedPath && act && this.creationPath) {
        act.releaseCreationPath(this.creationPath);
    }
    // Validate the tag name up front when present (throws for disallowed tags).
    var tag = this.attributeSet.getValue("tag");
    if (!$A.util.isUndefinedOrNull(tag)) {
        this.componentDef.getHelper().validateTagName(tag);
    }
}
HtmlComponent.prototype = Object.create(Component.prototype);
// Not returning anything in these since empty functions will not be called by the JS engine as an optimization.
/** The SuperRender calls are blank since we will never have a super, no need to ever do any logic for them. */
HtmlComponent.prototype.superRender = function(){};
HtmlComponent.prototype.superAfterRender = function(){};
HtmlComponent.prototype.superRerender = function(){};
HtmlComponent.prototype.superUnrender = function(){};
/** No Super, so just return undefined */
HtmlComponent.prototype.getSuper = function(){};
/** Will always be Superest, so no need to check for a super */
HtmlComponent.prototype.getSuperest = function(){ return this; };
/**
 * Component.js has logic that is specific to HtmlComponent. Great! So we can move that into here and out of Component.js
 * That logic is the LockerService part to assign trust to the owner.
 */
HtmlComponent.prototype.setupComponentDef = function() {
    // HtmlComponent optimization: go straight to the internal API for the aura:html def.
    this.componentDef = $A.componentService.getComponentDef({"descriptor":"markup://aura:html"});
    // propagating locker key when possible
    $A.lockerService.trust(this.componentDef, this);
    // aura:html is syntactic sugar for document.createElement() and the resulting elements
    // need to be directly visible to the container, otherwise no code would be able to
    // manipulate them. Walk past transparent containers (iteration/if) to the real owner.
    var effectiveOwner = this.getOwner();
    for (var ownerType = effectiveOwner.getType();
         ownerType === "aura:iteration" || ownerType === "aura:if";
         ownerType = effectiveOwner.getType()) {
        effectiveOwner = effectiveOwner.getOwner();
    }
    $A.lockerService.trust(effectiveOwner, this);
};
/**
 * Not all the value providers are necessary for HTML. We don't need the component events (e.) or controller (c.)
 */
HtmlComponent.prototype.setupValueProviders = function(customValueProviders) {
    var providers = this.valueProviders;
    this.attributeSet = new AttributeSet(this.componentDef.attributeDefs);
    providers["v"] = this.attributeSet;
    providers["this"] = this;
    providers["globalid"] = this.globalId;
    providers["def"] = this.componentDef;
    providers["null"] = null;
    providers["version"] = this.version ? this.version : this.getVersionInternal();
    if (customValueProviders) {
        for (var name in customValueProviders) {
            this.addValueProvider(name, customValueProviders[name]);
        }
    }
};
/**
 * Simple type checking. All simple components implement aura:rootComponent and cannot be extended,
 * so the simple condition here is sufficient unless any of the individual components change.
 */
HtmlComponent.prototype.isInstanceOf = function(type) {
    switch (type) {
        case "aura:html":
        case "aura:rootComponent":
            return true;
        default:
            return false;
    }
};
/**
 * Copied from htmlRenderer.js
 * Now has access to internal APIs and advanced mode compilation.
 */
HtmlComponent.prototype["renderer"] = {
    // Creates the DOM element for the tag, applies HTMLAttributes, and renders the body facet.
    "render" : function(component, helper) {
        var tag = component.attributeSet.getValue("tag");
        if ($A.util.isUndefinedOrNull(tag)) {
            throw new Error("Undefined tag attribute for " + component.getGlobalId());
        }
        helper.validateTagName(tag);
        var HTMLAttributes = component.attributeSet.getValue("HTMLAttributes");
        var element = document.createElement(tag);
        for ( var attribute in HTMLAttributes) {
            helper.createHtmlAttribute(component, element, attribute, HTMLAttributes[attribute]);
        }
        // Tag the element with the rendering component's globalId for later lookup.
        $A.util.setDataAttribute(element, $A.componentService.renderedBy, component.globalId);
        helper.processJavascriptHref(element);
        if (helper.canHaveBody(component)) {
            var body=component.attributeSet.getBody(component.globalId);
            $A.renderingService.renderFacet(component,body,element);
        }
        // aura:html is syntactic sugar for document.createElement() and the resulting elements need to be directly visible to the container
        // otherwise no code would be able to manipulate them
        var owner = component.getOwner();
        var ownerName = owner.getType();
        // TODO: Manually checking for aura:iteration or aura:if is a hack. Ideally, getOwner() or another API would
        // always return the element we need to key against.
        while (ownerName === "aura:iteration" || ownerName === "aura:if") {
            owner = owner.getOwner();
            ownerName = owner.getType();
        }
        $A.lockerService.trust(owner, element);
        return element;
    },
    // Re-applies changed HTMLAttributes to the existing element and rerenders the body facet.
    "rerender" : function(component, helper) {
        var element = component.getElement();
        if (!element) {
            return;
        }
        // Attributes managed elsewhere (or event handlers, "on*") are not reapplied here.
        var skipMap = {
            "height" : true,
            "width" : true,
            "class" : true
        };
        var HTMLAttributes = component.attributeSet.getValue("HTMLAttributes");
        if (HTMLAttributes) {
            for (var name in HTMLAttributes) {
                var lowerName = name.toLowerCase();
                if (skipMap[lowerName] || lowerName.indexOf("on") === 0) {
                    continue;
                }
                var value = HTMLAttributes[name];
                if ($A.util.isExpression(value)) {
                    value = value.evaluate();
                }
                if (helper.SPECIAL_BOOLEANS.hasOwnProperty(lowerName)) {
                    value = $A.util.getBooleanValue(value);
                }
                // Only touch the DOM when the value actually changed.
                var oldValue = element[helper.caseAttribute(lowerName)];
                if (value !== oldValue) {
                    helper.createHtmlAttribute(component, element, lowerName, value);
                    if($A.util.isExpression(oldValue)){
                        oldValue.removeChangeHandler(component,"HTMLAttributes."+name);
                    }
                }
            }
            // "class" is handled separately so the framework-managed
            // data-aura-class can be appended to the author-supplied classes.
            var className = HTMLAttributes["class"];
            if ($A.util.isExpression(className)) {
                className = className.evaluate();
            }
            if($A.util.isUndefinedOrNull(className)){
                className='';
            }
            if (!$A.util.isUndefinedOrNull(element.getAttribute("data-aura-class"))) {
                className += (" " + element.getAttribute("data-aura-class"));
            }
            if (element["className"] !== className) {
                element["className"] = className;
            }
        }
        helper.processJavascriptHref(element);
        if (helper.canHaveBody(component)) {
            $A.renderingService.rerenderFacet(component,component.attributeSet.getBody(component.globalId),element);
        }
    },
    "afterRender" : function(component, helper) {
        if (helper.canHaveBody(component)) {
            $A.afterRender(component.attributeSet.getBody(component.globalId));
        }
    },
    // Tears down attribute handlers and detaches the body facet's elements.
    "unrender" : function(component, helper) {
        var HTMLAttributes = component.attributeSet.getValue("HTMLAttributes");
        for ( var attribute in HTMLAttributes) {
            helper.destroyHtmlAttribute(component, attribute, HTMLAttributes[attribute]);
        }
        // Even if we don't have body we need to deattach the elements from the component itself
        $A.renderingService.unrenderFacet(component, component.attributeSet.getBody(component.globalId));
    }
};
/** Copied unchanged from htmlHelper.js */
HtmlComponent.prototype["helper"] = {
SPECIAL_BOOLEANS: {
"checked": true,
"selected": true,
"disabled": true,
"readonly": true,
"multiple": true,
"ismap": true,
"defer": true,
"declare": true,
"noresize": true,
"nowrap": true,
"noshade": true,
"compact": true,
"autocomplete": true,
"required": true
},
SPECIAL_CASINGS: {
"readonly": "readOnly",
"colspan": "colSpan",
"rowspan": "rowSpan",
"bgcolor": "bgColor",
"tabindex": "tabIndex",
"usemap": "useMap",
"accesskey": "accessKey",
"maxlength": "maxLength",
"for": "htmlFor",
"class": "className",
"frameborder": "frameBorder"
},
// "void elements" as per http://dev.w3.org/html5/markup/syntax.html#syntax-elements
BODYLESS_TAGS: {
"area": true,
"base": true,
"br": true,
"col": true,
"command": true,
"embed": true,
"hr": true,
"img": true,
"input": true,
"keygen": true,
"link": true,
"meta": true,
"param": true,
"source": true,
"track": true,
"wbr": true
},
// List must be kept in sync with org.auraframework.def.HtmlTag enum
ALLOWED_TAGS:{
"a":true,
"abbr":true,
"acronym":true,
"address":true,
"area":true,
"article":true,
"aside":true,
"audio":true,
"b":true,
"bdi":true,
"bdo":true,
"big":true,
"blockquote":true,
"body":true,
"br":true,
"button":true,
"caption":true,
"canvas":true,
"center":true,
"cite":true,
"code":true,
"col":true,
"colgroup":true,
"command":true,
"datalist":true,
"dd":true,
"del":true,
"details":true,
"dfn":true,
"dir":true,
"div":true,
"dl":true,
"dt":true,
"em":true,
"fieldset":true,
"figure":true,
"figcaption":true,
"footer":true,
"form":true,
"h1":true,
"h2":true,
"h3":true,
"h4":true,
"h5":true,
"h6":true,
"head":true,
"header":true,
"hgroup":true,
"hr":true,
"html":true,
"i":true,
"iframe":true,
"img":true,
"input":true,
"ins":true,
"keygen":true,
"kbd":true,
"label":true,
"legend":true,
"li":true,
"link":true,
"map":true,
"mark":true,
"menu":true,
"meta":true,
"meter":true,
"nav":true,
"ol":true,
"optgroup":true,
"option":true,
"output":true,
"p":true,
"pre":true,
"progress":true,
"q":true,
"rp":true,
"rt":true,
"ruby":true,
"s":true,
"samp":true,
"script":true,
"section":true,
"select":true,
"small":true,
"source":true,
"span":true,
"strike":true,
"strong":true,
"style":true,
"sub":true,
"summary":true,
"sup":true,
"table":true,
"tbody":true,
"td":true,
"textarea":true,
"tfoot":true,
"th":true,
"thead":true,
"time":true,
"title":true,
"tr":true,
"track":true,
"tt":true,
"u":true,
"ul":true,
"var":true,
"video":true,
"wbr":true
},
// string constants used to save and remove click handlers
NAMES: {
"domHandler": "fcDomHandler",
"hashHandler": "fcHashHandler"
},
validateTagName: function(tagName) {
if (!this.ALLOWED_TAGS.hasOwnProperty(tagName) && !this.ALLOWED_TAGS.hasOwnProperty(tagName.toLowerCase())){
throw new Error("The HTML tag '" + tagName + "' is not allowed.");
}
},
caseAttribute: function (attribute) {
return this.SPECIAL_CASINGS[attribute] || attribute;
},
/**
* Adds or replaces existing "onclick" handler for the given handlerName.
*
* Is used to add independent handlers eg. dom level and hash navigation handling on <a href/>
*/
addNamedClickHandler: function (element, handler, handlerName) {
var previousHandler = element[handlerName];
if ($A.util.isFunction(previousHandler)) {
$A.util.removeOn(element, "click", previousHandler);
}
$A.util.on(element, "click", handler);
element[handlerName] = handler;
return previousHandler;
},
domEventHandler: function (event) {
var eventName = "on" + event.type,
element = event.currentTarget,
ownerComponent = $A.componentService.getRenderingComponentForElement(element);
// cmp might be destroyed, just ignore this event.
if (!ownerComponent) {
return;
}
var htmlAttributes = ownerComponent.get("v.HTMLAttributes"),
valueExpression = htmlAttributes[eventName],
onclickExpression;
if (eventName === 'ontouchend' || eventName === 'onpointerup' || eventName === 'onMSPointerUp') {
// Validate that either onclick or ontouchend is wired up to an action never both simultaneously
onclickExpression = htmlAttributes["onclick"];
if (!$A.util.isEmpty(onclickExpression)) {
if ($A.util.isEmpty(valueExpression)) {
// Map from touch event to onclick
valueExpression = onclickExpression;
}
}
}
if ($A.util.isExpression(valueExpression)) {
var action = valueExpression.evaluate();
// This can resolve to null if you have an expression pointing to an attribute which could be an Action
if(action) {
this.dispatchAction(action, event, ownerComponent);
}
}
},
// NOTE: Do not remove attributes from this method
// Used by MetricsService plugin to collect information
// NOTE(review): callers (see domEventHandler) pass a third ownerComponent
// argument that is intentionally not declared here; presumably it is read
// via `arguments` by instrumentation — confirm before changing the arity.
dispatchAction: function (action, event) {
// Run inside $A.run so the framework processes the action and rerenders.
$A.run(function() {
action.runDeprecated(event);
});
},
canHaveBody: function (component) {
var tag = component.attributeSet.getValue("tag");
if ($A.util.isUndefinedOrNull(tag)) {
throw new Error("Undefined tag attribute for " + component.getGlobalId());
}
return !this.BODYLESS_TAGS[tag.toLowerCase()];
},
/**
 * Renders a single HTML attribute (name -> attribute) from the component onto
 * the given DOM element: "on*" attributes become event listeners, everything
 * else is written as an attribute/property with several browser- and
 * attribute-specific special cases (boolean attributes, aria/role, href on
 * touch devices, IE input quirks, iframe srcdoc, link rel=import).
 *
 * NOTE(review): the branch ladder below is heavily order-dependent; code is
 * left byte-for-byte intact and only annotated.
 */
createHtmlAttribute: function (component, element, name, attribute) {
    var value;
    var lowerName = name.toLowerCase();
    // special handling if the attribute is an inline event handler
    if (lowerName.indexOf("on") === 0) {
        var eventName = lowerName.substring(2);
        if (eventName === "click") {
            // click goes through the named-handler path so it can coexist
            // with the hash-navigation handler (see addNamedClickHandler).
            this.addNamedClickHandler(element, $A.getCallback(this.domEventHandler.bind(this)), this.NAMES.domHandler);
        } else {
            $A.util.on(element, eventName, $A.getCallback(this.domEventHandler.bind(this)));
        }
    } else {
        var isSpecialBoolean = this.SPECIAL_BOOLEANS.hasOwnProperty(lowerName);
        if ($A.util.isExpression(attribute)) {
            // Re-evaluate and rerender this attribute when the bound
            // expression changes.
            attribute.addChangeHandler(component, "HTMLAttributes." + name);
            value = attribute.evaluate();
        } else {
            value = attribute;
        }
        if (isSpecialBoolean) {
            value = $A.util.getBooleanValue(value);
        }
        var isString = $A.util.isString(value);
        if (isString && value.indexOf("/auraFW") === 0) {
            // prepend any Aura resource urls with servlet context path
            value = $A.getContext().getContextPath() + value;
        }
        if (lowerName === "href" && element.tagName === "A" && value && $A.util.supportsTouchEvents()) {
            // On touch-capable devices, anchors get a named click handler so
            // "#hash" hrefs drive the history service.
            var HTMLAttributes = component.attributeSet.getValue("HTMLAttributes");
            var target = HTMLAttributes["target"];
            if ($A.util.isExpression(target)) {
                target = target.evaluate();
            }
            this.addNamedClickHandler(element, function () {
                if (isString && value.indexOf("#") === 0) {
                    $A.run(function () {
                        $A.historyService.set(value.substring(1));
                    });
                }
            }, this.NAMES.hashHandler);
            if(target){
                element.setAttribute("target", target);
            }
            element.setAttribute("href", value);
        } else if (!$A.util.isUndefinedOrNull(value) && (lowerName === "role" || lowerName.lastIndexOf("aria-", 0) === 0)) {
            // use setAttribute to render accessibility attributes to markup
            // do not set the property on the HTMLElement if value is null or undefined to avoid accessibility confusion.
            element.setAttribute(name, value);
        } else if (isSpecialBoolean) {
            // handle the boolean attributes for whom presence implies truth
            var casedName = this.caseAttribute(lowerName);
            if (value === false) {
                element.removeAttribute(casedName);
                // Support for IE's weird handling of checked (unchecking case):
                if (casedName === "checked") {
                    element.removeAttribute("defaultChecked");
                }
            } else {
                // Presence implies truth: the attribute is set to its own
                // name (e.g. checked="checked"), the conventional HTML form.
                element.setAttribute(casedName, name);
                // Support for IE's weird handling of checked (checking case):
                if (casedName === "checked") {
                    element.setAttribute("defaultChecked", true);
                }
            }
            // We still need to make sure that the property is set on the HTMLElement, because it is used for
            // change detection:
            if($A.util.isUndefinedOrNull(value)){
                value='';
            }
            element[casedName] = value;
        } else {
            // KRIS: HALO:
            // If in older IE's you set the type attribute to a value that the browser doesn't support
            // you'll get an exception.
            // Also, you can't change the type after the element has been added to the DOM.
            // Honestly, I can't see how this wasn't blowing up Pre-halo
            if ($A.util.isIE && element.tagName === "INPUT" && lowerName === "type") {
                try {
                    element.setAttribute("type", value);
                } catch (e) {
                    return undefined;
                }
            }
            // as long as we have a valid value at this point, set
            // it as an attribute on the DOM node
            // IE renders null value as string "null" for input (text)
            // element, we have to work around that.
            else if (!$A.util.isUndefined(value) && !($A.util.isIE && element.tagName === "INPUT" && lowerName === "value" && value === null)) {
                var casedAttribute = this.caseAttribute(lowerName);
                // NOTE(review): redundant — lowerName was already assigned
                // name.toLowerCase() at the top of this method.
                lowerName = name.toLowerCase();
                if (lowerName === "style" && $A.util.isIE) {
                    element.style.cssText = value;
                } else if (lowerName === "type" || lowerName === "href" || lowerName === "style" || lowerName.indexOf("data-") === 0) {
                    // special case we have to use "setAttribute"
                    element.setAttribute(casedAttribute, value);
                } else if (lowerName === "srcdoc" && element.tagName === "IFRAME" && !$A.util.isUndefinedOrNull(value)) {
                    var message;
                    // Check if srcdoc is allowed. This may change as new defs are sent down.
                    if (!$A.get("$Global")["srcdoc"]) {
                        message = "The '" + name + "' attribute is not supported, and will not be set for " + element + " in " + component;
                        $A.warning(message);
                    } else {
                        message = "The '" + name + "' attribute has been set for " + element + " in " + component;
                        element[casedAttribute] = value;
                    }
                    // Track any usages for eventual deprecation
                    $A.logger.reportError(new $A.auraError(message), null, "WARNING");
                } else if (lowerName === "rel" && value && value.toLowerCase && value.toLowerCase() === "import" && element.tagName === "LINK") {
                    // HTML imports (rel="import") are rejected.
                    $A.warning("The '" + name + "' attribute is not supported, and will not be set for " + element + " in " + component);
                } else {
                    if ($A.util.isUndefinedOrNull(value)) {
                        value = '';
                    }
                    element[casedAttribute] = value;
                }
            }
            // W-2872594, IE11 input text set('v.value', null) would not clear up the field.
            else if ($A.util.isIE && element.tagName === "INPUT" && lowerName === "value" && value === null) {
                element.value = '';
            }
        }
    }
},
destroyHtmlAttribute: function (component, name, attribute) {
if ($A.util.isExpression(attribute)) {
attribute.removeChangeHandler(component, "HTMLAttributes." + name);
}
},
processJavascriptHref: function (element) {
if (element.tagName === "A") {
var href = element.getAttribute("href");
if (!href) {
/*eslint-disable no-script-url*/
element.setAttribute("href", "javascript:void(0);");
}
element.addEventListener("click", this.inlineJavasciptCSPViolationPreventer);
}
},
inlineJavasciptCSPViolationPreventer: function(event) {
// Check for javascript: inline javascript
/*eslint-disable no-script-url*/
var hrefTarget = this.href;
if (hrefTarget && /^\s*javascript:\s*void\((\s*|0|null|'.*')\)/.test(hrefTarget.toLowerCase())) {
event.preventDefault();
}
}
};
Aura.Component.HtmlComponent = HtmlComponent;
|
<reponame>jbwyme/action-destinations
import type { Settings } from './generated-types'
import type { BrowserDestinationDefinition } from '../../lib/browser-destinations'
import { browserDestination } from '../../runtime/shim'
import appboy from '@braze/web-sdk'
import trackEvent from './trackEvent'
import updateUserProfile from './updateUserProfile'
import trackPurchase from './trackPurchase'
import debounce, { resetUserCache } from './debounce'
import { defaultValues, DestinationDefinition } from '@segment/actions-core'
// Augment the global Window type: the Braze SDK attaches itself as
// `window.appboy` once loaded.
declare global {
interface Window {
appboy: typeof appboy
}
}
// Default subscription presets offered when this destination is added:
// identify/group -> profile update, Order Completed -> purchase, all other
// track events -> generic event tracking.
const presets: DestinationDefinition['presets'] = [
{
name: 'Identify Calls',
subscribe: 'type = "identify" or type = "group"',
partnerAction: 'updateUserProfile',
mapping: defaultValues(updateUserProfile.fields)
},
{
name: 'Order Completed calls',
subscribe: 'type = "track" and event = "Order Completed"',
partnerAction: 'trackPurchase',
mapping: defaultValues(trackPurchase.fields)
},
{
name: 'Track Calls',
subscribe: 'type = "track" and event != "Order Completed"',
partnerAction: 'trackEvent',
mapping: {
...defaultValues(trackEvent.fields),
// Route the Segment event name/properties straight through to Braze.
eventName: {
'@path': '$.event'
},
eventProperties: {
'@path': '$.properties'
}
}
}
]
/**
 * Braze (appboy) device-mode destination definition.
 *
 * Fix: `sdkVersion` is now destructured out of the settings before the rest
 * is spread into `appboy.initialize()` — previously it leaked into the SDK's
 * initialization options object, which only expects Braze config keys.
 */
export const destination: BrowserDestinationDefinition<Settings, typeof appboy> = {
  name: 'Braze Web Mode',
  slug: 'actions-braze-web',
  mode: 'device',
  settings: {
    sdkVersion: {
      description: 'The version of the SDK to use. Defaults to 3.3.',
      label: 'SDK Version',
      type: 'string',
      choices: [
        {
          value: '3.3',
          label: '3.3'
        }
      ],
      default: '3.3',
      required: true
    },
    api_key: {
      description: 'Found in the Braze Dashboard under Settings → Manage Settings → Apps → Web',
      label: 'API Key',
      type: 'password',
      required: true
    },
    endpoint: {
      label: 'SDK Endpoint',
      description:
        'Your Braze SDK endpoint. [See more details](https://www.braze.com/docs/user_guide/administrative/access_braze/sdk_endpoints/)',
      type: 'string',
      format: 'uri',
      choices: [
        { label: 'US-01 (https://dashboard-01.braze.com)', value: 'sdk.iad-01.braze.com' },
        { label: 'US-02 (https://dashboard-02.braze.com)', value: 'sdk.iad-02.braze.com' },
        { label: 'US-03 (https://dashboard-03.braze.com)', value: 'sdk.iad-03.braze.com' },
        { label: 'US-04 (https://dashboard-04.braze.com)', value: 'sdk.iad-04.braze.com' },
        { label: 'US-05 (https://dashboard-05.braze.com)', value: 'sdk.iad-05.braze.com' },
        { label: 'US-06 (https://dashboard-06.braze.com)', value: 'sdk.iad-06.braze.com' },
        { label: 'US-08 (https://dashboard-08.braze.com)', value: 'sdk.iad-08.braze.com' },
        { label: 'EU-01 (https://dashboard-01.braze.eu)', value: 'sdk.fra-01.braze.eu' }
      ],
      default: 'sdk.iad-01.braze.com',
      required: true
    },
    allowCrawlerActivity: {
      description:
        'Allow Braze to log activity from crawlers. [See more details](https://js.appboycdn.com/web-sdk/latest/doc/modules/appboy.html#initializationoptions)',
      label: 'Allow Crawler Activity',
      default: false,
      type: 'boolean',
      required: false
    },
    allowUserSuppliedJavascript: {
      description:
        'To indicate that you trust the Braze dashboard users to write non-malicious Javascript click actions, set this property to true. If enableHtmlInAppMessages is true, this option will also be set to true. [See more details](https://js.appboycdn.com/web-sdk/latest/doc/modules/appboy.html#initializationoptions)',
      label: 'Allow User Supplied Javascript',
      default: false,
      type: 'boolean',
      required: false
    },
    appVersion: {
      description:
        'Version to which user events sent to Braze will be associated with. [See more details](https://js.appboycdn.com/web-sdk/latest/doc/modules/appboy.html#initializationoptions)',
      label: 'App Version',
      type: 'string',
      required: false
    },
    contentSecurityNonce: {
      description:
        'Allows Braze to add the nonce to any <script> and <style> elements created by the SDK. [See more details](https://js.appboycdn.com/web-sdk/latest/doc/modules/appboy.html#initializationoptions)',
      label: 'Content Security nonce',
      type: 'string',
      required: false
    },
    devicePropertyAllowlist: {
      label: 'Device Property Allow List',
      description:
        'By default, the Braze SDK automatically detects and collects all device properties in DeviceProperties. To override this behavior, provide an array of DeviceProperties. [See more details](https://js.appboycdn.com/web-sdk/latest/doc/modules/appboy.html#initializationoptions)',
      type: 'string',
      required: false,
      multiple: true
    },
    disablePushTokenMaintenance: {
      label: 'Disable Push Token Maintenance',
      type: 'boolean',
      default: true,
      required: false,
      description:
        'By default, users who have already granted web push permission will sync their push token with the Braze backend automatically on new session to ensure deliverability. To disable this behavior, set this option to false'
    },
    doNotLoadFontAwesome: {
      label: 'Do Not Load Font Awesome',
      type: 'boolean',
      default: false,
      description:
        'Braze automatically loads FontAwesome 4.7.0 from the FontAwesome CDN. To disable this behavior set this option to true.'
    },
    enableLogging: {
      label: 'Enable Logging',
      required: false,
      default: false,
      type: 'boolean',
      description: 'Set to true to enable logging by default'
    },
    enableSdkAuthentication: {
      label: 'Enable SDK Authentication',
      type: 'boolean',
      required: false,
      default: false,
      description: 'Set to true to enable the SDK Authentication feature.'
    },
    inAppMessageZIndex: {
      label: 'In-App Message Z Index',
      type: 'number',
      required: false,
      description:
        "By default, the Braze SDK will show In-App Messages with a z-index of 1040 for the screen overlay, 1050 for the actual in-app message, and 1060 for the message's close button. Provide a value for this option to override these default z-indexes."
    },
    localization: {
      label: 'Localization',
      type: 'string',
      default: 'en',
      required: false,
      description:
        "By default, any SDK-generated user-visible messages will be displayed in the user's browser language. Provide a value for this option to override that behavior and force a specific language. The value for this option should be a ISO 639-1 Language Code."
    },
    manageServiceWorkerExternally: {
      label: 'Manage Service Worker Externally',
      type: 'boolean',
      default: false,
      required: false,
      description:
        'If you have your own service worker that you register and control the lifecycle of, set this option to true and the Braze SDK will not register or unregister a service worker. [See more details](https://js.appboycdn.com/web-sdk/latest/doc/modules/appboy.html#initializationoptions)'
    },
    minimumIntervalBetweenTriggerActionsInSeconds: {
      label: 'Minimum Interval Between Trigger Actions in Seconds',
      type: 'number',
      required: false,
      default: 30,
      description:
        'Provide a value to override the default interval between trigger actions with a value of your own. [See more details](https://js.appboycdn.com/web-sdk/latest/doc/modules/appboy.html#initializationoptions)'
    },
    noCookies: {
      label: 'No Cookies',
      type: 'boolean',
      default: false,
      required: false,
      description:
        'By default, the Braze SDK will store small amounts of data (user ids, session ids), in cookies. Pass true for this option to disable cookie storage and rely entirely on HTML 5 localStorage to identify users and sessions. [See more details](https://js.appboycdn.com/web-sdk/latest/doc/modules/appboy.html#initializationoptions)'
    },
    openCardsInNewTab: {
      label: 'Open Cards In New Tab',
      type: 'boolean',
      default: false,
      required: false,
      description:
        'By default, links from Card objects load in the current tab or window. Set this option to true to make links from cards open in a new tab or window.'
    },
    openInAppMessagesInNewTab: {
      label: 'Open In-App Messages In New Tab',
      type: 'boolean',
      default: false,
      required: false,
      description:
        'By default, links from in-app message clicks load in the current tab or a new tab as specified in the dashboard on a message-by-message basis. Set this option to true to force all links from in-app message clicks open in a new tab or window.'
    },
    requireExplicitInAppMessageDismissal: {
      label: 'Require Explicit In-App Message Dismissal',
      type: 'boolean',
      required: false,
      default: false,
      description:
        'By default, when an in-app message is showing, pressing the escape button or a click on the greyed-out background of the page will dismiss the message. Set this option to true to prevent this behavior and require an explicit button click to dismiss messages.'
    },
    safariWebsitePushId: {
      label: 'Safari Website Push ID',
      type: 'string',
      required: false,
      description:
        'If you support Safari push, you must specify this option with the website push ID that you provided to Apple when creating your Safari push certificate (starts with "web", e.g. "web.com.example.domain").'
    },
    serviceWorkerLocation: {
      label: 'Service Worker Location',
      type: 'string',
      required: false,
      description:
        'By default, when registering users for web push notifications Braze will look for the required service worker file in the root directory of your web server at /service-worker.js. If you want to host your service worker at a different path on that server, provide a value for this option that is the absolute path to the file, e.g. /mycustompath/my-worker.js. VERY IMPORTANT: setting a value here limits the scope of push notifications on your site. For instance, in the above example, because the service ,worker file is located within the /mycustompath/ directory, appboy.registerAppboyPushMessages MAY ONLY BE CALLED from web pages that start with http://yoursite.com/mycustompath/.'
    },
    sessionTimeoutInSeconds: {
      label: 'Session Timeout in Seconds',
      type: 'number',
      default: 1800, // 30 minutes
      required: false,
      description:
        'By default, sessions time out after 30 minutes of inactivity. Provide a value for this configuration option to override that default with a value of your own.'
    }
  },
  initialize: async ({ settings }, dependencies) => {
    // Separate the connection settings from the pass-through SDK options.
    // `sdkVersion` is destructured out too so it does NOT get spread into
    // appboy.initialize() as an unknown initialization option (bug fix).
    const { endpoint, api_key, sdkVersion, ...expectedConfig } = settings

    const version = sdkVersion?.length ? sdkVersion : '3.3'

    // NOTE(review): loading `service-worker.js` as a page script looks wrong —
    // the SDK bundle (e.g. appboy.min.js) is the conventional script to load
    // here, and the SDK is already bundled via the `@braze/web-sdk` import.
    // Left as-is pending confirmation with the Braze CDN layout.
    await dependencies.loadScript(`https://js.appboycdn.com/web-sdk/${version}/service-worker.js`)

    const initialized = appboy.initialize(api_key, { baseUrl: endpoint, ...expectedConfig })
    if (!initialized) {
      throw new Error('Failed to initialize AppBoy')
    }

    // Start a Braze session and reset the debounce cache of user traits so
    // the first identify of this page load is always sent.
    appboy.openSession()
    resetUserCache()

    return appboy
  },
  presets,
  actions: {
    updateUserProfile,
    trackEvent,
    trackPurchase,
    debounce
  }
}
export default browserDestination(destination)
|
<filename>node_modules/googleapis/build/src/apis/testing/index.d.ts
/*! THIS FILE IS AUTO-GENERATED */
// NOTE: generated by the googleapis toolchain — do not hand-edit; changes
// will be lost on regeneration.
import { AuthPlus } from 'googleapis-common';
import { testing_v1 } from './v1';
// Map of available API versions to their client classes.
export declare const VERSIONS: {
'v1': typeof testing_v1.Testing;
};
// Overloads: construct the Testing client from a version string or options.
export declare function testing(version: 'v1'): testing_v1.Testing;
export declare function testing(options: testing_v1.Options): testing_v1.Testing;
declare const auth: AuthPlus;
export { auth };
|
<filename>demo/PictureWidget/index.js<gh_stars>1-10
// Barrel file: pull in the widget's styles for side effects, then re-export
// the component for consumers.
import './picture-widget.scss';
import PictureWidget from './PictureWidget';
export { PictureWidget };
|
-- Customers whose combined order value exceeds a threshold.
-- Fix: the selected column must be the aggregate SUM(o.total) — selecting the
-- bare o.total under GROUP BY c.id is invalid in strict SQL modes and returns
-- an arbitrary row's value otherwise. c.name is added to GROUP BY for
-- ONLY_FULL_GROUP_BY compliance.
-- NOTE(review): `amount` is an unresolved identifier; it must be supplied by
-- the caller as a variable/bind parameter.
SELECT c.name AS 'Customer Name',
       SUM(o.total) AS 'Total Order Value'
FROM customers c
INNER JOIN orders o
        ON c.id = o.customer_id
GROUP BY c.id, c.name
HAVING SUM(o.total) > amount;
import server from './index'
import 'jest'
describe('server', () => {
  it('should be server', async () => {
    // The module under test exports a promise that resolves to the server.
    const instance = await server
    expect(instance).toBeDefined()
    // Stop the server so the test runner can exit cleanly.
    return instance.stop()
  })
})
|
/*
* Copyright 2020 GridGain Systems, Inc. and Contributors.
*
* Licensed under the GridGain Community Edition License (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.gridgain.com/products/software/community-edition/gridgain-community-edition-license
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.internal.processors.query;
import java.util.Collections;
import java.util.Date;
import java.util.List;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.binary.BinaryObjectBuilder;
import org.apache.ignite.cache.QueryEntity;
import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
import org.apache.ignite.cache.query.FieldsQueryCursor;
import org.apache.ignite.cache.query.SqlFieldsQuery;
import org.apache.ignite.cache.query.annotations.QuerySqlField;
import org.apache.ignite.cluster.ClusterState;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.configuration.DataRegionConfiguration;
import org.apache.ignite.configuration.DataStorageConfiguration;
import org.apache.ignite.configuration.IgniteConfiguration;
import org.apache.ignite.failure.StopNodeFailureHandler;
import org.apache.ignite.internal.processors.cache.CachePartialUpdateCheckedException;
import org.apache.ignite.internal.processors.cache.index.AbstractIndexingCommonTest;
import org.apache.ignite.internal.util.typedef.internal.U;
import org.apache.ignite.testframework.GridTestUtils;
import org.junit.Test;
/**
 * Checks add field with invalid data type to index.
 *
 * The "invalid" data is a {@code java.util.Date} stored in a field that SQL
 * declares as {@code DATE} (which maps to {@code java.sql.Date}) — see the
 * expected cast error asserted below.
 */
public class CreateIndexOnInvalidDataTypeTest extends AbstractIndexingCommonTest {
    /** Keys count. */
    private static final int KEY_CNT = 10;

    /** {@inheritDoc} */
    @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception {
        // Persistence is on, and a failing node stops itself via
        // StopNodeFailureHandler (the tests sleep to let that happen).
        return super.getConfiguration(igniteInstanceName)
            .setFailureHandler(new StopNodeFailureHandler())
            .setDataStorageConfiguration(
                new DataStorageConfiguration()
                    .setDefaultDataRegionConfiguration(
                        new DataRegionConfiguration()
                            .setPersistenceEnabled(true)
                    )
            );
    }

    /** {@inheritDoc} */
    @Override protected void beforeTest() throws Exception {
        super.beforeTest();
        // Start each test from an empty persistent store.
        cleanPersistenceDir();
    }

    /** {@inheritDoc} */
    @Override protected void afterTest() throws Exception {
        stopAllGrids();
        super.afterTest();
    }

    /**
     * Check case when index is created on the field with invalid data type.
     * Test steps:
     * - create cache with query entity describes a table;
     * - fill data (real data contains the fields that was not described by query entity);
     * - execute alter table (ADD COLUMN with invalid type for exists field);
     * - try to create index for the new field - exception must be throw;
     * - checks that index isn't created.
     */
    @Test
    public void testCreateIndexOnInvalidData() throws Exception {
        startGrid();

        grid().cluster().state(ClusterState.ACTIVE);

        IgniteCache<Integer, Value> c = grid().createCache(
            new CacheConfiguration<Integer, Value>()
                .setName("test")
                .setSqlSchema("PUBLIC")
                .setQueryEntities(
                    Collections.singleton(
                        new QueryEntity(Integer.class, Value.class)
                            .setTableName("TEST")
                    )
                )
                .setBackups(1)
                .setAffinity(new RendezvousAffinityFunction(false, 10))
        );

        // Each Value also carries val_date (java.util.Date) that the query
        // entity does NOT describe — the "hidden" invalid data.
        for (int i = 0; i < KEY_CNT; ++i)
            c.put(i, new Value(i));

        sql("ALTER TABLE TEST ADD COLUMN (VAL_DATE DATE)");

        sql("CREATE INDEX TEST_VAL_INT_IDX ON TEST(VAL_INT)");

        // Indexing VAL_DATE must fail: stored java.util.Date is not a SQL DATE.
        // (IgniteSQLException resolves from this package,
        // org.apache.ignite.internal.processors.query — no import needed.)
        GridTestUtils.assertThrowsAnyCause(log, () -> {
                sql("CREATE INDEX TEST_VAL_DATE_IDX ON TEST(VAL_DATE)");

                return null;
            },
            IgniteSQLException.class, "java.util.Date cannot be cast to java.sql.Date");

        // Wait for node stop if it is initiated by FailureHandler
        U.sleep(1000);

        // The valid index still works and the data is intact.
        List<List<?>> res = sql("SELECT val_int FROM TEST where val_int > -1").getAll();

        assertEquals(KEY_CNT, res.size());

        // The failed index must not have been registered.
        GridTestUtils.assertThrowsAnyCause(log, () -> {
                sql("DROP INDEX TEST_VAL_DATE_IDX");

                return null;
            },
            IgniteSQLException.class, "Index doesn't exist: TEST_VAL_DATE_IDX");
    }

    /**
     * Check case when row with invalid field is added.
     * Test steps:
     * - create table;
     * - create two index;
     * - try add entry - exception must be thrown;
     * - remove the index for field with invalid type;
     * - check that select query that uses the index for valid field is successful.
     */
    @Test
    public void testAddInvalidDataToIndex() throws Exception {
        startGrid();

        grid().cluster().state(ClusterState.ACTIVE);

        sql("CREATE TABLE TEST (ID INT PRIMARY KEY, VAL_INT INT, VAL_DATE DATE) " +
            "WITH \"CACHE_NAME=test,VALUE_TYPE=ValueType0\"");

        sql("CREATE INDEX TEST_0_VAL_DATE_IDX ON TEST(VAL_DATE)");
        sql("CREATE INDEX TEST_1_VAL_INT_IDX ON TEST(VAL_INT)");

        // Build a binary value whose VAL_DATE holds java.util.Date — not
        // compatible with the SQL DATE column the index covers.
        BinaryObjectBuilder bob = grid().binary().builder("ValueType0");

        bob.setField("VAL_INT", 10);
        bob.setField("VAL_DATE", new java.util.Date());

        GridTestUtils.assertThrowsAnyCause(log, () -> {
                grid().cache("test").put(0, bob.build());

                return null;
            },
            CachePartialUpdateCheckedException.class, "Failed to update keys");

        // Wait for node stop if it is initiated by FailureHandler
        U.sleep(1000);

        sql("DROP INDEX TEST_0_VAL_DATE_IDX");

        // Check successful insert after index is dropped.
        grid().cache("test").put(1, bob.build());

        List<List<?>> res = sql("SELECT VAL_INT FROM TEST WHERE VAL_INT > 0").getAll();

        assertEquals(2, res.size());
    }

    /**
     * @param sql SQL query.
     * @param args Query parameters.
     * @return Results cursor.
     */
    private FieldsQueryCursor<List<?>> sql(String sql, Object... args) {
        return grid().context().query().querySqlFields(new SqlFieldsQuery(sql)
            .setLazy(true)
            .setArgs(args), false);
    }

    /**
     * Test value class: only {@code val_int} is SQL-visible; {@code val_date}
     * is deliberately NOT annotated with {@code @QuerySqlField}, so it is
     * stored but undeclared until the ALTER TABLE in the test adds it.
     */
    private static class Value {
        /** SQL-visible integer field. */
        @QuerySqlField
        int val_int;

        /** Hidden java.util.Date payload (source of the type mismatch). */
        java.util.Date val_date;

        /**
         * @param val Test value.
         */
        public Value(int val) {
            this.val_int = val;

            val_date = new Date(val);
        }
    }
}
|
<reponame>LeticiaISilveira/python-boilerplate
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import six
from .compat import unicode
def yn_input(text, yes='yes', no='no', default='yes'):
    """
    Asks a yes/no question and return the answer.

    ``yes``/``no`` are the accepted full answers; a single matching initial
    letter is also accepted, and an empty answer returns ``default``.  The
    prompt capitalizes the initial of the default choice (e.g. ``[Y/n]``).
    """
    # Bug fix: the capitalized letter must follow the *default* choice.
    # Previously the "yes" initial was always capitalized, so ny_input()
    # (default='no') showed a misleading "[Y/n]" prompt.
    if default == no:
        suffix = ' [%s/%s] ' % (yes[0], no[0].upper())
    else:
        suffix = ' [%s/%s] ' % (yes[0].upper(), no[0])
    while True:
        ans = grab_input(text + suffix).lower()
        if ans in (yes, no):
            return ans
        elif not ans:
            return default
        elif ans[0] == yes[0]:
            return yes
        elif ans[0] == no[0]:
            return no
        # Unrecognized answer: re-prompt with an explicit hint.
        text = ' - please enter %r or %r' % (yes, no)
def ny_input(text, yes='yes', no='no'):
    """
    Like yn_input, but the default choice is 'no'.
    """
    return yn_input(text, yes=yes, no=no, default=no)
def default_input(text, default):
    """
    Asks for some input with a default string value.
    """
    fallback = unicode(default)
    answer = grab_input('%s [%s] ' % (text, fallback))
    # An empty answer means "accept the default".
    return answer if answer else fallback
def grab_input(msg=''):
    """
    Asks for user input.

    Like the builtin input() function, but has the same behavior in python 2
    and 3.
    """
    if not six.PY2:
        return input(msg)
    # Python 2: input() would eval the text; raw_input() is the equivalent.
    # noinspection PyUnresolvedReferences
    return raw_input(msg)
def show(*args, **kwargs):
    """
    Alias to print() function.

    Can be useful in conjunction with mock() in test cases.
    """
    result = print(*args, **kwargs)
    return result
|
<gh_stars>100-1000
/*
* Copyright © 2020 Lisk Foundation
*
* See the LICENSE file at the top-level directory of this distribution
* for licensing information.
*
* Unless otherwise agreed in a custom licensing agreement with the Lisk Foundation,
* no part of this software, including this file, may be copied, modified,
* propagated, or distributed except according to the terms contained in the
* LICENSE file.
*
* Removal or modification of this copyright notice is prohibited.
*
*/
// A pooled transaction: raw data fields plus helper functions.
export type Transaction = TransactionObject & TransactionFunctions;

// Raw transaction fields as received/stored by the pool.
export interface TransactionObject {
readonly id: Buffer;
readonly moduleID: number;
readonly assetID: number;
readonly nonce: bigint;
readonly fee: bigint;
readonly senderPublicKey: Buffer;
// Set when the pool receives the transaction.
receivedAt?: Date;
// Derived priority used for ordering; computed from the fee.
feePriority?: bigint;
}

// Behavior mixed into a pooled transaction.
export interface TransactionFunctions {
getBytes: () => Buffer;
}

// Generic success/failure status.
export enum Status {
FAIL = 0,
OK = 1,
}

// Processability of a transaction relative to the current pool/chain state.
export enum TransactionStatus {
INVALID = 0,
UNPROCESSABLE,
PROCESSABLE,
}
|
#!/bin/bash

# Print usage help for this script.
function usage() {
echo "usage: external_dependencies.sh"
echo " => execute this is a directory and it will try to find all "
echo " external (not metwork) dependencies (with ldd)"
}

if test "${1:-}" = "--help"; then
usage
exit 0
fi

# Run ldd over every shared object anywhere under the current directory and
# every file under ./bin, keep only dependency lines ("=>") that ldd reports
# as "not found", and print the unique sorted library names (the left-hand
# side of "=>").
( find . -type f -name "*.so*" -exec ldd {} 2>/dev/null \; ; find . -type f -wholename "./bin/*" -exec ldd {} 2>/dev/null \;) |grep "=>" | grep "not found" |awk -F '=>' '{print $1;}' |sort |uniq
|
<reponame>nimoqqq/roses<filename>kernel-d-validator/validator-api/src/main/java/cn/stylefeng/roses/kernel/validator/api/validators/date/DateValueValidator.java
/*
* Copyright [2020-2030] [https://www.stylefeng.cn]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Guns采用APACHE LICENSE 2.0开源协议,您在使用过程中,需要注意以下几点:
*
* 1.请不要删除和修改根目录下的LICENSE文件。
* 2.请不要删除和修改Guns源码头部的版权声明。
* 3.请保留源码和相关描述文件的项目出处,作者声明等。
* 4.分发源码时候,请注明软件出处 https://gitee.com/stylefeng/guns
* 5.在修改包名,模块名称,项目代码等时,请注明软件出处 https://gitee.com/stylefeng/guns
* 6.若您的项目无法满足以上几点,可申请商业授权
*/
package cn.stylefeng.roses.kernel.validator.api.validators.date;
import cn.hutool.core.date.DateUtil;
import cn.hutool.core.util.StrUtil;
import javax.validation.ConstraintValidator;
import javax.validation.ConstraintValidatorContext;
/**
 * Date format validator: checks a string against the pattern supplied via the
 * annotation's {@code format} parameter.
 *
 * @author fengshuonan
 * @date 2020/11/18 21:30
 */
public class DateValueValidator implements ConstraintValidator<DateValue, String> {

    /** Whether a non-empty value is mandatory. */
    private Boolean required;

    /** Expected date pattern passed to the parser. */
    private String format;

    @Override
    public void initialize(DateValue constraintAnnotation) {
        this.required = constraintAnnotation.required();
        this.format = constraintAnnotation.format();
    }

    @Override
    public boolean isValid(String dateValue, ConstraintValidatorContext context) {
        // Empty value: valid unless the annotation marks the field required.
        if (StrUtil.isEmpty(dateValue)) {
            return !required;
        }
        try {
            // Non-empty value: valid only if it parses under the configured pattern.
            DateUtil.parse(dateValue, format);
            return true;
        } catch (Exception e) {
            return false;
        }
    }
}
|
#!/bin/bash -e

# Build and install the DOMjudge domserver from the unpacked source tree.
# -e: abort on the first failing command.
cd /domjudge-src/domjudge*
# Build steps run as the unprivileged "domjudge" user; install steps as root.
chown -R domjudge: .
# NOTE(review): autoconf convention is "--with-baseurl"; confirm the
# single-dash "-with-baseurl" spelling is accepted by this configure script.
sudo -u domjudge ./configure -with-baseurl=http://localhost/
sudo -u domjudge make domserver
make install-domserver
sudo -u domjudge make docs
make install-docs
|
# Import the necessary libraries
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer

# NOTE(review): this script assumes X_train, X_test, y_train, y_test and
# num_classes are defined earlier (e.g. by a preceding data-loading step) —
# confirm before running it standalone.

# Create the Tokenizer
tokenizer = Tokenizer()

# Fit the Tokenizer to the data (builds the vocabulary from training text only)
tokenizer.fit_on_texts(X_train)

# Generate sequences from text
X_train_sequences = tokenizer.texts_to_sequences(X_train)
X_test_sequences = tokenizer.texts_to_sequences(X_test)

# Get the index of the words from the vocab
word_index = tokenizer.word_index

# Obtain the maximum sequence length (derived from the TRAINING set; longer
# test sequences are truncated by pad_sequences below)
max_sequence_length = max(len(seq) for seq in X_train_sequences)

# Pad and truncate the sequences
X_train_padded = tf.keras.preprocessing.sequence.pad_sequences(X_train_sequences, maxlen=max_sequence_length)
X_test_padded = tf.keras.preprocessing.sequence.pad_sequences(X_test_sequences, maxlen=max_sequence_length)

# Create a model: embedding -> bidirectional GRU -> dense classifier head
model = tf.keras.Sequential([
    # input_dim is len(word_index)+1 because Tokenizer indices start at 1 and
    # index 0 is reserved for padding
    tf.keras.layers.Embedding(input_dim=len(word_index)+1, output_dim=128, input_length=max_sequence_length),
    tf.keras.layers.Bidirectional(tf.keras.layers.GRU(128)),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(num_classes, activation='softmax')
])

# categorical_crossentropy implies y_train/y_test are one-hot encoded —
# presumably prepared upstream; verify against the data pipeline.
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

# Fit the model
model.fit(X_train_padded, y_train, epochs=10, batch_size=64)

# Evaluate the model
test_loss, test_acc = model.evaluate(X_test_padded, y_test)
<reponame>onearmbandit/MTAV
import "TweenMax";
import ScrollMagic from "ScrollMagic";
import "animation.gsap";
import "debug.addIndicators";
import Swiper, { Navigation } from "swiper";
Swiper.use([Navigation]);
require("../../scss/website/components/mtav-swiper.scss");
require("../../scss/website/home-page.scss");
// Home-page boot script: page-load overlay, sticky header on scroll,
// "by the numbers" swiper, and the ScrollMagic hand-reveal animation.
// NOTE: TimelineMax and Linear are globals provided by the "TweenMax"
// side-effect import above; they are not imported by name.
(function ($) {
  // Hide the loading overlay once the window has fully loaded.
  // NOTE(review): $(window).load(...) was removed in jQuery 3 — confirm
  // the bundled jQuery version, or switch to .on("load", ...).
  $(window).load(function () {
    $("#overlay").fadeOut();
  });

  // Scroll offset of the intro section. Guard against the element being
  // absent so p.offset() (undefined) cannot throw on other pages.
  var introSection = $(".imgvideo-title-sec");
  var offset = introSection.length ? introSection.offset().top : 0;

  // Fade the fixed header in once the user scrolls well past the intro.
  $(window).scroll(function () {
    if ($(window).scrollTop() > offset + 900) {
      $("header").fadeIn();
    } else {
      $("header").fadeOut();
    }
  });

  // "By the numbers" carousel; constructing it is the side effect we need.
  var swiper = new Swiper(".bythenumber-slider", {
    navigation: {
      nextEl: ".bythenumber-next",
      prevEl: ".bythenumber-prev",
    },
  });

  // Pin the hand-animation section and scrub the timeline over 1000px.
  var controller = new ScrollMagic.Controller();
  var wipeAnimation = new TimelineMax();
  var scene = new ScrollMagic.Scene({
    triggerElement: ".js-hand-trigger",
    duration: 1000,
    triggerHook: 0,
  })
    .setPin(".js-handanimation")
    .setTween(wipeAnimation)
    .addTo(controller);

  // The 0.2s tweens on ".dummy" act as pauses between animation steps.
  let wwidth = $(window).width();
  if (wwidth > 1024) {
    // Desktop timeline.
    wipeAnimation.add([TweenMax.to(".dummy", 0.2, {})]);
    wipeAnimation.add([
      TweenMax.to(".hand1-wrapper", 0.5, { left: "-40%", ease: Linear.easeNone }),
      TweenMax.to(".hand2-wrapper", 0.5, { right: "-35%", ease: Linear.easeNone }),
    ]);
    wipeAnimation.add([TweenMax.to(".dummy", 0.2, {})]);
    wipeAnimation.add([
      TweenMax.to(".hand3-wrapper", 0.5, { top: "-29%", right: "5%", ease: Linear.easeNone }),
      TweenMax.to(".hand4-wrapper", 0.5, { bottom: "-32%", left: "9%", ease: Linear.easeNone }),
    ]);
    wipeAnimation.add([TweenMax.to(".dummy", 0.2, {})]);
    wipeAnimation.add([
      TweenMax.to(".hand5-wrapper", 0.5, { bottom: "-36%", right: "4%", ease: Linear.easeNone }),
      TweenMax.to(".hand6-wrapper", 0.5, { top: "-33%", left: "4%", ease: Linear.easeNone }),
      TweenMax.to(".logo-wrapper--anim", 0.3, { marginTop: "0", opacity: "1", ease: Linear.easeNone }),
    ]);
    wipeAnimation.add([TweenMax.to(".dummy", 0.2, {})]);
    wipeAnimation.add([
      TweenMax.to(".hand1-wrapper", 0.3, { left: "-60%", ease: Linear.easeNone }),
      TweenMax.to(".hand2-wrapper", 0.3, { right: "-60%", ease: Linear.easeNone }),
      TweenMax.to(".hand3-wrapper", 0.3, { top: "-60%", ease: Linear.easeNone }),
      TweenMax.to(".hand4-wrapper", 0.3, { bottom: "-70%", ease: Linear.easeNone }),
      TweenMax.to(".hand5-wrapper", 0.3, { bottom: "-75%", ease: Linear.easeNone }),
      TweenMax.to(".hand6-wrapper", 0.3, { top: "-60%", ease: Linear.easeNone }),
      TweenMax.to(".scroll-arrow", 0.3, { bottom: "-80px", ease: Linear.easeNone }),
    ]);
  } else {
    // Tablet and mobile timeline. The original duplicated this whole
    // branch for ">767px" and "<=767px" with byte-identical bodies, so the
    // two branches are merged here; behavior is unchanged.
    wipeAnimation.add([TweenMax.to(".dummy", 0.2, {})]);
    wipeAnimation.add([
      TweenMax.to(".hand1-wrapper", 0.5, { left: "-42%", ease: Linear.easeNone }),
      TweenMax.to(".hand2-wrapper", 0.5, { right: "-44%", ease: Linear.easeNone }),
    ]);
    wipeAnimation.add([TweenMax.to(".dummy", 0.2, {})]);
    wipeAnimation.add([
      TweenMax.to(".hand3-wrapper", 0.5, { top: "-28%", right: "-41%", ease: Linear.easeNone }),
      TweenMax.to(".hand4-wrapper", 0.5, { bottom: "-32%", left: "-26%", ease: Linear.easeNone }),
    ]);
    wipeAnimation.add([TweenMax.to(".dummy", 0.2, {})]);
    wipeAnimation.add([
      TweenMax.to(".hand5-wrapper", 0.5, { bottom: "-41%", right: "-31%", ease: Linear.easeNone }),
      TweenMax.to(".hand6-wrapper", 0.5, { top: "-34%", left: "-30%", ease: Linear.easeNone }),
      TweenMax.to(".logo-wrapper--anim", 0.3, { marginTop: "0", opacity: "1", ease: Linear.easeNone }),
    ]);
    wipeAnimation.add([TweenMax.to(".dummy", 0.2, {})]);
    wipeAnimation.add([
      TweenMax.to(".hand1-wrapper", 0.3, { left: "-60%", ease: Linear.easeNone }),
      TweenMax.to(".hand2-wrapper", 0.3, { right: "-60%", ease: Linear.easeNone }),
      TweenMax.to(".hand3-wrapper", 0.3, { top: "-60%", ease: Linear.easeNone }),
      TweenMax.to(".hand4-wrapper", 0.3, { bottom: "-65%", ease: Linear.easeNone }),
      TweenMax.to(".hand5-wrapper", 0.3, { bottom: "-70%", ease: Linear.easeNone }),
      TweenMax.to(".hand6-wrapper", 0.3, { top: "-60%", ease: Linear.easeNone }),
      TweenMax.to(".scroll-arrow", 0.3, { bottom: "-80px", ease: Linear.easeNone }),
    ]);
  }
})(jQuery);
|
#include "mzpch.h"
#include "WindowsWindow.h"
#include "Mazel/Events/ApplicationEvent.h"
#include "Mazel/Events/MouseEvent.h"
#include "Mazel/Events/KeyEvent.h"
#include "Platform/OpenGL/OpenGLContext.h"
namespace Mazel
{
	// Tracks whether glfwInit has run; GLFW must only be initialized once
	// per process even if several windows are created.
	static bool s_GLFWInitialized = false;

	// Forwards GLFW's error reports into the engine's core logger.
	static void GLFWErrorCallback(int error_code, const char* description)
	{
		MZ_CORE_ERROR("GLFW Error ({0}): {1}", error_code, description);
	}
	// Platform factory: on this build a generic Window is a WindowsWindow.
	// Caller owns the returned pointer.
	Window* Window::Create(const WindowProps& props)
	{
		return new WindowsWindow(props);
	}

	// Constructs the window and immediately creates the native resources.
	WindowsWindow::WindowsWindow(const WindowProps& props)
	{
		Init(props);
	}
void WindowsWindow::Init(const WindowProps& props)
{
m_Data.Title = props.Title;
m_Data.Width = props.Width;
m_Data.Height = props.Height;
MZ_CORE_INFO("Creating window {0} ({1}, {2})", m_Data.Title, m_Data.Width, m_Data.Height);
if (!s_GLFWInitialized)
{
int isInit = glfwInit();
MZ_CORE_ASSERT(isInit, "Could not initialize GLFW!");
glfwSetErrorCallback(GLFWErrorCallback);
s_GLFWInitialized = true;
}
m_Window = glfwCreateWindow((int)m_Data.Width, (int)m_Data.Height, m_Data.Title.c_str(), nullptr, nullptr);
m_Context = new OpenGLContext(m_Window);
m_Context->Init();
glfwSetWindowUserPointer(m_Window, &m_Data);
SetVSync(true);
#pragma region [GLFW callbacks]
glfwSetWindowSizeCallback(m_Window, [](GLFWwindow* window, int width, int height)
{
WindowData& data = *(WindowData*)glfwGetWindowUserPointer(window);
data.Width = width;
data.Height = height;
WindowResizeEvent ev(width, height);
data.EventCallback(ev);
});
glfwSetWindowCloseCallback(m_Window, [](GLFWwindow* window)
{
WindowData& data = *(WindowData*)glfwGetWindowUserPointer(window);
WindowCloseEvent ev;
data.EventCallback(ev);
});
glfwSetKeyCallback(m_Window, [](GLFWwindow* window, int key, int scancode, int action, int mods)
{
WindowData& data = *(WindowData*)glfwGetWindowUserPointer(window);
switch (action)
{
case GLFW_PRESS:
{
KeyPressedEvent ev(key, 0);
data.EventCallback(ev);
break;
}
case GLFW_RELEASE:
{
KeyReleasedEvent ev(key);
data.EventCallback(ev);
break;
}
case GLFW_REPEAT:
{
KeyPressedEvent ev(key, 1);
data.EventCallback(ev);
break;
}
}
});
glfwSetCharCallback(m_Window, [](GLFWwindow* window, unsigned int codepoint)
{
WindowData& data = *(WindowData*)glfwGetWindowUserPointer(window);
KeyTypedEvent ev(codepoint);
data.EventCallback(ev);
});
glfwSetMouseButtonCallback(m_Window, [](GLFWwindow* window, int button, int action, int mods)
{
WindowData& data = *(WindowData*)glfwGetWindowUserPointer(window);
switch (action)
{
case GLFW_PRESS:
{
MouseButtonPressedEvent ev(button);
data.EventCallback(ev);
break;
}
case GLFW_RELEASE:
{
MouseButtonReleasedEvent ev(button);
data.EventCallback(ev);
break;
}
}
});
glfwSetScrollCallback(m_Window, [](GLFWwindow* window, double xOffset, double yOffset)
{
WindowData& data = *(WindowData*)glfwGetWindowUserPointer(window);
MouseScrolledEvent ev((float)xOffset, (float)yOffset);
data.EventCallback(ev);
});
glfwSetCursorPosCallback(m_Window, [](GLFWwindow* window, double xPos, double yPos)
{
WindowData& data = *(WindowData*)glfwGetWindowUserPointer(window);
MouseMovedEvent ev((float)xPos, (float)yPos);
data.EventCallback(ev);
});
#pragma endregion [GLFW callbacks]
}
	// Destroys the native window.
	void WindowsWindow::Shutdown()
	{
		glfwDestroyWindow(m_Window);
		// NOTE(review): glfwTerminate tears GLFW down process-wide, which
		// would break any other still-open window — confirm single-window use.
		glfwTerminate();
	}
	// Per-frame tick: pump the OS event queue, then present the back buffer.
	void WindowsWindow::OnUpdate()
	{
		glfwPollEvents();
		m_Context->SwapBuffers();
	}
void WindowsWindow::SetVSync(bool enabled)
{
if (enabled)
glfwSwapInterval(1);
else
glfwSwapInterval(0);
m_Data.VSync = enabled;
}
	// Returns the vsync state cached by the last SetVSync call.
	bool WindowsWindow::IsVSync() const
	{
		return m_Data.VSync;
	}
} |
package com.wpisen.trace.agent.trace;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import com.wpisen.trace.agent.collect.Event;
import com.wpisen.trace.agent.collect.EventType;
import com.wpisen.trace.agent.common.util.Assert;
import com.wpisen.trace.agent.core.*;
/**
 * Per-request trace session. Tracks the current/next RPC id, per-call
 * scratch attributes, buffered events, and forwards trace nodes to the
 * {@link TraceContext}.
 *
 * @since 0.1.0
 */
public class TraceSession {

    /** Root request information for this session. */
    private TraceRequest traceRequest;

    /** RPC id of the most recently generated child call. */
    private String rpcId;

    /** Scratch attributes kept for the duration of the traced call. */
    private Map<String, Object> attributes = new ConcurrentHashMap<String, Object>();

    private TraceContext context;

    // Child-call counter. NOTE(review): not synchronized — confirm each
    // session is confined to a single thread before relying on getNextRpcId
    // from multiple threads (eventTable below is a plain HashMap too).
    private int serialNumber = 0;

    /** Active events grouped by type; list order is activation order. */
    private Map<EventType, LinkedList<Event>> eventTable = new HashMap<>();

    public TraceSession(TraceContext context, TraceRequest traceRequest) {
        Assert.notNull(context);
        Assert.notNull(traceRequest);
        Assert.notNull(traceRequest.getParentRpcId());
        this.context = context;
        this.traceRequest = traceRequest;
        // TODO temporary: every request defaults to debug mode until a
        // proper sampling design exists.
        if (traceRequest.getProperties() == null) {
            traceRequest.setProperties(new Properties());
        }
        if (!traceRequest.getProperties().containsKey(TraceContext.DEBUG_ID)) {
            traceRequest.getProperties().setProperty(TraceContext.DEBUG_ID, traceRequest.getTraceId());
        }
    }

    /**
     * @return the current RPC id, or "0" when no child call was generated yet
     */
    public String getCurrentRpcId() {
        return rpcId == null ? "0" : rpcId;
    }

    /**
     * Generates the RPC id for the next child call,
     * e.g. a-&gt;b "0.1", b-&gt;c "0.1.1".
     *
     * @return the newly generated RPC id
     */
    public String getNextRpcId() {
        serialNumber++;
        rpcId = traceRequest.getParentRpcId() + "." + serialNumber;
        return rpcId;
    }

    /**
     * @return the trace id of the originating request
     */
    public String getTraceId() {
        return traceRequest.getTraceId();
    }

    public TraceRequest getTraceRequest() {
        return traceRequest;
    }

    public Object setAttribute(String key, Object param) {
        return attributes.put(key, param);
    }

    public Object removeAttribute(String key) {
        return attributes.remove(key);
    }

    public Object getAttribute(String key) {
        return attributes.get(key);
    }

    public TraceContext getContext() {
        return context;
    }

    /** @return a fresh 32-character hex trace id (a UUID without dashes). */
    public static String createTraceId() {
        // replace() (plain text) instead of replaceAll() (regex) — identical
        // result for "-", without compiling a pattern.
        return UUID.randomUUID().toString().replace("-", "");
    }

    /** Enriches the node with application info and stores it via the context. */
    public void addNode(TraceNode node) {
        AppInfo appinfo = DefaultApplication.getInstance().getAppInfo();
        node.setAppId(appinfo.getAppId());
        node.setAppDetail(appinfo.getAppName());
        if (traceRequest.getProperties().containsKey(TraceContext.UPLOAD_PATH)) {
            // Wrap the node so it carries its own upload destination.
            TraceBeanWapper wapper = new TraceBeanWapper(node);
            wapper.setTheUploadUrl(traceRequest.getProperties().getProperty(TraceContext.UPLOAD_PATH));
            context.storeNode(wapper);
        } else {
            context.storeNode(node);
        }
    }

    /** Stores a TraceFrom record, wrapped with the upload URL when one is set. */
    public void addTraceFrom(TraceFrom from) {
        if (traceRequest.getProperties().containsKey(TraceContext.UPLOAD_PATH)) {
            TraceBeanWapper wapper = new TraceBeanWapper(from);
            wapper.setTheUploadUrl(traceRequest.getProperties().getProperty(TraceContext.UPLOAD_PATH));
            context.storeNode(wapper);
        } else {
            context.storeNode(from);
        }
    }

    /**
     * @return true when a debug id is present on the request properties.
     *         (Method name keeps its historical typo — "isDebug" — for
     *         backward compatibility with existing callers.)
     */
    public Boolean isDubug() {
        return traceRequest.getProperties().get(TraceContext.DEBUG_ID) != null;
    }

    /** Buffers an event under its type, creating the list on first use. */
    public void addEvent(Event event) {
        eventTable.computeIfAbsent(event.getType(), k -> new LinkedList<Event>()).add(event);
    }

    /**
     * Removes an event. Normally invoked by the event framework; manual
     * callers must ensure events of the same type are not active concurrently.
     *
     * @param event the event to clear
     */
    public void clearEvent(Event event) {
        LinkedList<Event> eventList = eventTable.get(event.getType());
        if (eventList != null) {
            eventList.remove(event);
        }
    }

    /** @return the most recently added event of the given type, or null. */
    public Event getCurrentEvent(EventType type) {
        LinkedList<Event> eventList = eventTable.get(type);
        if (eventList != null && !eventList.isEmpty()) {
            return eventList.getLast();
        }
        return null;
    }
}
|
<reponame>descholar-ceo/rwbanks
// Demo usage of the rwbanks lookup API.
// NOTE(review): both call styles (synchronous return value and node-style
// callback) are exercised below — confirm the library supports both.
const { getBanks, getBank } = require("./index");
console.log(getBanks()); // get a list of all licensed banks
getBanks((error, banks) => {
  console.log(banks);
});
// get a bank by its SWIFT code
getBank("BKIGRWRW", function (error, bank) {
  console.log(bank);
});
console.log(getBank("BKIGRWRW"));
// Expected shape of the returned bank record:
// {
//   name: 'BANK OF KIGALI LIMITED',
//   swift_code: 'BKIGRWRW',
//   bank_code: 'BKIG',
//   address: 'KN 4 Ave, Kigali, Rwanda',
//   postal_code: '175',
//   phone_number: '+250788143000',
//   toll_free: '4455',
//   email_address: '<EMAIL>',
//   ussd_code: '*334#'
// }
|
<filename>src/main/java/com/bullhornsdk/data/model/response/list/customobject/ClientCorporationCustomObjectInstance24ListWrapper.java
package com.bullhornsdk.data.model.response.list.customobject;
import com.bullhornsdk.data.model.entity.core.customobject.ClientCorporationCustomObjectInstance24;
import com.bullhornsdk.data.model.response.list.StandardListWrapper;
/**
 * {@link StandardListWrapper} specialization for
 * {@link ClientCorporationCustomObjectInstance24} list responses; appears to
 * exist only to pin the generic element type for response deserialization.
 */
public class ClientCorporationCustomObjectInstance24ListWrapper extends StandardListWrapper<ClientCorporationCustomObjectInstance24> {
}
|
import Mastodon, { WebSocket as SocketListener, Status, Notification, Instance, Response } from 'megalodon'
import log from 'electron-log'
import { LocalAccount } from '~/src/types/localAccount'
/**
 * Resolves the streaming API endpoint for an account by querying its
 * instance metadata. Throws when the account has no access token.
 */
const StreamingURL = async (account: LocalAccount): Promise<string> => {
  if (!account.accessToken) {
    throw new Error('access token is empty')
  }
  const apiClient = new Mastodon(account.accessToken, `${account.baseURL}/api/v1`)
  const instanceInfo: Response<Instance> = await apiClient.get<Instance>('/instance')
  return instanceInfo.data.urls.streaming_api
}
export { StreamingURL }
/**
 * Thin wrapper around megalodon's websocket streaming client.
 * Create one per timeline, call one of the start methods, and call
 * stop() before discarding it.
 */
export default class WebSocket {
  private client: Mastodon
  private listener: SocketListener | null

  constructor(account: LocalAccount, streamingURL: string) {
    // The streaming endpoint speaks wss; derive it from the https URL.
    const url = streamingURL.replace(/^https:\/\//, 'wss://')
    this.client = new Mastodon(account.accessToken!, url + '/api/v1')
    this.listener = null
  }

  /**
   * Start the per-user ("home") streaming socket.
   * @param updateCallback called with each new Status.
   * @param notificationCallback called with each Notification.
   * @param deleteCallback called with the id of each deleted status.
   * @param errCallback called on socket or parser errors.
   */
  startUserStreaming(updateCallback: Function, notificationCallback: Function, deleteCallback: Function, errCallback: Function) {
    this.listener = this.client.socket('/streaming', 'user')

    this.listener.on('connect', _ => {
      log.info('/streaming/?stream=user started')
    })

    this.listener.on('update', (status: Status) => {
      updateCallback(status)
    })

    this.listener.on('notification', (notification: Notification) => {
      notificationCallback(notification)
    })

    this.listener.on('delete', (id: string) => {
      deleteCallback(id)
    })

    this.listener.on('error', (err: Error) => {
      errCallback(err)
    })

    this.listener.on('parser-error', (err: Error) => {
      errCallback(err)
    })
  }

  /**
   * Start new custom streaming with websocket.
   * @param stream Path of streaming.
   * @param updateCallback A callback function which is called update.
   * @param deleteCallback A callback function which is called on delete.
   * @param errCallback A callback function which is called on error.
   * When local timeline, the path is `public:local`.
   * When public timeline, the path is `public`.
   * When hashtag timeline, the path is `hashtag&tag=tag_name`.
   * When list timeline, the path is `list&list=list_id`.
   */
  start(stream: string, updateCallback: Function, deleteCallback: Function, errCallback: Function) {
    this.listener = this.client.socket('/streaming', stream)

    this.listener.on('connect', _ => {
      log.info(`/streaming/?stream=${stream} started`)
    })

    this.listener.on('update', (status: Status) => {
      updateCallback(status)
    })

    this.listener.on('delete', (id: string) => {
      deleteCallback(id)
    })

    this.listener.on('error', (err: Error) => {
      errCallback(err)
    })

    this.listener.on('parser-error', (err: Error) => {
      errCallback(err)
    })
  }

  /**
   * Detach all callbacks and stop the socket. Errors raised during
   * shutdown are only logged, not forwarded.
   */
  stop() {
    if (this.listener) {
      this.listener.removeAllListeners('connect')
      this.listener.removeAllListeners('update')
      this.listener.removeAllListeners('notification')
      // Fix: 'delete' listeners are registered by both start methods but
      // were never removed here, leaking handlers across restarts.
      this.listener.removeAllListeners('delete')
      this.listener.removeAllListeners('error')
      this.listener.removeAllListeners('parser-error')
      // Keep logging-only handlers so late errors don't go unhandled.
      this.listener.on('error', (e: Error) => {
        log.error(e)
      })
      this.listener.on('parser-error', (e: Error) => {
        log.error(e)
      })
      this.listener.stop()
      log.info('streaming stopped')
    }
  }
}
|
package com.shape.converter.kmltosdo.kml.service;
import org.apache.commons.lang3.exception.ExceptionUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import com.shape.converter.kmltosdo.kml.service.entitiy.KmlShapeFailLogEntity;
import com.shape.converter.kmltosdo.kml.service.model.SplitKmlIntoJTSServiceResult;
import com.shape.converter.kmltosdo.kml.service.repo.KmlShapeFailLogRepo;
@Service
public class KmlShapeFailLogService {

    @Autowired
    private KmlShapeFailLogRepo kmlShapeFailLogRepo;

    /**
     * Persists a failure log entry built from the result of a failed
     * KML-to-JTS split run (file name, reason, and stack trace when an
     * exception is present).
     */
    @Transactional
    public void insertKmlShapeFailLogUsingJtsResult(SplitKmlIntoJTSServiceResult splitKmlIntoJTSServiceResult) {
        final KmlShapeFailLogEntity failLog = new KmlShapeFailLogEntity();
        failLog.setFileName(splitKmlIntoJTSServiceResult.getKmlFileName());
        failLog.setFailReason(splitKmlIntoJTSServiceResult.getErrorMessage());
        if (splitKmlIntoJTSServiceResult.getException() != null) {
            failLog.setFailDetail(ExceptionUtils.getStackTrace(splitKmlIntoJTSServiceResult.getException()));
        }
        kmlShapeFailLogRepo.save(failLog);
    }
}
|
<reponame>hallyn/lxd<gh_stars>0
package main
import (
"encoding/json"
"fmt"
"net/http"
"os"
"os/exec"
"path"
"strconv"
"github.com/gorilla/mux"
"gopkg.in/lxc/go-lxc.v2"
"github.com/lxc/lxd/shared"
)
// snapshotsDir returns the directory holding all snapshots of a container.
func snapshotsDir(c *lxdContainer) string {
	return shared.VarPath("lxc", c.name, "snapshots")
}

// snapshotDir returns the directory of a single named snapshot.
func snapshotDir(c *lxdContainer, name string) string {
	return path.Join(snapshotsDir(c), name)
}

// snapshotStateDir returns the checkpoint-state directory of a snapshot;
// elsewhere its existence is what marks the snapshot as stateful.
func snapshotStateDir(c *lxdContainer, name string) string {
	return path.Join(snapshotDir(c, name), "state")
}

// snapshotRootfsDir returns the rootfs-copy directory of a snapshot.
func snapshotRootfsDir(c *lxdContainer, name string) string {
	return path.Join(snapshotDir(c, name), "rootfs")
}
// containerSnapshotsGet lists a container's snapshots. Without
// ?recursion (or with 0) it returns the snapshot URLs; otherwise it
// returns the snapshot objects themselves (name + stateful flag).
func containerSnapshotsGet(d *Daemon, r *http.Request) Response {
	recursion_str := r.FormValue("recursion")
	recursion, err := strconv.Atoi(recursion_str)
	if err != nil {
		// A missing or malformed parameter falls back to the URL listing.
		recursion = 0
	}

	cname := mux.Vars(r)["name"]
	c, err := newLxdContainer(cname, d)
	if err != nil {
		return SmartError(err)
	}

	// Snapshots are stored as containers named "<container>/<snapshot>",
	// so match on that name prefix.
	regexp := fmt.Sprintf("%s/", cname)
	length := len(regexp)
	q := "SELECT name FROM containers WHERE type=? AND SUBSTR(name,1,?)=?"
	var name string
	inargs := []interface{}{cTypeSnapshot, length, regexp}
	outfmt := []interface{}{name}
	results, err := shared.DbQueryScan(d.db, q, inargs, outfmt)
	if err != nil {
		return SmartError(err)
	}

	var result_string []string
	var result_map []shared.Jmap
	for _, r := range results {
		name = r[0].(string)
		if recursion == 0 {
			url := fmt.Sprintf("/%s/containers/%s/snapshots/%s", shared.APIVersion, cname, name)
			result_string = append(result_string, url)
		} else {
			// A snapshot is stateful iff its state directory exists.
			_, err := os.Stat(snapshotStateDir(c, name))
			body := shared.Jmap{"name": name, "stateful": err == nil}
			result_map = append(result_map, body)
		}
	}

	if recursion == 0 {
		return SyncResponse(true, result_string)
	} else {
		return SyncResponse(true, result_map)
	}
}
/*
 * Note, the code below doesn't deal with snapshots of snapshots.
 * To do that, we'll need to weed out based on # slashes in names
 */
// nextSnapshot returns the next free numeric suffix for automatically
// named snapshots ("snapN") of the given container. DB errors are treated
// as "no snapshots yet" and yield 0.
func nextSnapshot(d *Daemon, name string) int {
	base := fmt.Sprintf("%s/snap", name)
	length := len(base)
	// Fix: fetch every matching name and compute the numeric maximum in
	// Go. The previous `SELECT MAX(name)` compared names lexicographically,
	// ranking "snap9" above "snap10" and producing wrong next indices.
	// (Also drops a no-op fmt.Sprintf around the constant query string.)
	q := "SELECT name FROM containers WHERE type=? AND SUBSTR(name,1,?)=?"
	var numstr string
	inargs := []interface{}{cTypeSnapshot, length, base}
	outfmt := []interface{}{numstr}
	results, err := shared.DbQueryScan(d.db, q, inargs, outfmt)
	if err != nil {
		return 0
	}
	max := 0

	for _, r := range results {
		numstr = r[0].(string)
		if len(numstr) <= length {
			continue
		}
		substr := numstr[length:]
		var num int
		count, err := fmt.Sscanf(substr, "%d", &num)
		if err != nil || count != 1 {
			continue
		}
		if num >= max {
			max = num + 1
		}
	}

	return max
}
// containerSnapshotsPost creates a new snapshot of a container: it picks
// a name ("snapN" when none is supplied), records the snapshot in the
// database and copies the rootfs. With "stateful" set it also checkpoints
// the running container first. The work runs as an async operation.
func containerSnapshotsPost(d *Daemon, r *http.Request) Response {
	name := mux.Vars(r)["name"]

	/*
	 * snapshot is a three step operation:
	 * 1. choose a new name
	 * 2. copy the database info over
	 * 3. copy over the rootfs
	 */
	c, err := newLxdContainer(name, d)
	if err != nil {
		return SmartError(err)
	}

	raw := shared.Jmap{}
	if err := json.NewDecoder(r.Body).Decode(&raw); err != nil {
		return BadRequest(err)
	}

	snapshotName, err := raw.GetString("name")
	if err != nil || snapshotName == "" {
		// come up with a name
		i := nextSnapshot(d, name)
		snapshotName = fmt.Sprintf("snap%d", i)
	}

	stateful, err := raw.GetBool("stateful")
	if err != nil {
		return BadRequest(err)
	}

	fullName := fmt.Sprintf("%s/%s", name, snapshotName)
	snapDir := snapshotDir(c, snapshotName)
	if shared.PathExists(snapDir) {
		return Conflict
	}

	// Created synchronously so a duplicate request fails fast with Conflict.
	err = os.MkdirAll(snapDir, 0700)
	if err != nil {
		return InternalError(err)
	}

	snapshot := func() error {
		StateDir := snapshotStateDir(c, snapshotName)
		err = os.MkdirAll(StateDir, 0700)
		if err != nil {
			return err
		}

		if stateful {
			// TODO - shouldn't we freeze for the duration of rootfs snapshot below?
			if !c.c.Running() {
				return fmt.Errorf("Container not running\n")
			}
			opts := lxc.CheckpointOptions{Directory: StateDir, Stop: true, Verbose: true}
			if err := c.c.Checkpoint(opts); err != nil {
				return err
			}
		}

		/* Create the db info */
		args := DbCreateContainerArgs{
			d:            d,
			name:         fullName,
			ctype:        cTypeSnapshot,
			config:       c.config,
			profiles:     c.profiles,
			ephem:        c.ephemeral,
			baseImage:    c.config["volatile.baseImage"],
			architecture: c.architecture,
		}

		_, err := dbCreateContainer(args)
		if err != nil {
			return err
		}

		/* Create the directory and rootfs, set perms */
		/* Copy the rootfs */
		// Trailing slash on oldPath makes rsync copy the directory's
		// contents rather than the directory itself.
		oldPath := fmt.Sprintf("%s/", shared.VarPath("lxc", name, "rootfs"))
		newPath := snapshotRootfsDir(c, snapshotName)
		err = exec.Command("rsync", "-a", "--devices", oldPath, newPath).Run()
		return err
	}

	return AsyncResponse(shared.OperationWrap(snapshot), nil)
}
// snapshotHandler dispatches /containers/{name}/snapshots/{snapshotName}
// requests to the per-method handlers, after validating that both the
// container and the snapshot directory exist.
func snapshotHandler(d *Daemon, r *http.Request) Response {
	containerName := mux.Vars(r)["name"]
	c, err := newLxdContainer(containerName, d)
	if err != nil {
		return SmartError(err)
	}

	snapshotName := mux.Vars(r)["snapshotName"]
	dir := snapshotDir(c, snapshotName)
	_, err = os.Stat(dir)
	if err != nil {
		// Unknown snapshot (or unreadable directory).
		return SmartError(err)
	}

	switch r.Method {
	case "GET":
		return snapshotGet(c, snapshotName)
	case "POST":
		return snapshotPost(r, c, snapshotName)
	case "DELETE":
		return snapshotDelete(d, c, snapshotName)
	default:
		return NotFound
	}
}
// snapshotGet returns a snapshot's name plus whether it is stateful
// (a snapshot is stateful iff its state directory exists).
func snapshotGet(c *lxdContainer, name string) Response {
	_, statErr := os.Stat(snapshotStateDir(c, name))
	return SyncResponse(true, shared.Jmap{"name": name, "stateful": statErr == nil})
}
// snapshotPost renames a snapshot. The JSON body must contain the new
// "name"; the rename itself runs as an async operation.
func snapshotPost(r *http.Request, c *lxdContainer, oldName string) Response {
	raw := shared.Jmap{}
	if err := json.NewDecoder(r.Body).Decode(&raw); err != nil {
		return BadRequest(err)
	}

	newName, err := raw.GetString("name")
	if err != nil {
		return BadRequest(err)
	}

	oldDir := snapshotDir(c, oldName)
	newDir := snapshotDir(c, newName)

	// Fix: the original checked !os.IsNotExist(err) first, so an existing
	// target (err == nil) returned InternalError(nil) and the Conflict
	// branch was unreachable. Check for existence first.
	_, err = os.Stat(newDir)
	if err == nil {
		return Conflict
	} else if !os.IsNotExist(err) {
		return InternalError(err)
	}

	/*
	 * TODO: do we need to do something more intelligent here? We probably
	 * shouldn't do anything for stateful snapshots, since changing the fs
	 * out from under criu will cause it to fail, but it may be useful to
	 * do something for stateless ones.
	 */
	rename := func() error { return os.Rename(oldDir, newDir) }
	return AsyncResponse(shared.OperationWrap(rename), nil)
}
// snapshotDelete removes a snapshot's database record and, asynchronously,
// its on-disk directory.
func snapshotDelete(d *Daemon, c *lxdContainer, name string) Response {
	// NOTE(review): any failure from dbRemoveSnapshot is ignored here —
	// confirm whether it can fail and should abort the on-disk removal.
	dbRemoveSnapshot(d, c.name, name)
	dir := snapshotDir(c, name)
	remove := func() error { return os.RemoveAll(dir) }
	return AsyncResponse(shared.OperationWrap(remove), nil)
}
|
#!/bin/sh -e
# Integration test for sshportal: exercises login, hostgroup/usergroup/ACL
# management, host CRUD, config backup/restore round-trip, and (outside
# CircleCI) bastion passthrough to a test server. Exits on first failure (-e).

# Install the test client key and a client config pointing at the
# sshportal and testserver containers.
mkdir -p ~/.ssh
cp /integration/client_test_rsa ~/.ssh/id_rsa
chmod -R 700 ~/.ssh
cat >~/.ssh/config <<EOF
Host sshportal
  Port 2222
  HostName sshportal

Host testserver
  Port 2222
  HostName testserver

Host *
  StrictHostKeyChecking no
  ControlMaster auto
  SendEnv TEST_*
EOF

set -x

# login
ssh sshportal -l invite:integration

# hostgroup/usergroup/acl
ssh sshportal -l admin hostgroup create
ssh sshportal -l admin hostgroup create --name=hg1
ssh sshportal -l admin hostgroup create --name=hg2 --comment=test
ssh sshportal -l admin usergroup inspect hg1 hg2
ssh sshportal -l admin hostgroup ls
ssh sshportal -l admin usergroup create
ssh sshportal -l admin usergroup create --name=ug1
ssh sshportal -l admin usergroup create --name=ug2 --comment=test
ssh sshportal -l admin usergroup inspect ug1 ug2
ssh sshportal -l admin usergroup ls
ssh sshportal -l admin acl create --ug=ug1 --ug=ug2 --hg=hg1 --hg=hg2 --comment=test --action=allow --weight=42
ssh sshportal -l admin acl inspect 2
ssh sshportal -l admin acl ls

# basic host create
ssh sshportal -l admin host create bob@example.org:1234
ssh sshportal -l admin host create test42
ssh sshportal -l admin host create --name=testtest --comment=test --password=test test@test.test
ssh sshportal -l admin host create --group=hg1 --group=hg2 hostwithgroups.org
ssh sshportal -l admin host inspect example test42 testtest hostwithgroups
ssh sshportal -l admin host update --assign-group=hg1 test42
ssh sshportal -l admin host update --unassign-group=hg1 test42
ssh sshportal -l admin host update --assign-group=hg1 test42
ssh sshportal -l admin host update --assign-group=hg2 --unassign-group=hg2 test42
ssh sshportal -l admin host ls

# backup/restore: two backups around a restore must match (modulo timestamps)
ssh sshportal -l admin config backup --indent --ignore-events > backup-1
ssh sshportal -l admin config restore --confirm < backup-1
ssh sshportal -l admin config backup --indent --ignore-events > backup-2
(
    cat backup-1 | grep -v '"date":' | grep -v 'tedAt":' > backup-1.clean
    cat backup-2 | grep -v '"date":' | grep -v 'tedAt":' > backup-2.clean
    set -xe
    diff backup-1.clean backup-2.clean
)

if [ "$CIRCLECI" = "true" ]; then
    # Fix: corrected "Strage" typo in the skip message.
    echo "Strange behavior with cross-container communication on CircleCI, skipping some tests..."
else
    # bastion
    ssh sshportal -l admin host create --name=testserver toto@testserver:2222
    out="$(ssh sshportal -l testserver echo hello | head -n 1)"
    test "$out" = '{"User":"toto","Environ":null,"Command":["echo","hello"]}'
    out="$(TEST_A=1 TEST_B=2 TEST_C=3 TEST_D=4 TEST_E=5 TEST_F=6 TEST_G=7 TEST_H=8 TEST_I=9 ssh sshportal -l testserver echo hello | head -n 1)"
    test "$out" = '{"User":"toto","Environ":["TEST_A=1","TEST_B=2","TEST_C=3","TEST_D=4","TEST_E=5","TEST_F=6","TEST_G=7","TEST_H=8","TEST_I=9"],"Command":["echo","hello"]}'
fi

# TODO: test more cases (forwards, scp, sftp, interactive, pty, stdin, exit code, ...)
|
/// A single todo item: an immutable name plus a mutable completion flag.
struct Task {
    let name: String
    var isComplete: Bool
}

/// An ordered, index-addressed list of tasks.
class TodoList {
    var tasks = [Task]()

    /// Appends a new, incomplete task with the given name.
    func addTask(name: String) {
        let task = Task(name: name, isComplete: false)
        tasks.append(task)
    }

    /// Removes the task at `index`.
    /// Fix: out-of-range indices are now ignored instead of trapping.
    func removeTask(at index: Int) {
        guard tasks.indices.contains(index) else { return }
        tasks.remove(at: index)
    }

    /// Marks the task at `index` complete.
    /// Fix: out-of-range indices are now ignored instead of trapping.
    func markTaskComplete(at index: Int) {
        guard tasks.indices.contains(index) else { return }
        tasks[index].isComplete = true
    }
}
<gh_stars>0
package com.wilson.java.treenode.search;
import java.util.List;
/**
 * Simple mutable tree node holding a string value and a list of children.
 */
public class Node {

    private String value;
    private List<Node> children;

    public Node(String value, List<Node> children) {
        this.value = value;
        this.children = children;
    }

    /** Creates an empty node with no value and an empty child list. */
    public Node() {
        // Fix: the no-arg constructor left `children` null, so addChild
        // threw a NullPointerException on freshly created nodes.
        this.children = new java.util.ArrayList<Node>();
    }

    public String value() {
        return this.value;
    }

    public void setValue(String value) {
        this.value = value;
    }

    /** Appends a child; null children are ignored. */
    public void addChild(Node node) {
        if (node == null) {
            return;
        }
        if (this.children == null) {
            // Defensive: the two-arg constructor may have been given null.
            this.children = new java.util.ArrayList<Node>();
        }
        this.children.add(node);
    }

    /** @return the live child list (may be null when constructed with null). */
    public List<Node> getChildren() {
        return this.children;
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.