text stringlengths 1 1.05M |
|---|
#!/bin/sh
# Build the centurylink/imagelayers image inside the golang-builder container.
# The host docker socket is mounted so the builder can drive the host daemon;
# the final argument is the tag for the image being produced.
docker run --rm \
  -v "$(pwd)":/src \
  -v /var/run/docker.sock:/var/run/docker.sock \
  centurylink/golang-builder:latest \
  centurylink/imagelayers:latest
|
"use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.ic_text_rotation_angledown = void 0;
var ic_text_rotation_angledown = {
"viewBox": "0 0 24 24",
"children": [{
"name": "path",
"attribs": {
"d": "M0 0h24v24H0z",
"fill": "none"
},
"children": []
}, {
"name": "path",
"attribs": {
"d": "M19.4 4.91l-1.06-1.06L7.2 8.27l1.48 1.48 2.19-.92 3.54 3.54-.92 2.19 1.48 1.48L19.4 4.91zm-6.81 3.1l4.87-2.23-2.23 4.87-2.64-2.64zM14.27 21v-4.24l-1.41 1.41-8.84-8.84-1.42 1.42 8.84 8.84L10.03 21h4.24z"
},
"children": []
}]
};
exports.ic_text_rotation_angledown = ic_text_rotation_angledown; |
package main
import (
"embed"
)
// Assets contains project assets.
//
// The contents of app/build (presumably the compiled frontend bundle) are
// embedded into the binary at compile time; the directory must exist and be
// non-empty when `go build` runs, or compilation fails.
//
//go:embed app/build
var Assets embed.FS
|
<reponame>sthagen/aquasecurity-trivy
package utils
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/aquasecurity/trivy/pkg/fanal/types"
)
// TestFormatSrcVersion checks the source-package version string: the epoch
// prefix ("N:") appears only when SrcEpoch is set, and the release is always
// appended after a hyphen.
func TestFormatSrcVersion(t *testing.T) {
	cases := []struct {
		name     string
		input    types.Package
		expected string
	}{
		{
			name: "happy path",
			input: types.Package{
				SrcVersion: "1.2.3",
				SrcRelease: "1",
			},
			expected: "1.2.3-1",
		},
		{
			name: "with epoch",
			input: types.Package{
				SrcEpoch:   2,
				SrcVersion: "1.2.3",
				SrcRelease: "alpha",
			},
			expected: "2:1.2.3-alpha",
		},
	}
	for _, tc := range cases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			assert.Equal(t, tc.expected, FormatSrcVersion(tc.input))
		})
	}
}
// TestFormatVersion checks the binary-package version string: "epoch:" prefix
// only when Epoch is non-zero, release appended after a hyphen.
func TestFormatVersion(t *testing.T) {
	cases := []struct {
		name     string
		input    types.Package
		expected string
	}{
		{
			name: "happy path",
			input: types.Package{
				Version: "1.2.3",
				Release: "1",
			},
			expected: "1.2.3-1",
		},
		{
			name: "with epoch",
			input: types.Package{
				Epoch:   2,
				Version: "1.2.3",
				Release: "alpha",
			},
			expected: "2:1.2.3-alpha",
		},
	}
	for _, tc := range cases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			assert.Equal(t, tc.expected, FormatVersion(tc.input))
		})
	}
}
|
<gh_stars>10-100
package udmi.schema;
import javax.annotation.processing.Generated;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonPropertyDescription;
import com.fasterxml.jackson.annotation.JsonPropertyOrder;
/**
 * Properties of the expected physical location of the device.
 *
 * NOTE(review): this class is marked @Generated("jsonschema2pojo"); hand
 * edits will be lost if the class is regenerated from the schema.
 */
@JsonInclude(JsonInclude.Include.NON_NULL)
@JsonPropertyOrder({
"site",
"section",
"position"
})
@Generated("jsonschema2pojo")
public class Location__1 {
/**
 * The site name according to the site model in which a device is installed.
 * (Required)
 *
 */
@JsonProperty("site")
@JsonPropertyDescription("The site name according to the site model in which a device is installed in")
public String site;
/**
 * Optional section identifier within the site.
 */
@JsonProperty("section")
public String section;
/**
 * Optional physical position details; see {@link Position__1}.
 */
@JsonProperty("position")
public Position__1 position;
/**
 * Null-safe 31-based hash over site, section and position, consistent with
 * {@link #equals(Object)}.
 */
@Override
public int hashCode() {
int result = 1;
result = ((result* 31)+((this.site == null)? 0 :this.site.hashCode()));
result = ((result* 31)+((this.section == null)? 0 :this.section.hashCode()));
result = ((result* 31)+((this.position == null)? 0 :this.position.hashCode()));
return result;
}
/**
 * Field-wise, null-safe equality on site, section and position.
 */
@Override
public boolean equals(Object other) {
if (other == this) {
return true;
}
// Idiomatic negated instanceof (was: (other instanceof Location__1) == false).
if (!(other instanceof Location__1)) {
return false;
}
Location__1 rhs = ((Location__1) other);
return ((((this.site == rhs.site)||((this.site!= null)&&this.site.equals(rhs.site)))&&((this.section == rhs.section)||((this.section!= null)&&this.section.equals(rhs.section))))&&((this.position == rhs.position)||((this.position!= null)&&this.position.equals(rhs.position))));
}
}
|
use anyhow::Result;
use structopt::{clap::AppSettings, StructOpt};
// Subcommands of the inventory CLI. The existing `///` doc comments double
// as the generated --help text, so they are intentionally left unchanged;
// reviewer notes below use `//` to avoid altering CLI output.
#[derive(StructOpt, Debug)]
pub enum Cmd {
    /// Add a new item to the inventory
    #[structopt(name = "add")]
    Add {
        // Item name; also the lookup key used by `remove`.
        #[structopt(short, long)]
        name: String,
        // Number of units to record for the item.
        #[structopt(short, long)]
        quantity: u32,
    },
    /// Remove an item from the inventory
    #[structopt(name = "remove")]
    Remove {
        // Name of the item to delete (first match by name).
        #[structopt(short, long)]
        name: String,
    },
    /// List all items in the inventory
    #[structopt(name = "list")]
    List,
    /// Display the current version
    #[structopt(name = "version")]
    Version,
}
// A single inventory record: a name and how many units are held.
struct InventoryItem {
    name: String,
    quantity: u32,
}
// In-memory collection of inventory items. No persistence is visible in
// this file; the data lives only for the duration of one process run.
struct Inventory {
    items: Vec<InventoryItem>,
}
impl Inventory {
    /// Create an empty inventory.
    fn new() -> Inventory {
        Inventory { items: Vec::new() }
    }

    /// Append a new record; duplicate names are not rejected.
    fn add_item(&mut self, name: String, quantity: u32) {
        let entry = InventoryItem { name, quantity };
        self.items.push(entry);
    }

    /// Remove the first item whose name matches, or return an error when no
    /// such item exists.
    fn remove_item(&mut self, name: &str) -> Result<()> {
        match self.items.iter().position(|entry| entry.name == name) {
            Some(idx) => {
                self.items.remove(idx);
                Ok(())
            }
            None => Err(anyhow::anyhow!("Item not found in inventory")),
        }
    }

    /// Print each item as "Name: <name>, Quantity: <quantity>" to stdout.
    fn list_items(&self) {
        self.items
            .iter()
            .for_each(|entry| println!("Name: {}, Quantity: {}", entry.name, entry.quantity));
    }
}
fn main() {
let mut inventory = Inventory::new();
match Cmd::from_args() {
Cmd::Add { name, quantity } => {
inventory.add_item(name, quantity);
}
Cmd::Remove { name } => {
if let Err(err) = inventory.remove_item(&name) {
eprintln!("Error: {}", err);
}
}
Cmd::List => {
inventory.list_items();
}
Cmd::Version => {
println!("Inventory CLI v1.0");
}
}
} |
# In the original repository we'll just print the result of status checks,
# without committing. This avoids generating several commits that would make
# later upstream merges messy for anyone who forked us.
commit=true
origin=$(git remote get-url origin)
if [[ $origin == *statsig-io/statuspage* ]]
then
  commit=false
fi

KEYSARRAY=()
URLSARRAY=()

urlsConfig="./urls.cfg"
echo "Reading $urlsConfig"
# Each line of urls.cfg is "key=url"; split on '=' into key and url.
while read -r line
do
  echo " $line"
  IFS='=' read -ra TOKENS <<< "$line"
  # Quoted so keys/urls containing glob characters or spaces survive intact
  # (the original unquoted appends were subject to word splitting/globbing).
  KEYSARRAY+=("${TOKENS[0]}")
  URLSARRAY+=("${TOKENS[1]}")
done < "$urlsConfig"

echo "***********************"
echo "Starting health checks with ${#KEYSARRAY[@]} configs:"

mkdir -p logs

for (( index=0; index < ${#KEYSARRAY[@]}; index++))
do
  key="${KEYSARRAY[index]}"
  url="${URLSARRAY[index]}"
  echo " $key=$url"

  # Up to 4 attempts, 5s apart; 200/202/301/307 all count as success.
  for i in 1 2 3 4;
  do
    response=$(curl --write-out '%{http_code}' --silent --output /dev/null "$url")
    if [ "$response" -eq 200 ] || [ "$response" -eq 202 ] || [ "$response" -eq 301 ] || [ "$response" -eq 307 ]; then
      result="success"
    else
      result="failed"
    fi
    if [ "$result" = "success" ]; then
      break
    fi
    sleep 5
  done
  dateTime=$(date +'%Y-%m-%d %H:%M')
  if [[ $commit == true ]]
  then
    # Quoted so the log line is written verbatim.
    echo "$dateTime, $result" >> "logs/${key}_report.log"
  else
    echo "    $dateTime, $result"
  fi
done

if [[ $commit == true ]]
then
  # Let's make Vijaye the most productive person on GitHub.
  git config --global user.name 'Ravana69'
  git config --global user.email 'ravana@hi2.in'
  git add -A --force logs/
  git commit -am '[Automated] Update Health Check Logs'
  git push
fi
|
def sum_recursive(nums):
    """Return the sum of all numbers in ``nums`` using recursion.

    Args:
        nums: A (possibly empty) sequence of numbers.

    Returns:
        The sum of the elements; 0 for an empty sequence.

    Note:
        Each step slices the tail, so this is O(n^2) overall and limited by
        the interpreter's recursion depth; kept recursive intentionally to
        match the exercise.
    """
    # Base case: an empty list sums to 0. This also subsumes the original's
    # redundant single-element base case (nums[0] + sum of [] == nums[0]).
    if not nums:
        return 0
    # Recursive case: head plus the sum of the tail.
    return nums[0] + sum_recursive(nums[1:])


if __name__ == '__main__':
    nums = [1, 2, 3, 4, 5]
    print(sum_recursive(nums))  # Output: 15
#!/bin/bash -e
# Bastion Bootstrapping
# authors: tonynv@amazon.com, sancard@amazon.com, ianhill@amazon.com
# NOTE: This requires GNU getopt. On Mac OS X and FreeBSD you must install GNU getopt and mod the checkos function so that it's supported

# Configuration
# NOTE(review): PROGRAM is never referenced later in this script.
PROGRAM='Linux Bastion'

##################################### Functions Definitions
# Ensure we are running on Linux; anything else (MacOS/FreeBSD) aborts.
function checkos () {
    platform='unknown'
    unamestr=$(uname)
    case "$unamestr" in
        Linux)
            platform='linux'
            ;;
        *)
            echo "[WARNING] This script is not supported on MacOS or freebsd"
            exit 1
            ;;
    esac
    echo "${FUNCNAME[0]} Ended"
}
# Print CLI usage and the supported long options (tab-separated help text).
function usage () {
    printf '%s <usage>\n' "$0"
    printf ' \n'
    printf 'options:\n'
    printf -- '--help \t Show options for this script\n'
    printf -- '--banner \t Enable or Disable Bastion Message\n'
    printf -- '--enable \t SSH Banner\n'
    printf -- '--tcp-forwarding \t Enable or Disable TCP Forwarding\n'
    printf -- '--x11-forwarding \t Enable or Disable X11 Forwarding\n'
}
# Report PASS/FAIL based on the exit status ($?) of the command executed
# immediately before this function is called; exits 1 on failure.
# NOTE(review): nothing may run between the checked command and this call or
# $? will reflect the wrong command. This function appears unused below.
function chkstatus () {
if [ $? -eq 0 ]
then
echo "Script [PASS]"
else
echo "Script [FAILED]" >&2
exit 1
fi
}
# Map the NAME= entry of /etc/os-release to a short token on stdout:
# "Ubuntu", "AMZN", "CentOS", or "Operating System Not Found".
# Callers capture stdout via $(osrelease), so the trailing status line is
# redirected to cfn-init.log to keep it out of the captured value.
function osrelease () {
OS=`cat /etc/os-release | grep '^NAME=' | tr -d \" | sed 's/\n//g' | sed 's/NAME=//g'`
if [ "$OS" == "Ubuntu" ]; then
echo "Ubuntu"
elif [ "$OS" == "Amazon Linux AMI" ]; then
echo "AMZN"
elif [ "$OS" == "CentOS Linux" ]; then
echo "CentOS"
else
echo "Operating System Not Found"
fi
echo "${FUNCNAME[0]} Ended" >> /var/log/cfn-init.log
}
# Force every SSH login through a custom shell wrapper that logs the session
# and rejects arbitrary commands, and prepare the bastion log directories.
function harden_ssh_security () {
# Allow ec2-user only to access this folder and its content
#chmod -R 770 /var/log/bastion
#setfacl -Rdm other:0 /var/log/bastion

# Make OpenSSH execute a custom script on logins
echo -e "\nForceCommand /usr/bin/bastion/shell" >> /etc/ssh/sshd_config
# LOGGING CONFIGURATION
mkdir -p /var/log/bastion
mkdir -p /usr/bin/bastion
touch /tmp/messages
chmod 770 /tmp/messages
# NOTE(review): lowercase bastion_mnt/bastion_log look unset at this point
# (the outer script defines BASTION_MNT/BASTION_LOG), so these two paths
# likely expand to "/" — confirm intent.
log_file_location="${bastion_mnt}/${bastion_log}"
log_shadow_file_location="${bastion_mnt}/.${bastion_log}"
# Single-quoted heredoc delimiter: the block below is written verbatim and
# only expands when the generated /usr/bin/bastion/shell runs at login time.
cat <<'EOF' >> /usr/bin/bastion/shell
bastion_mnt="/var/log/bastion"
bastion_log="bastion.log"
# Check that the SSH client did not supply a command. Only SSH to instance should be allowed.
export Allow_SSH="ssh"
export Allow_SCP="scp"
if [[ -z $SSH_ORIGINAL_COMMAND ]] || [[ $SSH_ORIGINAL_COMMAND =~ ^$Allow_SSH ]] || [[ $SSH_ORIGINAL_COMMAND =~ ^$Allow_SCP ]]; then
#Allow ssh to instance and log connection
if [ -z "$SSH_ORIGINAL_COMMAND" ]; then
/bin/bash
exit 0
else
$SSH_ORIGINAL_COMMAND
fi
log_file=`echo "$log_shadow_file_location"`
DATE_TIME_WHOAMI="`whoami`:`date "+%Y-%m-%d %H:%M:%S"`"
LOG_ORIGINAL_COMMAND=`echo "$DATE_TIME_WHOAMI:$SSH_ORIGINAL_COMMAND"`
echo "$LOG_ORIGINAL_COMMAND" >> "${bastion_mnt}/${bastion_log}"
log_dir="/var/log/bastion/"
else
# The "script" program could be circumvented with some commands
# (e.g. bash, nc). Therefore, I intentionally prevent users
# from supplying commands.
echo "This bastion supports interactive sessions only. Do not supply a command"
exit 1
fi
EOF

# Make the custom script executable
chmod a+x /usr/bin/bastion/shell
release=$(osrelease)
# SELinux label required on CentOS so sshd may execute the wrapper.
if [ "$release" == "CentOS" ]; then
semanage fcontext -a -t ssh_exec_t /usr/bin/bastion/shell
fi
echo "${FUNCNAME[0]} Ended"
}
# Amazon Linux setup: install login-logging hooks into /etc/bashrc and ship
# the shadow bastion log to CloudWatch via the awslogs yum package.
function amazon_os () {
echo "${FUNCNAME[0]} Started"
chown root:ec2-user /usr/bin/script
service sshd restart
# Preserve SSH_CLIENT across sudo so the login IP survives escalation.
echo -e "\nDefaults env_keep += \"SSH_CLIENT\"" >>/etc/sudoers
# Quoted heredoc: $(...) below expands at login time, not now.
cat <<'EOF' >> /etc/bashrc
#Added by linux bastion bootstrap
declare -rx IP=$(echo $SSH_CLIENT | awk '{print $1}')
EOF

# Unquoted echo: BASTION_MNT/BASTION_LOG expand now, baking the path in.
echo " declare -rx BASTION_LOG=${BASTION_MNT}/${BASTION_LOG}" >> /etc/bashrc

cat <<'EOF' >> /etc/bashrc
declare -rx PROMPT_COMMAND='history -a >(logger -t "ON: $(date) [FROM]:${IP} [USER]:${USER} [PWD]:${PWD}" -s 2>>${BASTION_LOG})'
EOF
chown root:ec2-user ${BASTION_MNT}
chown root:ec2-user ${BASTION_LOGFILE}
chown root:ec2-user ${BASTION_LOGFILE_SHADOW}
chmod 662 ${BASTION_LOGFILE}
chmod 662 ${BASTION_LOGFILE_SHADOW}
# Append-only: sessions can add history but cannot truncate the logs.
chattr +a ${BASTION_LOGFILE}
chattr +a ${BASTION_LOGFILE_SHADOW}
touch /tmp/messages
chown root:ec2-user /tmp/messages
#Install CloudWatch Log service on AMZN
yum update -y
yum install -y awslogs
# CLOUDWATCHGROUP is supplied through EC2 user-data.
export CWG=`curl http://169.254.169.254/latest/user-data/ | grep CLOUDWATCHGROUP | sed 's/CLOUDWATCHGROUP=//g'`
echo "file = $BASTION_LOGFILE_SHADOW" >> /tmp/groupname.txt
echo "log_group_name = $CWG" >> /tmp/groupname.txt

cat <<'EOF' >> ~/cloudwatchlog.conf
[/var/log/bastion]
datetime_format = %b %d %H:%M:%S
buffer_duration = 5000
log_stream_name = {instance_id}
initial_position = start_of_file
EOF

# Splice our stanza into awslogs.conf just above the [/var/log/messages] section.
LINE=$(cat -n /etc/awslogs/awslogs.conf | grep '\[\/var\/log\/messages\]' | awk {'print $1'})
END_LINE=$(echo $(($LINE-1)))
head -$END_LINE /etc/awslogs/awslogs.conf > /tmp/awslogs.conf
cat /tmp/awslogs.conf > /etc/awslogs/awslogs.conf
cat ~/cloudwatchlog.conf >> /etc/awslogs/awslogs.conf
cat /tmp/groupname.txt >> /etc/awslogs/awslogs.conf
export TMPREGION=`cat /etc/awslogs/awscli.conf | grep region`
# Region = availability zone with its trailing letter stripped.
export Region=`curl http://169.254.169.254/latest/meta-data/placement/availability-zone | rev | cut -c 2- | rev`
sed -i.back "s/$TMPREGION/region = $Region/g" /etc/awslogs/awscli.conf

#Restart awslogs service
service awslogs restart
chkconfig awslogs on

#Run security updates
cat <<'EOF' >> ~/mycron
0 0 * * * yum -y update --security
EOF
crontab ~/mycron
rm ~/mycron
echo "${FUNCNAME[0]} Ended"
}
# Ubuntu setup: login-logging hooks in /etc/bash.bashrc plus the standalone
# CloudWatch Logs agent installed via awslogs-agent-setup.py.
function ubuntu_os () {
chown syslog:adm /var/log/bastion
chown root:ubuntu /usr/bin/script
# Quoted heredoc: $(...) expands at login time, not now.
cat <<'EOF' >> /etc/bash.bashrc
#Added by linux bastion bootstrap
declare -rx IP=$(who am i --ips|awk '{print $5}')
EOF

# Unquoted echo: BASTION_MNT/BASTION_LOG expand now, baking the path in.
echo " declare -rx BASTION_LOG=${BASTION_MNT}/${BASTION_LOG}" >> /etc/bash.bashrc

cat <<'EOF' >> /etc/bash.bashrc
declare -rx PROMPT_COMMAND='history -a >(logger -t "ON: $(date) [FROM]:${IP} [USER]:${USER} [PWD]:${PWD}" -s 2>>${BASTION_LOG})'
EOF
chown root:ubuntu ${BASTION_MNT}
chown root:ubuntu ${BASTION_LOGFILE}
chown root:ubuntu ${BASTION_LOGFILE_SHADOW}
chmod 662 ${BASTION_LOGFILE}
chmod 662 ${BASTION_LOGFILE_SHADOW}
# Append-only: sessions can add history but cannot truncate the logs.
chattr +a ${BASTION_LOGFILE}
chattr +a ${BASTION_LOGFILE_SHADOW}
touch /tmp/messages
chown root:ubuntu /tmp/messages
#Install CloudWatch logs on Ubuntu
# CLOUDWATCHGROUP is supplied through EC2 user-data.
export CWG=`curl http://169.254.169.254/latest/user-data/ | grep CLOUDWATCHGROUP | sed 's/CLOUDWATCHGROUP=//g'`
echo "file = $BASTION_LOGFILE_SHADOW" >> /tmp/groupname.txt
echo "log_group_name = $CWG" >> /tmp/groupname.txt

cat <<'EOF' >> ~/cloudwatchlog.conf
[general]
state_file = /var/awslogs/state/agent-state
[/var/log/bastion]
log_stream_name = {instance_id}
datetime_format = %b %d %H:%M:%S
EOF

# Region = availability zone with its trailing letter stripped.
export Region=`curl http://169.254.169.254/latest/meta-data/placement/availability-zone | rev | cut -c 2- | rev`
cat /tmp/groupname.txt >> ~/cloudwatchlog.conf

curl https://s3.amazonaws.com/aws-cloudwatch/downloads/latest/awslogs-agent-setup.py -O
export DEBIAN_FRONTEND=noninteractive
apt-get install -y python
chmod +x ./awslogs-agent-setup.py
./awslogs-agent-setup.py -n -r $Region -c ~/cloudwatchlog.conf
#Install Unit file for Ubuntu 16.04
ubuntu=`cat /etc/os-release | grep VERSION_ID | tr -d \VERSION_ID=\"`
if [ "$ubuntu" == "16.04" ]; then
cat <<'EOF' >> /etc/systemd/system/awslogs.service
[Unit]
Description=The CloudWatch Logs agent
After=rc-local.service

[Service]
Type=simple
Restart=always
KillMode=process
TimeoutSec=infinity
PIDFile=/var/awslogs/state/awslogs.pid
ExecStart=/var/awslogs/bin/awslogs-agent-launcher.sh --start --background --pidfile $PIDFILE --user awslogs --chuid awslogs &

[Install]
WantedBy=multi-user.target
EOF
fi

#Restart awslogs service
service awslogs restart
export DEBIAN_FRONTEND=noninteractive
apt-get install sysv-rc-conf -y
sysv-rc-conf awslogs on

#Restart SSH
service ssh stop
service ssh start

#Run security updates
apt-get install unattended-upgrades
cat <<'EOF' >> ~/mycron
0 0 * * * unattended-upgrades -d
EOF
crontab ~/mycron
rm ~/mycron
echo "${FUNCNAME[0]} Ended"
}
# CentOS setup: login-logging hooks in /etc/bashrc plus CloudWatch Logs
# shipping — standalone agent on CentOS 7, the awslogs yum package otherwise.
function cent_os () {
# Preserve SSH_CLIENT across sudo so the login IP survives escalation.
echo -e "\nDefaults env_keep += \"SSH_CLIENT\"" >>/etc/sudoers
# Quoted heredoc: $(...) expands at login time, not now.
cat <<'EOF' >> /etc/bashrc
#Added by linux bastion bootstrap
declare -rx IP=$(echo $SSH_CLIENT | awk '{print $1}')
EOF

# Unquoted echo: BASTION_MNT/BASTION_LOG expand now, baking the path in.
echo "declare -rx BASTION_LOG=${BASTION_MNT}/${BASTION_LOG}" >> /etc/bashrc

cat <<'EOF' >> /etc/bashrc
declare -rx PROMPT_COMMAND='history -a >(logger -t "ON: $(date) [FROM]:${IP} [USER]:${USER} [PWD]:${PWD}" -s 2>>${BASTION_LOG})'
EOF
chown root:centos ${BASTION_MNT}
chown root:centos /usr/bin/script
chown root:centos /var/log/bastion/bastion.log
chmod 770 /var/log/bastion/bastion.log
touch /tmp/messages
chown root:centos /tmp/messages
# Restore SELinux context on sshd_config after our edits.
restorecon -v /etc/ssh/sshd_config
/bin/systemctl restart sshd.service

# Install CloudWatch Log service on Centos Linux
# CLOUDWATCHGROUP is supplied through EC2 user-data.
export CWG=`curl http://169.254.169.254/latest/user-data/ | grep CLOUDWATCHGROUP | sed 's/CLOUDWATCHGROUP=//g'`
centos=`cat /etc/os-release | grep VERSION_ID | tr -d \VERSION_ID=\"`
if [ "$centos" == "7" ]; then
echo "file = $BASTION_LOGFILE_SHADOW" >> /tmp/groupname.txt
echo "log_group_name = $CWG" >> /tmp/groupname.txt

cat <<'EOF' >> ~/cloudwatchlog.conf
[general]
state_file = /var/awslogs/state/agent-state
use_gzip_http_content_encoding = true
logging_config_file = /var/awslogs/etc/awslogs.conf
[/var/log/bastion]
datetime_format = %Y-%m-%d %H:%M:%S
file = /var/log/messages
buffer_duration = 5000
log_stream_name = {instance_id}
initial_position = start_of_file
EOF

# Region = availability zone with its trailing letter stripped.
export Region=`curl http://169.254.169.254/latest/meta-data/placement/availability-zone | rev | cut -c 2- | rev`
cat /tmp/groupname.txt >> ~/cloudwatchlog.conf

curl https://s3.amazonaws.com/aws-cloudwatch/downloads/latest/awslogs-agent-setup.py -O
chmod +x ./awslogs-agent-setup.py
./awslogs-agent-setup.py -n -r $Region -c ~/cloudwatchlog.conf

cat <<'EOF' >> /etc/systemd/system/awslogs.service
[Unit]
Description=The CloudWatch Logs agent
After=rc-local.service

[Service]
Type=simple
Restart=always
KillMode=process
TimeoutSec=infinity
PIDFile=/var/awslogs/state/awslogs.pid
ExecStart=/var/awslogs/bin/awslogs-agent-launcher.sh --start --background --pidfile $PIDFILE --user awslogs --chuid awslogs &

[Install]
WantedBy=multi-user.target
EOF

service awslogs restart
chkconfig awslogs on
else
# Pre-CentOS 7 path: use the packaged awslogs daemon instead.
chown root:centos /var/log/bastion
yum update -y
yum install -y awslogs
export Region=`curl http://169.254.169.254/latest/meta-data/placement/availability-zone | rev | cut -c 2- | rev`
export TMPREGION=`cat /etc/awslogs/awscli.conf | grep region`
sed -i.back "s/$TMPREGION/region = $Region/g" /etc/awslogs/awscli.conf
export CWG=`curl http://169.254.169.254/latest/user-data/ | grep CLOUDWATCHGROUP | sed 's/CLOUDWATCHGROUP=//g'`
echo "file = $BASTION_LOGFILE_SHADOW" >> /tmp/groupname.txt
echo "log_group_name = $CWG" >> /tmp/groupname.txt

cat <<'EOF' >> ~/cloudwatchlog.conf
[/var/log/bastion]
datetime_format = %b %d %H:%M:%S
buffer_duration = 5000
log_stream_name = {instance_id}
initial_position = start_of_file
EOF

# Escape slashes in the existing group name so it is safe inside sed s///.
export TMPGROUP=`cat /etc/awslogs/awslogs.conf | grep ^log_group_name`
export TMPGROUP=`echo $TMPGROUP | sed 's/\//\\\\\//g'`
sed -i.back "s/$TMPGROUP/log_group_name = $CWG/g" /etc/awslogs/awslogs.conf
cat ~/cloudwatchlog.conf >> /etc/awslogs/awslogs.conf
cat /tmp/groupname.txt >> /etc/awslogs/awslogs.conf
yum install ec2-metadata -y
export TMPREGION=`cat /etc/awslogs/awscli.conf | grep region`
export Region=`curl http://169.254.169.254/latest/meta-data/placement/availability-zone | rev | cut -c 2- | rev`
sed -i.back "s/$TMPREGION/region = $Region/g" /etc/awslogs/awscli.conf
sleep 3
service awslogs stop
sleep 3
service awslogs start
chkconfig awslogs on
fi
#Run security updates
cat <<'EOF' >> ~/mycron
0 0 * * * yum -y update --security
EOF
crontab ~/mycron
rm ~/mycron
echo "${FUNCNAME[0]} Ended"
}
# Associate one of the stack's Elastic IPs with this instance. Skips work if
# an EIP is already attached; otherwise filters describe-addresses output to
# the EIP_LIST provided via user-data, attaches the first unassociated entry,
# and retries (recursively) until the association is visible.
function request_eip() {
release=$(osrelease)
# Region = availability zone with its trailing letter stripped.
export Region=`curl http://169.254.169.254/latest/meta-data/placement/availability-zone | rev | cut -c 2- | rev`

#Check if EIP already assigned.
ALLOC=1
ZERO=0
INSTANCE_IP=`ifconfig -a | grep inet | awk {'print $2'} | sed 's/addr://g' | head -1`
ASSIGNED=$(aws ec2 describe-addresses --region $Region --output text | grep $INSTANCE_IP | wc -l)

if [ "$ASSIGNED" -gt "$ZERO" ]; then
echo "Already assigned an EIP."
else
aws ec2 describe-addresses --region $Region --output text > /query.txt

#Ensure we are only using EIPs from our Stack
line=`curl http://169.254.169.254/latest/user-data/ | grep EIP_LIST`
# Split the comma-separated EIP_LIST into the DIRS array.
IFS=$':' DIRS=(${line//$','/:}) # Replace tabs with colons.

# Keep only describe-addresses rows that mention one of our stack's EIPs.
for (( i=0 ; i<${#DIRS[@]} ; i++ )); do
EIP=`echo ${DIRS[i]} | sed 's/\"//g' | sed 's/EIP_LIST=//g'`
if [ $EIP != "Null" ]; then
#echo "$i: $EIP"
grep "$EIP" /query.txt >> /query2.txt;
fi
done

mv /query2.txt /query.txt

AVAILABLE_EIPs=`cat /query.txt | wc -l`

if [ "$AVAILABLE_EIPs" -gt "$ZERO" ]; then
FIELD_COUNT="5"
INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)
echo "Running associate_eip_now"
while read name;
do
# Rows containing "eni" are already associated with an instance.
#EIP_ENTRY=$(echo $name | grep eip | wc -l)
EIP_ENTRY=$(echo $name | grep eni | wc -l)
echo "EIP: $EIP_ENTRY"
if [ "$EIP_ENTRY" -eq 1 ]; then
echo "Already associated with an instance"
echo ""
else
export EIP=`echo "$name" | sed 's/[\s]+/,/g' | awk {'print $4'}`
EIPALLOC=`echo $name | awk {'print $2'}`
echo "NAME: $name"
echo "EIP: $EIP"
echo "EIPALLOC: $EIPALLOC"
aws ec2 associate-address --instance-id $INSTANCE_ID --allocation-id $EIPALLOC --region $Region
fi
done < /query.txt
else
echo "[ERROR] No Elastic IPs available in this region"
exit 1
fi

# Verify the association took effect; on failure keep retrying recursively.
INSTANCE_IP=`ifconfig -a | grep inet | awk {'print $2'} | sed 's/addr://g' | head -1`
ASSIGNED=$(aws ec2 describe-addresses --region $Region --output text | grep $INSTANCE_IP | wc -l)
if [ "$ASSIGNED" -eq 1 ]; then
echo "EIP successfully assigned."
else
#Retry
while [ "$ASSIGNED" -eq "$ZERO" ]
do
sleep 3
request_eip
INSTANCE_IP=`ifconfig -a | grep inet | awk {'print $2'} | sed 's/addr://g' | head -1`
ASSIGNED=$(aws ec2 describe-addresses --region $Region --output text | grep $INSTANCE_IP | wc -l)
done
fi
fi
echo "${FUNCNAME[0]} Ended"
}
# Associate an Elastic IP unless this instance already holds one; sleeps a
# random 1-30 seconds first so simultaneously-booting bastions don't race
# for the same address.
function call_request_eip() {
    Region=$(curl http://169.254.169.254/latest/meta-data/placement/availability-zone | rev | cut -c 2- | rev)
    ZERO=0
    INSTANCE_IP=$(ifconfig -a | grep inet | awk {'print $2'} | sed 's/addr://g' | head -1)
    ASSIGNED=$(aws ec2 describe-addresses --region $Region --output text | grep $INSTANCE_IP | wc -l)

    if [ "$ASSIGNED" -gt "$ZERO" ]; then
        echo "Already assigned an EIP."
    else
        # Random jitter before requesting, to reduce contention.
        WAIT=$(shuf -i 1-30 -n 1)
        sleep "$WAIT"
        request_eip
    fi
    echo "${FUNCNAME[0]} Ended"
}
# Prevent bastion host users from viewing processes owned by other users,
# both immediately (remount) and across reboots (fstab entry).
function prevent_process_snooping() {
    mount -o remount,rw,hidepid=2 /proc
    # Drop any existing /proc line from fstab, then add one with hidepid=2.
    grep -v 'proc' /etc/fstab > temp && mv temp /etc/fstab
    echo "proc /proc proc defaults,hidepid=2 0 0" >> /etc/fstab
    echo "${FUNCNAME[0]} Ended"
}
##################################### End Function Definitions

# Call checkos to ensure platform is Linux
checkos

## set an initial value
SSH_BANNER="LINUX BASTION"

# Read the options from cli input
TEMP=`getopt -o h:  --long help,banner:,enable:,tcp-forwarding:,x11-forwarding: -n $0 -- "$@"`
eval set -- "$TEMP"

if [ $# == 1 ] ; then echo "No input provided! type ($0 --help) to see usage help" >&2 ; exit 1 ; fi

# extract options and their arguments into variables.
while true; do
case "$1" in
-h | --help)
usage
exit 1
;;
--banner)
BANNER_PATH="$2";
shift 2
;;
--enable)
ENABLE="$2";
shift 2
;;
--tcp-forwarding)
TCP_FORWARDING="$2";
shift 2
;;
--x11-forwarding)
X11_FORWARDING="$2";
shift 2
;;
--)
break
;;
*)
break
;;
esac
done

# BANNER CONFIGURATION
BANNER_FILE="/etc/ssh_banner"
if [[ $ENABLE == "true" ]];then
if [ -z ${BANNER_PATH} ];then
echo "BANNER_PATH is null skipping ..."
else
echo "BANNER_PATH = ${BANNER_PATH}"
echo "Creating Banner in ${BANNER_FILE}"
echo "curl -s ${BANNER_PATH} > ${BANNER_FILE}"
curl -s ${BANNER_PATH} > ${BANNER_FILE}
# NOTE(review): [ $BANNER_FILE ] tests a non-empty string constant and is
# therefore always true; presumably [ -s ${BANNER_FILE} ] was intended to
# check the download succeeded — confirm.
if [ $BANNER_FILE ] ;then
echo "[INFO] Installing banner ... "
echo -e "\n Banner ${BANNER_FILE}" >>/etc/ssh/sshd_config
else
echo "[INFO] banner file is not accessible skipping ..."
exit 1;
fi
fi
else
echo "Banner message is not enabled!"
fi

# LOGGING CONFIGURATION
# Exported read-only paths consumed by the per-OS setup functions above.
declare -rx BASTION_MNT="/var/log/bastion"
declare -rx BASTION_LOG="bastion.log"
echo "Setting up bastion session log in ${BASTION_MNT}/${BASTION_LOG}"
mkdir -p ${BASTION_MNT}
declare -rx BASTION_LOGFILE="${BASTION_MNT}/${BASTION_LOG}"
declare -rx BASTION_LOGFILE_SHADOW="${BASTION_MNT}/.${BASTION_LOG}"
touch ${BASTION_LOGFILE}
# Hard link: the hidden shadow file shares the same inode as the main log.
ln ${BASTION_LOGFILE} ${BASTION_LOGFILE_SHADOW}

#Enable/Disable TCP forwarding
TCP_FORWARDING=`echo "$TCP_FORWARDING" | sed 's/\\n//g'`

#Enable/Disable X11 forwarding
X11_FORWARDING=`echo "$X11_FORWARDING" | sed 's/\\n//g'`

echo "Value of TCP_FORWARDING - $TCP_FORWARDING"
echo "Value of X11_FORWARDING - $X11_FORWARDING"
# Disabling TCP forwarding also triggers the full SSH hardening path.
if [[ $TCP_FORWARDING == "false" ]];then
awk '!/AllowTcpForwarding/' /etc/ssh/sshd_config > temp && mv temp /etc/ssh/sshd_config
echo "AllowTcpForwarding no" >> /etc/ssh/sshd_config
harden_ssh_security
fi

if [[ $X11_FORWARDING == "false" ]];then
awk '!/X11Forwarding/' /etc/ssh/sshd_config > temp && mv temp /etc/ssh/sshd_config
echo "X11Forwarding no" >> /etc/ssh/sshd_config
fi

# Dispatch to the distro-specific setup function.
release=$(osrelease)
# Ubuntu Linux
if [ "$release" == "Ubuntu" ]; then
#Call function for Ubuntu
ubuntu_os
# AMZN Linux
elif [ "$release" == "AMZN" ]; then
#Call function for AMZN
amazon_os
# CentOS Linux
elif [ "$release" == "CentOS" ]; then
#Call function for CentOS
cent_os
else
echo "[ERROR] Unsupported Linux Bastion OS"
exit 1
fi

prevent_process_snooping
call_request_eip

echo "Bootstrap complete."
|
/**
Copyright [2013] [Mushroom]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/**
Notice : this source is extracted from Hadoop metric2 package
and some source code may changed by zavakid
*/
package com.zavakid.mushroom.impl;
import java.io.StringWriter;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.net.InetAddress;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Timer;
import java.util.TimerTask;
import javax.management.ObjectName;
import org.apache.commons.configuration.PropertiesConfiguration;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.math.util.MathUtils;
import com.zavakid.mushroom.MetricsBuilder;
import com.zavakid.mushroom.MetricsException;
import com.zavakid.mushroom.MetricsFilter;
import com.zavakid.mushroom.MetricsRecordBuilder;
import com.zavakid.mushroom.MetricsSink;
import com.zavakid.mushroom.MetricsSource;
import com.zavakid.mushroom.MetricsSystem;
import com.zavakid.mushroom.MetricsTag;
import com.zavakid.mushroom.lib.MetricMutableCounterLong;
import com.zavakid.mushroom.lib.MetricMutableStat;
import com.zavakid.mushroom.util.Contracts;
import com.zavakid.mushroom.util.MBeans;
/**
* A base class for metrics system singletons
*
* @author Hadoop metric2 package's authors
* @since 0.1
*/
public class MetricsSystemImpl implements MetricsSystem {
private static final Log LOG = LogFactory.getLog(MetricsSystemImpl.class);
// JMX/self-metrics naming constants.
static final String MS_CONTEXT = "metricssystem";
static final String NUM_SOURCES_KEY = "num_sources";
static final String NUM_SOURCES_DESC = "Number of metrics sources";
static final String NUM_SINKS_KEY = "num_sinks";
static final String NUM_SINKS_DESC = "Number of metrics sinks";
static final String MS_NAME = "MetricsSystem";
static final String MS_STATS_NAME = MS_NAME + ",sub=Stats";
static final String MS_STATS_DESC = "Metrics system metrics";
static final String MS_CONTROL_NAME = MS_NAME + ",sub=Control";
// Registered adapters keyed by name; LinkedHashMap preserves registration
// order (see constructor).
private final Map<String, MetricsSourceAdapter> sources;
private final Map<String, MetricsSinkAdapter> sinks;
private final List<Callback> callbacks;
private final MetricsBuilderImpl metricsBuilder;
// Self-instrumentation: snapshot/publish latency stats and a counter for
// updates dropped by all sinks.
private final MetricMutableStat snapshotStat = new MetricMutableStat("snapshot",
"snapshot stats", "ops",
"time", true);
private final MetricMutableStat publishStat = new MetricMutableStat("publish",
"publishing stats",
"ops", "time", true);
private final MetricMutableCounterLong dropStat = new MetricMutableCounterLong(
"dropped_pub_all",
"number of dropped updates by all sinks",
0L);
private final List<MetricsTag> injectedTags;
// Things that are changed by init()/start()/stop()
private String prefix;
private MetricsFilter sourceFilter;
private MetricsConfig config;
private Map<String, MetricsConfig> sourceConfigs, sinkConfigs;
private boolean monitoring = false;
private Timer timer;
private int period; // seconds
private long logicalTime; // number of timer invocations * period
private ObjectName mbeanName;
private boolean publishSelfMetrics = true;
private MetricsSourceAdapter sysSource;
/**
 * Construct the metrics system.
 *
 * @param prefix for the system; may be null, in which case {@link #init}
 *               must be called later before the system is usable
 */
public MetricsSystemImpl(String prefix){
this.prefix = prefix;
// LinkedHashMap keeps sources/sinks in registration order.
sources = new LinkedHashMap<String, MetricsSourceAdapter>();
sinks = new LinkedHashMap<String, MetricsSinkAdapter>();
sourceConfigs = new HashMap<String, MetricsConfig>();
sinkConfigs = new HashMap<String, MetricsConfig>();
callbacks = new ArrayList<Callback>();
injectedTags = new ArrayList<MetricsTag>();
metricsBuilder = new MetricsBuilderImpl();
if (prefix != null) {
// prefix could be null for default ctor, which requires init later
initSystemMBean();
}
}
/**
 * Construct the system without initializing it (no config is read); callers
 * must invoke {@link #init(String)} later.
 */
public MetricsSystemImpl(){
this(null);
}
/**
 * Initialize the metrics system with a prefix and attempt to start it.
 * A failed start (e.g. missing config) is logged, not propagated, and the
 * system MBean is registered either way.
 *
 * @param prefix the system will look for configs with the prefix
 */
public synchronized void init(String prefix) {
if (monitoring) {
LOG.warn(this.prefix + " metrics system already initialized!");
return;
}
this.prefix = Contracts.checkNotNull(prefix, "prefix");
try {
start();
} catch (MetricsConfigException e) {
// Usually because hadoop-metrics2.properties is missing
// We can always start the metrics system later via JMX.
LOG.warn("Metrics system not started: " + e.getMessage());
LOG.debug("Stacktrace: ", e);
}
initSystemMBean();
}
@Override
public synchronized void start() {
    Contracts.checkNotNull(prefix, "prefix");
    // Redundant starts are logged (with a stack trace to locate the caller)
    // and otherwise ignored.
    if (monitoring) {
        LOG.warn(prefix + " metrics system already started!", new MetricsException("Illegal start"));
        return;
    }
    // Notify listeners, then load config and begin the periodic timer.
    for (Callback callback : callbacks) {
        callback.preStart();
    }
    configure(prefix);
    startTimer();
    monitoring = true;
    LOG.info(prefix + " metrics system started");
    for (Callback callback : callbacks) {
        callback.postStart();
    }
}
@Override
public synchronized void stop() {
    // Redundant stops are logged (with a stack trace to locate the caller)
    // and otherwise ignored.
    if (!monitoring) {
        LOG.warn(prefix + " metrics system not yet started!", new MetricsException("Illegal stop"));
        return;
    }
    for (Callback callback : callbacks) {
        callback.preStop();
    }
    LOG.info("Stopping " + prefix + " metrics system...");
    // Tear down in order: timer, sources, sinks, then configuration.
    stopTimer();
    stopSources();
    stopSinks();
    clearConfigs();
    monitoring = false;
    LOG.info(prefix + " metrics system stopped.");
    for (Callback callback : callbacks) {
        callback.postStop();
    }
}
// Register a metrics source: immediately if the system is currently
// monitoring, and again after every restart via a postStart callback so new
// configuration is picked up. Returns the source for call chaining.
@Override
public synchronized <T extends MetricsSource> T register(final String name, final String desc, final T source) {
if (monitoring) {
registerSource(name, desc, source);
}
// We want to re-register the source to pick up new config when the
// metrics system restarts.
register(new AbstractCallback() {
@Override
public void postStart() {
registerSource(name, desc, source);
}
});
return source;
}
// Register the source only if no source with this name exists yet.
// registerSource returns the same instance on success, or the previously
// registered source when the name is taken.
@Override
public synchronized <T extends MetricsSource> T registerIfAbsent(final String name, final String desc,
final T source) {
MetricsSource oldSource = null;
if (monitoring) {
oldSource = registerSource(name, desc, source);
}
// the source is the oldSource, means that the source register success
if (source == oldSource) {
register(new AbstractCallback() {
@Override
public void postStart() {
registerSource(name, desc, source);
}
});
}
// NOTE(review): when the system is not monitoring, oldSource stays null and
// null is returned without arranging re-registration; the unchecked (T)
// cast also assumes any existing source has the caller's type — confirm
// both are intended.
return (T) oldSource;
}
// Wrap the source in a MetricsSourceAdapter and start it. If the name is
// already registered, log a warning and return the existing source instead.
synchronized MetricsSource registerSource(String name, String desc, MetricsSource source) {
Contracts.checkNotNull(config, "config");
MetricsSourceAdapter sa = sources.get(name);
if (sa != null) {
LOG.warn("Source name " + name + " already exists!");
return sa.source();
}
// Prefer a per-source config; otherwise fall back to the generic
// "source" subset of the system config.
MetricsConfig conf = sourceConfigs.get(name);
sa = conf != null ? new MetricsSourceAdapter(prefix, name, desc, source, injectedTags, period, conf) : new MetricsSourceAdapter(
prefix,
name,
desc,
source,
injectedTags,
period,
config.subset(MetricsConfig.SOURCE_KEY));
sources.put(name, sa);
sa.start();
LOG.debug("Registered source " + name);
return sa.source();
}
// Register a metrics sink, and re-register it after each system restart via
// a postStart callback. Returns the sink for call chaining.
// NOTE(review): this guards on config != null while the source overload
// guards on monitoring — confirm the asymmetry is intentional.
@Override
public synchronized <T extends MetricsSink> T register(final String name, final String description, final T sink) {
if (config != null) {
registerSink(name, description, sink);
}
// We want to re-register the sink to pick up new config
// when the metrics system restarts.
register(new AbstractCallback() {
@Override
public void postStart() {
registerSink(name, description, sink);
}
});
return sink;
}
/**
 * Registers the sink only if no sink with that name is already present.
 * A restart callback is installed only when this call won the registration.
 *
 * NOTE(review): returns null when the system is not monitoring — confirm
 * callers handle that.
 */
@Override
public synchronized <T extends MetricsSink> T registerIfAbsent(final String name, final String desc, final T sink) {
    MetricsSink oldSink = null;
    if (monitoring) {
        oldSink = registerSink(name, desc, sink);
    }
    // the sink is the oldSink, means that the sink register success
    if (sink == oldSink) {
        register(new AbstractCallback() {
            @Override
            public void postStart() {
                registerSink(name, desc, sink);
            }
        });
    }
    return (T) oldSink;
}
/**
 * Creates and starts an adapter for the sink. If a sink with the same name
 * is already registered, the existing sink is kept and returned.
 *
 * @return the sink now registered under the name (may be a pre-existing one)
 */
synchronized MetricsSink registerSink(String name, String desc, MetricsSink sink) {
    Contracts.checkNotNull(config, "config");
    MetricsSinkAdapter sa = sinks.get(name);
    if (sa != null) {
        LOG.warn("Sink name " + name + " already exists!");
        return sa.sink();
    }
    // Prefer a per-sink config if present; otherwise use the generic "sink"
    // subset of the system config.
    MetricsConfig conf = sinkConfigs.get(name);
    sa = conf != null ? newSink(name, desc, sink, conf) : newSink(name, desc, sink,
            config.subset(MetricsConfig.SINK_KEY));
    sinks.put(name, sa);
    sa.start();
    LOG.debug("Registered sink " + name);
    return sa.sink();
}
/**
 * Registers a lifecycle callback. The callback is wrapped in a dynamic proxy
 * that catches and logs any exception thrown from a callback method, so a
 * faulty callback cannot break the metrics system's start/stop sequence.
 */
@Override
public synchronized void register(final Callback callback) {
    callbacks.add((Callback) Proxy.newProxyInstance(callback.getClass().getClassLoader(),
            new Class<?>[] { Callback.class }, new InvocationHandler() {
                public Object invoke(Object proxy, Method method,
                        Object[] args) throws Throwable {
                    try {
                        return method.invoke(callback, args);
                    } catch (Exception e) {
                        // Swallow deliberately: callback failures are logged, not propagated.
                        LOG.warn("Caught exception in callback "
                                + method.getName(), e);
                    }
                    return null;
                }
            }));
}
/**
 * Asks every registered source adapter to re-publish its MBean.
 */
@Override
public synchronized void refreshMBeans() {
    for (Entry<String, MetricsSourceAdapter> entry : sources.entrySet()) {
        entry.getValue().refreshMBean();
    }
}
/**
 * Renders the current metrics configuration as a properties-format string.
 *
 * @throws MetricsConfigException if the config cannot be serialized
 */
@Override
public synchronized String currentConfig() {
    PropertiesConfiguration saver = new PropertiesConfiguration();
    StringWriter writer = new StringWriter();
    saver.copy(config);
    try {
        saver.save(writer);
    } catch (Exception e) {
        throw new MetricsConfigException("Error stringify config", e);
    }
    return writer.toString();
}
/**
 * Starts the daemon timer that drives periodic snapshot/publish cycles.
 * A no-op (with a warning) if the timer is already running.
 */
private synchronized void startTimer() {
    if (timer != null) {
        LOG.warn(prefix + " metrics system timer already started!");
        return;
    }
    logicalTime = 0;
    // Fix: multiply with a long literal so the seconds-to-millis conversion
    // cannot overflow int arithmetic before being widened.
    long millis = period * 1000L;
    timer = new Timer("Timer for '" + prefix + "' metrics system", true);
    timer.scheduleAtFixedRate(new TimerTask() {
        @Override
        public void run() {
            try {
                onTimerEvent();
            } catch (Exception e) {
                // Keep the timer alive: a single failed snapshot is only logged.
                LOG.warn(e);
            }
        }
    }, millis, millis);
    LOG.info("Scheduled snapshot period at " + period + " second(s).");
}
/**
 * Timer tick: advances the logical clock and, when any sink is registered,
 * snapshots all sources and publishes the result.
 */
synchronized void onTimerEvent() {
    logicalTime += period;
    if (!sinks.isEmpty()) {
        publishMetrics(snapshotMetrics());
    }
}
/**
 * snapshot all the sources for a snapshot of metrics/tags
 *
 * @return the metrics buffer containing the snapshot
 */
synchronized MetricsBuffer snapshotMetrics() {
    metricsBuilder.clear();
    MetricsBufferBuilder bufferBuilder = new MetricsBufferBuilder();
    for (Entry<String, MetricsSourceAdapter> entry : sources.entrySet()) {
        // Honor the optional source-name filter loaded from config.
        if (sourceFilter == null || sourceFilter.accepts(entry.getKey())) {
            snapshotMetrics(entry.getValue(), bufferBuilder);
        }
    }
    // Optionally include the metrics system's own stats source.
    if (publishSelfMetrics) {
        snapshotMetrics(sysSource, bufferBuilder);
    }
    MetricsBuffer buffer = bufferBuilder.get();
    return buffer;
}
// Snapshots a single source into the buffer and records how long it took
// in snapshotStat. The shared metricsBuilder is reused across sources, so
// it is cleared after each one.
private void snapshotMetrics(MetricsSourceAdapter sa, MetricsBufferBuilder bufferBuilder) {
    long startTime = System.currentTimeMillis();
    bufferBuilder.add(sa.name(), sa.getMetrics(metricsBuilder, false));
    metricsBuilder.clear();
    snapshotStat.add(System.currentTimeMillis() - startTime);
    LOG.debug("Snapshotted source " + sa.name());
}
/**
 * Publish a metrics snapshot to all the sinks
 *
 * @param buffer the metrics snapshot to publish
 */
synchronized void publishMetrics(MetricsBuffer buffer) {
    int dropped = 0;
    for (MetricsSinkAdapter adapter : sinks.values()) {
        final long begin = System.currentTimeMillis();
        // putMetrics returns false when the sink could not accept the buffer.
        if (!adapter.putMetrics(buffer, logicalTime)) {
            ++dropped;
        }
        publishStat.add(System.currentTimeMillis() - begin);
    }
    dropStat.incr(dropped);
}
/**
 * Cancels the snapshot timer. Idempotent: a second stop only logs a warning.
 */
private synchronized void stopTimer() {
    if (timer != null) {
        timer.cancel();
        timer = null;
        return;
    }
    LOG.warn(prefix + " metrics system timer already stopped!");
}
// Stops every registered source adapter (including the built-in system-stats
// source) and clears the registry.
private synchronized void stopSources() {
    for (Entry<String, MetricsSourceAdapter> entry : sources.entrySet()) {
        MetricsSourceAdapter sa = entry.getValue();
        LOG.info("Stopping metrics source " + entry.getKey() + "(" + sa.source().getClass().getName() + ")");
        sa.stop();
    }
    sysSource.stop();
    sources.clear();
}
// Stops every registered sink adapter and clears the registry.
private synchronized void stopSinks() {
    for (Entry<String, MetricsSinkAdapter> entry : sinks.entrySet()) {
        MetricsSinkAdapter sa = entry.getValue();
        LOG.info("Stopping metrics sink " + entry.getKey() + "(" + sa.sink().getClass().getName() + ")");
        sa.stop();
    }
    sinks.clear();
}
// Loads the config for the given prefix and configures sinks, sources and
// system tags from it. Sinks come first because configureSinks() derives the
// snapshot period, which configureSources() (via registerSystemSource) uses.
private synchronized void configure(String prefix) {
    config = MetricsConfig.create(prefix);
    configureSinks();
    configureSources();
    configureSystem();
}
// Adds the tags injected into every metrics record; currently just hostName.
private synchronized void configureSystem() {
    injectedTags.add(new MetricsTag("hostName", "Local hostname", getHostname()));
}
/**
 * Creates sink adapters for every configured sink instance and derives the
 * snapshot period: the gcd of all per-sink periods so one timer can serve
 * every sink, falling back to the system-wide period setting when no sink
 * declares one.
 */
private synchronized void configureSinks() {
    sinkConfigs = config.getInstanceConfigs(MetricsConfig.SINK_KEY);
    int confPeriod = 0;
    for (Entry<String, MetricsConfig> entry : sinkConfigs.entrySet()) {
        MetricsConfig conf = entry.getValue();
        int sinkPeriod = conf.getInt(MetricsConfig.PERIOD_KEY, MetricsConfig.PERIOD_DEFAULT);
        // Fold each sink's period into a running gcd.
        confPeriod = confPeriod == 0 ? sinkPeriod : MathUtils.gcd(confPeriod, sinkPeriod);
        String sinkName = entry.getKey();
        LOG.debug("sink " + sinkName + " config:\n" + conf);
        try {
            MetricsSinkAdapter sa = newSink(sinkName, conf.getString(MetricsConfig.DESC_KEY, sinkName), conf);
            // we allow config of later registered sinks
            if (sa != null) {
                sa.start();
                sinks.put(sinkName, sa);
            }
        } catch (Exception e) {
            // One broken sink config must not prevent the others from starting.
            LOG.warn("Error creating " + sinkName, e);
        }
    }
    period = confPeriod > 0 ? confPeriod : config.getInt(MetricsConfig.PERIOD_KEY, MetricsConfig.PERIOD_DEFAULT);
}
/**
 * Builds a sink adapter for the given sink, reading context, filters, period,
 * queue capacity and retry policy from the supplied config (with defaults).
 */
static MetricsSinkAdapter newSink(String name, String desc, MetricsSink sink, MetricsConfig conf) {
    return new MetricsSinkAdapter(name, desc, sink, conf.getString(MetricsConfig.CONTEXT_KEY),
            conf.getFilter(MetricsConfig.SOURCE_FILTER_KEY),
            conf.getFilter(MetricsConfig.RECORD_FILTER_KEY),
            conf.getFilter(MetricsConfig.METRIC_FILTER_KEY),
            conf.getInt(MetricsConfig.PERIOD_KEY, MetricsConfig.PERIOD_DEFAULT),
            conf.getInt(MetricsConfig.QUEUE_CAPACITY_KEY,
                    MetricsConfig.QUEUE_CAPACITY_DEFAULT),
            conf.getInt(MetricsConfig.RETRY_DELAY_KEY, MetricsConfig.RETRY_DELAY_DEFAULT),
            conf.getFloat(MetricsConfig.RETRY_BACKOFF_KEY,
                    MetricsConfig.RETRY_BACKOFF_DEFAULT),
            conf.getInt(MetricsConfig.RETRY_COUNT_KEY, MetricsConfig.RETRY_COUNT_DEFAULT));
}
/**
 * Builds a sink adapter from the plugin configured in the given config, or
 * returns null when no plugin is configured.
 */
static MetricsSinkAdapter newSink(String name, String desc, MetricsConfig conf) {
    final MetricsSink plugin = conf.getPlugin("");
    return plugin == null ? null : newSink(name, desc, plugin, conf);
}
/**
 * Loads the source-name filter and all per-source configs, then registers
 * the built-in system-stats source.
 */
private void configureSources() {
    sourceFilter = config.getFilter(MetricsConfig.PREFIX_DEFAULT + MetricsConfig.SOURCE_FILTER_KEY);
    // Remember per-source configs so later registrations can pick them up.
    sourceConfigs.putAll(config.getInstanceConfigs(MetricsConfig.SOURCE_KEY));
    registerSystemSource();
}
// Drops all loaded configuration and injected tags; called when the system
// stops so a restart re-reads everything.
private void clearConfigs() {
    sinkConfigs.clear();
    sourceConfigs.clear();
    injectedTags.clear();
    config = null;
}
/**
 * Returns the local host name, falling back to "localhost" (after logging
 * the error) when it cannot be resolved.
 */
static String getHostname() {
    try {
        return InetAddress.getLocalHost().getHostName();
    } catch (Exception e) {
        LOG.error("Error getting localhost name. Using 'localhost'...", e);
        return "localhost";
    }
}
/**
 * Registers the built-in source that reports stats about the metrics system
 * itself: number of sources/sinks, per-sink snapshots, and the snapshot /
 * publish / drop statistics.
 */
private void registerSystemSource() {
    sysSource = new MetricsSourceAdapter(prefix, MS_STATS_NAME, MS_STATS_DESC, new MetricsSource() {
        @Override
        public void getMetrics(MetricsBuilder builder, boolean all) {
            int numSources, numSinks;
            // Lock only around reads of the shared maps; build records outside.
            synchronized (MetricsSystemImpl.this) {
                numSources = sources.size();
                numSinks = sinks.size();
            }
            MetricsRecordBuilder rb = builder.addRecord(MS_NAME).setContext(MS_CONTEXT)
                    .addGauge(NUM_SOURCES_KEY, NUM_SOURCES_DESC, numSources)
                    .addGauge(NUM_SINKS_KEY, NUM_SINKS_DESC, numSinks);
            synchronized (MetricsSystemImpl.this) {
                for (MetricsSinkAdapter sa : sinks.values()) {
                    sa.snapshot(rb, all);
                }
            }
            snapshotStat.snapshot(rb, all);
            publishStat.snapshot(rb, all);
            dropStat.snapshot(rb, all);
        }
    }, injectedTags, null, null, period);
    sysSource.start();
}
// Registers this metrics system's control MBean under the configured prefix.
private void initSystemMBean() {
    Contracts.checkNotNull(prefix, "prefix should not be null here!");
    mbeanName = MBeans.register(prefix, MS_CONTROL_NAME, this);
}
/**
 * Fully shuts the system down: stops it if it is still monitoring (errors
 * are logged, not propagated) and unregisters the control MBean.
 */
@Override
public synchronized void shutdown() {
    if (monitoring) {
        try {
            stop();
        } catch (Exception e) {
            LOG.warn("Error stopping the metrics system", e);
        }
    }
    MBeans.unregister(mbeanName);
}
}
|
import React from 'react'
import {connect} from 'react-redux'
import {addToCartThunk, makeCartThunk} from '../store/cart'
const addToCartButton = props => {
return (
<form
onSubmit={event => {
event.preventDefault()
props.addToCart(props.id, event.target.quantity.value, props.cartId)
}}
>
<input name="quantity" type="number" min="1" defaultValue="1" />
<button id={props.id} type="submit">
Add To Cart
</button>
</form>
)
}
// Exposes the current cart id (undefined until a cart exists) to the component.
const mapStateToProps = ({cart}) => ({cartId: cart.id})
// If a cart already exists, add the product to it; otherwise create a new
// cart seeded with this product.
const mapDispatchToProps = dispatch => ({
  addToCart: (productId, quantity, cartId) => {
    if (cartId) {
      dispatch(addToCartThunk(productId, cartId, quantity))
    } else {
      dispatch(makeCartThunk(productId, quantity))
    }
  }
})
export default connect(mapStateToProps, mapDispatchToProps)(addToCartButton)
|
package io.sjitech.demo.service;
import io.sjitech.demo.exception.AppException;
import io.sjitech.demo.exception.MijinException;
import io.sjitech.demo.model.*;
import io.sjitech.demo.util.CommonUtil;
import io.sjitech.demo.util.MijinUtil;
import io.sjitech.demo.util.NisExtendApiId;
import org.nem.core.connect.HttpJsonPostRequest;
import org.nem.core.connect.client.NisApiId;
import org.nem.core.messages.PlainMessage;
import org.nem.core.model.*;
import org.nem.core.model.mosaic.*;
import org.nem.core.model.namespace.Namespace;
import org.nem.core.model.namespace.NamespaceId;
import org.nem.core.model.namespace.NamespaceIdPart;
import org.nem.core.model.ncc.MosaicDefinitionMetaDataPair;
import org.nem.core.model.ncc.MosaicIdSupplyPair;
import org.nem.core.model.ncc.NemAnnounceResult;
import org.nem.core.model.ncc.RequestAnnounce;
import org.nem.core.model.primitive.Amount;
import org.nem.core.model.primitive.Quantity;
import org.nem.core.model.primitive.Supply;
import org.nem.core.serialization.BinarySerializer;
import org.nem.core.serialization.Deserializer;
import org.nem.core.time.TimeInstant;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.CompletableFuture;
import java.util.function.Supplier;
/**
* Created by wang on 2016/07/27.
*/
@Service
public class MosaicService {
private static final Logger log = LoggerFactory.getLogger(MosaicService.class);
@Autowired
private MijinUtil mijinUtil;
/**
 * Fetches namespace data (fqn, owner, height) for the given full name from
 * the mijin node.
 */
public MijinNamespace getNamespace(String name) {
    log.info("get namespace {} data from mijin", name);
    return getNamespaceData(name);
}
/**
 * Queries the NIS namespace endpoint and maps the response into a
 * MijinNamespace. On failure the error is logged and an unfilled
 * MijinNamespace is returned.
 */
private MijinNamespace getNamespaceData(String namespaceFullName) {
    final CompletableFuture<Deserializer> future = mijinUtil.getConnector().getAsync(
            mijinUtil.getMijinNodeEndpoint(),
            NisExtendApiId.NIS_REST_NAMESPACE,
            "namespace=" + namespaceFullName
    );
    MijinNamespace namespace = new MijinNamespace();
    // join() below makes the call effectively synchronous despite the async API.
    future.thenAccept(d -> {
        log.info("--------------------------------------------------------------------------------");
        namespace.setFqn(d.readString("fqn"));
        namespace.setOwner(d.readString("owner"));
        namespace.setHeight(d.readInt("height"));
        log.info(String.format("\n{\n\t\"fqn\":\"%s\",\n\t\"owner\":\"%s\",\n\t\"height\":\"%s\"\n}\n",
                namespace.getFqn(),
                namespace.getOwner(),
                namespace.getHeight()
        ));
    }).exceptionally(e -> {
        log.warn(String.format("could not get %s data by %s , reason: %s",
                namespaceFullName,
                NisExtendApiId.NIS_REST_NAMESPACE,
                e.getMessage()), e);
        return null;
    }).join();
    return namespace;
}
/**
 * Fetches the definition of the named mosaic within the given namespace
 * from the mijin node.
 */
public MijinMosaic getMosaic(String namespaceId, String mosaicId) {
    log.info("get namespace {} mosaic {} data from mijin", namespaceId, mosaicId);
    return getMosaicDefinition(namespaceId, mosaicId);
}
/**
 * Lists all mosaic definitions in the namespace and picks the one whose name
 * matches {@code mosaicId}, copying its fields into a MijinMosaic. If the
 * mosaic is not found (creator stays null) or the request fails, the problem
 * is logged and a possibly-unfilled MijinMosaic is returned.
 */
private MijinMosaic getMosaicDefinition(String namespaceId, String mosaicId) {
    final CompletableFuture<Deserializer> future = mijinUtil.getConnector().getAsync(
            mijinUtil.getMijinNodeEndpoint(),
            NisExtendApiId.NIS_REST_MOSAIC_DEFINITION_PAGE,
            "namespace=" + namespaceId
    );
    MijinMosaic mosaic = new MijinMosaic();
    future.thenAccept(d -> {
        log.info("get mosaic definitions successfully! ");
        final List<MosaicDefinitionMetaDataPair> metaDataPairList =
                d.readOptionalObjectArray("data", MosaicDefinitionMetaDataPair::new);
        if (metaDataPairList != null && metaDataPairList.size() > 0) {
            // Filter the page down to the requested mosaic name.
            metaDataPairList.stream()
                    .filter(pair -> mosaicId.equals(pair.getEntity().getId().getName()))
                    .forEach(pair -> {
                        log.info("--------------------------------------------------------------------------------");
                        mosaic.setNamespaceId(namespaceId);
                        mosaic.setName(mosaicId);
                        mosaic.setCreator(pair.getEntity().getCreator().getAddress().toString());
                        mosaic.setId(pair.getMetaData().getId());
                        // Descriptors are stored URL-encoded; decode for display.
                        mosaic.setDescription(CommonUtil.urlDecode(pair.getEntity().getDescriptor().toString()));
                        mosaic.setDivisibility(pair.getEntity().getProperties().getDivisibility());
                        mosaic.setInitialSupply(pair.getEntity().getProperties().getInitialSupply());
                        mosaic.setSupplyMutable(pair.getEntity().getProperties().isSupplyMutable());
                        mosaic.setTransferable(pair.getEntity().getProperties().isTransferable());
                        mosaic.setHasLevy(pair.getEntity().isMosaicLevyPresent());
                        if (pair.getEntity().isMosaicLevyPresent()) {
                            mosaic.setLevyType(pair.getEntity().getMosaicLevy().getType());
                            mosaic.setRecipientAddress(pair.getEntity().getMosaicLevy().getRecipient().getAddress().toString());
                            mosaic.setLevyFee(pair.getEntity().getMosaicLevy().getFee().getRaw());
                        }
                        log.info(String.format("\n{\n\t\"namespace\":\"%s\",\n\t\"mosaic\":\"%s\",\n\t\"initial supply\":%d,\n\t\"divisibility\":%d\n}\n",
                                mosaic.getNamespaceId(),
                                mosaic.getName(),
                                mosaic.getInitialSupply(),
                                mosaic.getDivisibility()
                        ));
                    });
        }
        if (mosaic.getCreator() == null) {
            log.warn("mosaic {} definition can not be found in namespace {}", mosaicId, namespaceId);
        }
    }).exceptionally(e -> {
        log.warn(String.format("could not get %s*%s definition from %s , reason: %s",
                namespaceId,
                mosaicId,
                NisExtendApiId.NIS_REST_MOSAIC_DEFINITION_PAGE,
                e.getMessage()), e);
        return null;
    }).join();
    return mosaic;
}
/**
 * Transfers one or more mosaics (plus an optional XEM amount and plain-text
 * message) from the sender to the address given in the parameter, announcing
 * the signed transaction to the node.
 *
 * @param sender    account that signs and pays for the transfer
 * @param parameter recipient address, xem amount, optional message, and the
 *                  list of mosaics with quantities
 * @return the announce result mapped into a TransactionResult
 * @throws AppException if no mosaic data is supplied
 */
public TransactionResult sendMosaics(Account sender, TransferParameter parameter) {
    if (parameter == null || parameter.getMosaics() == null || parameter.getMosaics().isEmpty()) {
        // Fix: String.format had no format arguments; use the literal directly.
        throw new AppException("mosaic data can not be empty");
    }
    Supplier<TransferTransaction> transactionSupplier = () -> {
        final Account recipient = new Account(Address.fromEncoded(parameter.getAddress()));
        TransferTransactionAttachment attachment = new TransferTransactionAttachment();
        if (parameter.getMessage() != null) {
            PlainMessage plainMessage = new PlainMessage(parameter.getMessage().getBytes());
            attachment.setMessage(plainMessage);
        }
        // Attach every requested mosaic with its quantity.
        parameter.getMosaics().forEach(mosaicData -> {
            MosaicId mosaicId = new MosaicId(new NamespaceId(mosaicData.getNamespace()), mosaicData.getMosaic());
            Mosaic mosaic = new Mosaic(mosaicId, Quantity.fromValue(mosaicData.getQuantity()));
            attachment.addMosaic(mosaic);
        });
        // prepare transaction data and sign it
        final int version = 2;
        final TimeInstant timeInstant = mijinUtil.getTimeProvider().getCurrentTime();
        final TransferTransaction transaction = new TransferTransaction(
                version, // version
                timeInstant, // time instant
                sender, // sender
                recipient, // recipient
                Amount.fromNem(parameter.getAmount()), // amount in xem
                attachment); // attachment (message, mosaics)
        // The fee calculator needs supply/divisibility for each attached mosaic;
        // look each definition up from the node on demand.
        TransactionFeeCalculator calculator = new DefaultTransactionFeeCalculator(
                id -> {
                    for (MosaicParameter mosaicData : parameter.getMosaics()) {
                        MosaicId mosaicId = new MosaicId(new NamespaceId(mosaicData.getNamespace()), mosaicData.getMosaic());
                        if (id.equals(mosaicId)) {
                            MosaicDefinition mosaicDefinition = retrieveMosaicDefinition(mosaicId);
                            return new MosaicFeeInformation(
                                    Supply.fromValue(mosaicDefinition.getProperties().getInitialSupply()),
                                    mosaicDefinition.getProperties().getDivisibility());
                        }
                    }
                    return null;
                },
                () -> mijinUtil.getNewFeeApplyForkHeight(),
                mijinUtil.getNewFeeApplyForkHeight());
        transaction.setFee(calculator.calculateMinimumFee(transaction));
        log.info("mosaic transfer fee is {}", transaction.getFee().getNumNem());
        transaction.setDeadline(timeInstant.addHours(23));
        transaction.sign();
        return transaction;
    };
    return mijinUtil.postTransactionAnnounce(sender, transactionSupplier, (deserializer -> {
        TransactionResult transactionResult = new TransactionResult();
        final NemAnnounceResult result = new NemAnnounceResult(deserializer);
        transactionResult.setCode(result.getCode());
        transactionResult.setType(result.getType());
        transactionResult.setMessage(result.getMessage());
        // Announce code 1 is treated as success by this service.
        if (result.getCode() == 1) {
            log.info(String.format("successfully send mosaics from %s to %s",
                    sender.getAddress(),
                    parameter.getAddress()));
            transactionResult.setSuccess(true);
            transactionResult.setTransactionHash("" + result.getTransactionHash());
            transactionResult.setInnerTransactionHash("" + result.getInnerTransactionHash());
        } else {
            log.warn(String.format("could not send mosaics from %s to %s, reason: %s",
                    sender.getAddress(),
                    parameter.getAddress(),
                    result.getMessage()));
        }
        return transactionResult;
    }));
}
/**
 * Transfers {@code amount} units of the given mosaic (carried on a 1-XEM
 * transfer) from the sender to the recipient address.
 *
 * @throws MijinException if the mosaic definition cannot be resolved
 */
public TransactionResult transferMosaic(Account sender, String recipientAddress,
        String namespaceName, String mosaicName, long amount) {
    // Fix: removed an unused local (TransactionResult result = null) that was
    // declared but never read.
    Account recipient = new Account(Address.fromEncoded(recipientAddress));
    // Resolve the definition first: supply/divisibility are needed for the fee.
    MijinMosaic mosaicDef = getMosaic(namespaceName, mosaicName);
    if (mosaicDef == null || mosaicDef.getCreator() == null) {
        throw new MijinException(
                String.format("can not get mosaic %s definition within namespace %s", mosaicName, namespaceName));
    }
    final MosaicId mosaicId = new MosaicId(new NamespaceId(namespaceName), mosaicName);
    final Mosaic mosaic = new Mosaic(mosaicId, Quantity.fromValue(amount));
    final TransferTransactionAttachment attachment = new TransferTransactionAttachment();
    attachment.addMosaic(mosaic);
    return transferMosaic(sender, recipient, 1, attachment, mosaic, mosaicDef);
}
/**
 * Builds, signs, and announces a mosaic transfer transaction, then maps the
 * announce response into a TransactionResult. Request failures are logged
 * and yield a TransactionResult with its fields left unset.
 *
 * @param sender      signing account
 * @param recipient   receiving account
 * @param amountOfXem xem amount carried by the transfer
 * @param attachment  attachment holding the mosaics to send
 * @param mosaic      the mosaic being sent (used for logging)
 * @param mosaicDef   its definition (supplies fee information)
 * @throws AppException if the attachment carries no mosaics
 */
private TransactionResult transferMosaic(Account sender, Account recipient,
        long amountOfXem,
        TransferTransactionAttachment attachment,
        Mosaic mosaic,
        MijinMosaic mosaicDef) {
    if (attachment == null || attachment.getMosaics() == null || attachment.getMosaics().isEmpty()) {
        throw new AppException(String.format("mosaic %s*%s attachment can not be empty",
                mosaicDef.getNamespaceId(), mosaicDef.getName()));
    }
    // prepare transaction data and sign it
    int version = 2;
    final TimeInstant timeInstant = mijinUtil.getTimeProvider().getCurrentTime();
    final TransferTransaction transaction = new TransferTransaction(
            version, // version
            timeInstant, // time instant
            sender, // sender
            recipient, // recipient
            Amount.fromNem(amountOfXem), // amount in micro xem
            attachment); // attachment (message, mosaics)
    // Fee is computed from the mosaic's supply and divisibility.
    final MosaicFeeInformation feeInfo =
            new MosaicFeeInformation(Supply.fromValue(mosaicDef.getInitialSupply()), mosaicDef.getDivisibility());
    TransactionFeeCalculator calculator = new DefaultTransactionFeeCalculator(
            id -> feeInfo,
            () -> mijinUtil.getNewFeeApplyForkHeight(),
            mijinUtil.getNewFeeApplyForkHeight());
    transaction.setFee(calculator.calculateMinimumFee(transaction));
    transaction.setDeadline(timeInstant.addHours(23));
    transaction.sign();
    // send transaction and get result
    final byte[] data = BinarySerializer.serializeToBytes(transaction.asNonVerifiable());
    final RequestAnnounce request = new RequestAnnounce(data, transaction.getSignature().getBytes());
    final CompletableFuture<Deserializer> future = mijinUtil.getConnector().postAsync(
            mijinUtil.getMijinNodeEndpoint(),
            NisApiId.NIS_REST_TRANSACTION_ANNOUNCE,
            new HttpJsonPostRequest(request));
    TransactionResult transactionResult = new TransactionResult();
    // join() below makes the call effectively synchronous.
    future.thenAccept(d -> {
        final NemAnnounceResult result = new NemAnnounceResult(d);
        transactionResult.setCode(result.getCode());
        transactionResult.setType(result.getType());
        transactionResult.setMessage(result.getMessage());
        // Announce code 1 is treated as success by this service.
        if (result.getCode() == 1) {
            log.info(String.format("successfully send %d %s*%s from %s to %s",
                    mosaic.getQuantity().getRaw(),
                    mosaicDef.getNamespaceId(),
                    mosaicDef.getName(),
                    sender.getAddress(),
                    recipient.getAddress()));
            transactionResult.setSuccess(true);
            transactionResult.setTransactionHash("" + result.getTransactionHash());
            transactionResult.setInnerTransactionHash("" + result.getInnerTransactionHash());
        } else {
            log.warn(String.format("could not send %s*%s from %s to %s, reason: %s",
                    mosaicDef.getNamespaceId(),
                    mosaicDef.getName(),
                    sender.getAddress(),
                    recipient.getAddress(),
                    result.getMessage()));
        }
    }).exceptionally(e -> {
        log.warn(String.format("could not send %s*%s from %s to %s, reason: %s",
                mosaicDef.getNamespaceId(),
                mosaicDef.getName(),
                sender.getAddress(),
                recipient.getAddress(),
                e.getMessage()), e);
        return null;
    }).join();
    return transactionResult;
}
/**
 * Fetches and deserializes the namespace with the given id from the node.
 */
public Namespace getNamespace(final NamespaceId id) {
    final Deserializer deserializer = mijinUtil.getConnector()
            .getAsync(mijinUtil.getMijinNodeEndpoint(),
                    NisExtendApiId.NIS_REST_NAMESPACE,
                    String.format("namespace=%s", id.toString()))
            .join();
    return new Namespace(deserializer);
}
/**
 * Provisions a new root namespace (no parent) owned by the sender.
 */
public TransactionResult createRootNamespace(final Account sender, final String rootNamespaceName) {
    return createNamespace(sender, new NamespaceIdPart(rootNamespaceName), null);
}
/**
 * Builds, signs, and announces a provision-namespace transaction that adds
 * {@code newPart} under {@code parent} (or as a root when parent is null).
 *
 * @return the announce result mapped into a TransactionResult
 */
public TransactionResult createNamespace(final Account sender,
        final NamespaceIdPart newPart,
        final NamespaceId parent) {
    Supplier<ProvisionNamespaceTransaction> transactionSupplier = () -> {
        // prepare transaction data and sign it
        final TimeInstant timeInstant = mijinUtil.getTimeProvider().getCurrentTime();
        final ProvisionNamespaceTransaction transaction = new ProvisionNamespaceTransaction(
                timeInstant,
                sender,
                newPart,
                parent);
        // transaction.setFee(Amount.fromNem(108));
        transaction.setDeadline(timeInstant.addHours(23));
        transaction.sign();
        return transaction;
    };
    return mijinUtil.postTransactionAnnounce(sender, transactionSupplier, (deserializer -> {
        TransactionResult transactionResult = new TransactionResult();
        final NemAnnounceResult result = new NemAnnounceResult(deserializer);
        transactionResult.setCode(result.getCode());
        transactionResult.setType(result.getType());
        transactionResult.setMessage(result.getMessage());
        // Announce code 1 is treated as success by this service.
        if (result.getCode() == 1) {
            log.info(String.format("successfully provisioned new namespace %s for owner %s",
                    null == parent ? newPart.toString() : parent.concat(newPart).toString(),
                    sender.getAddress()));
            transactionResult.setSuccess(true);
            transactionResult.setTransactionHash("" + result.getTransactionHash());
            transactionResult.setInnerTransactionHash("" + result.getInnerTransactionHash());
        } else {
            log.warn(String.format("could not provisioned new namespace %s for owner %s, reason: %s",
                    null == parent ? newPart.toString() : parent.concat(newPart).toString(),
                    sender.getAddress(),
                    result.getMessage()));
        }
        return transactionResult;
    }));
}
/**
 * Retrieves the mosaic definition by namespace and mosaic name via the
 * single-definition endpoint and maps it into a MijinMosaic.
 */
public MijinMosaic retrieveMosaicDefinition(String namespaceName, String mosaicName) {
    final NamespaceId namespaceId = new NamespaceId(namespaceName);
    final MosaicId mosaicId = new MosaicId(namespaceId, mosaicName);
    MijinMosaic mosaic = new MijinMosaic();
    MosaicDefinition definition = retrieveMosaicDefinition(mosaicId);
    mosaic.setNamespaceId(definition.getId().getNamespaceId().toString());
    mosaic.setName(definition.getId().getName());
    mosaic.setCreator(definition.getCreator().getAddress().toString());
    // Descriptors are stored URL-encoded; decode for display.
    mosaic.setDescription(CommonUtil.urlDecode(definition.getDescriptor().toString()));
    mosaic.setDivisibility(definition.getProperties().getDivisibility());
    mosaic.setInitialSupply(definition.getProperties().getInitialSupply());
    mosaic.setSupplyMutable(definition.getProperties().isSupplyMutable());
    mosaic.setTransferable(definition.getProperties().isTransferable());
    mosaic.setHasLevy(definition.isMosaicLevyPresent());
    if (definition.isMosaicLevyPresent()) {
        mosaic.setLevyType(definition.getMosaicLevy().getType());
        mosaic.setRecipientAddress(definition.getMosaicLevy().getRecipient().getAddress().toString());
        mosaic.setLevyFee(definition.getMosaicLevy().getFee().getRaw());
    }
    return mosaic;
}
/**
 * Fetches and deserializes the full mosaic definition for the given id.
 */
public MosaicDefinition retrieveMosaicDefinition(final MosaicId id) {
    final String query = String.format("mosaicId=%s", CommonUtil.urlEncode(id.toString()));
    final Deserializer deserializer = mijinUtil.getConnector()
            .getAsync(mijinUtil.getMijinNodeEndpoint(), NisExtendApiId.NIS_REST_MOSAIC_DEFINITION, query)
            .join();
    return new MosaicDefinition(deserializer);
}
/**
 * Builds, signs, and announces a mosaic-definition-creation transaction for
 * the named mosaic inside the given namespace, using the fixed definition
 * produced by {@link #createMosaicDefinition}.
 */
public TransactionResult createMosaic(final Account sender, final String namespaceName, final String mosaicName) {
    final NamespaceId namespaceId = new NamespaceId(namespaceName);
    Supplier<MosaicDefinitionCreationTransaction> transactionSupplier = () -> {
        // prepare transaction data and sign it
        final TimeInstant timeInstant = mijinUtil.getTimeProvider().getCurrentTime();
        final MosaicDefinitionCreationTransaction transaction = new MosaicDefinitionCreationTransaction(
                timeInstant,
                sender,
                createMosaicDefinition(sender, namespaceId, mosaicName));
        // transaction.setFee(Amount.fromNem(108));
        transaction.setDeadline(timeInstant.addHours(23));
        transaction.sign();
        return transaction;
    };
    return mijinUtil.postTransactionAnnounce(sender, transactionSupplier, (deserializer -> {
        TransactionResult transactionResult = new TransactionResult();
        final NemAnnounceResult result = new NemAnnounceResult(deserializer);
        transactionResult.setCode(result.getCode());
        transactionResult.setType(result.getType());
        transactionResult.setMessage(result.getMessage());
        // Announce code 1 is treated as success by this service.
        if (result.getCode() == 1) {
            log.info(String.format("successfully created mosaic definition %s*%s for owner %s",
                    namespaceId.toString(),
                    mosaicName,
                    sender.getAddress()));
            transactionResult.setSuccess(true);
            transactionResult.setTransactionHash("" + result.getTransactionHash());
            transactionResult.setInnerTransactionHash("" + result.getInnerTransactionHash());
        } else {
            log.warn(String.format("could not create mosaic definition %s*%s for owner %s, reason: %s",
                    namespaceId.toString(),
                    mosaicName,
                    sender.getAddress(),
                    result.getMessage()));
        }
        return transactionResult;
    }));
}
// The creator of a mosaic definition must always be the sender of the transaction that publishes the mosaic definition.
// The namespace in which the mosaic will reside is given by its id.
// The mosaic name within the namespace must be unique.
private MosaicDefinition createMosaicDefinition(
        final Account creator,
        final NamespaceId namespaceId,
        final String mosaicName) {
    final MosaicId mosaicId = new MosaicId(namespaceId, mosaicName);
    final MosaicDescriptor descriptor =
            new MosaicDescriptor(CommonUtil.urlEncode("provide a description for the mosaic here"));
    // This shows how properties of the mosaic can be chosen.
    // If no custom properties are supplied default values are taken.
    final Properties properties = new Properties();
    properties.put("initialSupply", Long.toString(1000000000));
    properties.put("divisibility", Long.toString(3));
    properties.put("supplyMutable", Boolean.toString(true));
    properties.put("transferable", Boolean.toString(true));
    final MosaicProperties mosaicProperties = new DefaultMosaicProperties(properties);
    // A levy is optional; this definition is created WITHOUT one (null below).
    // Fix: the original code constructed a MosaicLevy here but never passed it
    // to the definition — dead code, now removed. To charge a levy, build a
    // MosaicLevy and pass it as the last constructor argument instead of null.
    return new MosaicDefinition(creator, mosaicId, descriptor, mosaicProperties, null);
}
/**
 * Fetches the current supply for the given mosaic id from the node.
 */
public MosaicIdSupplyPair retrieveMosaicSupply(final MosaicId id) {
    final String query = String.format("mosaicId=%s", CommonUtil.urlEncode(id.toString()));
    final Deserializer deserializer = mijinUtil.getConnector()
            .getAsync(mijinUtil.getMijinNodeEndpoint(), NisExtendApiId.NIS_REST_MOSAIC_SUPPLY, query)
            .join();
    return new MosaicIdSupplyPair(deserializer);
}
/**
 * Builds, signs, and announces a supply-change transaction that INCREASES
 * the supply of the given mosaic by {@code supplyDelta} whole units.
 */
public TransactionResult createMosaicSupply(final Account sender, String nsName,
        String mosaicName, final long supplyDelta) {
    final NamespaceId namespaceId = new NamespaceId(nsName);
    final MosaicId mosaicId = new MosaicId(namespaceId, mosaicName);
    Supplier<MosaicSupplyChangeTransaction> transactionSupplier = () -> {
        // prepare transaction data and sign it
        final TimeInstant timeInstant = mijinUtil.getTimeProvider().getCurrentTime();
        final MosaicSupplyChangeTransaction transaction = new MosaicSupplyChangeTransaction(
                timeInstant,
                sender,
                mosaicId,
                MosaicSupplyType.Create, // increase supply
                Supply.fromValue(supplyDelta)); // change in supply (always in whole units, not subunits)
        // transaction.setFee(Amount.fromNem(108));
        transaction.setDeadline(timeInstant.addHours(23));
        transaction.sign();
        return transaction;
    };
    return mijinUtil.postTransactionAnnounce(sender, transactionSupplier, (deserializer -> {
        TransactionResult transactionResult = new TransactionResult();
        final NemAnnounceResult result = new NemAnnounceResult(deserializer);
        transactionResult.setCode(result.getCode());
        transactionResult.setType(result.getType());
        transactionResult.setMessage(result.getMessage());
        // Announce code 1 is treated as success by this service.
        if (result.getCode() == 1) {
            log.info(String.format("successfully changed supply for mosaic %s, %d units added ",
                    mosaicId,
                    supplyDelta));
            transactionResult.setSuccess(true);
            transactionResult.setTransactionHash("" + result.getTransactionHash());
            transactionResult.setInnerTransactionHash("" + result.getInnerTransactionHash());
        } else {
            log.warn(String.format("could not change supply for mosaic %s, reason: %s",
                    mosaicId,
                    result.getMessage()));
        }
        return transactionResult;
    }));
}
/**
 * Builds, signs, and announces a mosaic-definition-creation transaction
 * whose definition (name, description, supply, divisibility, mutability,
 * transferability) is taken from the caller-supplied parameter. No levy is
 * attached.
 */
public TransactionResult createMosaic(Account sender, MosaicParameter parameter) {
    Supplier<MosaicDefinitionCreationTransaction> transactionSupplier = () -> {
        final NamespaceId namespaceId = new NamespaceId(parameter.getNamespace());
        final MosaicId mosaicId = new MosaicId(namespaceId, parameter.getMosaic());
        // Descriptors are stored URL-encoded on the node.
        final MosaicDescriptor descriptor =
                new MosaicDescriptor(CommonUtil.urlEncode(parameter.getDescription()));
        // This shows how properties of the mosaic can be chosen.
        final Properties properties = new Properties();
        properties.put("initialSupply", Long.toString(parameter.getInitialSupply()));
        properties.put("divisibility", Long.toString(parameter.getDivisibility()));
        properties.put("supplyMutable", Boolean.toString(parameter.isSupplyMutable()));
        properties.put("transferable", Boolean.toString(parameter.isTransferable()));
        final MosaicProperties mosaicProperties = new DefaultMosaicProperties(properties);
        // no levy
        final MosaicDefinition mosaicDefinition =
                new MosaicDefinition(sender, mosaicId, descriptor, mosaicProperties, null);
        // prepare transaction data and sign it
        final TimeInstant timeInstant = mijinUtil.getTimeProvider().getCurrentTime();
        final MosaicDefinitionCreationTransaction transaction = new MosaicDefinitionCreationTransaction(
                timeInstant,
                sender,
                mosaicDefinition);
        // transaction.setFee(Amount.fromNem(108));
        transaction.setDeadline(timeInstant.addHours(23));
        transaction.sign();
        return transaction;
    };
    return mijinUtil.postTransactionAnnounce(sender, transactionSupplier, (deserializer -> {
        TransactionResult transactionResult = new TransactionResult();
        final NemAnnounceResult result = new NemAnnounceResult(deserializer);
        transactionResult.setCode(result.getCode());
        transactionResult.setType(result.getType());
        transactionResult.setMessage(result.getMessage());
        // Announce code 1 is treated as success by this service.
        if (result.getCode() == 1) {
            log.info(String.format("successfully created mosaic definition %s*%s for owner %s",
                    parameter.getNamespace(),
                    parameter.getMosaic(),
                    sender.getAddress()));
            transactionResult.setSuccess(true);
            transactionResult.setTransactionHash("" + result.getTransactionHash());
            transactionResult.setInnerTransactionHash("" + result.getInnerTransactionHash());
        } else {
            log.warn(String.format("could not create mosaic definition %s*%s for owner %s, reason: %s",
                    parameter.getNamespace(),
                    parameter.getMosaic(),
                    sender.getAddress(),
                    result.getMessage()));
        }
        return transactionResult;
    }));
}
}
|
# Interactive launcher for the tweets k-means Java program.
echo "Please Enter the command as follows:"
echo "./tweets-k-means <numberOfClusters> <initialSeedsFile> <TweetsDataFile> <outputFile>"
# The user pastes the whole command line shown above; arg0 deliberately
# swallows the leading "./tweets-k-means" token, so arg1..arg4 are the real
# four arguments.
read arg0 arg1 arg2 arg3 arg4
# Make the three file arguments absolute before we cd into ./bin below.
arg2=$PWD"/"$arg2
arg3=$PWD"/"$arg3
arg4=$PWD"/"$arg4
echo "starting java program!"
cd ./bin/ && java part2.Launcher "$arg1" "$arg2" "$arg3" "$arg4"
|
/////////////////////////////////////////////////////////////////////////////
// Name: src/qt/spinbutt.cpp
// Author: <NAME>, <NAME>
// Copyright: (c) 2010 wxWidgets dev team
// Licence: wxWindows licence
/////////////////////////////////////////////////////////////////////////////
// For compilers that support precompilation, includes "wx.h".
#include "wx/wxprec.h"
#include "wx/spinbutt.h"
#include "wx/qt/private/winevent.h"
#include <QtWidgets/QSpinBox>
// Bridges the Qt QSpinBox widget to wxWidgets: forwards Qt signals to the
// owning wxSpinButton as wx events.
class wxQtSpinButton : public wxQtEventSignalHandler< QSpinBox, wxSpinButton >
{
public:
    wxQtSpinButton( wxWindow *parent, wxSpinButton *handler );
private:
    // Slot for QSpinBox::valueChanged(int); emits wxEVT_SPIN to the handler.
    void valueChanged(int value);
    virtual void stepBy(int steps) wxOVERRIDE; // see QAbstractSpinBox::stepBy()
};
wxQtSpinButton::wxQtSpinButton( wxWindow *parent, wxSpinButton *handler )
    : wxQtEventSignalHandler< QSpinBox, wxSpinButton >( parent, handler )
{
    // The static_cast selects the int overload of QSpinBox::valueChanged,
    // which is overloaded (int and QString variants) in Qt5.
    connect(this, static_cast<void (QSpinBox::*)(int index)>(&QSpinBox::valueChanged),
            this, &wxQtSpinButton::valueChanged);
}
void wxQtSpinButton::valueChanged(int value)
{
wxSpinButton *handler = GetHandler();
if ( handler )
{
wxSpinEvent event( wxEVT_SPIN, handler->GetId() );
event.SetInt( value );
EmitEvent( event );
}
}
// Intercepts up/down stepping so wx handlers get a chance to veto it.
void wxQtSpinButton::stepBy(int steps)
{
    wxSpinButton* const handler = GetHandler();
    if ( !handler )
        return;
    // Emit a direction-specific event carrying both the current position and
    // the position the step would produce.
    int eventType = steps < 0 ? wxEVT_SPIN_DOWN : wxEVT_SPIN_UP;
    wxSpinEvent directionEvent(eventType, handler->GetId());
    directionEvent.SetPosition(value());
    directionEvent.SetInt(value() + steps * singleStep());
    directionEvent.SetEventObject(handler);
    // Apply the step unless a handler processed the event AND vetoed it
    // (wxSpinEvent::Veto clears IsAllowed).
    if ( !handler->HandleWindowEvent(directionEvent) || directionEvent.IsAllowed() )
    {
        QSpinBox::stepBy(steps);
    }
}
// Default ctor: two-step creation; Create() must be called before use.
wxSpinButton::wxSpinButton() :
    m_qtSpinBox(NULL)
{
}

// Convenience ctor: creates the control immediately.
wxSpinButton::wxSpinButton(wxWindow *parent,
                           wxWindowID id,
                           const wxPoint& pos,
                           const wxSize& size,
                           long style,
                           const wxString& name)
{
    Create( parent, id, pos, size, style, name );
}

// Creates the underlying QSpinBox and registers it with the wx framework.
// Returns false if the wx-side control creation fails.
bool wxSpinButton::Create(wxWindow *parent,
                          wxWindowID id,
                          const wxPoint& pos,
                          const wxSize& size,
                          long style,
                          const wxString& name)
{
    m_qtSpinBox = new wxQtSpinButton( parent, this );
    // Apply the range cached in the base class to the new Qt widget.
    m_qtSpinBox->setRange(wxSpinButtonBase::GetMin(), wxSpinButtonBase::GetMax());
    // Modify the size so that the text field is not visible.
    // TODO: Find out the width of the buttons i.e. take the style into account (QStyleOptionSpinBox).
    wxSize newSize( size );
    newSize.SetWidth( 18 );
    return QtCreateControl( parent, id, pos, newSize, style, wxDefaultValidator, name );
}
void wxSpinButton::SetRange(int min, int max)
{
wxSpinButtonBase::SetRange(min, max); // cache the values
if ( m_qtSpinBox )
{
m_qtSpinBox->setRange(min, max);
}
}
// Returns the current spin position straight from the Qt widget.
// Note: assumes Create() has been called (no NULL check).
int wxSpinButton::GetValue() const
{
    return m_qtSpinBox->value();
}

// Sets the spin position on the Qt widget.
void wxSpinButton::SetValue(int val)
{
    m_qtSpinBox->setValue( val );
}

// Exposes the underlying Qt widget to the wxQt framework.
QWidget *wxSpinButton::GetHandle() const
{
    return m_qtSpinBox;
}
|
// Static-file HTTP server for the SlideCrafting build output and its logs.
const PORT = 8080;

const fs = require('fs');
const path = require('path');
const express = require('express');
const morgan = require('morgan');

const app = express();

// Access log is appended to across restarts (append-mode stream).
const accessLogStream = fs.createWriteStream('/miktex/work/log/access.log', { flags: 'a' });

// Request logging, then the two static roots (build artifacts and logs).
app.use(morgan(':date[iso]: :status :method :url size: :res[content-length] \ttime: :response-time ms', { stream: accessLogStream }));
app.use(express.static('/miktex/work/dist/'));
app.use(express.static('/miktex/work/log/'));
// app.use(express.static('/miktex/.miktex/texmfs/data/miktex/log/pdflatex.log'));

app.listen(PORT, function () {
    console.log('SlideCrafting HTTP-server app listening on port ' + PORT + '!');
});
|
<filename>frontend/app/src/static/app/admin/AdminController.js
/* Copyright 2013-2016 Extended Mind Technologies Oy
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
'use strict';
/**
 * Admin view controller: shows user/item statistics, lists users, destroys a
 * user account (with a typed confirmation phrase) and toggles forced offline
 * mode for the backend client.
 */
function AdminController($scope, AdminService, AuthenticationService, AnalyticsService, BackendClientService,
                         DateService) {
  // NOTE(review): AuthenticationService is injected but never used in this
  // controller — confirm before removing it from the injection list.
  AnalyticsService.visit('admin');

  // Headline statistics shown in the default (statistics) mode.
  AdminService.getStatistics().then(function(response) {
    $scope.userCount = response.users;
    $scope.itemCount = response.items;
  });

  // Switches to the user list view after fetching all owners.
  $scope.gotoUsers = function gotoUsers() {
    AdminService.getOwners().then(function(response) {
      $scope.users = response.users;
      $scope.adminMode = 'users';
    });
  };

  // Formats an epoch/date value as YYYY-MM-DD for display.
  $scope.getDateString = function getDateString(date) {
    return DateService.getYYYYMMDD(new Date(date));
  };

  // adminMode === undefined renders the statistics view.
  $scope.gotoStatistics = function gotoStatistics() {
    $scope.adminMode = undefined;
  };

  // Destroys a user only when the admin has typed the exact confirmation
  // phrase "destroy <email>" into user.destroy.
  $scope.destroyUser = function destroyUser(user) {
    if (user.destroy === 'destroy ' + user.email){
      AdminService.destroyUser(user).then(function() {
        removeUser(user);
      });
    }
  };

  // Updates the toggle button label to reflect the current offline state.
  // (Name keeps the original "Offine" spelling used throughout this file.)
  function resetOffineOnlineText(){
    $scope.offlineOnlineText =
      BackendClientService.getForceOffline() ? 'go online' : 'go offline';
  };
  resetOffineOnlineText();

  // Flips the forced-offline flag and refreshes the button label.
  $scope.toggleOffline = function(){
    if (BackendClientService.getForceOffline()){
      BackendClientService.setForceOffline(false);
    }else{
      BackendClientService.setForceOffline(true);
    }
    resetOffineOnlineText();
  };

  // Removes a destroyed user from the local list and decrements the counter.
  // Returns true when the user was found and removed.
  function removeUser(user) {
    var index = $scope.users.indexOf(user);
    if (index > -1) {
      $scope.users.splice(index, 1);
      $scope.userCount -= 1;
      return true;
    }
  }
}
AdminController['$inject'] = [
  '$scope', 'AdminService', 'AuthenticationService', 'AnalyticsService', 'BackendClientService',
  'DateService'];
angular.module('em.admin').controller('AdminController', AdminController);
|
import JSONModel from "sap/ui/model/json/JSONModel";
import Device from "sap/ui/Device";
export default {
    /**
     * Wraps sap.ui.Device in a one-way-bound JSONModel so views can bind to
     * device/runtime information without being able to modify it.
     */
    createDeviceModel(): JSONModel {
        //TODO|ui5ts: generate constructors
        var oModel = new JSONModel(Device);
        // NOTE(review): uses the global `sap` namespace for BindingMode instead
        // of an ES import — confirm this is intentional in this project setup.
        oModel.setDefaultBindingMode(sap.ui.model.BindingMode.OneWay);
        return oModel;
    }
};
#!/bin/sh
# Compute the greatest common divisor of two integers ($1 and $2) using the
# iterative Euclidean algorithm.

# Fail early with a usage message instead of a cryptic `[: unary operator
# expected` error when arguments are missing.
if [ $# -ne 2 ]; then
    echo "usage: $0 <num1> <num2>" >&2
    exit 1
fi

# Order the operands so that a >= b (kept from the original for clarity;
# Euclid's algorithm works for either order).
if [ "$1" -le "$2" ]; then
    a=$2
    b=$1
else
    a=$1
    b=$2
fi

# Replace (a, b) with (b, a mod b) until b reaches 0; `a` is then the gcd.
# The original's redundant outer `if [ b -ne 0 ]` wrapper is dropped — the
# while condition already covers it.
while [ "${b}" -ne 0 ]; do
    t=${b}
    b=$(expr "${a}" % "${b}")
    a=${t}
done

echo "gcd of $1 and $2 is ${a}"
|
#!/usr/bin/env bash
# Vendors a dependency into deps/ by cloning a pinned tag and committing the
# sources (without .git) into this repository.
set -e
PROJECT_PWD=${PWD}
DEP=$1
# Guard: must be run from the mediasoup-client repo root.
current_dir_name=${PROJECT_PWD##*/}
if [ "${current_dir_name}" != "mediasoup-client" ] ; then
	echo ">>> [ERROR] $(basename $0) must be called from mediasoup-client/ root directory" >&2
	exit 1
fi
# Clones GIT_REPO at GIT_TAG into DEST, strips .git and stages the sources.
function get_dep()
{
	GIT_REPO="$1"
	GIT_TAG="$2"
	DEST="$3"
	echo ">>> [INFO] getting dep '${DEP}' ..."
	# Remove any previously vendored copy (both from the index and on disk).
	if [ -d "${DEST}" ] ; then
		echo ">>> [INFO] deleting ${DEST} ..."
		git rm -rf --ignore-unmatch ${DEST} > /dev/null
		rm -rf ${DEST}
	fi
	echo ">>> [INFO] cloning ${GIT_REPO} ..."
	git clone ${GIT_REPO} ${DEST}
	cd ${DEST}
	echo ">>> [INFO] setting '${GIT_TAG}' git tag ..."
	git checkout --quiet ${GIT_TAG}
	# NOTE(review): `set -e` is already in effect from the top of the script;
	# a matching `set +e` before the checkout appears to be missing — verify
	# whether checkout failures were meant to be tolerated here.
	set -e
	echo ">>> [INFO] adding dep source code to the repository ..."
	# Drop the nested git metadata so the sources are committed directly.
	rm -rf .git
	git add .
	echo ">>> [INFO] got dep '${DEP}'"
	cd ${PROJECT_PWD}
}
# Pinned libmediasoupclient version vendored under deps/.
function get_libmediasoupclient()
{
	GIT_REPO="https://github.com/versatica/libmediasoupclient.git"
	GIT_TAG="3.0.4"
	DEST="deps/libmediasoupclient"
	get_dep "${GIT_REPO}" "${GIT_TAG}" "${DEST}"
}
case "${DEP}" in
	'-h')
		echo "Usage:"
		echo "  ./scripts/$(basename $0) [libmediasoupclient]"
		echo
		;;
	libmediasoupclient)
		get_libmediasoupclient
		;;
	*)
		echo ">>> [ERROR] unknown dep '${DEP}'" >&2
		exit 1
esac
# NOTE(review): with `set -e` any failing command above already aborts the
# script, so this $? check can only ever see 0 — confirm it is intentional.
if [ $? -eq 0 ] ; then
	echo ">>> [INFO] done"
else
	echo ">>> [ERROR] failed" >&2
	exit 1
fi
|
/*eslint no-unused-vars: ["warn", { "argsIgnorePattern": "opt|filter|params" }]*/
import {epoch_now} from 'epoetin'
class ModelPG {
constructor(db, tablename, definition, options) {
this.db = db
this.tablename = tablename
this.definition = definition
this.config = {}
this.config.customHooks= options?.customHooks || {}
this.config.useDates= options?.useDates === true
this.config.checkBeforeDelete= options?.checkBeforeDelete
}
get fields() {
return Object.keys(this.definition)
}
_objToTuple(obj) {
let fields = [], values = []
for (const fld in obj) {
if (this.fields.indexOf(fld) >= 0) {
fields.push(fld)
values.push(obj[fld])
}
}
return [fields, values]
}
ensureDefs(data) {
data.map((record) => {
this.fields.map((fld) => {
if (record[fld]===null) {
const fdef= this.definition[fld]
if (Object.prototype.hasOwnProperty.call(fdef, 'default')) {
record[fld]= fdef.default
}
}
})
})
}
prepareQuery(filter, options) {
const sselect = options?.fields != undefined
? options.fields.join(',')
: '*'
const [wfields, wvalues] = this._objToTuple(filter)
let swhere= ''
if (wfields.length > 0)
swhere= ' WHERE ' + wfields.map((f, i) => {
if (typeof wvalues[i] == 'object' && wvalues[i].constructor.name=='Array') {
return f + ' IN ($' + (i + 1) + ':csv)'
}
else if (wvalues[i] === null || wvalues[i] === undefined) {
return f + ' IS NULL'
}
else {
return f + ' = $' + (i + 1)
}
}).join(' AND ')
let query = `SELECT ${sselect} FROM ${this.tablename} ${swhere}`
if (options?.sortby) {
let name= '', dir= 1
if (typeof options.sortby == 'object') {
name= options.sortby[0]
dir=options.sortby[1]
} else {
name= options.sortby
}
query+= ` SORT BY ${name} ${!dir ? 'DESC' : 'ASC'}`
}
if (! isNaN(options?.limit)) {
query += ` LIMIT ${options.limit} `
}
if (! isNaN(options?.offset)) {
query += ` OFFSET ${options.offset}`
}
return [query, wvalues]
}
async beforeRead(filter, options) {
return Promise.resolve([
filter, options, true
])
}
async afterRead(data, filter, options) {
return Promise.resolve(
data
)
}
async read(pfilter, poptions) {
const [filter, options, goon] = await this.beforeRead(pfilter, poptions)
if (! goon)
return []
const [query, values] = this.prepareQuery(filter, options)
let data= await this.db.select(query, values, options)
this.ensureDefs(data)
data= await this.afterRead(data, filter, options)
return data
}
async keyList(filt, options) {
let data = await this.read(filt, {fields: ['id', 'name'], transaction: options?.transaction})
this.ensureDefs(data)
let res= {}
data.map((d) => {res[d.id]= d.name})
return res
}
async distinct(field, filt, options) {
const data = await this.read(filt, {fields: [`DISTINCT ${field}`], transaction: options?.transaction})
const res= data.map((d) => d[field])
return res
}
async count(filt, options) {
let field
if (options?.distinct!=undefined) {
field= `COUNT(DISTINCT ${options.distinct}) AS cnt`
} else {
field= 'COUNT(1) AS cnt'
}
const data = await this.read(filt, {fields: [field], transaction: options?.transaction})
try {
return data[0].cnt
} catch(error) {
this.db.log.error(`${this.tablename} ERROR:`)
this.db.log.error(error.constructor.name)
this.db.log.error(error.stack)
}
return 0
}
async find(id, options) {
if (isNaN(id) || id <= 0) {
const msg = this.tablename + ': cannot find, invalid Id <' + id + '>'
this.db.log.error(msg)
throw new Error(msg)
}
const data= await this.read({id: id}, options)
let odata= {}
if (Array.isArray(data)) {
this.ensureDefs(data)
odata= data[0]
} else {
this.db.log.warn(`${this.tablename}: Id ${id} does not exist`)
}
return odata
}
prepareObj(obj) {
let out = {}
Object.keys(obj)
.filter((k) => this.fields.indexOf(k) >=0)
.map((k) => {
out[k] = obj[k]
})
return out
}
async beforeInsert(params, options) {
if (this.config.useDates) {
const now= epoch_now()
params.created_at= now
}
let allow= true
if (this.config.customHooks.beforeInsert != undefined) {
[params, options, allow]= await this.config.customHooks.beforeInsert(params, options)
}
return Promise.resolve([
params, options, allow
])
}
async afterInsert(id, params, options) {
if (this.config.customHooks.afterInsert != undefined) {
id= await this.config.customHooks.afterInsert(id, params, options)
}
return Promise.resolve(
id
)
}
async insert(data, poptions) {
data= this.prepareObj(data)
let [params, options, goon] = await this.beforeInsert(data, poptions)
if (! goon)
return []
const ituple = this._objToTuple(params)
const ifields = ituple[0]
const ivalues = ituple[1]
const sfields = ifields.join(',')
const sinsert = ifields.map((f, i) => '$' + (i + 1)).join(',')
const query = `INSERT INTO ${this.tablename} (${sfields}) VALUES (${sinsert}) RETURNING id`
const ndata = await this.db.select_one(query, ivalues, options)
const id= await this.afterInsert(ndata.id, params, options)
if (id == null) {
const msg = this.tablename + ': cannot save ' + JSON.stringify(data)
this.db.log.error(msg)
} else {
if (options?.log!==false) {
this.db.log.debug(`Created with Id ${id}`)
}
}
return id
}
async beforeUpdate(params, filter, options) {
if (this.config.useDates) {
const now= epoch_now()
params.last_update_at= now
}
let allow= true
if (this.config.customHooks.beforeUpdate != undefined) {
[params, filter, options, allow]= await this.config.customHooks.beforeUpdate(params, filter, options)
}
return Promise.resolve([
params, filter, options, allow
])
}
async afterUpdate(rows, params, filter, options) {
if (this.config.customHooks.afterUpdate != undefined) {
rows= await this.config.ustomHooks.afterUpdate(rows, params, filter, options)
}
return Promise.resolve(
rows
)
}
async update(data, filt, poptions) {
data= this.prepareObj(data)
delete data.id
let [params, filter, options, goon] = await this.beforeUpdate(data, filt, poptions)
if (! goon)
return []
const utuple = this._objToTuple(params)
const ufields = utuple[0]
const uvalues = utuple[1]
if (ufields.length == 0) {
this.db.log.error(`${this.tablename} ERROR: Nothing to update`)
return 0
}
const sfields = 'SET ' + ufields.map((f, i) => f + ' = $' + (i + 1)).join(',')
const wtuple = this._objToTuple(filter)
const wfields = wtuple[0]
const wvalues = wtuple[1]
let swhere = ''
if (wfields.length > 0)
swhere = ' WHERE ' + wfields.map((f, i) => f + ' = $' + (i + 1 + ufields.length)).join(' AND ')
const allvalues= uvalues.concat(wvalues)
const query = `WITH rows as (UPDATE ${this.tablename} ${sfields} ${swhere} RETURNING 1) SELECT count(*) as cnt FROM rows`
const ndata= await this.db.select_one(query, allvalues, options)
const count= await this.afterUpdate(ndata.cnt, params, filter, options)
if (count == 0) {
const msg = this.tablename + ': no record updated with filter ' + JSON.stringify(filt) + ' -- ' + JSON.stringify(data)
this.db.log.warn(msg)
} else {
if (options?.log!==false) {
this.db.log.debug(`Updated ${count} records`)
}
}
return count
}
async beforeDelete(filter, options) {
let allow= true
if (this.config.checkBeforeDelete!=undefined) {
try {
if (filter.id != undefined) {
let found= 0
for (const check of this.config.checkBeforeDelete) {
const [checkTable, checkField]= check.split('.')
const qry= `SELECT COUNT(1) as cnt FROM ${checkTable} WHERE ${checkField} = $1`
const filt= [filter.id]
const res= await this.db.select_one(qry, filt, options)
found += res.cnt
}
allow= found==0
}
} catch(e) {}
}
if (this.config.customHooks.beforeDelete != undefined) {
[filter, options, allow] = await this.config.customHooks.beforeDelete(filter, options)
}
return Promise.resolve([
filter, options, allow
])
}
async afterDelete(rows, filter, options) {
if (this.config.customHooks.afterDelete != undefined) {
rows = await this.config.customHooks.afterDelete(rows, filter, options)
}
return Promise.resolve(
rows
)
}
async delete(filt, poptions) {
let [filter, options, goon] = await this.beforeDelete(filt, poptions)
if (! goon) {
const msg = this.tablename + ': Cannot delete for filter ' + JSON.stringify(filt)
this.db.log.warn(msg)
return 0
}
const wtuple = this._objToTuple(filter)
const wfields = wtuple[0]
const wvalues = wtuple[1]
let swhere = ''
if (wfields.length > 0)
swhere = ' WHERE ' + wfields.map((f, i) => f + ' = $' + (i + 1)).join(' AND ')
const query = `WITH rows as (DELETE FROM ${this.tablename} ${swhere} RETURNING 1) SELECT count(*) AS cnt FROM rows`
const ndata= await this.db.select_one(query, wvalues, options)
const count= await this.afterDelete(ndata.cnt, filter, options)
if (options?.log!==false) {
this.db.log.debug(`Deleted ${count} records`)
}
return count
}
}
export default ModelPG |
// Application root object wiring together the server and controller layers.
// NOTE(review): Server and Controller are assumed to be globals defined in
// other files loaded before this one — confirm load order.
var App = function () {
  this.server = new Server(this);
  this.controller = new Controller(this);
};
|
#!/usr/bin/env bash
# This script runs a RISC-V assembly test in RTL simulation at the three
# supported abstraction levels and captures the necessary portions of the log
# to calculate simulation rates
#
# Abstraction levels:
# Target -> Just the target RTL
# MIDAS -> The target post-transformations, fpga-hosted models & widgets
# FPGA -> The whole RTL design pre-synthesis
#
# This requires a VCS license.
# Berkeley users: If running on millenium machines, source scripts/setup_vcsmx_env.sh

# The ISA test to run
TEST=rv64ui-v-add
#TEST=rv64ui-p-simple
# The file into which we dump all the relevant pieces of simulation log. Some
# post-processing is still required.
REPORT_FILE=$(pwd)/runtime.rpt
MAKE_THREADS=4
# Run from the repository root regardless of where the script was invoked.
cd $(dirname $0)/..
firesim_root=$(pwd)
# Prebuilt ISA test binary from the riscv-tests installation under $RISCV.
test_path=$RISCV/riscv64-unknown-elf/share/riscv-tests/isa/$TEST
echo -e "FireSim RTL Simulation Execution Rates\n" > $REPORT_FILE
################################################################################
# TARGET level
################################################################################
export DESIGN=FireSimNoNIC
export TARGET_CONFIG=FireSimRocketChipConfig
export PLATFORM_CONFIG=BaseF1Config
export SIM_ARGS=+verbose
# Format used by /usr/bin/time for all timed invocations below.
export TIME="%C %E real, %U user, %S sys"
# Build and run the Verilator target-level simulator at three optimization
# levels, with and without waveform dumping.
for optlevel in 0 1 2
do
    echo -e "\nVerilator TARGET-level Simulation, -O${optlevel}\n" >> $REPORT_FILE
    ## Verilator
    cd $firesim_root/target-design/chipyard/verisim
    sim=simulator-example-DefaultExampleConfig
    # Hack... (rewrites the optimization flag in the Makefile in place)
    sed -i "s/-O[0-3]/-O${optlevel}/" Makefile
    make clean
    /usr/bin/time -a -o $REPORT_FILE make
    /usr/bin/time -a -o $REPORT_FILE make debug
    echo -e "\nNo Waves\n" >> $REPORT_FILE
    /usr/bin/time -a -o $REPORT_FILE ./$sim $SIM_ARGS $test_path &> nowaves.log
    tail nowaves.log >> $REPORT_FILE
    /usr/bin/time -a -o $REPORT_FILE ./$sim-debug $SIM_ARGS -vtest.vcd $test_path &> waves.log
    echo -e "\nWaves Enabled\n" >> $REPORT_FILE
    tail waves.log >> $REPORT_FILE
done
# Same measurements with VCS instead of Verilator.
echo -e "\nTarget-level VCS\n" >> $REPORT_FILE
cd $firesim_root/target-design/chipyard/vsim/
sim=simv-example-DefaultExampleConfig
/usr/bin/time -a -o $REPORT_FILE make -j$MAKE_THREADS
/usr/bin/time -a -o $REPORT_FILE make -j$MAKE_THREADS debug
echo -e "\nNo Waves\n" >> $REPORT_FILE
/usr/bin/time -a -o $REPORT_FILE ./$sim $SIM_ARGS $test_path &> nowaves.log
tail nowaves.log >> $REPORT_FILE
echo -e "\nWaves Enabled\n" >> $REPORT_FILE
/usr/bin/time -a -o $REPORT_FILE ./$sim-debug $SIM_ARGS $test_path &> waves.log
tail waves.log >> $REPORT_FILE
################################################################################
## MIDAS level
################################################################################
ml_output_dir=$firesim_root/sim/output/f1/$DESIGN-$TARGET_CONFIG-$PLATFORM_CONFIG
test_symlink=$ml_output_dir/$TEST
# Build and run the MIDAS-level simulators (Verilator and VCS) at three
# optimization levels; the make targets on the test symlinks drive the runs.
for optlevel in 0 1 2
do
    echo -e "\nMIDAS-level Simulation, -O${optlevel}\n" >> $REPORT_FILE
    cd $firesim_root/sim
    make clean
    make -j$MAKE_THREADS
    /usr/bin/time -a -o $REPORT_FILE make -j$MAKE_THREADS VERILATOR_CXXOPTS=-O${optlevel} verilator
    /usr/bin/time -a -o $REPORT_FILE make -j$MAKE_THREADS VERILATOR_CXXOPTS=-O${optlevel} verilator-debug
    /usr/bin/time -a -o $REPORT_FILE make -j$MAKE_THREADS VCS_CXXOPTS=-O${optlevel} vcs
    /usr/bin/time -a -o $REPORT_FILE make -j$MAKE_THREADS VCS_CXXOPTS=-O${optlevel} vcs-debug
    mkdir -p $ml_output_dir
    # Symlink it twice so we have unique targets for vcs and verilator
    ln -sf $test_path $ml_output_dir/$TEST
    ln -sf $test_path $ml_output_dir/$TEST-vcs
    echo -e "\nWaves Off, -O${optlevel}\n" >> $REPORT_FILE
    make EMUL=vcs ${test_symlink}-vcs.out
    make ${test_symlink}.out
    grep -Eo "simulation speed = .*" $ml_output_dir/*out >> $REPORT_FILE
    echo -e "\nWaves On, -O${optlevel}\n" >> $REPORT_FILE
    make EMUL=vcs ${test_symlink}-vcs.vpd
    make ${test_symlink}.vpd
    grep -Eo "simulation speed = .*" $ml_output_dir/*out >> $REPORT_FILE
done
################################################################################
# FPGA level
################################################################################
# Unlike the other levels, the driver and dut communicate through pipes
cd $firesim_root/sim
echo -e "\nFPGA-level XSIM - Waves On\n" >> $REPORT_FILE
make xsim
# The DUT runs in the background; its log is polled below to detect startup.
make xsim-dut | tee dut.out &
# Wait for the dut to come up; Compilation time is long.
# (Busy-wait: polls the DUT log once a second for the driver pipe name.)
while [[ $(grep driver_to_xsim dut.out) == '' ]]; do sleep 1; done
make run-xsim SIM_BINARY=$test_path &> driver.out
# These are too slow for the reported simulation rate to be non-zero; so tail
tail driver.out >> $REPORT_FILE
echo -e "\nFPGA-level VCS - Waves On\n" >> $REPORT_FILE
make xsim
make xsim-dut VCS=1 | tee vcs-dut.out &
# Wait for the dut to come up; Compilation time is long.
while [[ $(grep driver_to_xsim vcs-dut.out) == '' ]]; do sleep 1; done
make run-xsim SIM_BINARY=$test_path &> vcs-driver.out
# These are too slow for the reported simulation rate to be non-zero; so tail
tail vcs-driver.out >> $REPORT_FILE
|
def longer_than(usernames, max_length):
    """Return the usernames whose length is strictly greater than max_length.

    Args:
        usernames: iterable of username strings.
        max_length: length threshold (exclusive).

    Returns:
        A list of the qualifying usernames, in their original order.
    """
    return [username for username in usernames if len(username) > max_length]


if __name__ == "__main__":
    # Demo data: the original referenced undefined module-level names
    # (`usernames`, `max_length`), raising NameError when run.
    usernames = ["sam", "ann", "bob123", "zackery12"]
    max_length = 5
    print(longer_than(usernames, max_length))  # ["bob123", "zackery12"]
#!/bin/sh
#
# Daniele Brugnara
#
# usage:
# meteor mongo xyz.meteor.com --url | ./do.sh
#
# Reads the mongodb:// URL produced by `meteor mongo --url` from stdin.
read mongo_auth
# Database name is the 4th slash-separated component of the URL.
db_name=`echo $mongo_auth | awk '{split($0,array,"/")} END{print array[4]}'`
# Extracts "username:password:host:" by slicing around the "client" token.
# NOTE(review): this parsing is fragile — tr '//' is equivalent to tr '/', it
# assumes the username contains "client", and it assumes the URL carries no
# explicit port. Verify against current `meteor mongo --url` output.
ar=`echo $mongo_auth | tr '//' '\n' | grep client | tr ':' '\n' | head -n 2 | tr '@' '\n' | tr '\n' ':'`
username=`echo $ar | awk '{split($0,array,":")} END{print array[1]}'`
password=`echo $ar | awk '{split($0,array,":")} END{print array[2]}'`
host=`echo $ar | awk '{split($0,array,":")} END{print array[3]}'`
# echo $host
# echo $username
# echo $password
# echo $db_name
# Port is hard-coded to 27017 — confirm it matches the deployment.
mongodump -h $host --port 27017 --username $username --password $password --excludeCollectionsWithPrefix=system -d $db_name
|
<filename>vendor/bitbucket.org/atlassian/go-asap/keyprovider/keyprovider.go
// Package keyprovider defines the key-lookup interfaces used for ASAP token
// signing and verification.
package keyprovider

import "crypto"

// PublicKeyProvider provides public keys given a keyID.
type PublicKeyProvider interface {
	GetPublicKey(keyID string) (crypto.PublicKey, error)
}

// PrivateKeyProvider provides a fixed private key.
type PrivateKeyProvider interface {
	GetPrivateKey() (crypto.PrivateKey, error)
}
|
<gh_stars>0
// Barrel file re-exporting the activity-entry components and modules so
// consumers can import them from a single path.
export {ActivityEntryIndexComponent} from './index/activity_entry_index.component';
export * from './training';
export {TrainingResultComponent} from './training_result';
export * from './entries.component';
export * from './entry.component';
|
def foo(x, y):
    """Return the larger of ``x`` and ``y`` (``x`` when they are equal)."""
    # Conditional expression replaces the original if/else block; same result.
    return x if x >= y else y


assert foo(2, 4) == 4
<filename>test/api/polling.spec.js
import { expect } from 'chai';
import sinon from 'sinon';
import Polling from '~/api/polling';
describe('Polling', function () {
  const sandbox = sinon.sandbox.create();
  let apiRequestFn;
  let polling;

  // Fresh spy and Polling instance per test; sandbox restored afterwards.
  beforeEach(function () {
    apiRequestFn = sandbox.spy();
    polling = Polling({
      apiRequestFn: apiRequestFn,
    });
  });
  afterEach(function () {
    sandbox.restore();
  });

  it('defines a simple function constructor', function () {
    expect(Polling).to.exist;
    expect(polling).to.exist;
    expect(typeof polling).to.equal('object');
  });

  it('defines an interface', function () {
    expect(polling.start).to.exist;
    expect(polling.stop).to.exist;
  });

  it('will execute a provided function', function () {
    this.timeout(50);
    // NOTE(review): this test function takes no mocha `done` argument, so it
    // completes synchronously and never actually waits for apiRequestFn to
    // run — confirm whether the async path is really being exercised.
    // It also assumes Polling invokes apiRequestFn with a completion
    // callback; verify against the Polling implementation.
    const options = {
      apiRequestFn: async function (done) {
        polling.stop();
        done();
      },
      timeout: 10,
    };
    polling = Polling(options);
    polling.start();
  });
});
|
#!/bin/bash
# Adapted from https://github.com/wnameless/docker-oracle-xe-11g
# Thank you Wei-Ming Wu <wnameless@gmail.com>!

# Render listener.ora from its template with this container's hostname and
# the default listener port, then start Oracle XE.
LISTENERS_ORA=/u01/app/oracle/product/11.2.0/xe/network/admin/listener.ora
cp "${LISTENERS_ORA}.tmpl" "$LISTENERS_ORA" &&
sed -i "s/%hostname%/$HOSTNAME/g" "${LISTENERS_ORA}" &&
sed -i "s/%port%/1521/g" "${LISTENERS_ORA}" &&
service oracle-xe start
export ORACLE_HOME=/u01/app/oracle/product/11.2.0/xe
# Seed the database with the application schema/data.
$ORACLE_HOME/bin/sqlplus system/oracle@localhost < /opt/app/init.sql
cd /opt/app
# Relocate node_modules out of the (possibly bind-mounted) app directory,
# symlink it back, then rebuild native modules inside this container.
mkdir ~/app
mv node_modules ~/app
ln -s ~/app/node_modules .
npm rebuild
npm install oracledb
|
<gh_stars>1-10
import {Readable} from 'stream';
import toArray from 'stream-to-array';
import unzipper, {OnFileCallback} from './unzipper';
// Zip file that contains a single file called 'sample.txt' with the text 'This is a sample file.\n'.
const zipFileBuffer = Buffer.from([
80, 75, 3, 4, 10, 0, 0, 0, 0, 0, 92, 113, 46, 80, 44, 4, 208, 177, 23, 0, 0, 0, 23, 0, 0, 0, 10, 0, 28, 0, 115, 97, 109, 112, 108, 101, 46, 116,
120, 116, 85, 84, 9, 0, 3, 80, 32, 30, 94, 84, 32, 30, 94, 117, 120, 11, 0, 1, 4, 232, 3, 0, 0, 4, 232, 3, 0, 0, 84, 104, 105, 115, 32, 105, 115,
32, 97, 32, 115, 97, 109, 112, 108, 101, 32, 102, 105, 108, 101, 46, 10, 80, 75, 1, 2, 30, 3, 10, 0, 0, 0, 0, 0, 92, 113, 46, 80, 44, 4, 208, 177,
23, 0, 0, 0, 23, 0, 0, 0, 10, 0, 24, 0, 0, 0, 0, 0, 1, 0, 0, 0, 180, 129, 0, 0, 0, 0, 115, 97, 109, 112, 108, 101, 46, 116, 120, 116, 85, 84, 5, 0,
3, 80, 32, 30, 94, 117, 120, 11, 0, 1, 4, 232, 3, 0, 0, 4, 232, 3, 0, 0, 80, 75, 5, 6, 0, 0, 0, 0, 1, 0, 1, 0, 80, 0, 0, 0, 91, 0, 0, 0, 0, 0,
]);
describe('unzipper', () => {
  it('should process files in a zip file', async () => {
    const readStream = new Readable();
    // eslint-disable-next-line no-underscore-dangle
    readStream._read = () => {};
    readStream.push(zipFileBuffer);
    readStream.push(null);
    // The callback receives each archive entry; verify name and contents.
    const callback: OnFileCallback = ({mimetype, filename, stream}): Promise<void> =>
      new Promise(resolve => {
        expect(mimetype).toBe(undefined);
        expect(filename).toBe('sample.txt');
        toArray(stream, (err, arr) => {
          const fileContents = Buffer.from(arr[0]).toString('utf8');
          expect(fileContents).toBe('This is a sample file.\n');
          resolve();
        });
      });
    await unzipper(readStream, callback);
  });

  it('should fail processing a file that is not a zip file', async () => {
    const readStream = new Readable();
    // eslint-disable-next-line no-underscore-dangle
    readStream._read = () => {};
    readStream.push(Buffer.from([75, 80, 2])); // Something that is NOT a zip file
    readStream.push(null);
    // The callback must never run for an invalid archive. The original reused
    // the success-path callback here, whose assertions were dead code; this
    // stub fails loudly if it is ever invoked.
    const callback: OnFileCallback = (): Promise<void> =>
      Promise.reject(new Error('callback should not be called for an invalid zip file'));
    // Exactly one assertion: the error check in the catch block below.
    expect.assertions(1);
    try {
      await unzipper(readStream, callback);
    } catch (e) {
      expect(e).toEqual(new Error('end of central directory record signature not found'));
    }
  });
});
|
/*
* Copyright 2002-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.spring.nohttp;
import java.util.regex.Pattern;
/**
 * Utility for creating {@link HttpReplacer} and {@link HttpMatcher} that find Gradle's
 * DSLs that use http (i.e. mavenCentral() and jcenter()).
 *
 * Since the API is not aware of external files, neither the {@link HttpReplacer} nor the
 * {@link HttpMatcher} are aware of what Gradle version is being used, so users should
 * first detect that (i.e. using {@link io.spring.nohttp.file.PreGradle21Scanner})
 *
 * @author <NAME>
 */
public abstract class GradleHttpDsl {

	/**
	 * Creates an {@link HttpReplacer} that replaces Gradle's DSLs that use http
	 * (i.e. mavenCentral() and jcenter()) with explicit https repository blocks.
	 * @return the replacer to use
	 */
	public static HttpReplacer createReplacer() {
		return createGradleDslMatcher();
	}

	/**
	 * Creates an {@link HttpMatcher} that finds Gradle's DSLs that use http
	 * (i.e. mavenCentral() and jcenter()).
	 * @return the matcher to use
	 */
	public static HttpMatcher createMatcher() {
		return createGradleDslMatcher();
	}

	// Shared implementation: a single regex matcher that both finds the DSL
	// calls and maps each one to its explicit https repository declaration.
	private static RegexHttpMatcher createGradleDslMatcher() {
		RegexHttpMatcher matcher = new RegexHttpMatcher(h -> false);
		matcher.setPattern(Pattern.compile("(mavenCentral\\(\\)|jcenter\\(\\))"));
		matcher.setHttpReplacer(http -> {
			if (http.equals("mavenCentral()")) {
				return "maven { url 'https://repo.maven.apache.org/maven2/' }";
			} else if (http.equals("jcenter()")) {
				return "maven { url 'https://jcenter.bintray.com/' }";
			} else {
				throw new IllegalArgumentException("Expected either mavenCentral() or jcenter() but got '" + http + "'");
			}
		});
		return matcher;
	}

	// Utility class; not meant to be instantiated.
	private GradleHttpDsl() {}
}
|
<gh_stars>1-10
/*
* %CopyrightBegin%
*
* Copyright Ericsson AB 2004-2010. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
*
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
*
* %CopyrightEnd%
*/
import com.ericsson.otp.erlang.*;
class Nodename {
    /*
      Implements test case jinterface_SUITE:nodename/1
    */
    public static void main(String argv[]) {
        // The expected host name is passed as the single command-line argument.
        String host = argv[0];
        try {
            OtpNode node = new OtpNode("javanode");
            System.out.println("Given host: " + host +
                               " Host: " + node.host() +
                               " Alive: " + node.alive() +
                               " Node: " + node.node());
            // The node name must decompose into alive part "javanode" and the
            // expected host; any mismatch exits with a distinct failure code.
            if (!node.host().equals(host)) fail(1);
            if (!node.alive().equals("javanode")) fail(2);
            if (!node.node().equals("javanode@" + host)) fail(3);
        }
        catch (Exception e) {
            System.out.println("" + e);
            fail(4);
        }
    }

    // Exits with a non-zero status; the Erlang test suite interprets the code.
    private static void fail(int reason) {
        System.exit(reason);
    }
}
|
<gh_stars>1-10
import * as d3 from 'd3'
import { capitalizedName } from '../Utilities/CommonUsedFunctions'
const segColor = ['#eb4d55', '#00BCD4']
// Renders a grouped horizontal bar chart (two bars per category) for a
// fixed set of budget expense categories, plus a y axis and a legend.
export default class BudgetChartD3 {
  // element: container DOM node that the <svg> is appended to.
  // data: two series objects; each is assumed to expose a `values` array
  // (one number per label below) and a `label` string used by the legend
  // -- assumed from usage in this constructor; confirm against callers.
  constructor (element, data) {
    const budgetData = {
      labels: [
        'Employee', 'Advertising', 'Gifts',
        'Hospitality', 'Office', 'Printing', 'Travel'
      ],
      series: [data[0], data[1]]
    }
    // Layout constants, in pixels.
    const chartWidth = 800
    const barHeight = 30
    const groupHeight = barHeight * budgetData.series.length
    const gapBetweenGroups = 15
    const spaceForLabels = 150
    const spaceForLegend = 200
    // Zip the series data together (first values, second values, etc.)
    const zippedData = []
    for (let i = 0; i < budgetData.labels.length; i++) {
      for (let j = 0; j < budgetData.series.length; j++) {
        zippedData.push(budgetData.series[j].values[i])
      }
    }
    const chartHeight = barHeight * zippedData.length + gapBetweenGroups * budgetData.labels.length
    // Horizontal scale for bar widths; the range starts at 90 so even the
    // smallest value produces a visible bar.
    const x = d3.scaleLinear()
      .domain([0, d3.max(zippedData)])
      .range([90, chartWidth])
    const y = d3.scaleLinear()
      .range([chartHeight + gapBetweenGroups, 0])
    // Axis with no tick labels or tick marks -- draws only the axis line.
    const yAxis = d3.axisLeft(y).tickFormat('')
      .tickSize(0)
    // Specify the chart area and dimensions
    const chart = d3.select(element).append('svg')
      .attr('width', spaceForLabels + chartWidth + spaceForLegend)
      .attr('height', chartHeight)
    // Create bars: one <g> per zipped value, offset per group.
    const bar = chart.selectAll('g')
      .data(zippedData)
      .enter().append('g')
      .attr('transform', function (d, i) {
        return 'translate(' + spaceForLabels + ',' + (i * barHeight + gapBetweenGroups * (0.5 + Math.floor(i / budgetData.series.length))) + ')'
      })
    let prevIndex = 0
    // Create rectangles of the correct width
    // NOTE(review): the mouseover/mouseleave handlers declare parameters in
    // different orders ('actual, i' vs 'i, d'); in d3 v6+ handlers receive
    // (event, datum), so neither 'i' is a plain index -- confirm against the
    // d3 version in use before relying on prevIndex.
    bar.append('rect')
      .attr('fill', function (d, i) {
        return segColor[i % budgetData.series.length]
      })
      .attr('class', 'bar')
      .attr('width', x)
      .attr('height', barHeight - 1)
      .on('mouseover', function (actual, i) {
        d3.select(this).attr('fill', 'red')
        prevIndex = i
      })
      .on('mouseleave', function (i, d) {
        d3.select(this).attr('fill', (d, i) => {
          return segColor[prevIndex % budgetData.series.length]
        })
      })
    // Add text label in bar (the raw value, right-aligned inside the bar).
    bar.append('text')
      .attr('x', function (d) {
        return x(d) - 66
      })
      .attr('y', barHeight / 2)
      .style('fill', 'white')
      .attr('dy', '.35em')
      .text(function (d) {
        return d
      })
    // Draw labels: only the first bar of each group shows the category name.
    bar.append('text')
      .attr('class', 'label')
      .attr('x', function (d) {
        return -110
      })
      .attr('y', groupHeight / 2)
      .attr('dy', '.35em')
      .text(function (d, i) {
        if (i % budgetData.series.length === 0) { return budgetData.labels[Math.floor(i / budgetData.series.length)] } else { return '' }
      })
    chart.append('g')
      .attr('class', 'y axis')
      .attr('transform', 'translate(' + spaceForLabels + ', ' + -gapBetweenGroups / 2 + ')')
      .call(yAxis)
    // Draw legend: one colored swatch plus label per series.
    const legendRectSize = 18
    const legendSpacing = 4
    const legend = chart.selectAll('.legend')
      .data(budgetData.series)
      .enter()
      .append('g')
      .attr('transform', function (d, i) {
        const height = legendRectSize + legendSpacing
        const offset = -gapBetweenGroups / 2
        const horz = spaceForLabels + chartWidth + 40 - legendRectSize
        const vert = i * height - offset
        return 'translate(' + horz + ',' + vert + ')'
      })
    legend.append('rect')
      .attr('width', legendRectSize)
      .attr('height', legendRectSize)
      .style('fill', function (d, i) {
        return segColor[i]
      })
      .style('stroke', function (d, i) {
        return 'black'
      })
    legend.append('text')
      .attr('class', 'legend')
      .attr('x', legendRectSize + legendSpacing)
      .attr('y', legendRectSize - legendSpacing)
      .text(function (d) {
        return capitalizedName(d.label)
      })
  }
}
|
package com.banana.volunteer.aspect;
import com.banana.volunteer.enums.ResultEnum;
import com.banana.volunteer.exception.NoLoginException;
import com.banana.volunteer.holder.UserOrgHolder;
import com.banana.volunteer.holder.UserStatusHolder;
import com.banana.volunteer.service.CheckLoginService;
import org.aspectj.lang.ProceedingJoinPoint;
import org.aspectj.lang.annotation.Around;
import org.aspectj.lang.annotation.Aspect;
import org.aspectj.lang.annotation.Pointcut;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
/**
 * AOP aspect guarding privileged controller endpoints: controller methods
 * whose names end in "ADMIN" or "ROOT" only run after the corresponding
 * login check passes, and thread-local user state is cleared after a
 * successful invocation.
 */
@Aspect
@Component
public class VerificationAspect {

    @Autowired
    private CheckLoginService checkLoginService;

    /** Matches every controller method whose name ends with "ADMIN". */
    @Pointcut("execution(* com.banana.volunteer.controller.*.*ADMIN(..))")
    public void checkAdmin() {
    }

    /** Matches every controller method whose name ends with "ROOT". */
    @Pointcut("execution(* com.banana.volunteer.controller.*.*ROOT(..))")
    public void checkRoot() {
    }

    /**
     * 1. Verify the caller is an administrator (or higher).
     */
    @Around("checkAdmin()")
    public Object verification_ADMIN(ProceedingJoinPoint joinPoint) throws Throwable {
        // Not logged in with sufficient rights: abort before the controller runs.
        if (!checkLoginService.verification_ADMIN()) {
            throw new NoLoginException(ResultEnum.LOGIN_ERROR);
        }
        return proceedAndClearHolders(joinPoint);
    }

    /**
     * 2. Verify the caller is the super administrator (root).
     */
    @Around("checkRoot()")
    public Object verification_ROOT(ProceedingJoinPoint joinPoint) throws Throwable {
        // Not logged in with sufficient rights: abort before the controller runs.
        if (!checkLoginService.verification_ROOT()) {
            throw new NoLoginException(ResultEnum.LOGIN_ERROR);
        }
        return proceedAndClearHolders(joinPoint);
    }

    /**
     * Runs the intercepted controller method, then clears the thread-local
     * user holders. Preserves the original behavior: holders are cleared
     * only when proceed() returns normally. NOTE(review): if holders must
     * also be cleared when the controller throws, wrap this in try/finally.
     */
    private Object proceedAndClearHolders(ProceedingJoinPoint joinPoint) throws Throwable {
        Object result = joinPoint.proceed();
        UserStatusHolder.remove();
        UserOrgHolder.remove();
        return result;
    }
}
|
#!/usr/bin/env node
// Tiny CLI wrapper around child_process.spawn: reads --cmd, --args and
// --opt from the command line (parsed by optimist) and runs the command,
// printing placeholder messages instead of relaying the child's output.
var util = require('util'),   // NOTE(review): required but never used
    spawn = require('child_process').spawn;
var argv = require('optimist').argv;

var cmd = argv.cmd;
var args = argv.args
var option = argv.opt

// NOTE(review): if --args or --opt is omitted, `undefined` ends up in the
// argument array passed to spawn -- confirm callers always pass both flags.
var ls = spawn(cmd , [args ,option]);

ls.stdout.on('data', function (data) {
    // NOTE(review): '!data || !!data' is a tautology; this logs for every
    // stdout chunk the child produces.
    if (!data || !!data) console.log(' i believe it');
});

ls.stderr.on('data', function (data) {
    console.log("It\'s a miracle!");
});

ls.on('exit', function (code) {
    console.log("it.justHappened\()");
});
|
#!/bin/bash
# SLURM batch script: runs one sequence-tagging (argument mining) training
# job from the Act_func experiments, using a relu activation function.
#SBATCH -J Act_relu_1
#SBATCH --mail-user=eger@ukp.informatik.tu-darmstadt.de
#SBATCH --mail-type=FAIL
#SBATCH -e /work/scratch/se55gyhe/log/output.err.%j
#SBATCH -o /work/scratch/se55gyhe/log/output.out.%j
#SBATCH -n 1 # Number of cores
#SBATCH --mem-per-cpu=6000
#SBATCH -t 23:59:00 # Hours, minutes and seconds, or '#SBATCH -t 10' -only mins

#module load intel python/3.5
# Positional arguments are hyperparameters consumed by PE-my.py (activation,
# size, optimizer, and further numeric settings) -- their exact meaning is
# defined by PE-my.py; confirm there before changing them.
python3 /home/se55gyhe/Act_func/sequence_tagging/arg_min/PE-my.py relu 340 Adadelta 3 0.4930319417522221 0.7644108722985884 id 0.3
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.jena.sparql.core;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.apache.jena.query.Dataset ;
import org.apache.jena.query.DatasetFactory ;
import org.apache.jena.query.ReadWrite ;
import org.apache.jena.sparql.core.DatasetGraphFactory ;
import org.apache.jena.sparql.core.DatasetGraphWithLock ;
import org.junit.Assert;
import org.junit.Test;
/**
 * Concurrency tests for {@link DatasetGraphWithLock}: several threads hold
 * READ transactions simultaneously and must all finish within a bounded
 * wait, i.e. readers are expected not to serialize against one another.
 */
public class TestDatasetGraphWithLock extends AbstractTestDataset {
    @Override
    protected Dataset createDataset() {
        // Wrap a fresh in-memory DatasetGraph in the lock-based implementation.
        return DatasetFactory.wrap(new DatasetGraphWithLock(DatasetGraphFactory.create()));
    }

    /**
     * Two threads each hold a READ transaction for ~3 seconds; both futures
     * are awaited with a 4-second timeout. This can only pass if the two
     * read transactions overlap rather than run one after the other --
     * NOTE(review): the 1s margin assumes prompt thread scheduling; may be
     * fragile on heavily loaded CI hosts.
     */
    @Test
    public synchronized void dsg_with_lock_concurrency_01() throws InterruptedException, ExecutionException, TimeoutException {
        ExecutorService executor = Executors.newFixedThreadPool(2);
        try {
            final DatasetGraphWithLock dsg = new DatasetGraphWithLock(DatasetGraphFactory.create());
            Callable<Boolean> callable = new Callable<Boolean>() {
                @Override
                public Boolean call() {
                    dsg.begin(ReadWrite.READ);
                    // Hold the lock for a few seconds
                    try {
                        Thread.sleep(3000);
                    } catch (InterruptedException e) {
                        // Ignore error
                    }
                    dsg.commit();
                    return true;
                }
            };
            // Fire off two threads
            Future<Boolean> f1 = executor.submit(callable);
            Future<Boolean> f2 = executor.submit(callable);
            Assert.assertTrue(f1.get(4, TimeUnit.SECONDS));
            Assert.assertTrue(f2.get(4, TimeUnit.SECONDS));
        } finally {
            executor.shutdownNow();
        }
    }

    /**
     * 100 READ transactions, each held ~500ms, submitted to a cached thread
     * pool; each future is awaited with a 3-second timeout. Assumes the pool
     * runs enough readers in parallel for every task to finish in time --
     * TODO confirm this is not timing-sensitive on slow machines.
     */
    @Test
    public synchronized void dsg_with_lock_concurrency_02() throws InterruptedException, ExecutionException, TimeoutException {
        ExecutorService executor = Executors.newCachedThreadPool();
        try {
            final DatasetGraphWithLock dsg = new DatasetGraphWithLock(DatasetGraphFactory.create());
            Callable<Boolean> callable = new Callable<Boolean>() {
                @Override
                public Boolean call() {
                    dsg.begin(ReadWrite.READ);
                    // Hold the lock for a few seconds
                    try {
                        Thread.sleep(500);
                    } catch (InterruptedException e) {
                        // Ignore error
                    }
                    dsg.commit();
                    return true;
                }
            };
            // Run the callable a bunch of times
            List<Future<Boolean>> futures = new ArrayList<>();
            for (int i = 0; i < 100; i++) {
                futures.add(executor.submit(callable));
            }
            // Check all the futures come back OK
            for (Future<Boolean> f : futures) {
                Assert.assertTrue(f.get(3, TimeUnit.SECONDS));
            }
        } finally {
            executor.shutdownNow();
        }
    }
}
|
<reponame>weareopensource/roMEANet-SOOS<filename>modules/core/client/directives/page-title.client.directive.js
(function() {
  'use strict';

  // 'page-title' attribute directive: rebuilds the element's content from
  // the current ui-router state name on every successful state change.
  // kind="header" renders the parts wrapped in per-index <span>s joined by
  // " / "; any other kind joins the capitalized parts with " - ".
  angular.module('core')
    .directive('pageTitle', pageTitle);

  pageTitle.$inject = ['$rootScope', '$interpolate', '$state'];

  function pageTitle($rootScope, $interpolate, $state) {
    var directive = {
      restrict: 'A',
      scope: {
        kind: '=kind'
      },
      link: link
    };

    return directive;

    function link(scope, element, attribute) {
      // Legacy ui-router event; fires after each state transition completes.
      $rootScope.$on('$stateChangeSuccess', listener);

      function listener(event, toState) {
        var applicationCoreTitle = 'Home',
          separeteBy = '',
          stateTitle = '';
        switch (attribute.kind) {
          case 'header':
            separeteBy = ' / ';
            break;
          case 'page':
            separeteBy = ' - ';
            break;
          default:
            separeteBy = ' - ';
        }
        // NOTE(review): relies on a global lodash '_' -- it is not injected here.
        _.forEach(toState.name.split('.'), function(value, index) {
          if (attribute.kind === 'header') {
            stateTitle = stateTitle + '<span class="topTitlePart' + index + '">' + _.capitalize(value) + '</span>' + separeteBy;
          } else {
            stateTitle = stateTitle + _.capitalize(value) + separeteBy;
          }
        });
        // Optionally append a per-state title, interpolated against the
        // resolved state globals.
        if (toState.data && toState.data.pageTitle) {
          stateTitle = $interpolate(stateTitle + toState.data.pageTitle + separeteBy)(($state.$current.locals.globals));
        }
        // Drop the trailing separator -- assumes every separator is 3 chars.
        stateTitle = stateTitle.slice(0, -3);
        element.html(stateTitle);
      }
    }
  }
}());
|
<reponame>Finaps/EventSourcing<gh_stars>1-10
/**
* A Cosmos DB stored procedure that bulk deletes documents for a given partitionId and aggregateId.<br/>
* Note: You may need to execute this sproc multiple times (depending whether the sproc is able to delete every document within the execution timeout limit).
*
* @function
* @param {string} containerId - The id of the container where the aggregate exists
* @param {string} partitionId - The partition id of the aggregate that is to be deleted
* @param {string} aggregateId - The aggregate id of the aggregate that is to be deleted
* @param {int} recordKind - The integer corresponding to the RecordKind value 'Event'
* @returns {Object.<number, boolean>} Returns an object with the two properties:<br/>
* deleted - contains a count of documents deleted<br/>
* continuation - a boolean whether you should execute the sproc again (true if there are more documents to delete; false otherwise).
*/
function deleteAllEvents(containerId, partitionId, aggregateId, recordKind) {
    var collection = getContext().getCollection();
    var collectionLink = collection.getSelfLink();
    var response = getContext().getResponse();
    var responseBody = {
        deleted: 0,
        continuation: true
    };

    // Validate input.
    // NOTE(review): '!recordKind' also rejects the value 0 -- confirm the
    // numeric RecordKind value for 'Event' can never be zero.
    if(!containerId) throw new Error('Invalid container id');
    if(!partitionId) throw new Error('Invalid partition id');
    if(!aggregateId) throw new Error('Invalid aggregate id');
    if(!recordKind) throw new Error('Invalid record kind');

    // NOTE(review): the query is built by raw string interpolation; this is
    // only safe while partitionId/aggregateId come from trusted callers.
    var query = `SELECT * FROM ${containerId} e WHERE e.PartitionId = '${partitionId}' AND e.AggregateId = '${aggregateId}' AND e.Kind = ${recordKind}`;

    // Get the current aggregate version -> Create reservation event -> Delete reservation event -> Query and delete all documents related to the aggregate
    getIndex();

    // Find the current version of the aggregate by getting the maximal Index of the events
    function getIndex() {
        var versionQuery = `SELECT Max(e.Index) AS Index FROM ${containerId} e WHERE e.PartitionId = '${partitionId}' AND e.AggregateId = '${aggregateId}' AND e.Kind = ${recordKind}`;
        var isAccepted = collection.queryDocuments(collectionLink, versionQuery, {}, createReservation);
    }

    // Create reservation event to prevent concurrency issues when deleting
    function createReservation(err, retrievedDocs, responseOptions) {
        if (err) throw err;
        // Next free index; -Infinity/undefined (no events) maps to 0.
        var index = retrievedDocs[0].Index >= 0 ? retrievedDocs[0].Index + 1 : 0;
        var isAccepted = collection.createDocument(
            collectionLink,
            {
                id: `Event|${aggregateId}[${index}]`,
                PartitionId: partitionId,
                AggregateId: aggregateId,
                Index: index,
                Kind: recordKind},
            {},
            deleteReservation);
    }

    // Delete reservation
    function deleteReservation(err, resource, responseOptions) {
        if (err) throw err;
        var isAccepted = collection.deleteDocument(resource._self, {}, (err, resource, responseOptions) => {
            if (err) throw err;
            // Continue with querying and deleting
            tryQueryAndDelete();
        });
    }

    // Recursively runs the query w/ support for continuation tokens.
    // Calls tryDelete(documents) as soon as the query returns documents.
    function tryQueryAndDelete(continuation) {
        var requestOptions = {continuation: continuation};
        var isAccepted = collection.queryDocuments(collectionLink, query, requestOptions, (err, retrievedDocs, responseOptions) => {
            if (err) throw err;
            if (retrievedDocs.length > 0) {
                // Begin deleting documents as soon as documents are returned form the query results.
                // tryDelete() resumes querying after deleting; no need to page through continuation tokens.
                // - this is to prioritize writes over reads given timeout constraints.
                tryDelete(retrievedDocs);
            } else if (responseOptions.continuation) {
                // Else if the query came back empty, but with a continuation token; repeat the query w/ the token.
                tryQueryAndDelete(responseOptions.continuation);
            } else {
                // Else if there are no more documents and no continuation token - we are finished deleting documents.
                responseBody.continuation = false;
                response.setBody(responseBody);
            }
        });

        // If we hit execution bounds - return continuation: true.
        if (!isAccepted) {
            response.setBody(responseBody);
        }
    }

    // Recursively deletes documents passed in as an array argument.
    // Attempts to query for more on empty array.
    function tryDelete(documents) {
        if (documents.length > 0) {
            // Delete the first document in the array.
            var isAccepted = collection.deleteDocument(documents[0]._self, {},(err, responseOptions) => {
                if (err) throw err;
                responseBody.deleted++;
                documents.shift();
                // Delete the next document in the array.
                tryDelete(documents);
            });

            // If we hit execution bounds - return continuation: true.
            if (!isAccepted) {
                response.setBody(responseBody);
            }
        } else {
            // If the document array is empty, query for more documents.
            tryQueryAndDelete();
        }
    }
}
|
package Model;
import java.util.List;
import java.util.Objects;
public class Route {
private String id;
private List<RouteIncludes> includesList;
private List<TransportPrice> priceList;
private List<String> departures0;
private List<String> departures1;
public String getId() {
return id;
}
public Route(String routeId) {
this.id = routeId;
}
public List<RouteIncludes> getIncludesList() {
return includesList;
}
public void setIncludesList(List<RouteIncludes> includesList) {
this.includesList = includesList;
}
public List<TransportPrice> getPriceList() {
return priceList;
}
public void setPriceList(List<TransportPrice> priceList) {
this.priceList = priceList;
}
public List<String> getDepartures0() {
return departures0;
}
public void setDepartures0(List<String> departures0) {
this.departures0 = departures0;
}
public List<String> getDepartures1() {
return departures1;
}
public void setDepartures1(List<String> departures1) {
this.departures1 = departures1;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Route route = (Route) o;
return id.equals(route.id);
}
@Override
public int hashCode() {
return Objects.hash(id);
}
@Override
public String toString() {
return id;
}
} |
import numpy as np
def fourier_inverse_curl(Bx, By, Bz, x, y, z, method='fourier', pad=True):
    """
    Invert curl with the pseudo-spectral method described in MacKay 2006.

    Parameters:
    Bx, By, Bz : numpy.ndarray
        Arrays representing the x, y, and z components of the vector field.
    x, y, z : numpy.ndarray
        Arrays representing the spatial coordinates.
    method : str, optional
        Method to use for inversion (default is 'fourier').
    pad : bool, optional
        Indicates whether to pad the input arrays (default is True).

    Returns:
    numpy.ndarray
        Result of the inverse curl calculation (once implemented).

    Raises:
    NotImplementedError
        Always, for now: the pseudo-spectral inversion has not been
        implemented. The previous version fell through to ``return result``
        with ``result`` never defined, which raised a confusing ``NameError``
        at runtime; this explicit error makes the stub status unmistakable.
    """
    raise NotImplementedError(
        "fourier_inverse_curl: the pseudo-spectral inversion (MacKay 2006) "
        "is not implemented yet"
    )
package com.infamous.zod.storage.converter;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
import com.infamous.zod.storage.model.StorageFile;
import com.infamous.zod.storage.model.StorageFileVO;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
class StorageFileConverterTest {
private StorageFileConverter m_converter;
@BeforeEach
public void setup() {
m_converter = new StorageFileConverter();
}
@Test
public void testConvertToVO() {
StorageFileVO vo = m_converter.toDTO(mockEntity());
assertNotNull(vo);
assertEquals("1", vo.getId());
assertTrue(vo.isEnabled());
assertEquals("file-name.txt", vo.getFileName());
}
@Test
public void testConvertToEntity() {
StorageFile entity = m_converter.toEntity(mockDTO());
assertNotNull(entity);
assertEquals("1", entity.getId());
assertTrue(entity.isEnabled());
assertEquals("file-name.txt", entity.getFileName());
}
private StorageFile mockEntity() {
return StorageFile.builder()
.id("1")
.enabled(true)
.fileName("file-name.txt")
.build();
}
private StorageFileVO mockDTO() {
return StorageFileVO.builder()
.id("1")
.enabled(true)
.fileName("file-name.txt")
.build();
}
} |
<gh_stars>0
"use strict";
import { Component, OnInit, OnDestroy, ElementRef, Input, Output, EventEmitter } from "@angular/core";
import { NgForm, Location } from "@angular/common";
import { Router, ActivatedRoute, Params } from "@angular/router";
import { Subscription } from "rxjs/Subscription";
import { User, UserService } from "../Services/users.service";
import { StandingData } from "../Services/standing.data.service";
import { AuthService } from "../Services/authentication.service";
import { RestoreService } from "../Services/restore.service";
// Registration/edit form for User accounts. Handles both creating a new
// user and -- when the route carries an :id parameter -- editing an
// existing one. The edited User lives inside RestoreService so it can be
// reverted/restored.
@Component ({
  templateUrl: "register.component.html",
  styleUrls: [ "register.component.css" ],
  providers: [ UserService, RestoreService ]
})
export class UserRegistrationComponent implements OnInit, OnDestroy {
  // Events for parent components; emitted values are wired by the template.
  @Output() canceled = new EventEmitter();
  @Output() saved = new EventEmitter();
  // The user under edit is stored in (and read from) RestoreService.
  @Input()
  set user (user: User) {
    this.restoreService.setItem(user);
  }
  get user () {
    return this.restoreService.getItem();
  }
  private password2: string;        // password confirmation field
  private roles: string[];          // selectable roles, from standing data
  private currentUser: User;        // the logged-in user
  private isAdmin: boolean;
  private sub: Subscription;        // route-params subscription
  private isEditingUser: boolean;   // true = edit mode, false = registration
  private errorMsg: string;
  private statusMsg: string;
  private active: boolean;          // toggled to force a form re-render
  public constructor(private authService: AuthService,
            private userService: UserService,
            private standingData: StandingData,
            private restoreService: RestoreService<User>,
            private location: Location,
            private element: ElementRef,
            private route: ActivatedRoute,
            private router: Router) {
    this.user = new User("", "", "");
  }
  // make sure two-way binding is working
  public get diagnostic(): string { return JSON.stringify(this.user); }
  // Reads the chosen image file as a data URL and shows it as the user's
  // photo. NOTE(review): `let target = EventTarget;` assigns the global
  // constructor and is unused -- looks like leftover code.
  public imgChange(event) {
    let target = EventTarget;
    let image = this.element.nativeElement.querySelector('.user-image-input');
    let reader: any = new FileReader();
    var self = this;
    reader.onload = function(e) {
      self.user.photo = e.target.result;
      image.src = self.user.photo;
    }
    reader.readAsDataURL(event.target.files[0]);
  } // imgChange()
  // NOTE(review): addUser/modifyUser complete asynchronously (subscribe),
  // so errorMsg is still "" when checked immediately below -- the success
  // branch always runs; confirm this navigation-on-timeout is intended.
  public onSubmit(): void {
    this.errorMsg = "";
    this.statusMsg = "";
    if (this.isEditingUser) {
      this.modifyUser(this.user);
    } else {
      this.addUser(this.user);
    }
    if (this.errorMsg) {
      this.active = true;
    } else {
      if (this.isEditingUser)
        setTimeout(() => this.location.back(), 1000);
      else
        setTimeout(() => this.router.navigate([ "/" ]), 1000);
    }
  } // onSubmit()
  public clearForm(): void {
    // features a temporary workaround while we await a proper form reset feature
    this.user = new User("" /* id */, "" /* username */, "" /* password */);
    this.password2 = "";
    this.active = false;
    setTimeout(() => this.active = true, 0);
  } // clearForm()
  // Creates the user via the service and updates form state from the reply.
  private addUser(user: User): void {
    this.userService.addUser(this.user)
      .subscribe( // we want an Observable returned
        userData => { // function to invoke for each element in the observable sequence
          this.user = userData;
          this.password2 = "";
          this.isAdmin = this.user.role === "admin";
          this.statusMsg = "User created successfully";
          this.active = false;
        },
        error => { // function to invoke on exceptional termination of the obs. sequence
          this.errorMsg = <any>error;
        },
        () => {
          // function to invoke upon graceful termination of the observable sequence
          console.info("UserRegistrationComponent.addUser()'s observable completed gracefully");
        });
  } // addUser()
  // Saves changes to an existing user via the service.
  private modifyUser(user: User): void {
    this.userService.putUser(this.user)
      .subscribe(
        userData => {
          this.user = userData;
          this.password2 = "";
          this.statusMsg = "User modified successfully";
          this.active = true;
        },
        error => {
          this.errorMsg = <any>error;
        },
        () =>{
          console.info("UserRegistrationComponent.modifyUser()'s observable completed gracefully");
        });
  } // modifyUser()
  // Decides create-vs-edit mode from the route params and loads the user
  // to edit when an :id is present.
  ngOnInit() {
    this.currentUser = this.authService.currentUser;
    this.isAdmin = this.authService.usrRole === "admin";
    this.isEditingUser = false;
    this.password2 = "";
    this.sub = this.route.params.subscribe(
      params => {
        let id = params["id"];
        if (id) {
          this.isEditingUser = true;
          this.userService.getUser(id).subscribe(
            userData => this.user = userData,
            error => this.errorMsg = <any>error
          );
        }
      }
    );
    // this.roles = [ "admin", "supervisor", "regular" ];
    this.roles = this.standingData.lists.roles;
    this.errorMsg = "";
    this.statusMsg = "";
    this.active = true;
  } // ngOnInit()
  ngOnDestroy() {
    if (this.sub)
      this.sub.unsubscribe();
  } // ngOnDestroy()
} // class UserRegistrationComponent
$(document).ready(function () {
  // Account-deletion confirmation box: hidden on load, then shown/hidden
  // on each click of the delete button.
  var $confirmation = $('#konfirmasi_hapus_akun');
  $confirmation.hide();
  $('#hapus_akun').on('click', function () {
    $confirmation.toggle();
  });
});
<reponame>kuun/shipwheel<gh_stars>1-10
package org.ship.core.resource.sys;
import org.ship.core.service.sys.ISysService;
import org.ship.core.vo.sys.ManAddr;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.RestController;
/**
* Created by wx on 2017/5/1.
*/
/**
 * REST resource exposing the system management address (ManAddr).
 * All work is delegated to {@link ISysService}.
 */
@RestController
@RequestMapping(value = "/ship/sys/manAddr")
public class ManAddrResource {
    @Autowired
    private ISysService sysService;

    /** GET /ship/sys/manAddr -- returns the current management address. */
    @RequestMapping(method = RequestMethod.GET)
    public ManAddr getManAddr() {
        return sysService.getManAddr();
    }

    /** PUT /ship/sys/manAddr -- replaces the management address with the request body. */
    @RequestMapping(method = RequestMethod.PUT)
    public ManAddr modManAddr(@RequestBody ManAddr manAddr) throws Exception {
        return sysService.modManAddr(manAddr);
    }
}
|
#!/usr/bin/env bash
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
source ./common.sh

# For every configured cluster (CLUSTERS comes from common.sh, entries of
# the form "<context>:<...>"): switch kubectl context, look up the public
# IP of the Istio ingress gateway, and curl the ZonePrinter service.
# Fixes: $(...) instead of deprecated backticks, quoted expansions so IPs
# and context names survive word splitting, and %s in printf so the
# context name is never treated as a format string.
for svc in "${CLUSTERS[@]}" ; do
  CTX="${svc%%:*}"   # context name is everything before the first ':'
  printf "\n\n Calling ZonePrinter on %s...\n" "$CTX"
  kubectx "$CTX"
  IP=$(kubectl get service/istio-ingressgateway -n istio-system -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
  curl "$IP"
done
|
<reponame>jlbuganan/learning-and-stuff
const fs = require('fs');

// Asynchronous read: fs.readFile returns undefined, so the original
// `const data = fs.readFile(...)` assignment was dead code whose name was
// also shadowed by the callback parameter -- removed to avoid confusion.
// The callback runs after 'More work' is logged below.
fs.readFile('./file.md', 'utf8', function (err, data) {
  if (err) {
    throw err;
  }
  console.log(data);
});

console.log('More work');
<gh_stars>10-100
#pragma once
#include "gui/objects/Color.h"
#include "gui/objects/Point.h"
#include "math/Curve.h"
#include "objects/containers/Array.h"
#include "objects/wrappers/Function.h"
#include "objects/wrappers/Optional.h"
#include <algorithm>
#include <wx/dcclient.h>
#include <wx/frame.h>
#include <wx/panel.h>
#include <wx/propgrid/editors.h>
#include <wx/propgrid/propgrid.h>
#include <wx/sizer.h>
NAMESPACE_SPH_BEGIN
/// Panel that draws a Curve and lets the user edit it with the mouse:
/// handlers exist for paint, left/right clicks and motion, and state is
/// kept for a grabbed point and for hover highlights.
class CurvePanel : public wxPanel {
private:
    Curve curve;

    // Last mouse position in window coordinates; wxDefaultPosition until set.
    wxPoint mousePosition = wxDefaultPosition;

    // Index of the point grabbed by the mouse, if a drag is in progress.
    Optional<Size> lockedIdx;

    // Point / segment currently highlighted (e.g. under the cursor), if any.
    Optional<Size> highlightIdx;
    Optional<Size> highlightSegment;

public:
    CurvePanel(wxWindow* parent);

    /// Replaces the curve being edited.
    void setCurve(const Curve& newCurve) {
        curve = newCurve;
    }

    /// Returns a copy of the edited curve.
    Curve getCurve() const {
        return curve;
    }

private:
    // Event handlers (presumably bound in the constructor -- the binding
    // is not visible in this header).
    void onPaint(wxPaintEvent& evt);
    void onLeftDown(wxMouseEvent& evt);
    void onLeftUp(wxMouseEvent& evt);
    void onRightUp(wxMouseEvent& evt);
    void onMouseMotion(wxMouseEvent& evt);

    // Pixel margin around the curve drawing area.
    const static int padding = 30;

    // Coordinate transforms between curve space and window space.
    template <typename TPoint, typename T = int>
    TPoint curveToWindow(const CurvePoint& p) const;
    CurvePoint windowToCurve(const wxPoint2DDouble p) const;

    // Hit tests: point index / segment index for the given mouse position.
    Optional<Size> getIdx(const wxPoint mousePos) const;
    Optional<Size> getSegment(const wxPoint mousePos) const;
};
/// wxPropertyGrid editor for curve-valued properties: creates the editing
/// controls, keeps them in sync with the property, draws the value cell,
/// and forwards grid events.
class CurveEditor : public wxPGEditor {
public:
    virtual wxPGWindowList CreateControls(wxPropertyGrid* propgrid,
        wxPGProperty* property,
        const wxPoint& pos,
        const wxSize& size) const override;
    virtual void UpdateControl(wxPGProperty* property, wxWindow* ctrl) const override;
    virtual void DrawValue(wxDC& dc,
        const wxRect& rect,
        wxPGProperty* property,
        const wxString& text) const override;
    virtual bool OnEvent(wxPropertyGrid* propgrid,
        wxPGProperty* property,
        wxWindow* wnd_primary,
        wxEvent& event) const override;
};
/// Property-grid entry holding a Curve value; edited via CurveEditor.
class CurveProperty : public wxStringProperty {
private:
    Curve curve;

public:
    CurveProperty(const wxString& label, const Curve& curve)
        : wxStringProperty(label, "curve")
        , curve(curve) {}

    virtual const wxPGEditor* DoGetEditorClass() const override {
        // Registered once ("MyEditor") and shared by all instances.
        static wxPGEditor* editor = wxPropertyGrid::RegisterEditorClass(new CurveEditor(), "MyEditor");
        return editor;
    }

    /// Replaces the stored curve value.
    void setCurve(const Curve& newCurve) {
        curve = newCurve;
    }

    const Curve& getCurve() const {
        return curve;
    }
};
/// Top-level frame hosting a curve editor; the supplied callback is
/// stored and invoked with the updated curve on changes.
class CurveDialog : public wxFrame {
private:
    Function<void(const Curve&)> curveChanged;

public:
    CurveDialog(wxWindow* parent, const Curve& curve, Function<void(const Curve&)> curveChanged);
};
NAMESPACE_SPH_END
|
#!/usr/bin/env bash
# Creates or deletes the sa-telemetry framework on an OpenShift cluster.
# Usage: ./script.sh [CREATE|DELETE]   (defaults to CREATE)
#
# Fixes vs. previous revision: quoted array expansions (paths with spaces
# no longer break), "-f <spec>" pairs built as arrays instead of a
# word-split string, and the "Deleteing" typo in the status messages.

# prerequisite checks
echo "-- Prerequisite checks"
command -v oc >/dev/null 2>&1 || { echo >&2 " * [XX] I require 'oc' but it's not installed. Aborting."; exit 1; }
echo " * [OK] Found 'oc' application"
oc status >/dev/null 2>&1 || { echo >&2 " * [XX] Not logged into an openshift cluster. Aborting."; exit 1; }
echo " * [OK] Logged into OpenShift cluster"
oc get project sa-telemetry >/dev/null 2>&1 || { echo >&2 " * [--] Project not found. Creating."; oc new-project sa-telemetry >/dev/null 2>&1; }
oc project sa-telemetry >/dev/null 2>&1
echo " * [OK] Switched to sa-telemetry project"

# setup our default method
method="CREATE"

# checking if we're deleting or creating
if [[ "$1" != "" ]]; then
    if [[ "$1" != "CREATE" && "$1" != "DELETE" ]]; then
        echo " * [XX] Must use a method of 'CREATE' or 'DELETE'"
        exit 0
    fi
    method="$1"
fi
echo "-- We are going to $method the telemetry framework"

# declare the list of objects we're going to build out
declare -a operator_list=(
    'operators/prometheus/service_account.yaml'
    'operators/prometheus/clusterrole.yaml'
    'operators/prometheus/clusterrolebinding.yaml'
    'operators/prometheus/operator.yaml'
    'operators/qdrouterd/crds/interconnectedcloud_v1alpha1_qdr_crd.yaml'
    'operators/qdrouterd/service_account.yaml'
    'operators/qdrouterd/role.yaml'
    'operators/qdrouterd/role_binding.yaml'
    'operators/qdrouterd/cluster_role.yaml'
    'operators/qdrouterd/cluster_role_binding.yaml'
    'operators/qdrouterd/operator.yaml'
    'operators/smartgateway/crds/smartgateway_v1alpha1_smartgateway_crd.yaml'
    'operators/smartgateway/service_account.yaml'
    'operators/smartgateway/role.yaml'
    'operators/smartgateway/role_binding.yaml'
    'operators/smartgateway/operator.yaml'
)

declare -a application_list=(
    'service-assurance/qdrouterd/qdrouterd-edge.yaml'
    'service-assurance/smartgateway/smartgateway.yaml'
    'service-assurance/prometheus/service_account.yaml'
    'service-assurance/prometheus/role.yaml'
    'service-assurance/prometheus/rolebinding.yaml'
    'service-assurance/prometheus/service_monitor.yaml'
    'service-assurance/prometheus/prometheus.yaml'
    'service-assurance/prometheus/route.yaml'
    'service-assurance/prometheusrules/prometheusrules.yaml'
    'service-assurance/alertmanager/service_account.yaml'
    'service-assurance/alertmanager/secret.yaml'
    'service-assurance/alertmanager/alertmanager.yaml'
    'service-assurance/alertmanager/service.yaml'
    'service-assurance/alertmanager/route.yaml'
)

# Create the given specs in order, passing them as "-f <spec>" pairs.
create() {
    local args=()
    for spec in "$@"; do
        args+=(-f "$spec")
    done
    oc create --save-config=true "${args[@]}"
}

# Delete the given specs in REVERSE order (dependents before dependencies).
delete() {
    local object_list=("$@")
    local args=()
    for (( idx=${#object_list[@]}-1 ; idx >= 0 ; idx-- )); do
        args+=(-f "${object_list[idx]}")
    done
    oc delete --wait=true "${args[@]}"
}

# create the objects
if [ "$method" == "CREATE" ]; then
    echo " * [ii] Creating the operators" ; create "${operator_list[@]}" && sleep 30
    echo " * [ii] Creating the application" ; create "${application_list[@]}"
fi

# delete the objects
if [ "$method" == "DELETE" ]; then
    echo " * [ii] Deleting the application" ; delete "${application_list[@]}" && sleep 5
    echo " * [ii] Deleting the operators" ; delete "${operator_list[@]}"
fi

echo "-- Completed."
exit 0
|
/**
 * Computes n! recursively, one Factorial instance per level.
 */
public class Factorial {
    // The value whose factorial this instance computes.
    private int number;

    public Factorial(int n) {
        number = n;
    }

    /**
     * Returns number!.
     *
     * Bug fix: the previous base case was {@code number == 1}, so any input
     * {@code <= 0} recursed forever and crashed with StackOverflowError.
     * Now 0! and 1! both return 1, and negative inputs are rejected.
     * Note: int overflows for inputs greater than 12.
     *
     * @return the factorial of the number passed to the constructor
     * @throws IllegalArgumentException if the number is negative
     */
    public int calculateFactorial() {
        if (number < 0) {
            throw new IllegalArgumentException("factorial is undefined for negative numbers: " + number);
        }
        if (number <= 1) {
            return 1;
        }
        return number * new Factorial(number - 1).calculateFactorial();
    }

    public static void main(String[] args) {
        int num = 4;
        System.out.println(new Factorial(num).calculateFactorial());
    }
}
<reponame>rickygv99/metagov-prototype<filename>metagov/metagov/core/views.py
import base64
import importlib
import json
import logging
from http import HTTPStatus
import jsonschema
import metagov.core.openapi_schemas as MetagovSchemas
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseNotFound, HttpResponseRedirect, JsonResponse
from django.shortcuts import redirect
from django.utils.decorators import decorator_from_middleware
from django.views.decorators.csrf import csrf_exempt
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
from metagov.core import utils
from metagov.core.errors import PluginAuthError
from metagov.core.middleware import CommunityMiddleware
from metagov.core.models import Community, Plugin, ProcessStatus
from metagov.core.openapi_schemas import Tags
from metagov.core.plugin_constants import AuthorizationType
from metagov.core.plugin_decorators import plugin_registry
from metagov.core.serializers import CommunitySerializer, GovernanceProcessSerializer, PluginSerializer
from requests.models import PreparedRequest
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.exceptions import APIException, ValidationError
from rest_framework.parsers import JSONParser
community_middleware = decorator_from_middleware(CommunityMiddleware)
logger = logging.getLogger(__name__)
def index(request):
    """Send visitors hitting the site root to the ReDoc API documentation."""
    return redirect("/redoc")
@swagger_auto_schema(
    method="post",
    operation_id="Create community",
    operation_description="Create a new community",
    request_body=MetagovSchemas.create_community_schema,
    responses={200: CommunitySerializer, 201: CommunitySerializer},
    tags=[Tags.COMMUNITY],
)
@api_view(["POST"])
def create_community(request):
    """Create a new Community from the JSON request body.

    Returns 201 with the serialized community on success, or 400 with the
    serializer's validation errors.
    """
    data = JSONParser().parse(request)
    community_serializer = CommunitySerializer(data=data)
    if not community_serializer.is_valid():
        return JsonResponse(community_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    community_serializer.save()
    return JsonResponse(community_serializer.data, status=status.HTTP_201_CREATED)
@swagger_auto_schema(
    method="delete",
    operation_id="Delete community",
    manual_parameters=[MetagovSchemas.community_slug_in_path],
    operation_description="Delete an existing community",
    tags=[Tags.COMMUNITY],
)
@swagger_auto_schema(
    method="get",
    operation_id="Get community",
    operation_description="Get the configuration for an existing community",
    manual_parameters=[MetagovSchemas.community_slug_in_path],
    responses={200: CommunitySerializer},
    tags=[Tags.COMMUNITY],
)
@swagger_auto_schema(
    method="put",
    operation_id="Update community",
    operation_description="Update the configuration for an existing community",
    manual_parameters=[MetagovSchemas.community_slug_in_path],
    request_body=CommunitySerializer,
    responses={200: CommunitySerializer, 201: CommunitySerializer},
    tags=[Tags.COMMUNITY],
)
@api_view(["GET", "PUT", "DELETE"])
def community(request, slug):
    """Fetch (GET), update (PUT), or delete (DELETE) a community looked up by slug.

    Returns 404 if no community with the given slug exists.
    """
    try:
        community = Community.objects.get(slug=slug)
    except Community.DoesNotExist:
        return HttpResponseNotFound()
    if request.method == "GET":
        # get community
        community_serializer = CommunitySerializer(community)
        return JsonResponse(community_serializer.data, safe=False)
    elif request.method == "PUT":
        # update community (change readable name or enable/disable plugins)
        data = JSONParser().parse(request)
        community_serializer = CommunitySerializer(community, data=data)
        if not community_serializer.is_valid():
            return JsonResponse(community_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        community_serializer.save()
        return JsonResponse(community_serializer.data)
    elif request.method == "DELETE":
        community.delete()
        return JsonResponse({"message": "Community was deleted successfully"}, status=status.HTTP_204_NO_CONTENT)
def decorated_enable_plugin_view(plugin_name):
    """
    Decorate the `enable_plugin` view with swagger schema properties defined by the plugin author
    """
    cls = plugin_registry[plugin_name]
    @community_middleware
    @api_view(["POST"])
    def enable_plugin(request):
        """Enable (install) the plugin for the community identified by the request header."""
        plugin_config = JSONParser().parse(request)
        # Create or re-create the plugin (only one instance per community supported for now!)
        plugin, created = utils.create_or_update_plugin(plugin_name, plugin_config, request.community)
        # Serialize and return the Plugin instance
        serializer = PluginSerializer(plugin)
        # 201 when newly created; 200 when an existing instance was (possibly) updated.
        resp_status = status.HTTP_201_CREATED if created else status.HTTP_200_OK
        return JsonResponse(serializer.data, status=resp_status)
    # Merge the plugin author's declared config schema into the swagger request body.
    request_body_schema = utils.json_schema_to_openapi_object(cls.config_schema) if cls.config_schema else {}
    return swagger_auto_schema(
        method="post",
        responses={
            201: openapi.Response(
                "Plugin enabled",
                PluginSerializer,
            ),
            200: openapi.Response(
                "The Plugin was already enabled. Plugin was updated if the config changed.",
                PluginSerializer,
            ),
        },
        operation_id=f"Enable {plugin_name}",
        tags=[Tags.COMMUNITY],
        operation_description=f"Enable {plugin_name} plugin.",
        manual_parameters=[MetagovSchemas.community_header],
        request_body=openapi.Schema(
            type=openapi.TYPE_OBJECT,
            properties={
                **request_body_schema.get("properties", {}),
            },
            required=request_body_schema.get("required", []),
        ),
    )(enable_plugin)
@swagger_auto_schema(
    method="delete",
    operation_id="Disable plugin",
    operation_description="Delete a plugin instance. This is an irreversible action.",
    manual_parameters=[MetagovSchemas.plugin_name_in_path],
    responses={204: "Plugin disabled successfully"},
    tags=[Tags.COMMUNITY],
)
@api_view(["DELETE"])
def delete_plugin(request, plugin_name, id):
    """Permanently delete (disable) the plugin instance with primary key ``id``."""
    instance = Plugin.objects.filter(pk=id).first()
    if instance is None:
        return HttpResponseNotFound()
    logger.info(f"Deleting plugin {instance}")
    instance.delete()
    return HttpResponse(status=status.HTTP_204_NO_CONTENT)
@swagger_auto_schema(**MetagovSchemas.plugin_authorize)
@api_view(["GET"])
def plugin_authorize(request, plugin_name):
    """Kick off an authorization flow for a plugin (app install or user login).

    Builds a nonce-keyed state blob, stores the nonce in the session so the
    callback view can validate the round trip, and redirects the browser to
    the plugin's external authorization URL.
    """
    plugin_cls = plugin_registry.get(plugin_name)
    if not plugin_cls:
        return HttpResponseBadRequest(f"No such plugin: {plugin_name}")
    # auth type (user login or app installation)
    type = request.GET.get("type", AuthorizationType.APP_INSTALL)
    # community to install to (optional for installation, ignored for user login)
    community_slug = request.GET.get("community")
    # where to redirect after auth flow is done
    redirect_uri = request.GET.get("redirect_uri")
    # state to pass along to final redirect after auth flow is done
    received_state = request.GET.get("state")
    request.session["received_authorize_state"] = received_state
    if type != AuthorizationType.APP_INSTALL and type != AuthorizationType.USER_LOGIN:
        return HttpResponseBadRequest(f"Parameter 'type' must be '{AuthorizationType.APP_INSTALL}' or '{AuthorizationType.USER_LOGIN}'")
    community = None
    if type == AuthorizationType.APP_INSTALL:
        if community_slug:
            try:
                community = Community.objects.get(slug=community_slug)
            except Community.DoesNotExist:
                return HttpResponseBadRequest(f"No such community: {community_slug}")
        else:
            # No community given: create a fresh one to install into.
            community = Community.objects.create()
            # TODO: delete the community if installation fails.
            logger.debug(f"Created new community for installing {plugin_name}: {community}")
            community_slug = str(community.slug)
    # Create the state: a single-entry dict keyed by a fresh nonce,
    # serialized and base64-encoded for the round trip.
    nonce = utils.generate_nonce()
    state = {nonce: {"community": community_slug, "redirect_uri": redirect_uri, "type": type}}
    state_str = json.dumps(state).encode("ascii")
    state_encoded = base64.b64encode(state_str).decode("ascii")
    # Store nonce in the session so we can validate the callback request
    request.session["nonce"] = nonce
    # FIXME: figure out a better way to register these functions
    plugin_views = importlib.import_module(f"metagov.plugins.{plugin_name}.views")
    url = plugin_views.get_authorize_url(state_encoded, type, community)
    if type == AuthorizationType.APP_INSTALL:
        logger.info(f"Redirecting to authorize '{plugin_name}' for community {community}")
    elif type == AuthorizationType.USER_LOGIN:
        logger.info(f"Redirecting to authorize user for '{plugin_name}'")
    return HttpResponseRedirect(url)
def redirect_with_params(url, params):
    """Redirect to ``url`` with ``params`` encoded into its query string."""
    prepared = PreparedRequest()
    prepared.prepare_url(url, params)
    return HttpResponseRedirect(prepared.url)
@swagger_auto_schema(method="GET", auto_schema=None)
@api_view(["GET"])
def plugin_auth_callback(request, plugin_name):
    """OAuth-style callback endpoint for plugin authorization flows.

    Validates the round-tripped ``state`` blob against the nonce stored in
    the session, then hands off to the plugin's ``auth_callback`` view
    function. Errors discovered after the redirect URI is known are reported
    by redirecting back to it with an ``error`` query parameter.
    """
    logger.debug(f"Plugin auth callback received request: {request.GET}")
    plugin_cls = plugin_registry.get(plugin_name)
    if not plugin_cls:
        return HttpResponseBadRequest(f"No such plugin: {plugin_name}")
    state_str = request.GET.get("state")
    if not state_str:
        return HttpResponseBadRequest("missing state")
    # Validate and decode state
    nonce = request.session.get("nonce")
    if not nonce:
        return HttpResponseBadRequest("missing session nonce")
    state_obj = json.loads(base64.b64decode(state_str).decode("ascii"))
    logger.debug(f"Decoded state: {state_obj}")
    state = state_obj.get(nonce)
    # Bug fix: a state blob that does not contain our session nonce (tampered
    # or stale request) previously left `state` as None and the attribute
    # lookups below raised AttributeError (HTTP 500). Reject it explicitly.
    if state is None:
        return HttpResponseBadRequest("invalid state")
    type = state.get("type")
    community_slug = state.get("community")
    redirect_uri = state.get("redirect_uri")
    state_to_pass = request.session.get("received_authorize_state")
    if not redirect_uri:
        return HttpResponseBadRequest("bad state: redirect_uri is missing")
    if request.GET.get("error"):
        return redirect_with_params(redirect_uri, {"state": state_to_pass, "error": request.GET.get("error")})
    code = request.GET.get("code")
    if not code:
        return redirect_with_params(redirect_uri, {"state": state_to_pass, "error": "server_error"})
    community = None
    if type == AuthorizationType.APP_INSTALL:
        # For installs, validate the community
        if not community_slug:
            return redirect_with_params(redirect_uri, {"state": state_to_pass, "error": "bad_state"})
        try:
            community = Community.objects.get(slug=community_slug)
        except Community.DoesNotExist:
            return redirect_with_params(redirect_uri, {"state": state_to_pass, "error": "community_not_found"})
    # FIXME: figure out a better way to register these functions
    plugin_views = importlib.import_module(f"metagov.plugins.{plugin_name}.views")
    try:
        return plugin_views.auth_callback(
            type=type,
            code=code,
            redirect_uri=redirect_uri,
            community=community,
            state=state_to_pass,
        )
    except PluginAuthError as e:
        return redirect_with_params(
            redirect_uri, {"state": state_to_pass, "error": e.get_codes(), "error_description": e.detail}
        )
@swagger_auto_schema(**MetagovSchemas.plugin_metadata)
@api_view(["GET"])
def plugin_metadata(request, plugin_name):
    """Return static metadata and declared schemas for a registered plugin."""
    cls = plugin_registry.get(plugin_name)
    if not cls:
        return HttpResponseBadRequest(f"No such plugin: {plugin_name}")
    metadata = {
        "name": cls.name,
        "auth_type": cls.auth_type,
        "uses_webhook": utils.plugin_uses_webhooks(cls),
        "schemas": {
            "config": cls.config_schema,
            "actions": utils.get_action_schemas(cls),
            "events": utils.get_event_schemas(cls),
            "processes": utils.get_process_schemas(cls),
        },
    }
    return JsonResponse(metadata)
@swagger_auto_schema(**MetagovSchemas.plugin_schemas)
@api_view(["GET"])
def plugin_config_schemas(request):
    """Return the config schema for every registered plugin, keyed by plugin name."""
    return JsonResponse({name: cls.config_schema for name, cls in plugin_registry.items()})
@csrf_exempt
@swagger_auto_schema(method="post", auto_schema=None)
@api_view(["POST"])
def receive_webhook(request, community, plugin_name, webhook_slug=None):
    """
    API endpoint for receiving webhook requests from external services.

    Routes the raw request first to the plugin instance's registered webhook
    receiver, then to every PENDING GovernanceProcess for that plugin.
    Receiver exceptions are logged and swallowed so one failing handler does
    not block the others from seeing the event.
    """
    try:
        community = Community.objects.get(slug=community)
    except Community.DoesNotExist:
        return HttpResponseNotFound()
    # Lookup plugin
    plugin = get_plugin_instance(plugin_name, community)
    # Validate slug if the plugin has `webhook_slug` configured
    expected_slug = plugin.config.get(utils.WEBHOOK_SLUG_CONFIG_KEY)
    if webhook_slug != expected_slug:
        logger.error(f"Received request at {webhook_slug}, expected {expected_slug}. Rejecting.")
        return HttpResponseBadRequest()
    plugin_cls = plugin_registry[plugin_name]
    if plugin_cls._webhook_receiver_function:
        webhook_receiver = getattr(plugin, plugin_cls._webhook_receiver_function)
        logger.info(f"Passing webhook request to: {plugin}")
        try:
            webhook_receiver(request)
        except Exception as e:
            # Deliberately swallowed: the webhook sender gets a 200 regardless.
            logger.error(f"Plugin '{plugin}' failed to process webhook: {e}")
    # Call `receive_webhook` on each of the GovernanceProcess proxy models
    proxy_models = plugin_cls._process_registry.values()
    for cls in proxy_models:
        processes = cls.objects.filter(plugin=plugin, status=ProcessStatus.PENDING.value)
        logger.info(f"{processes.count()} pending processes for plugin instance '{plugin}'")
        for process in processes:
            logger.info(f"Passing webhook request to: {process}")
            try:
                process.receive_webhook(request)
            except Exception as e:
                logger.error(e)
    return HttpResponse()
@csrf_exempt
@swagger_auto_schema(method="post", auto_schema=None)
@api_view(["POST"])
def receive_webhook_global(request, plugin_name):
    """
    API endpoint for receiving webhook requests from external services.
    For plugins that receive events for multiple communities to a single URL -- like Slack and Discord
    """
    # FIXME: figure out a better way to register the event processing function
    try:
        plugin_views = importlib.import_module(f"metagov.plugins.{plugin_name}.views")
    except ModuleNotFoundError:
        plugin_views = None
    # No module, or module without a `process_event` hook: acknowledge and drop.
    if plugin_views is None or not hasattr(plugin_views, "process_event"):
        logger.error(f"no receiver for {plugin_name}")
        return HttpResponse()
    logger.debug(f"Processing incoming event for {plugin_name}")
    return plugin_views.process_event(request)
def decorated_create_process_view(plugin_name, slug):
    """
    Decorate the `create_process` view with swagger schema properties defined by the plugin author.

    (Fix: this text was previously a stray no-op string literal placed after
    the first statements; moved to the proper docstring position.)
    """
    # get process model proxy class
    cls = plugin_registry[plugin_name]._process_registry[slug]
    prefixed_slug = f"{plugin_name}.{slug}"
    @community_middleware
    @api_view(["POST"])
    def create_process(request):
        """Start a new governance process of this type for the requesting community."""
        # Look up plugin instance (throws if plugin is not installed for this community)
        plugin = get_plugin_instance(plugin_name, request.community)
        payload = JSONParser().parse(request)
        callback_url = payload.pop("callback_url", None)  # pop to remove it
        # Validate payload against the process's declared input schema, if any
        if cls.input_schema:
            try:
                jsonschema.validate(payload, cls.input_schema)
            except jsonschema.exceptions.ValidationError as err:
                raise ValidationError(err.message)
        # Create new process instance
        new_process = cls.objects.create(name=slug, callback_url=callback_url, plugin=plugin)
        logger.info(f"Created process: {new_process}")
        # Start the process; roll the record back if startup fails for any reason.
        # (The original had two identical except branches — APIException and
        # Exception — both deleting the model and re-raising; collapsed into one.)
        try:
            new_process.start(payload)
        except Exception:
            new_process.delete()
            raise
        logger.info(f"Started process: {new_process}")
        # return 202 with resource location in header
        response = HttpResponse(status=HTTPStatus.ACCEPTED)
        response["Location"] = f"/{utils.construct_process_url(plugin_name, slug)}/{new_process.pk}"
        return response
    request_body_schema = utils.json_schema_to_openapi_object(cls.input_schema) if cls.input_schema else {}
    return swagger_auto_schema(
        method="post",
        responses={
            202: "Process successfully started. Use the URL from the `Location` header in the response to get the status and outcome of the process."
        },
        operation_id=f"Start {prefixed_slug}",
        tags=[Tags.GOVERNANCE_PROCESS],
        operation_description=f"Start a new governance process of type '{prefixed_slug}'",
        manual_parameters=[MetagovSchemas.community_header],
        request_body=openapi.Schema(
            type=openapi.TYPE_OBJECT,
            properties={
                "callback_url": openapi.Schema(
                    type=openapi.TYPE_STRING, description="URL to POST outcome to when process is completed"
                ),
                **request_body_schema.get("properties", {}),
            },
            required=request_body_schema.get("required", []),
        ),
    )(create_process)
def decorated_get_process_view(plugin_name, slug):
    """Build the poll/close view for the governance process type ``plugin_name.slug``,
    decorated with swagger schema properties for that process type."""
    # get process model proxy class
    cls = plugin_registry[plugin_name]._process_registry[slug]
    prefixed_slug = f"{plugin_name}.{slug}"
    @swagger_auto_schema(
        method="delete",
        operation_id=f"Close {prefixed_slug}",
        operation_description=f"Close the {prefixed_slug} process",
        tags=[Tags.GOVERNANCE_PROCESS],
    )
    @swagger_auto_schema(
        method="get",
        operation_id=f"Check status of {prefixed_slug}",
        operation_description=f"Poll the pending {prefixed_slug} governance process",
        tags=[Tags.GOVERNANCE_PROCESS],
        responses={
            200: openapi.Response(
                "Current process record. Check the `status` field to see if the process has completed. If the `errors` field has data, the process failed.",
                GovernanceProcessSerializer,
            ),
            404: "Process not found",
        },
    )
    @api_view(["GET", "DELETE"])
    def get_process(request, process_id):
        try:
            process = cls.objects.get(pk=process_id)
        except cls.DoesNotExist:
            return HttpResponseNotFound()
        # 'DELETE' means close the process and return it. This will update process state.
        if request.method == "DELETE":
            if process.status == ProcessStatus.COMPLETED.value:
                raise ValidationError("Can't close process, it has already completed")
            try:
                logger.info(f"Closing: {process}")
                process.close()
            except NotImplementedError:
                raise APIException(
                    f"{process.plugin.name}.{process.name} does not support manually closing the process."
                )
            if process.status != ProcessStatus.COMPLETED.value:
                raise APIException("Failed to close process")
        # Both GET and a successful DELETE fall through to serialize the record.
        serializer = GovernanceProcessSerializer(process)
        logger.info(f"Returning serialized process: {serializer.data}")
        return JsonResponse(serializer.data)
    return get_process
def decorated_perform_action_view(plugin_name, slug, tags=None):
    """Build a decorated view that performs the registered action ``plugin_name.slug``.

    ``tags`` optionally overrides the swagger tags (defaults to [Tags.ACTION]).
    Fix: the default was a mutable ``[]``; changed to ``None`` — behavior is
    identical because the value is only read via ``tags or [Tags.ACTION]``.
    """
    cls = plugin_registry[plugin_name]
    meta = cls._action_registry[slug]
    prefixed_slug = f"{plugin_name}.{slug}"
    @community_middleware
    @api_view(["POST"])
    def perform_action(request):
        """
        Perform an action on a platform
        """
        # 1. Look up plugin instance
        plugin = get_plugin_instance(plugin_name, request.community)
        action_function = getattr(plugin, meta.function_name)
        # 2. Validate input parameters
        parameters = {}
        if request.method == "POST" and request.body:
            payload = JSONParser().parse(request)
            parameters = payload.get("parameters", {})
        # TODO: add back support for GET. Should be allowed if params are simple enough.
        if request.method == "GET":
            parameters = request.GET.dict()  # doesn't support repeated params 'a=2&a=3'
        utils.restruct(parameters)
        if meta.input_schema:
            try:
                jsonschema.validate(parameters, meta.input_schema)
            except jsonschema.exceptions.ValidationError as err:
                raise ValidationError(err.message)
        # 3. Invoke action function
        response = action_function(parameters)
        # 4. Validate response
        if meta.output_schema:
            try:
                jsonschema.validate(response, meta.output_schema)
            except jsonschema.exceptions.ValidationError as err:
                raise ValidationError(err.message)
        # 5. Return response
        try:
            return JsonResponse(response)
        except TypeError:
            logger.error(f"Failed to serialize {response}")
            raise
    arg_dict = {
        "method": "post",
        "operation_description": meta.description,
        "manual_parameters": [MetagovSchemas.community_header],
        "operation_id": prefixed_slug,
        "tags": tags or [Tags.ACTION],
    }
    if meta.input_schema:
        properties = {"parameters": utils.json_schema_to_openapi_object(meta.input_schema)}
        arg_dict["request_body"] = openapi.Schema(type=openapi.TYPE_OBJECT, properties={**properties})
    if meta.output_schema:
        arg_dict["responses"] = {200: utils.json_schema_to_openapi_object(meta.output_schema)}
    else:
        arg_dict["responses"] = {200: "action was performed successfully"}
    return swagger_auto_schema(**arg_dict)(perform_action)
def get_plugin_instance(plugin_name, community):
    """Return the proxy-model instance of ``plugin_name`` enabled for ``community``.

    Raises ValidationError if the plugin is unknown or not enabled for the community.
    """
    try:
        cls = plugin_registry[plugin_name]
    except KeyError:
        raise ValidationError(f"Plugin '{plugin_name}' not found")
    instance = cls.objects.filter(name=plugin_name, community=community).first()
    if instance is None:
        raise ValidationError(f"Plugin '{plugin_name}' not enabled for community '{community}'")
    return instance
|
export KEYS_PATH=the_dir_path_of_your_prepared_keys
export SECURE_PASSWORD_PATH=the_dir_path_of_your_prepared_password
export LOCAL_IP=your_local_ip_of_the_sgx_server
# Launch the SGX trusted cluster-serving container.
# Fix: the volume-mount and -e variables are now quoted so paths
# containing spaces are not word-split by the shell.
sudo docker run -itd \
    -e REDIS_HOST=127.0.0.1 \
    --privileged \
    --net=host \
    --cpuset-cpus="0-30" \
    --oom-kill-disable \
    --device=/dev/sgx \
    -v /var/run/aesmd/aesm.socket:/var/run/aesmd/aesm.socket \
    -v "$KEYS_PATH":/opt/keys \
    -v "$PWD"/conf:/opt/conf \
    -v "$SECURE_PASSWORD_PATH":/opt/password \
    --name=trusted-cluster-serving-local \
    -e LOCAL_IP="$LOCAL_IP" \
    -e CORE_NUM=30 \
    intelanalytics/analytics-zoo-ppml-trusted-realtime-ml-scala-occlum:0.12.0-SNAPSHOT \
    bash -c "cd /opt/ && ./start-all.sh && tail -f /dev/null"
#? Description:
#? Move the account out of AWS SES sandbox by creating support case.
#?
#? Usage:
#? @move [-r REGION]
#?
#? Options:
#? [-r REGION] Move out for the given region.
#? * us-east-1
#? * us-west-2
#? * eu-west-1
#? Default is to use the region in your AWS CLI profile.
#?
#? @xsh /trap/err -e
#? @subshell
#?
function move () {
    # Interactively walk the operator through moving this account out of the
    # AWS SES sandbox, creating a support case over the CLI when possible.
    declare OPTIND OPTARG opt
    declare region
    declare -a region_opt
    # Parse options: -r REGION targets a specific region.
    while getopts r: opt; do
        case $opt in
            r)
                region=$OPTARG
                region_opt=(-r "${OPTARG:?}")
                ;;
            *)
                return 255
                ;;
        esac
    done
    # Fall back to the region configured in the default AWS CLI profile.
    if [[ -z $region ]]; then
        region=$(aws configure get default.region)
    fi
    printf "checking if this account is inside SES sandbox ... "
    if xsh aws/ses/sandbox/inside "${region_opt[@]}"; then
        # inside sandbox
        printf "[yes]\n" | xsh /file/mark
    else
        # outside sandbox: nothing to do
        printf "[no]\n" | xsh /file/mark
        return
    fi
    printf "checking if this account is able to create support case over CLI ... "
    if xsh aws/spt/is-callable; then
        # callable
        printf "[yes]\n" | xsh /file/mark
        printf "checking if there is an existing support case ... "
        # NOTE(review): $case_id is assigned only in the "no case" branch below;
        # on the first pass it is empty, so this branch is reached via the
        # recursive @move call at the bottom — confirm this is intended.
        if [[ -n $case_id ]]; then
            # there is a case
            printf "[yes]\n" | xsh /file/mark
            printf "checking the support case status ... "
            declare status
            # The AWS Support API is only served from us-east-1.
            status=$(aws --region us-east-1 \
                         --query '[].status' \
                         support describe-cases \
                         --case-id-list "$case_id" \
                         --include-resolved-cases)
            printf "[%s]\n" "$status" | xsh /file/mark
            if [[ $status == Resolved ]]; then
                printf 'continue to recheck the sandbox status.\n'
            else
                printf 'please wait for the support case to be resolved, then continue.\n'
            fi
        else
            # there is no case
            printf "[no]\n" | xsh /file/mark
            if xsh /io/confirm -m 'shall I create a support case for you?'; then
                # Compose the limit-increase request body expected by AWS support.
                declare -a body
                body+=("Limit increase request 1")
                body+=("Service: SES Sending Limits")
                body+=("Region: $region")
                body+=("Limit name: Desired Daily Sending Quota")
                body+=("New limit value: 1000")
                body+=("------------")
                body+=("Limit increase request 2")
                body+=("Service: SES Sending Limits")
                body+=("Region: $region")
                body+=("Limit name: Desired Maximum Send Rate")
                body+=("New limit value: 10")
                body+=("------------")
                body+=("Use case description: My service needs to move my account out of the SES sandbox.")
                body+=("Mail Type: System Notifications")
                body+=("My email-sending complies with the <a href=\"http://aws.amazon.com/service-terms/\" target=\"_blank\">AWS Service Terms</a> and <a href=\"http://aws.amazon.com/aup/\" target=\"_blank\">AUP</a>: Yes")
                body+=("I only send to recipients who have specifically requested my mail: Yes")
                body+=("I have a process to handle bounces and complaints: Yes")
                declare case_id
                case_id=$(xsh aws/spt/create \
                              -j "Limit Increase: SES Sending Limits" \
                              -b "$(printf '%s\n' "${body[@]}")" \
                              -s ses \
                              -c "Service Limit Increase, SES Sending Limits" \
                              -l en)
            fi
        fi
    else
        # not callable: point the operator at the manual web form instead
        printf "[no]\n" | xsh /file/mark
        declare -a msg
        msg+=("go to below URL to create a support case to move your account out of AWS SES sandbox:")
        msg+=(" * https://aws.amazon.com/ses/extendedaccessrequest/")
        msg+=("here's the help document about how to create this case:")
        msg+=(" * https://docs.aws.amazon.com/ses/latest/DeveloperGuide/request-production-access.html")
        msg+=("if your account is still in the Amazon SES sandbox, you may only send to verified addresses")
        msg+=("or domains, or to email addresses associated with the Amazon SES Mailbox Simulator.")
        printf '%s\n' "${msg[@]}"
    fi
    read -n 1 -s -p "press any key to continue, CTRL-C to exit."
    printf '\n\n'
    # Re-enter this utility to recheck the sandbox/case status
    # (presumably xsh resolves @move to this function — verify).
    @move "${region_opt[@]}"
}
|
#!/bin/sh
# Attach detached code signatures to an unsigned macOS .app bundle.
# Inputs: a tarball of the unsigned app and a tarball of *.sign files
# produced offline; output lands in ./signed-app.
set -e
UNSIGNED=$1
SIGNATURE=$2
ARCH=x86_64
ROOTDIR=dist
BUNDLE=${ROOTDIR}/Charitycoin-Qt.app
TEMPDIR=signed.temp
OUTDIR=signed-app
if [ -z "$UNSIGNED" ]; then
  echo "usage: $0 <unsigned app> <signature>"
  exit 1
fi
if [ -z "$SIGNATURE" ]; then
  echo "usage: $0 <unsigned app> <signature>"
  exit 1
fi
# Unpack both tarballs into a scratch directory.
rm -rf ${TEMPDIR} && mkdir -p ${TEMPDIR}
tar -C ${TEMPDIR} -xf ${UNSIGNED}
tar -C ${TEMPDIR} -xf ${SIGNATURE}
# The signature tarball may ship its own pagestuff/codesign_allocate
# binaries; the environment can override their locations.
if [ -z "${PAGESTUFF}" ]; then
  PAGESTUFF=${TEMPDIR}/pagestuff
fi
if [ -z "${CODESIGN_ALLOCATE}" ]; then
  CODESIGN_ALLOCATE=${TEMPDIR}/codesign_allocate
fi
for i in `find ${TEMPDIR} -name "*.sign"`; do
  # NOTE(review): `stat -c %s` is GNU syntax (BSD/macOS would need
  # `stat -f %z`) — presumably this script runs on Linux; confirm.
  SIZE=`stat -c %s ${i}`
  TARGET_FILE=`echo ${i} | sed 's/\.sign$//'`
  echo "Allocating space for the signature of size ${SIZE} in ${TARGET_FILE}"
  # Reserve exactly SIZE bytes for the signature in the target binary.
  ${CODESIGN_ALLOCATE} -i ${TARGET_FILE} -a ${ARCH} ${SIZE} -o ${i}.tmp
  # Locate the file offset of the reserved signature region.
  OFFSET=`${PAGESTUFF} ${i}.tmp -p | tail -2 | grep offset | sed 's/[^0-9]*//g'`
  if [ -z ${QUIET} ]; then
    echo "Attaching signature at offset ${OFFSET}"
  fi
  # Splice the detached signature bytes into the reserved region.
  dd if=$i of=${i}.tmp bs=1 seek=${OFFSET} count=${SIZE} 2>/dev/null
  mv ${i}.tmp ${TARGET_FILE}
  rm ${i}
  echo "Success."
done
mv ${TEMPDIR}/${ROOTDIR} ${OUTDIR}
rm -rf ${TEMPDIR}
echo "Signed: ${OUTDIR}"
|
<gh_stars>0
package wallet
import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"io/ioutil"
"testing"
"github.com/davecgh/go-spew/spew"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/simone-trubian/blockchain-tutorial/database"
"github.com/simone-trubian/blockchain-tutorial/fs"
)
// The password for testing keystore files:
// ./node/test_simone--3eb92807f1f91a8d4d85bc908c7f86dcddb1df57
// ./node/test_tanya--6fdc0d8d15ae6b4ebf45c52fd2aafbcbb19a65c8
const testKeystoreAccountsPwd = "<PASSWORD>"
// TestSign verifies the sign/verify round trip: a message signed with a
// freshly generated secp256k1 key must verify, and the public key recovered
// from the signature must hash to the same address as the original key.
func TestSign(t *testing.T) {
	privKey, err := ecdsa.GenerateKey(crypto.S256(), rand.Reader)
	if err != nil {
		t.Fatal(err)
	}
	pubKey := privKey.PublicKey
	// Ethereum-style address derivation: Keccak-256 over the uncompressed
	// public key (dropping the leading prefix byte), keeping the last 20 bytes.
	pubKeyBytes := elliptic.Marshal(crypto.S256(), pubKey.X, pubKey.Y)
	pubKeyBytesHash := crypto.Keccak256(pubKeyBytes[1:])
	account := common.BytesToAddress(pubKeyBytesHash[12:])
	msg := []byte("the Web3Coach students are awesome")
	sig, err := Sign(msg, privKey)
	if err != nil {
		t.Fatal(err)
	}
	recoveredPubKey, err := Verify(msg, sig)
	if err != nil {
		t.Fatal(err)
	}
	// Derive the address from the recovered key the same way and compare.
	recoveredPubKeyBytes := elliptic.Marshal(crypto.S256(), recoveredPubKey.X, recoveredPubKey.Y)
	recoveredPubKeyBytesHash := crypto.Keccak256(recoveredPubKeyBytes[1:])
	recoveredAccount := common.BytesToAddress(recoveredPubKeyBytesHash[12:])
	if account.Hex() != recoveredAccount.Hex() {
		t.Fatalf("msg was signed by account %s but signature recovery produced an account %s", account.Hex(), recoveredAccount.Hex())
	}
}
// TestSignTxWithKeystoreAccount creates two throwaway keystore accounts in a
// temp dir, signs a transaction with the sender's keystore key, and verifies
// the signature authenticates.
//
// Idiom fix: `t.Error(err); return` pairs replaced with the equivalent and
// idiomatic `t.Fatal(err)`.
func TestSignTxWithKeystoreAccount(t *testing.T) {
	tmpDir, err := ioutil.TempDir("", "wallet_test")
	if err != nil {
		t.Fatal(err)
	}
	defer fs.RemoveDir(tmpDir)
	simone, err := NewKeystoreAccount(tmpDir, testKeystoreAccountsPwd)
	if err != nil {
		t.Fatal(err)
	}
	tanya, err := NewKeystoreAccount(tmpDir, testKeystoreAccountsPwd)
	if err != nil {
		t.Fatal(err)
	}
	tx := database.NewTx(simone, tanya, 100, "")
	// Sign with the sender's keystore file.
	signedTx, err := SignTxWithKeystoreAccount(tx, simone, testKeystoreAccountsPwd, GetKeystoreDirPath(tmpDir))
	if err != nil {
		t.Fatal(err)
	}
	spew.Dump(signedTx.Encode())
	ok, err := signedTx.IsAuthentic()
	if err != nil {
		t.Fatal(err)
	}
	if !ok {
		t.Fatal("the TX was signed by 'from' account and should have been authentic")
	}
}
// TestSignForgedTxWithKeystoreAccount signs a transaction whose `from` field
// names a different account than the one doing the signing, and verifies that
// the signature does NOT authenticate.
//
// Idiom fix: `t.Error(err); return` pairs replaced with the equivalent and
// idiomatic `t.Fatal(err)`.
func TestSignForgedTxWithKeystoreAccount(t *testing.T) {
	tmpDir, err := ioutil.TempDir("", "wallet_test")
	if err != nil {
		t.Fatal(err)
	}
	defer fs.RemoveDir(tmpDir)
	hacker, err := NewKeystoreAccount(tmpDir, testKeystoreAccountsPwd)
	if err != nil {
		t.Fatal(err)
	}
	tanya, err := NewKeystoreAccount(tmpDir, testKeystoreAccountsPwd)
	if err != nil {
		t.Fatal(err)
	}
	// `from` claims tanya, but the hacker's key signs it.
	forgedTx := database.NewTx(tanya, hacker, 100, "")
	signedTx, err := SignTxWithKeystoreAccount(forgedTx, hacker, testKeystoreAccountsPwd, GetKeystoreDirPath(tmpDir))
	if err != nil {
		t.Fatal(err)
	}
	ok, err := signedTx.IsAuthentic()
	if err != nil {
		t.Fatal(err)
	}
	if ok {
		t.Fatal("the TX 'from' attribute was forged and should have not be authentic")
	}
}
|
<reponame>strawberry/strawberry-components
import { Component, Host, h, Element, Event, EventEmitter, Prop, State } from '@stencil/core';
@Component({
  tag: 'sc-add-to-cart',
  styleUrl: 'sc-add-to-cart.css',
  shadow: true,
})
export class ScAddToCart {
  @Element() el: HTMLElement;
  // ID of the product to add; the event only fires when this is set.
  @Prop() productId: number;
  // Label rendered on the button.
  @Prop() buttonText: string = 'Add to Cart';
  // When true, quantity is read from an <sc-quantity-selector> on the page.
  @Prop() quantitySelector: boolean = false;
  // Quantity to emit; defaults to 1 unless overridden by the selector.
  @State() quantity: number = 1;
  // NOTE(review): "Emiitted" is a typo, but this is a public class member,
  // so renaming it could break external references — left as-is.
  @Event({
    eventName: 'sc:add-to-cart:emitted',
    composed: true,
    cancelable: true,
    bubbles: true,
  }) scAddToCartEmiitted: EventEmitter;
  // Intentionally empty lifecycle hook: no setup is needed on load.
  componentDidLoad() {
  }
  // Click handler: resolve the quantity, then emit the add-to-cart event.
  handleButton(event) {
    if(this.quantitySelector) {
      // NOTE(review): this grabs the first sc-quantity-selector in the whole
      // document, not necessarily one scoped to this component — confirm.
      const selector = document.querySelector('sc-quantity-selector');
      this.quantity = Number(selector.dataset.quantity);
    }
    if(!event.defaultPrevented && this.productId) {
      this.scAddToCartEmiitted.emit({
        productId: this.productId,
        quantity: this.quantity
      });
    }
  }
  render() {
    return (
      <Host>
        <button part="button" onClick={(event) => this.handleButton(event)}>{this.buttonText}</button>
      </Host>
    );
  }
}
|
package com.fleschier;
import java.util.List;
import org.apache.mahout.cf.taste.common.TasteException;
import org.apache.mahout.cf.taste.impl.neighborhood.NearestNUserNeighborhood;
import org.apache.mahout.cf.taste.impl.recommender.GenericUserBasedRecommender;
import org.apache.mahout.cf.taste.impl.similarity.PearsonCorrelationSimilarity;
import org.apache.mahout.cf.taste.model.DataModel;
import org.apache.mahout.cf.taste.neighborhood.UserNeighborhood;
import org.apache.mahout.cf.taste.recommender.RecommendedItem;
import org.apache.mahout.cf.taste.recommender.Recommender;
import org.apache.mahout.cf.taste.similarity.UserSimilarity;
/**
 * User-based collaborative filtering recommender built on Apache Mahout.
 *
 * Created in Intellij IDEA
 * User: Fleschier
 * 2018-11-6
 */
public class UserCF {
    /**
     * Returns up to {@code RECOMMENDER_NUM} recommended items for the given
     * user, or {@code null} if Mahout throws a {@link TasteException}.
     */
    public static List<RecommendedItem> userCF(DataModel model, int userID, int RECOMMENDER_NUM) {
        //announce the recommendations for return. because in the try-catch part the value is unavailable outside
        List<RecommendedItem> recommendations = null;
        try {
            /*
            FileDataModel expects comma- or tab-delimited input fields.
            To use a different delimiter, extend FileDataModel — Mahout
            already ships GroupLensDataModel, which parses the MovieLens
            dataset (delimited by "::").
            */
            // User similarity based on the Pearson correlation coefficient.
            UserSimilarity similarity = new PearsonCorrelationSimilarity(model);
            // Neighborhood selection via NearestNUserNeighborhood: the 2 nearest users.
            // (The original comment said 4, but the code passes 2.)
            UserNeighborhood neighborhood = new NearestNUserNeighborhood(2, similarity, model);
            Recommender recommender = new GenericUserBasedRecommender(model, neighborhood, similarity);
            // First argument is the user ID, second is how many items to recommend.
            recommendations = recommender.recommend(userID, RECOMMENDER_NUM);
        } catch (TasteException e) {
            e.printStackTrace();
        }
        return recommendations;
    }
}
|
from pyspark import SparkContext, SparkConf
import re
class Utils:
    """Shared CSV parsing helpers."""

    # Matches commas that sit outside double-quoted fields, so splitting on it
    # leaves quoted values (which may themselves contain commas) intact.
    COMMA_DELIMITER = re.compile(r',(?=(?:[^"]*"[^"]*")*[^"]*$)')
if __name__ == "__main__":
    # Local Spark context using all available cores.
    conf = SparkConf().setAppName('StackOverFlowSurvey').setMaster("local[*]")
    sc = SparkContext(conf = conf)
    # Initialize accumulators
    # Accumulators are write-only variables from the workers' perspective
    total = sc.accumulator(0)
    missingSalaryMidPoint = sc.accumulator(0)
    # Load the data
    responseRDD = sc.textFile("data/2016-stack-overflow-survey-responses.csv")
    def filterResponseFromCanada(response):
        # Split on commas that are outside quoted fields.
        splits = Utils.COMMA_DELIMITER.split(response)
        # Increment the accumulator to count the total number of rows.
        total.add(1)
        # If the salary is missing (15th column -> column with index 14),
        # increment the accumulator.
        if not splits[14]:
            missingSalaryMidPoint.add(1)
        # Return True or False depending on whether the country column is Canada.
        return splits[2] == "Canada"
    responseFromCanada = responseRDD.filter(filterResponseFromCanada)
    # Count rows in the responseFromCanada RDD (this action triggers evaluation,
    # which also populates the accumulators read below).
    print("Count of responses from Canada: {}".format(responseFromCanada.count()))
    # Get value from the "total" accumulator
    print("Total count of responses: {}".format(total.value))
    # Get value from the "missingSalaryMidPoint" accumulator
    print("Count of responses missing salary middle point: {}".format(missingSalaryMidPoint.value))
|
import fetchMock from 'fetch-mock';
import { store } from '../utils';
import saveKeyInUser from './saveKeyInUser';
// Tests for saveKeyInUser: it POSTs a key (with optional value and kind)
// for the persona held in the shared app store.
describe('Create new key/value', () => {
  beforeEach(() => {
    // Seed the store with a persona and stub every network call.
    store.setState({ persona: 'https://10darts.com/api/v1/personas/1234/' });
    fetchMock.post('*', { status: 201 });
  });
  afterEach(() => {
    // Reset mocks and state so the tests stay independent.
    fetchMock.reset();
    fetchMock.restore();
    store.setState({});
  });
  test('create new tag', () => {
    const label = 'label';
    return saveKeyInUser(label).then(() => {
      expect(fetchMock.called()).toBeTruthy();
      // Only the key label is sent when no value is given.
      expect(JSON.parse(fetchMock.lastOptions().body).key).toEqual({ label });
    });
  });
  test('create new key/value', () => {
    const label = 'label';
    const value = 'value';
    return saveKeyInUser(label, value).then(() => {
      expect(fetchMock.called()).toBeTruthy();
      const body = JSON.parse(fetchMock.lastOptions().body);
      expect(body.key).toEqual({ label });
      expect(body.value).toEqual(value);
      // kind defaults to 2 when omitted — presumably the "string" kind;
      // confirm against the saveKeyInUser implementation.
      expect(body.kind).toEqual(2);
    });
  });
  test('create new key/value with diferent kind', () => {
    const label = 'label';
    const value = 10;
    const kind = 1;
    // An explicit kind must be forwarded as-is.
    return saveKeyInUser(label, value, kind).then(() => {
      expect(fetchMock.called()).toBeTruthy();
      const body = JSON.parse(fetchMock.lastOptions().body);
      expect(body.key).toEqual({ label });
      expect(body.value).toEqual(value);
      expect(body.kind).toEqual(kind);
    });
  });
});
|
-- Remove students whose entry date is more than one year before today.
-- Uses MySQL/MariaDB interval arithmetic (CURRENT_DATE - INTERVAL 1 YEAR).
-- Fix: removed a stray trailing "|" that made the statement invalid.
DELETE FROM Students WHERE Entry_Date < CURRENT_DATE - INTERVAL 1 YEAR;
<reponame>berardino/taska
import Vue from "vue";
import App from "./App.vue";
import "./registerServiceWorker";
import router from "./router";
import { BootstrapVue, IconsPlugin } from "bootstrap-vue";
import "bootstrap/dist/css/bootstrap.css";
import "bootstrap-vue/dist/bootstrap-vue.css";
import { FontAwesomeIcon } from "@fortawesome/vue-fontawesome";
import { library } from "@fortawesome/fontawesome-svg-core";
import { fas } from "@fortawesome/free-solid-svg-icons";
import { far } from "@fortawesome/free-regular-svg-icons";
import { fab } from "@fortawesome/free-brands-svg-icons";
// Register all three Font Awesome icon packs (regular, brands, solid)
// so any icon name can be used via the global component below.
library.add(far);
library.add(fab);
library.add(fas);
Vue.component("font-awesome-icon", FontAwesomeIcon);
// Install BootstrapVue components and its icon plugin globally.
Vue.use(BootstrapVue);
Vue.use(IconsPlugin);
// Silence the "running in development mode" console banner.
Vue.config.productionTip = false;
// Mount the root component (with routing) onto #app.
new Vue({
  router,
  render: h => h(App)
}).$mount("#app");
|
# get user input
input_str = input('Enter a sentence: ')
# split into words
words = input_str.split(' ')
# create an empty dictionary
dic = {}
# loop over words
for word in words:
# update dictionary
if word in dic:
dic[word] += 1
else:
dic[word] = 1
# find the most common word
most_common_word = ''
highest_count = 0
for word, count in dic.items():
if count > highest_count:
most_common_word = word
highest_count = count
# print the result
print(f'The most common word is "{most_common_word}" with {highest_count} occurences.') |
// Read the current battery level; presumably a percentage in 0..100 —
// TODO confirm against getBatteryLevel()'s definition.
int batteryLevel = getBatteryLevel();
// Create a TextView to display the value.
TextView battery = new TextView(this);
battery.setTextSize(20);
// Set the battery level text in the TextView.
String text = "Battery Level: " + batteryLevel + "%";
battery.setText(text);
// Append the TextView to the existing LinearLayout from the inflated layout.
LinearLayout layout = (LinearLayout) findViewById(R.id.layout);
layout.addView(battery);
<reponame>jaidis/yay<filename>src/screens/Home/HomeStyles.js
import { StyleSheet } from "react-native";

// Styles for the Home screen: swiper carousel, "nearby" grid and titles.
export default StyleSheet.create({
  // Full-screen dark root container.
  container: {
    flex: 1,
    backgroundColor: "#2E3248"
  },
  // Main screen heading, centered with extra top spacing.
  mainTitle: {
    textAlign: "center",
    paddingTop: 30,
    padding: 20,
    color: "#FFF"
  },
  // Secondary section heading.
  title: {
    textAlign: "center",
    padding: 20,
    color: "#FFF"
  },
  // Rounded, elevated card that hosts the swiper carousel.
  swiper_view: {
    height: 200,
    marginTop: 5,
    marginBottom: 5,
    marginLeft: 20,
    marginRight: 20,
    borderRadius: 10,
    flexWrap: "nowrap",
    backgroundColor: "#FFF",
    shadowColor: "#000",
    shadowOffset: { width: 0, height: 4 },
    shadowOpacity: 0.3,
    shadowRadius: 4.65,
    elevation: 8
  },
  // "Previous slide" tap target (negative margin enlarges the hit area).
  swiper_prev_button: {
    marginLeft: -10,
    padding: 10
  },
  // "Next slide" tap target.
  swiper_next_button: {
    marginRight: -10,
    padding: 10
  },
  // Centers the content of a single swiper slide.
  swiper_internal_view: {
    flex: 1,
    justifyContent: "center",
    alignItems: "center"
  },
  // Full-bleed background image wrapper inside a slide.
  swiper_image_background_style: {
    flex: 1,
    height: "100%",
    width: "100%",
    justifyContent: "center",
    alignItems: "center",
    borderRadius: 10
  },
  // Darkened, rounded rendering of the background image itself.
  swiper_image_background_image_style: {
    opacity: 0.5,
    borderRadius: 10,
    backgroundColor: "#000"
  },
  // Caption text displayed over a slide's background image.
  swiper_text: {
    color: "#fff",
    textAlign: "center",
    fontWeight: "normal",
    padding: 20
  },
  // Grid listing nearby items.
  nearby_grid: {
    marginBottom: 20
  },
  // Thumbnail inside a nearby-grid cell (width is driven by the grid).
  nearby_image: {
    flex: 1,
    height: 200,
    width: null
  },
  // Label under/next to a nearby item.
  nearby_text: {
    marginLeft: 10,
    marginBottom: 10,
    fontSize: 20
  },
  // Right-hand spacing for nearby rows.
  nearby_right: {
    marginRight: 15
  },
  // Small outer margin around each category chip.
  nearby_categories_view: {
    margin: 3
  }
});
<gh_stars>0
package com.lypeer.fcpermission.adapter;
import android.content.Context;
import android.util.Log;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.BaseAdapter;
import android.widget.ImageView;
import android.widget.TextView;
import com.lypeer.fcpermission.R;
import java.util.List;
/**
* Created by Administrator on 2017/3/3/0003.
*/
/**
 * List adapter that renders one icon plus one short description per row
 * in the fcpermission dialog. The two lists are expected to be parallel
 * (same length, same ordering) — TODO confirm at the call site.
 */
public class PermissionAdapter extends BaseAdapter {
    private final List<String> strings;
    private final Context context;
    private LayoutInflater inflater;
    private List<Integer> pictures;

    /**
     * @param context  host context used to obtain a {@link LayoutInflater}
     * @param pictures drawable resource ids, one per row
     * @param strings  descriptions, parallel to {@code pictures}
     */
    public PermissionAdapter(Context context, List<Integer> pictures, List<String> strings) {
        super();
        this.pictures = pictures;
        this.strings = strings;
        this.context = context;
        inflater = LayoutInflater.from(this.context);
    }

    @Override
    public int getCount() {
        // The pictures list drives the row count; a null list means no rows.
        if (null != pictures) {
            return pictures.size();
        } else {
            return 0;
        }
    }

    @Override
    public Object getItem(int position) {
        return pictures.get(position);
    }

    @Override
    public long getItemId(int position) {
        // Positions double as stable ids; the backing lists are never reordered here.
        return position;
    }

    @Override
    public View getView(int position, View convertView, ViewGroup parent) {
        ViewHolder viewHolder;
        if (convertView == null) {
            viewHolder = new ViewHolder();
            // Fix: inflate against the parent with attachToRoot=false so the
            // row's XML layout params are resolved; passing null as the root
            // silently discards them.
            convertView = inflater.inflate(R.layout.item_view_fcpermission_dialog, parent, false);
            viewHolder.text = (TextView) convertView.findViewById(R.id.text);
            viewHolder.image = (ImageView) convertView.findViewById(R.id.image);
            convertView.setTag(viewHolder);
        } else {
            // Recycled row: reuse the cached holder, skip findViewById().
            viewHolder = (ViewHolder) convertView.getTag();
        }
        viewHolder.image.setImageResource(pictures.get(position));
        viewHolder.text.setText(strings.get(position));
        return convertView;
    }
}
// View-holder cache for one list row: populated once in getView() and
// attached to the row via setTag() so recycled rows avoid findViewById().
class ViewHolder {
    public TextView text;
    public ImageView image;
}
import unittest
def write_quantified(d, span_origin, n, filename):
    """Gather samples, quantilify them, and write one "<index> <sample>"
    line per result to ``filename``.

    NOTE(review): both nested helpers are placeholder stubs returning
    None, so iterating ``dataset`` below raises TypeError until they are
    implemented — confirm this is intentional scaffolding.
    """
    def gather_many(data, span, test_case, quantiles):
        # Implementation of gather_many function
        pass

    def quantilify_each(dataset):
        # Implementation of quantilify_each function
        pass

    # assumes quantilify_each returns an iterable of printable samples
    dataset = quantilify_each(gather_many(d, span_origin, unittest.TestCase(), n))
    with open(filename, 'w') as fp:
        for i, sample in enumerate(dataset):
            fp.write(f"{i} {sample}\n")
import time
from typing import Tuple

import torch
from torch import Tensor


def process_dataset_time(sample: Tuple[Tensor, Tensor], device: str) -> float:
    """Measure how long moving one (inputs, targets) pair to ``device`` takes.

    Bug fix: ``Tensor`` is not part of ``typing``; the original
    ``from typing import Tuple, Tensor`` raised ImportError at module
    load. ``Tensor`` now comes from ``torch``.

    :param sample: tuple of (train_X, train_Y) tensors.
    :param device: target device string, e.g. "cpu" or "cuda:0".
    :returns: wall-clock seconds spent on the two transfers.
    """
    start_t = time.time()
    # The transferred tensors are intentionally discarded — only the
    # elapsed transfer time matters to the caller.
    sample[0].to(device)
    sample[1].to(device)
    return time.time() - start_t
/*
* Page Scroller LITE - jQuery Plugin
* A simple plugin to add smooth scroll interaction to your website
*
* Support at: http://www.pagescroller.com
*
* Copyright (c) 2012 <NAME>. All Rights Reserved
*
* Version: 1.0.1 (6/6/2012)
* Requires: jQuery v1.4+
*
* Page Scroller is released under the GNU General Public License
* (http://www.gnu.org/licenses/). By using Page Scroller, you
* acknowledge and agree to the Terms of Service found here:
* (http://www.pagescroller.com/tos/)
*
*/
// Global handle exposing the plugin's state and next/prev/goTo API.
var pageScroller = {};
(function (d) {
d.fn.extend({
pageScroller: function (h) {
// Merge caller options over the defaults.
h = d.extend({
currentSection: 0,
sectionClass: "homeblock",
linkClass: "link",
navigation: [],
navigationClass: "standardNav",
animationSpeed: 500,
scrollOffset: 0,
HTML5mode: !1
}, h);
// Main setup: caches jQuery handles, builds the nav list, wires click
// and scroll handlers. NOTE(review): `pageScroll` (and later `i`,
// `updateTo`) are assigned without var and leak as globals — confirm
// this is intentional before reuse.
pageScroll = function (c, a) {
d.fx.interval = 5;
pageScroller.scrollDocument = d(document);
pageScroller.scrollWindow = d(window);
pageScroller.scrollBody = d("body");
pageScroller.scrollPosition = pageScroller.scrollWindow.scrollTop();
pageScroller.currentSectionHeight = pageScroller.scrollWindow.height();
pageScroller.options = a;
pageScroller.options.scrollOffset = parseInt(pageScroller.options.scrollOffset);
// Use <nav> instead of <div> for the menu wrapper in HTML5 mode.
var e = "div";
a.HTML5mode && (e = "nav");
pageScroller.options.navigation instanceof Array && (c.append("<" + e + ' class="pageScrollerNav ' + a.navigationClass + '"><ul></ul></' + e + ">"), pageScroller.wrapper = d("." + a.navigationClass.replace(/\s/g, "."), c), pageScroller.navigation = d("ul", pageScroller.wrapper), pageScroller.wrapper.addClass("left"), c.css({
position: "relative"
}));
// Sections are <section> tags in HTML5 mode, else elements with sectionClass.
a.HTML5mode ? pageScroller.sections = d("section", c) : pageScroller.sections = d("." + a.sectionClass, c);
// Build one nav <li> per section: prefer a caller-supplied label, then
// the section's title attribute, then a numbered fallback.
pageScroller.sections.each(function (b) {
var c = d(this),
e = pageScroller.sections.eq(b).attr("title"),
f = a.linkClass + " " + a.linkClass + "_" + (b + 1);
b == pageScroller.sections.length - 1 && (f += " " + a.linkClass + "_last");
c.css({
display: "block",
position: "relative",
"float": "none"
});
c.addClass(pageScroller.options.sectionClass + "_" + (b + 1));
pageScroller.options.navigation instanceof Array ? pageScroller.options.navigation.length ? pageScroller.navigation.append('<li class="' + f + '"><a href="#pageScroll' + b + '">' + pageScroller.options.navigation[b] + "</a></li>") : e && "" != e ? pageScroller.navigation.append('<li class="' + f + '"><a href="#pageScroll' + b + '">' + e + "</a></li>") : pageScroller.navigation.append('<li class="' + f + '"><a href="#pageScroll' + b + '">Navigation ' + (b + 1) + "</a></li>") : pageScroller.navigation = d(pageScroller.options.navigation)
});
// Clicking a nav link marks it active and smooth-scrolls to its section.
pageScroller.pageLinks = d("a", pageScroller.navigation);
pageScroller.pageLinks.each(function (b) {
d(this).bind("click", function (a) {
a.preventDefault();
pageScroller.scrollBody.is(":animated") || (pageScroller.pageLinks.parent("li").removeClass("active"),
d(this).parent("li").addClass("active"));
j(c, pageScroller.sections.eq(b), b)
})
});
// Public API: advance one section (no-op past the last one).
pageScroller.next = function () {
var b = pageScroller.options.currentSection + 1;
if (b != pageScroller.sections.length) {
var a = pageScroller.sections.eq(b);
j(c, a, b)
}
};
// Public API: go back one section, clamped at the first.
pageScroller.prev = function () {
var b = pageScroller.options.currentSection - 1;
0 >= b && (b = 0);
var a = pageScroller.sections.eq(b);
j(c, a, b)
};
pageScroller.goTo = function (a) {
goTo(c, pageScroller.options.currentSection, a)
};
// Keep the nav highlight in sync while the user scrolls manually.
pageScroller.scrollWindow.bind("scroll", function () {
k()
});
setTimeout(function () {
0 == pageScroller.scrollPosition && k()
}, 200)
};
// k: on scroll, find which section currently occupies the viewport and
// update the active nav entry; bails out while a scroll animation runs.
var k = function () {
pageScroller.scrollPosition = pageScroller.scrollWindow.scrollTop();
pageScroller.scrollDistance = pageScroller.scrollPosition + pageScroller.currentSectionHeight;
for (i = 0; i < pageScroller.sections.length; i++) {
var c = pageScroller.sections.eq(i).offset().top;
pageScroller.options.scrollOffset && c && (c += parseInt(pageScroller.options.scrollOffset));
var a = 0;
if (i < pageScroller.sections.length - 1) {
var d = pageScroller.sections.eq(i + 1);
pageScroller.options.scrollOffset ? a = parseInt(d.offset().top + pageScroller.options.scrollOffset) : a = d.offset().top;
var d = pageScroller.pageLinks.eq(i).parent("li"),
b = pageScroller.pageLinks.eq(pageScroller.sections.length - 1).parent("li")
}
if (pageScroller.scrollBody.is(":animated")) return !1;
if (pageScroller.scrollDocument.height() == pageScroller.scrollDistance) {
if (!b.hasClass("active")) return updateTo = pageScroller.sections.length - 1, g(updateTo), !1
} else if (a) {
if (pageScroller.scrollPosition >= c && pageScroller.scrollPosition < a && !d.hasClass("active")) return updateTo = i, g(updateTo), !1
} else if (pageScroller.scrollPosition >= c && i == pageScroller.sections.length - 1 && !b.hasClass("active")) return updateTo = pageScroller.sections.length - 1, g(updateTo), !1
}
// j: animate the document scroll position to target section `a`
// (index `e`), then refresh the nav highlight when done.
}, j = function (c, a, e) {
var c = d("html, body"),
b = d(window).scrollTop(),
a = a.offset().top;
pageScroller.options.scrollOffset && (a += parseInt(pageScroller.options.scrollOffset));
0 > a && (a = 0);
a != b && !c.is(":animated") && c.animate({
scrollTop: a
}, pageScroller.options.animationSpeed, "swing").promise().done(function () {
g(e)
})
// g: mark nav entry `c` active, remember it as the current section and
// maintain the "previ-item" marker on the preceding entry.
}, g = function (c) {
pageScroller.pageLinks.parent("li").removeClass("active");
pageScroller.pageLinks.eq(c).parent("li").addClass("active");
pageScroller.options.currentSection = c
pageScroller.pageLinks.closest("li.active").prev().addClass("previ-item");
pageScroller.pageLinks.closest("li.active").removeClass("previ-item");
pageScroller.pageLinks.closest("li.active").prev().prev().removeClass("previ-item");
};
// Run setup only once per plugin invocation.
if (!pageScroller.options) return pageScroll(this, h)
}
})
})(jQuery);
<reponame>eengineergz/Lambda<gh_stars>0
const knex = require('knex');
const dbConfig = require('../knexfile');
const db = knex(dbConfig.development);
// Fetch a single action by id, or every action when no id is given.
function get(id) {
  if (id) {
    return db('actions')
      .where('id', id)
      .then(rows => rows[0]);
  }
  return db('actions');
}
// Insert an action and resolve with the freshly created row.
// Fix: call the module-local get() directly instead of this.get(), which
// broke whenever add() was invoked detached from the exports object
// (e.g. `const { add } = require('./model')` left `this` undefined).
function add(action) {
  return db('actions')
    .insert(action)
    .then(([id]) => get(id));
}
// List the actions belonging to one project, reshaped into the client
// schema (id, description, notes, completed).
function getActions(id) {
  return db('actions')
    .where('project_id', id)
    .then(actions =>
      actions.map(action => ({
        id: action.id,
        description: action.action_description,
        notes: action.action_notes,
        completed: action.completed,
      }))
    );
}
function remove(id){
return db('actions').where('id', id).del()
}
module.exports = {
get, add, getActions, remove,
}; |
customer_id,first,last,email,age,gender
1,John,Doe,johndoe@fake.com,26,Male
2,Harry,Potter,hpotter@genius.com,16,Male
3,Jane,Smith,janesmith@example.com,25,Female
4,Feya,Johnson,fjohnson@myweb.net,29,Female
5,Alexander,Petrov,apetrov@hotmail.ru,35,Male
6,Rosa,Klimova,rklimova@icloud.com,50,Female
7,Adam,Lennon,adamlennon@live.com,18,Male
8,Amy,Robinson,arobinson@yahoo.co.uk,41,Female |
package com.ableneo.liferay.portal.setup.core.util;
/*
* #%L
* Liferay Portal DB Setup core
* %%
* Original work Copyright (C) 2016 - 2018 mimacom ag
* Modified work Copyright (C) 2018 - 2020 ableneo, s. r. o.
* %%
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
* #L%
*/
import java.util.ArrayList;
import java.util.List;
import com.liferay.asset.kernel.model.AssetCategory;
import com.liferay.asset.kernel.model.AssetEntry;
import com.liferay.asset.kernel.model.AssetVocabulary;
import com.liferay.asset.kernel.service.AssetCategoryLocalServiceUtil;
import com.liferay.asset.kernel.service.AssetVocabularyLocalServiceUtil;
import com.liferay.document.library.kernel.util.DLUtil;
import com.liferay.dynamic.data.lists.model.DDLRecordSet;
import com.liferay.dynamic.data.lists.service.DDLRecordSetLocalServiceUtil;
import com.liferay.dynamic.data.mapping.model.DDMStructure;
import com.liferay.dynamic.data.mapping.model.DDMTemplate;
import com.liferay.dynamic.data.mapping.service.DDMStructureLocalServiceUtil;
import com.liferay.dynamic.data.mapping.service.DDMTemplateLocalServiceUtil;
import com.liferay.journal.model.JournalArticle;
import com.liferay.journal.service.JournalArticleLocalServiceUtil;
import com.liferay.petra.string.StringPool;
import com.liferay.portal.kernel.dao.orm.DynamicQuery;
import com.liferay.portal.kernel.dao.orm.PropertyFactoryUtil;
import com.liferay.portal.kernel.exception.PortalException;
import com.liferay.portal.kernel.exception.SystemException;
import com.liferay.portal.kernel.log.Log;
import com.liferay.portal.kernel.log.LogFactoryUtil;
import com.liferay.portal.kernel.model.GroupConstants;
import com.liferay.portal.kernel.model.Layout;
import com.liferay.portal.kernel.model.Organization;
import com.liferay.portal.kernel.model.UserGroup;
import com.liferay.portal.kernel.repository.model.FileEntry;
import com.liferay.portal.kernel.service.*;
import com.liferay.portal.kernel.workflow.WorkflowConstants;
/**
* The util allows to specify special placeholders within configured data. The placeholders will be resolved to values
* in the class.
*/
public final class ResolverUtil {
    // CHECKSTYLE:OFF
    // Kind of identifier a placeholder resolves to.
    public static final int ID_TYPE_ID = 0;
    public static final int ID_TYPE_UUID = 1;
    public static final int ID_TYPE_RESOURCE = 2;
    public static final int ID_TYPE_FILE = 3;
    // Template markers substituted into the placeholder patterns below.
    public static final String IDTYPE = "%%IDTYPE%%";
    public static final String LOOKUPTYPE = "%%LOOKUPTYPE%%";
    public static final String CLOSING_TAG = "$}}";
    // Placeholder opening patterns; %%...%% markers are replaced before matching.
    public static final String ARTICLE_BY_ART_ID = "{{$ARTICLE-%%IDTYPE%%-BY-ARTICLE-ID=";
    public static final String TEMPLATE_BY_KEY = "{{$%%PREFIX%%-TEMPLATE-%%IDTYPE%%-BY-KEY=";
    public static final String STRUCTURE_BY_KEY = "{{$%%PREFIX%%-STRUCTURE-%%IDTYPE%%-BY-KEY=";
    public static final String FILE_REFERENCE_URL = "{{$FILE-URL=";
    public static final String FILE_REFERENCE_ID = "{{$FILE-ID=";
    public static final String FILE_REFERENCE_UUID = "{{$FILE-UUID=";
    public static final String CLASS_ID_BY_NAME = "{{$CLASS-ID-BY-NAME=";
    public static final String PAGE_ID_BY_FRIENDLY_URL = "{{$%%PTYPE%%-PAGE-%%LAYOUTID%%-BY-FRIENDLY_URL=";
    public static final String DDL_REC_SET_BY_KEY = "{{$DDL-REC-SET-ID-BY-KEY=";
    public static final String TEMPLATE_CATEGORY = "{{$CATEGORY-ID-BY-VOCABULARY-AND-PATH=";
    public static final String ID_OF_SITE_WITH_NAME_KEY = "{{$ID_OF_SITE_WITH_NAME=";
    // Separator used inside placeholder payloads, e.g. ::site name::.
    public static final String VALUE_SPLIT = "::";
    public static final String ID_OF_ORG_USER_GROUP_WITH_NAME_KEY = "{{$%%IDTYPE%%_OF_%%LOOKUPTYPE%%_WITH_NAME=";
    public static final String LAYOUTID = "%%LAYOUTID%%";
    private static final Log LOG = LogFactoryUtil.getLog(ResolverUtil.class);
    private static final String COULD_NOT_RESOLVE_SITE_NAME =
            "Could not resolve site name, as the syntax is offended, closing tag (%2$s) is missing for %1$s";
    // CHECKSTYLE:ON

    // Utility class: no instances.
    private ResolverUtil() {}
    /**
     * Resolves every supported placeholder expression inside {@code value}.
     *
     * Placeholders reference sites, articles, templates, structures, ADTs,
     * categories, documents-and-media files, pages, DDL record sets,
     * organizations and user groups by human-readable keys (optionally
     * scoped to another site via a leading {@code ::site name::} segment)
     * and are substituted by the corresponding id, uuid, resource id or URL.
     * Each resolver below handles exactly one placeholder family; the calls
     * are independent of one another.
     *
     * @param groupId      group (site) id used for look-ups by default
     * @param company      company id used for the look-ups
     * @param value        string in which placeholder expressions are resolved
     * @param resolverHint textual origin of the value, used for logging
     *                     problems or infos on the resolution
     * @return the value with every resolvable placeholder substituted
     */
    public static String lookupAll(final long groupId, final long company, final String value,
            final String resolverHint) {
        // substitute references to groups/sites
        String retVal = ResolverUtil.lookupSiteIdWithName(resolverHint, value, company);
        // ID for article template
        retVal = ResolverUtil.lookupStructureOrTemplateIdWithKey(retVal, resolverHint, groupId, company, false, "ART",
                true, JournalArticle.class);
        // ID for article structure
        retVal = ResolverUtil.lookupStructureOrTemplateIdWithKey(retVal, resolverHint, groupId, company, false, "ART",
                false, JournalArticle.class);
        // UUID for article structure
        retVal = ResolverUtil.lookupStructureOrTemplateIdWithKey(retVal, resolverHint, groupId, company, true, "ART",
                false, JournalArticle.class);
        // UUID for article template
        retVal = ResolverUtil.lookupStructureOrTemplateIdWithKey(retVal, resolverHint, groupId, company, true, "ART",
                true, JournalArticle.class);
        // UUID for ADT
        retVal = ResolverUtil.lookupStructureOrTemplateIdWithKey(retVal, resolverHint, groupId, company, true, "ADT",
                true, AssetEntry.class);
        // ID for ADT
        retVal = ResolverUtil.lookupStructureOrTemplateIdWithKey(retVal, resolverHint, groupId, company, false, "ADT",
                true, AssetEntry.class);
        // Resolve categories
        retVal = ResolverUtil.substituteCategoryNameWithCategoryId(retVal, resolverHint, groupId, company);
        // Substitute the article key with the primary key (id)
        retVal = ResolverUtil.lookupArticleWithArticleId(retVal, resolverHint, groupId, company, ID_TYPE_ID);
        // Substitute the article key with the uuid of the article
        retVal = ResolverUtil.lookupArticleWithArticleId(retVal, resolverHint, groupId, company, ID_TYPE_UUID);
        // Resource type id for articles
        retVal = ResolverUtil.lookupArticleWithArticleId(retVal, resolverHint, groupId, company, ID_TYPE_RESOURCE);
        // Substitute references to files by their URLs
        retVal = ResolverUtil.substituteFileReferencesWithURL(retVal, resolverHint, groupId, company, groupId,
                ID_TYPE_FILE);
        // Substitute references to files by their id
        retVal = ResolverUtil.substituteFileReferencesWithURL(retVal, resolverHint, groupId, company, groupId,
                ID_TYPE_ID);
        // Substitute references to files by their UUID
        retVal = ResolverUtil.substituteFileReferencesWithURL(retVal, resolverHint, groupId, company, groupId,
                ID_TYPE_UUID);
        // Substitute class id references
        retVal = ResolverUtil.getClassIdByName(retVal, resolverHint);
        // Substitute private page friendly urls to layout ids
        retVal = ResolverUtil.lookupPageIdWithFriendlyUrl(retVal, resolverHint, groupId, company, true, IdMode.ID);
        // Substitute public page friendly urls to layout ids
        retVal = ResolverUtil.lookupPageIdWithFriendlyUrl(retVal, resolverHint, groupId, company, false, IdMode.ID);
        // Substitute private page friendly urls to plids
        retVal = ResolverUtil.lookupPageIdWithFriendlyUrl(retVal, resolverHint, groupId, company, true, IdMode.PLID);
        // Substitute public page friendly urls to plids
        retVal = ResolverUtil.lookupPageIdWithFriendlyUrl(retVal, resolverHint, groupId, company, false, IdMode.PLID);
        // Substitute private page friendly urls to uuids
        retVal = ResolverUtil.lookupPageIdWithFriendlyUrl(retVal, resolverHint, groupId, company, true, IdMode.UUID);
        // Substitute public page friendly urls to uuids
        retVal = ResolverUtil.lookupPageIdWithFriendlyUrl(retVal, resolverHint, groupId, company, false, IdMode.UUID);
        // lookup ddl record set id by key
        retVal = lookupDDLRecordSetId(retVal, resolverHint, groupId, company);
        // replace id of user groups
        retVal = lookupOrgOrUserGroupIdWithName(resolverHint, retVal, company, false, false);
        // replace uuid of user groups
        retVal = lookupOrgOrUserGroupIdWithName(resolverHint, retVal, company, true, false);
        // replace id of orgs
        retVal = lookupOrgOrUserGroupIdWithName(resolverHint, retVal, company, false, true);
        // replace uuid of orgs
        retVal = lookupOrgOrUserGroupIdWithName(resolverHint, retVal, company, true, true);
        return retVal;
    }
public static String getClassIdByName(final String value, final String locationHint) {
String valueCopy = value;
String retVal = valueCopy;
while (valueCopy != null && valueCopy.trim().indexOf(CLASS_ID_BY_NAME) > -1) {
int pos = valueCopy.trim().indexOf(CLASS_ID_BY_NAME);
int pos2 = valueCopy.indexOf(CLOSING_TAG, pos + 1);
String name = "";
if (pos2 > -1) {
try {
name = valueCopy.substring(pos + CLASS_ID_BY_NAME.length(), pos2);
long groupId = ClassNameLocalServiceUtil.getClassNameId(name);
retVal = valueCopy.substring(0, pos) + groupId
+ valueCopy.substring(pos2 + CLOSING_TAG.length(), valueCopy.length());
valueCopy = retVal;
} catch (Exception ex) {
LOG.error(String.format("Could not resolve class %1$s for %2$s", name, locationHint), ex);
}
} else {
LOG.warn(String.format(COULD_NOT_RESOLVE_SITE_NAME, locationHint, CLOSING_TAG));
break;
}
}
return retVal;
}
    /**
     * Looks up the group id of a site by its symbolic name.
     *
     * "global" resolves to the company's global site; "default" or the
     * empty string resolve to the guest site (see {@code getSiteName});
     * any other value is treated as a literal site name.
     *
     * @param siteName     symbolic or literal site name
     * @param company      company id the site belongs to
     * @param locationName textual origin, used in error messages
     * @return the group id, or 0 when the site could not be resolved
     */
    public static long getSiteGroupIdByName(final String siteName, final long company, final String locationName) {
        long siteGroupId = 0;
        if (siteName.equalsIgnoreCase("global")) {
            try {
                // look up global site
                siteGroupId = GroupLocalServiceUtil.getCompanyGroup(company).getGroupId();
            } catch (PortalException e) {
                LOG.error("Id of global site could not be retrieved!", e);
            }
        } else {
            try {
                // look up default site
                siteGroupId = GroupLocalServiceUtil.getGroup(company, getSiteName(siteName)).getGroupId();
            } catch (PortalException e) {
                LOG.error(String.format("Id of site %1$s could not be retrieved for %2$s", siteName, locationName), e);
            }
        }
        return siteGroupId;
    }
private static String getSiteName(final String siteName) {
if (siteName.equalsIgnoreCase("default") || siteName.equals("")) {
return GroupConstants.GUEST;
}
return siteName;
}
    /**
     * Substitutes every documents-and-media file reference in the content.
     *
     * A reference has the form {@code {{$FILE-URL=[::site::]/path$}}}
     * (or {@code FILE-ID} / {@code FILE-UUID}, selected via {@code refType})
     * and is replaced by the file's preview URL, entry id or uuid. An
     * optional leading {@code ::site name::} segment redirects the look-up
     * to another site; unresolvable files become {@code <file-not-found />}.
     *
     * @param content      the content of the article
     * @param locationHint where the substitution happens (for logging),
     *                     e.g. the file name of the article
     * @param groupId      the group id (site) in whose scope the article is imported
     * @param company      the company id
     * @param repoId       the repository id
     * @param refType      ID_TYPE_ID, ID_TYPE_UUID, or anything else for the URL
     * @return the content with all file references substituted
     */
    public static String substituteFileReferencesWithURL(final String content, final String locationHint,
            final long groupId, final long company, final long repoId, final int refType) {
        String openingTag = FILE_REFERENCE_URL;
        if (refType == ID_TYPE_ID) {
            openingTag = FILE_REFERENCE_ID;
        } else if (refType == ID_TYPE_UUID) {
            openingTag = FILE_REFERENCE_UUID;
        }
        String result = content;
        int pos = result.indexOf(openingTag);
        while (pos > -1) {
            int pos2 = result.indexOf(CLOSING_TAG, pos);
            if (pos2 < 0) {
                LOG.error(String.format("No closing Tag, pos %1$s in file %2$s", pos, locationHint));
                break;
            } else {
                // by default the referred file is looked up in current site.
                long siteGroupId = groupId;
                String filePath = result.substring(pos + openingTag.length(), pos2).trim();
                // check for the reference to another site
                String[] refSegs = ResolverUtil.separateSiteRef(filePath);
                if (!refSegs[0].equals("")) {
                    siteGroupId = ResolverUtil.getSiteGroupIdByName(refSegs[0], company, locationHint);
                    filePath = refSegs[1];
                }
                FileEntry fe = DocumentUtil.findDocument(filePath, siteGroupId, repoId);
                if (fe == null) {
                    LOG.error(String.format("Referred file %1$s is not found in documents and media.", filePath));
                    result = result.substring(0, pos) + " <file-not-found /> "
                            + result.substring(pos2 + CLOSING_TAG.length(), result.length());
                } else {
                    String fileEntryRef = " <file-not-found /> ";
                    try {
                        if (refType == ID_TYPE_ID) {
                            fileEntryRef = Long.toString(fe.getFileEntryId());
                        } else if (refType == ID_TYPE_UUID) {
                            fileEntryRef = fe.getUuid();
                        } else {
                            // any other refType yields the preview URL
                            fileEntryRef = DLUtil.getPreviewURL(fe, fe.getFileVersion(), null, StringPool.BLANK);
                        }
                    } catch (PortalException e) {
                        LOG.error(String.format("URL of referred file %1$s cannot be retrieved.", filePath));
                    }
                    result = result.substring(0, pos) + fileEntryRef
                            + result.substring(pos2 + CLOSING_TAG.length(), result.length());
                }
            }
            pos = result.indexOf(openingTag, pos + 1);
        }
        return result;
    }
    /**
     * Resolves a category reference of the form
     * {@code {{$CATEGORY-ID-BY-VOCABULARY-AND-PATH=::site::vocabulary::a/b/c$}}}
     * to the category's id.
     *
     * NOTE(review): unlike the other resolvers, this only fires when the
     * whole content *starts with* the tag (startsWith below) — embedded
     * occurrences are left untouched. Confirm this is intentional.
     *
     * @param content      text that may consist of a single category token
     * @param locationHint textual origin, used in error messages
     * @param groupId      fallback group id when the site segment fails to resolve
     * @param company      company id used for the site look-up
     * @return the category id as a string, or the unchanged content
     */
    public static String substituteCategoryNameWithCategoryId(final String content, final String locationHint,
            final long groupId, final long company) {
        String openingTag = TEMPLATE_CATEGORY;
        String result = content;
        if (result.startsWith(openingTag)) {
            // Expected payload after splitting: [tag, site, vocabulary, path]
            String[] values = result.replace(CLOSING_TAG, "").split(VALUE_SPLIT);
            if (values.length == 4) {
                long groupIdResolved = groupId;
                try {
                    groupIdResolved = ResolverUtil.getSiteGroupIdByName(values[1], company, locationHint);
                    String category = resolveVocabularyName(locationHint, values, groupIdResolved);
                    if (category != null) {
                        return category;
                    }
                } catch (Exception e) {
                    LOG.error(String.format("Could not resolve site name for %1$s", locationHint), e);
                }
            } else {
                LOG.error(
                        "Categories to be susbstited is not in correct format : SiteName::Vocabulary::CategoriesPath");
            }
        }
        return result;
    }
    /**
     * Loads the vocabulary named {@code values[2]} in the resolved group and
     * walks the slash-separated category path {@code values[3]} inside it.
     *
     * @return the category id as a string, or null when the vocabulary or
     *         any path segment cannot be resolved (the error is logged)
     */
    private static String resolveVocabularyName(String locationHint, String[] values, long groupIdResolved) {
        try {
            AssetVocabulary assetVocabulary =
                    AssetVocabularyLocalServiceUtil.getGroupVocabulary(groupIdResolved, values[2]);
            String[] categoryIds = values[3].split("/");
            String category = resolveCategoryId(locationHint, assetVocabulary, categoryIds);
            if (category != null)
                return category;
        } catch (PortalException e) {
            LOG.error(String.format("Could not resolve vocabulary name for %1$s", locationHint), e);
        }
        return null;
    }
    /**
     * Descends the category tree: the first path segment is matched among
     * the vocabulary's top-level categories, every further segment among the
     * children of the previously matched category.
     *
     * @return the id of the final category as a string, or null when any
     *         segment has no matching category (logged as an error)
     */
    private static String resolveCategoryId(String locationHint, AssetVocabulary assetVocabulary,
            String[] categoryIds) {
        try {
            AssetCategory category = assetVocabulary.getCategories().stream()
                    .filter(vocabularyCategory -> vocabularyCategory.getName().equals(categoryIds[0])).findFirst()
                    .orElseThrow(PortalException::new);
            for (int i = 1; i < categoryIds.length; i++) {
                String categoryName = categoryIds[i];
                category = AssetCategoryLocalServiceUtil.getChildCategories(category.getCategoryId()).stream()
                        .filter(childrenCategory -> childrenCategory.getName().equals(categoryName)).findFirst()
                        .orElseThrow(PortalException::new);
            }
            return String.valueOf(category.getCategoryId());
        } catch (PortalException e) {
            LOG.error(String.format("Could not resolve category path for %1$s", locationHint), e);
        }
        return null;
    }
public static String lookupSiteIdWithName(final String locationHint, final String value, final long company) {
String valueCopy = value;
String retVal = valueCopy;
while (valueCopy != null && valueCopy.trim().indexOf(ID_OF_SITE_WITH_NAME_KEY) > -1) {
int pos = valueCopy.trim().indexOf(ID_OF_SITE_WITH_NAME_KEY);
int pos2 = valueCopy.indexOf(CLOSING_TAG, pos + 1);
if (pos2 > -1) {
try {
String name = valueCopy.substring(pos + ID_OF_SITE_WITH_NAME_KEY.length(), pos2);
long groupId = ResolverUtil.getSiteGroupIdByName(name, company, locationHint);
retVal = valueCopy.substring(0, pos) + groupId
+ valueCopy.substring(pos2 + CLOSING_TAG.length(), valueCopy.length());
valueCopy = retVal;
} catch (Exception ex) {
LOG.error(String.format("Could not resolve site name for %1$s", locationHint), ex);
}
} else {
LOG.warn(String.format(COULD_NOT_RESOLVE_SITE_NAME, locationHint, CLOSING_TAG));
break;
}
}
return retVal;
}
    /**
     * Replaces every {@code {{$<ID|UUID>_OF_<ORG|USER_GROUP>_WITH_NAME=<name>$}}}
     * token with the id or uuid of the named organization or user group;
     * unresolved names are substituted with the literal "NOT FOUND".
     *
     * NOTE(review): the scan position is taken from
     * {@code valueCopy.trim().indexOf(...)} but then used to slice the
     * untrimmed string — with leading whitespace in the value all offsets
     * shift by the trimmed length. The same pattern exists in sibling
     * methods; confirm and fix consistently.
     *
     * @param locationHint textual origin of the value, used in log messages
     * @param value        text possibly containing org/user-group tokens
     * @param company      company id used for the look-up
     * @param uuid         true to substitute the uuid, false for the numeric id
     * @param org          true to look up organizations, false for user groups
     * @return the value with all resolvable tokens substituted
     */
    public static String lookupOrgOrUserGroupIdWithName(final String locationHint, final String value,
            final long company, final boolean uuid, final boolean org) {
        String valueCopy = value;
        String retVal = valueCopy;
        // Specialize the generic pattern for the requested id kind and entity.
        String searchString = ID_OF_ORG_USER_GROUP_WITH_NAME_KEY;
        if (uuid) {
            searchString = searchString.replace(IDTYPE, "UUID");
        } else {
            searchString = searchString.replace(IDTYPE, "ID");
        }
        if (org) {
            searchString = searchString.replace(LOOKUPTYPE, "ORG");
        } else {
            searchString = searchString.replace(LOOKUPTYPE, "USER_GROUP");
        }
        while (valueCopy != null && valueCopy.trim().indexOf(searchString) > -1) {
            int pos = valueCopy.trim().indexOf(searchString);
            int pos2 = valueCopy.indexOf(CLOSING_TAG, pos + 1);
            if (pos2 > -1) {
                try {
                    String name = valueCopy.substring(pos + searchString.length(), pos2);
                    String replacementId = "NOT FOUND";
                    if (org) {
                        Organization o = ResolverUtil.getOrganization(name, company, name);
                        if (o != null) {
                            if (uuid) {
                                replacementId = o.getUuid();
                            } else {
                                replacementId = Long.toString(o.getOrganizationId());
                            }
                        }
                    } else {
                        UserGroup ug = ResolverUtil.getUserGroup(name, company, name);
                        if (ug != null) {
                            if (uuid) {
                                replacementId = ug.getUuid();
                            } else {
                                replacementId = Long.toString(ug.getUserGroupId());
                            }
                        }
                    }
                    retVal = valueCopy.substring(0, pos) + replacementId
                            + valueCopy.substring(pos2 + CLOSING_TAG.length(), valueCopy.length());
                    valueCopy = retVal;
                } catch (Exception ex) {
                    String type = "user group";
                    if (org) {
                        type = "organization";
                    }
                    LOG.error(String.format("Could not resolve %1$s name for %2$s", type, locationHint), ex);
                }
            } else {
                String type = "user group";
                if (org) {
                    type = "organization";
                }
                LOG.warn(String.format(
                        "Could not resolve %1$s name, as the syntax is offendended, closing (%3$s) is missing for %2$s",
                        type, locationHint, CLOSING_TAG));
                break;
            }
        }
        return retVal;
    }
/**
 * Replaces ARTICLE_BY_ART_ID tokens in {@code content} with an identifier of
 * the latest approved journal article carrying the embedded article id. A
 * token may carry a leading site reference to resolve the article in a site
 * other than {@code groupId}.
 *
 * @param typeOfId 0 = database id, 1 = UUID, 2 = resource primary key
 * @return content with tokens substituted; a missing article yields the
 *         literal "!!NOTFOUND!!", a lookup failure yields an empty string
 */
public static String lookupArticleWithArticleId(final String content, final String locationHint, final long groupId,
        final long company, final int typeOfId) {
    String contentCopy = content;
    String retVal = contentCopy;
    long siteGroupId = groupId;
    int pos = -1;
    String lookup = ARTICLE_BY_ART_ID;
    if (typeOfId == 0) {
        lookup = lookup.replace(IDTYPE, "ID");
    } else if (typeOfId == 1) {
        lookup = lookup.replace(IDTYPE, "UUID");
    } else if (typeOfId == 2) {
        lookup = lookup.replace(IDTYPE, "RESID");
    }
    while (contentCopy != null && contentCopy.indexOf(lookup) > -1) {
        pos = contentCopy.indexOf(lookup);
        int pos2 = contentCopy.indexOf(CLOSING_TAG, pos + 1);
        if (pos2 > -1) {
            String name = contentCopy.substring(pos + lookup.length(), pos2);
            // check for the reference to another site
            String[] refSegs = ResolverUtil.separateSiteRef(name);
            if (!refSegs[0].equals("")) {
                // NOTE(review): siteGroupId is not reset to groupId for later
                // tokens, so a site override carries over to subsequent tokens
                // without their own site reference — confirm this is intended.
                siteGroupId = ResolverUtil.getSiteGroupIdByName(refSegs[0], company, locationHint);
                name = refSegs[1];
            }
            String templateId = "";
            try {
                // Only the latest APPROVED version of the article is considered.
                JournalArticle ja = JournalArticleLocalServiceUtil.fetchLatestArticle(siteGroupId, name,
                        WorkflowConstants.STATUS_APPROVED);
                if (ja != null) {
                    if (typeOfId == 0) {
                        templateId = Long.toString(ja.getId());
                    } else if (typeOfId == 1) {
                        templateId = ja.getUuid();
                    } else if (typeOfId == 2) {
                        templateId = Long.toString(ja.getResourcePrimKey());
                    }
                } else {
                    LOG.error(String.format("Article with article id %1$s not found for %2$s", name, locationHint));
                    templateId = "!!NOTFOUND!!";
                }
            } catch (SystemException e) {
                // On failure templateId stays "" and the token is simply removed.
                LOG.error(String.format("Article with article id %1$s not found for %2$s", name, locationHint), e);
            }
            retVal = contentCopy.substring(0, pos) + templateId
                    + contentCopy.substring(pos2 + CLOSING_TAG.length(), contentCopy.length());
            contentCopy = retVal;
        } else {
            LOG.warn(String.format(
                    "Could not resolve template, as the syntax is offended, closing (%1$s) is missing for %2$s abort parsing, as this is possibly an error!",
                    CLOSING_TAG, locationHint));
            break;
        }
    }
    return retVal;
}
/**
 * Replaces PAGE_ID_BY_FRIENDLY_URL tokens in {@code content} with an
 * identifier of the layout (page) found under the embedded friendly URL.
 * A token may carry a leading site reference to resolve the page in a site
 * other than {@code groupId}.
 *
 * @param isPrivate selects the private vs. public page tree (affects both
 *                  the token text and the layout lookup)
 * @param mode      which identifier flavor to substitute (see switch below)
 * @return content with tokens substituted; a missing page is replaced with
 *         the literal " PAGE NOT FOUND!! "
 */
public static String lookupPageIdWithFriendlyUrl(final String content, final String locationHint,
        final long groupId, final long company, final boolean isPrivate, final IdMode mode) {
    String contentCopy = content;
    String lookUp = PAGE_ID_BY_FRIENDLY_URL;
    if (isPrivate) {
        lookUp = lookUp.replace("%%PTYPE%%", "PRIV");
    } else {
        lookUp = lookUp.replace("%%PTYPE%%", "PUB");
    }
    // NOTE(review): the token text and the substituted value look crosswise:
    // IdMode.ID selects the "PLID" token text but later substitutes
    // getLayoutId(), while IdMode.PLID selects "ID" and substitutes getPlid().
    // Confirm against the token grammar — this may be a copy-paste swap.
    switch (mode) {
    case ID:
        lookUp = lookUp.replace(LAYOUTID, "PLID");
        break;
    case PLID:
        lookUp = lookUp.replace(LAYOUTID, "ID");
        break;
    case UUID:
        lookUp = lookUp.replace(LAYOUTID, "UUID");
        break;
    }
    int pos = contentCopy.indexOf(lookUp);
    while (pos > -1) {
        int pos2 = contentCopy.indexOf(CLOSING_TAG, pos);
        if (pos2 < 0) {
            LOG.error(String.format("No closing Tag, pos %1$s for %2$s", pos, locationHint));
            break;
        } else {
            // by default the referred file is looked up in current site.
            long siteGroupId = groupId;
            String fUrl = contentCopy.substring(pos + lookUp.length(), pos2).trim();
            // check for the reference to another site
            String[] refSegs = ResolverUtil.separateSiteRef(fUrl);
            if (!refSegs[0].equals("")) {
                siteGroupId = ResolverUtil.getSiteGroupIdByName(refSegs[0], company, locationHint);
                fUrl = refSegs[1];
            }
            String pageId = "NOT FOUND";
            Layout l = null;
            try {
                l = LayoutLocalServiceUtil.getFriendlyURLLayout(siteGroupId, isPrivate, fUrl);
            } catch (PortalException | SystemException e) {
                LOG.error(e);
            }
            if (l == null) {
                LOG.error(String.format("Referred page %1$s is not found .", fUrl));
                contentCopy = contentCopy.substring(0, pos) + " PAGE NOT FOUND!! "
                        + contentCopy.substring(pos2 + CLOSING_TAG.length(), contentCopy.length());
            } else {
                // Pick the identifier flavor requested by the caller.
                switch (mode) {
                case ID:
                    pageId = Long.toString(l.getLayoutId());
                    break;
                case PLID:
                    pageId = Long.toString(l.getPlid());
                    break;
                case UUID:
                    pageId = l.getUuid();
                    break;
                }
                contentCopy = contentCopy.substring(0, pos) + pageId
                        + contentCopy.substring(pos2 + CLOSING_TAG.length(), contentCopy.length());
            }
        }
        // Continue scanning after the current token position.
        pos = contentCopy.indexOf(lookUp, pos + 1);
    }
    //
    return contentCopy;
}
/**
 * Replaces DDL_REC_SET_BY_KEY tokens in {@code content} with the numeric
 * record-set id of the referenced dynamic data list. A token may carry a
 * leading site reference to resolve the record set in a site other than
 * {@code groupId}.
 *
 * @return content with tokens substituted; a missing record set is replaced
 *         with the literal " PAGE NOT FOUND!! "
 */
public static String lookupDDLRecordSetId(final String content, final String locationHint, final long groupId,
        final long company) {
    String contentCopy = content;
    String lookUp = DDL_REC_SET_BY_KEY;
    int pos = contentCopy.indexOf(lookUp);
    while (pos > -1) {
        int pos2 = contentCopy.indexOf(CLOSING_TAG, pos);
        if (pos2 < 0) {
            LOG.error(String.format("No closing Tag, pos %1$s for %2$s", pos, locationHint));
            break;
        } else {
            // by default the referred file is looked up in current site.
            long siteGroupId = groupId;
            String recordsetId = contentCopy.substring(pos + lookUp.length(), pos2).trim();
            // check for the reference to another site
            String[] refSegs = ResolverUtil.separateSiteRef(recordsetId);
            if (!refSegs[0].equals("")) {
                siteGroupId = ResolverUtil.getSiteGroupIdByName(refSegs[0], company, locationHint);
                recordsetId = refSegs[1];
            }
            DDLRecordSet rs = null;
            try {
                rs = DDLRecordSetLocalServiceUtil.getRecordSet(siteGroupId, recordsetId);
            } catch (PortalException e) {
                LOG.error(String.format("Error retrieving referred DDL structure %1$s.", recordsetId));
            }
            if (rs == null) {
                LOG.error(String.format("Referred DDL structure %1$s is not found .", recordsetId));
                // NOTE(review): the " PAGE NOT FOUND!! " placeholder looks
                // copied from the page resolver; a record-set-specific marker
                // would be clearer — confirm nothing parses this literal.
                contentCopy = contentCopy.substring(0, pos) + " PAGE NOT FOUND!! "
                        + contentCopy.substring(pos2 + CLOSING_TAG.length(), contentCopy.length());
            } else {
                String pageId = Long.toString(rs.getRecordSetId());
                contentCopy = contentCopy.substring(0, pos) + pageId
                        + contentCopy.substring(pos2 + CLOSING_TAG.length(), contentCopy.length());
            }
        }
        pos = contentCopy.indexOf(lookUp, pos + 1);
    }
    //
    return contentCopy;
}
// CHECKSTYLE:OFF
/**
 * Replaces TEMPLATE_BY_KEY / STRUCTURE_BY_KEY tokens in {@code content} with
 * the id (or UUID) of the referenced DDM template or structure. Tokens may
 * carry a leading site reference to resolve against a site other than
 * {@code groupId}.
 *
 * @param uuid          true to substitute the UUID, false for the numeric id
 * @param commandPrefix substituted for the %%PREFIX%% placeholder of the token
 * @param isTemplate    true to resolve templates, false for structures
 * @param referredClass class the structure/template is registered for
 */
public static String lookupStructureOrTemplateIdWithKey(final String content, final String locationHint,
        final long groupId, final long company, final boolean uuid, final String commandPrefix,
        final boolean isTemplate, final Class referredClass) {
    String contentCopy = content;
    String retVal = contentCopy;
    long siteGroupId = groupId;
    int pos = -1;
    String lookup = TEMPLATE_BY_KEY;
    if (!isTemplate) {
        lookup = STRUCTURE_BY_KEY;
    }
    if (uuid) {
        lookup = lookup.replace(IDTYPE, "UUID");
    } else {
        lookup = lookup.replace(IDTYPE, "ID");
    }
    lookup = lookup.replace("%%PREFIX%%", commandPrefix);
    while (contentCopy != null && contentCopy.indexOf(lookup) > -1) {
        pos = contentCopy.indexOf(lookup);
        // Bug fix: search for the closing tag AFTER the opening token (as every
        // sibling lookup method does). Without the fromIndex, a closing tag
        // occurring earlier in the content makes pos2 < pos, and the substring
        // calls below throw StringIndexOutOfBoundsException.
        int pos2 = contentCopy.indexOf(CLOSING_TAG, pos + 1);
        if (pos2 > -1) {
            String name = contentCopy.substring(pos + lookup.length(), pos2);
            // check for the reference to another site
            String[] refSegs = ResolverUtil.separateSiteRef(name);
            if (!refSegs[0].equals("")) {
                siteGroupId = ResolverUtil.getSiteGroupIdByName(refSegs[0], company, locationHint);
                name = refSegs[1];
            }
            String templateId = "";
            try {
                if (uuid) {
                    if (isTemplate) {
                        templateId = getTemplateUUID(name);
                    } else {
                        templateId = getStructureUUID(name, siteGroupId, referredClass);
                    }
                } else {
                    if (isTemplate) {
                        templateId = Long.toString(getTemplateId(name, siteGroupId, referredClass));
                    } else {
                        templateId = Long.toString(getStructureId(name, siteGroupId, referredClass, false));
                    }
                }
            } catch (PortalException | SystemException e) {
                LOG.error(
                        String.format("Template with key contentCopy %1$s not found for %2$s", name, locationHint));
                LOG.error((Throwable) e);
            }
            retVal = contentCopy.substring(0, pos) + templateId
                    + contentCopy.substring(pos2 + CLOSING_TAG.length(), contentCopy.length());
            contentCopy = retVal;
        } else {
            LOG.warn("Could not resolve template, as the syntax is offended, closing $}} is " + "missing for "
                    + locationHint + " abort parsing, as this is possibly an error!");
            break;
        }
    }
    return retVal;
}
// CHECKSTYLE:ON
/**
 * Resolves the DDM structure identified by {@code structureKey} for the given
 * group and owning class, and returns its numeric structure id.
 *
 * @throws PortalException if no matching structure exists
 */
public static long getStructureId(final String structureKey, final long groupId, final Class clazz,
        boolean includeAncestorStructures) throws PortalException {
    final long ownerClassNameId = ClassNameLocalServiceUtil.getClassNameId(clazz);
    return DDMStructureLocalServiceUtil
            .getStructure(groupId, ownerClassNameId, structureKey, includeAncestorStructures)
            .getStructureId();
}
/**
 * Resolves the DDM structure identified by {@code structureKey} for the given
 * group and owning class, and returns its UUID.
 *
 * @throws PortalException if no matching structure exists
 */
public static String getStructureUUID(final String structureKey, final long groupId, final Class clazz)
        throws PortalException {
    final long ownerClassNameId = ClassNameLocalServiceUtil.getClassNameId(clazz);
    return DDMStructureLocalServiceUtil.getStructure(groupId, ownerClassNameId, structureKey).getUuid();
}
/**
 * Resolves the DDM template identified by {@code templateKey} for the given
 * group and owning class, and returns its numeric template id.
 *
 * @throws PortalException if no matching template exists
 */
public static long getTemplateId(final String templateKey, final long groupId, final Class clazz)
        throws PortalException {
    final long ownerClassNameId = ClassNameLocalServiceUtil.getClassNameId(clazz);
    return DDMTemplateLocalServiceUtil.getTemplate(groupId, ownerClassNameId, templateKey).getTemplateId();
}
/**
 * Fetches the organization named {@code name} within the given company, or
 * {@code null} when it cannot be retrieved (the failure is logged, not thrown).
 */
public static Organization getOrganization(final String name, final long companyId, final String locationHint) {
    Organization organization = null;
    try {
        organization = OrganizationLocalServiceUtil.getOrganization(companyId, name);
    } catch (PortalException e) {
        LOG.error(String.format("Could not retrieve organization %1$s in context %2$s", name, locationHint));
    }
    return organization;
}
/**
 * Fetches the user group named {@code name} within the given company, or
 * {@code null} when it cannot be retrieved (the failure is logged, not thrown).
 */
public static UserGroup getUserGroup(final String name, final long companyId, final String locationHint) {
    UserGroup userGroup = null;
    try {
        userGroup = UserGroupLocalServiceUtil.getUserGroup(companyId, name);
    } catch (PortalException e) {
        // Bug fix: the message previously said "organization" (copy-paste from
        // getOrganization) although a user group is being looked up here.
        LOG.error(String.format("Could not retrieve user group %1$s in context %2$s", name, locationHint));
    }
    return userGroup;
}
/**
 * Returns the UUID of the first DDM template whose key equals
 * {@code templateKey}, or the literal "NOT FOUND!!!!" when no match exists.
 *
 * NOTE(review): the dynamic query is not scoped by group or company; if
 * several sites define the same template key, an arbitrary match wins —
 * confirm keys are globally unique in this deployment.
 */
public static String getTemplateUUID(final String templateKey) {
    DynamicQuery dq = DDMTemplateLocalServiceUtil.dynamicQuery()
            .add(PropertyFactoryUtil.forName("templateKey").eq(templateKey));
    List<DDMTemplate> templateList = new ArrayList<>();
    String uuid = "NOT FOUND!!!!";
    try {
        templateList = DDMTemplateLocalServiceUtil.dynamicQuery(dq);
        if (templateList != null && !templateList.isEmpty() && templateList.get(0) != null) {
            uuid = templateList.get(0).getUuid();
        }
    } catch (SystemException e) {
        // Bug fix: corrected the "Tempate" typo in the log message.
        LOG.error(String.format("Template with key %1$s not found !!", templateKey), e);
    }
    return uuid;
}
/**
 * Splits an optional leading site reference off {@code content}.
 *
 * @return a two-element array: [0] the referenced site name ("" when the
 *         input carries no site reference), [1] the remaining content
 *         (trimmed when a reference was stripped).
 *
 * NOTE(review): the offsets hard-code a VALUE_SPLIT length of 2 — confirm
 * the delimiter constant is exactly two characters long.
 */
private static String[] separateSiteRef(final String content) {
    final String[] parts = new String[] { "", content };
    if (content.startsWith(VALUE_SPLIT)) {
        final int delimiterPos = content.indexOf(VALUE_SPLIT, 2);
        if (delimiterPos > -1) {
            parts[0] = content.substring(2, delimiterPos);
            parts[1] = content.substring(delimiterPos + 2, content.length()).trim();
        }
    }
    return parts;
}
}
|
<filename>src/pdbf/json/JSON.java
package pdbf.json;
/**
* Created by lukas on 01.06.16.
*/
/**
 * Marker element type for JSON content; inherits all behavior from
 * {@link TextualPDBFelement} and adds no members of its own.
 */
public class JSON extends TextualPDBFelement {
}
|
def max_number(num_list):
    """Return the largest element of num_list.

    Raises:
        ValueError: if num_list is empty.
    """
    if not num_list:
        raise ValueError("max_number() arg is an empty sequence")
    # Track the best value seen so far; named `largest` to avoid shadowing
    # the builtin `max` (the original used `max` as the local name).
    largest = num_list[0]
    for num in num_list:
        if num > largest:
            largest = num
    return largest

print(max_number([5, 6, 7, -2, 3]))
const express = require('express');
const router = express.Router();
const getConnection = require('../../connection');
const middleware = require('../auth/auth_middleware');
// GET /accounthead/:id — superuser only.
// Responds with the account head's master data (account, village, taluka,
// district) plus the distinct (sub_account_id, resource_person_id) pairs
// that carry balances under the account.
// NOTE(review): the query string contains TWO statements, which requires the
// mysql connection to be created with `multipleStatements: true` — confirm
// in ../../connection.
router.get('/accounthead/:id',middleware.loggedin_as_superuser, (req, res) => {
    getConnection((err, connection) => {
        if (err) {
            // Could not obtain a pooled connection; report a generic failure.
            console.log(err);
            res.send({ status: false });
        }
        else {
            var account_id = req.params.id;
            var sql = `
            SELECT
            Account_Head.account_id,Account_Head.account_name,
            Village.village_name,
            Taluka.taluka_name,
            District.district_name
            FROM Account_Head
            INNER JOIN Village
            ON Account_Head.account_id = ? AND Village.village_id = Account_Head.village_id
            INNER JOIN Taluka
            ON Taluka.taluka_id = Village.taluka_id
            INNER JOIN District
            ON District.district_id = Taluka.district_id;
            SELECT
            DISTINCT sub_account_id AS sid,
            resource_person_id AS rid
            FROM Account_Balance
            WHERE account_id = ?
            ORDER BY sub_account_id DESC;
            `;
            // The same account id feeds both statements (one placeholder each).
            connection.query(sql, [account_id, account_id], (err, results) => {
                // Return the connection to the pool before responding.
                connection.release();
                if (err) {
                    console.log(err);
                    res.send({ status: false });
                }
                else {
                    if (results.length > 0) {
                        // results[0] = account-head rows, results[1] = sub-account list.
                        res.send({
                            status: true,
                            data: results[0][0],
                            sub_account_list: results[1]
                        });
                    }
                    else {
                        res.send({ status: false });
                    }
                }
            });
        }
    });
});
// GET /subaccount/:id — superuser only.
// Responds with the name and address of a single sub-account.
router.get('/subaccount/:id',middleware.loggedin_as_superuser, (req, res) => {
    getConnection((err, connection) => {
        if (err) {
            // Could not obtain a pooled connection; report a generic failure.
            console.log(err);
            res.send({ status: false });
        }
        else {
            var sub_account_id = req.params.id;
            var sql = `
            SELECT sub_account_name,sub_account_address
            FROM Sub_Account
            WHERE sub_account_id = ?;
            `;
            connection.query(sql, sub_account_id, (err, results) => {
                // Return the connection to the pool before responding.
                connection.release();
                if (err) {
                    console.log(err);
                    res.send({ status: false });
                }
                else {
                    // results[0] is undefined when no row matches; the response
                    // still reports status:true in that case.
                    res.send({
                        status: true,
                        data: results[0]
                    });
                }
            });
        }
    });
});

module.exports = router;
#!/bin/sh
# Simulates a GWAS interface from VCF to space-separated CSV.
# If there is at least one sample and one variant in the VCF:
# - Print a header
# - For each variant:
#   - Print the site and a p-value.
#
# Notes:
# - VCF data lines have 9 fixed columns (CHROM..FORMAT) before the per-sample
#   genotype columns, hence the NF-9 sample count below.
# - The emitted p-value is a constant 0.5 placeholder (simulation only).
# - Reads the VCF from stdin.
awk 'BEGIN {
FS="\t"
printedHeaderLine="false"
nHeaderSamples=0
}
{
if ($1=="#CHROM") {
nHeaderSamples=NF-9
}
}
{
if ($1 !~ /^#/ && nHeaderSamples==NF-9 && nHeaderSamples>0) {
if (printedHeaderLine=="false") {
print "CHR","POS","pValue"
printedHeaderLine="true"
}
print $1,$2,0.5
}
}'
|
# For inspiration, see also Debian's ncbi-vdb package:
# https://salsa.debian.org/med-team/ncbi-vdb
# This is a non-autotools configuration system. The usual ./configure options
# don’t work. Paths to toolchain binaries (gcc, g++, ar) are in part hard-coded
# and need to be patched.
# NOTE(review): these sed substitutions are plain global string replacements;
# any occurrence of "gcc"/"g++" inside paths, comments, or option names in
# the Makefile is rewritten too — confirm the Makefile only mentions the
# intended compiler invocations.
sed -i.backup \
-e "s|gcc|$CC|g" \
-e "s|g++|$CXX|g" \
build/Makefile.gcc
# * --debug lets the configure script print extra info
# * Only LDFLAGS and CXX can be customized at configure time.
./configure \
--debug \
--prefix=$PREFIX \
--build-prefix=ncbi-outdir \
--with-ngs-sdk-prefix=$PREFIX \
CXX=$CXX
# Edit the generated build configuration to use the proper tools
# sed -i.backup \
# -e "s|= gcc|= $CC|" \
# -e "s|= g++|= $CXX|" \
# -e "s|= ar rc|= ar|" \
# -e "s|= ar|= $AR|" \
# build/Makefile.config.linux.x86_64
make
# This does not install the header files
make install
# These tests fail sometimes because they try to access online resources
make -C test/vdb
# Copy headers manually. As done by Debian, install them into a common subdirectory
mv interfaces/* $PREFIX/include/ncbi-vdb
# To Do
# Some of the internal libraries are not built. These messages are printed during the build:
# NOTE - internal library libkff cannot be built: It requires 'libmagic' and its development headers.
# NOTE - internal library libkxml cannot be built: It requires 'libxml2' and its development headers.
# NOTE - internal library libkxfs cannot be built: It requires 'libxml2' and its development headers.
# NOTE - library libkdf5 cannot be built: It requires 'libhdf5' and its development headers.
# NOTE - library libvdb-sqlite cannot be built: It requires 'libxml2'.
# These other notes are written at configure time:
# bison: command not found
# bc: command not found
|
import numpy as np
from scipy import linalg as LA

# Build a whitening filter W from the covariance matrix of the training
# noise. `noise_train_cov` is defined elsewhere and is assumed to be a
# symmetric positive (semi-)definite covariance matrix — TODO confirm.
#
# Bug fix: use eigh instead of eig. A covariance matrix is symmetric, so
# eigh is correct here and guarantees real eigenvalues/eigenvectors;
# eig can return complex values due to round-off, which would propagate
# complex entries into W via D ** -0.5.
D, V = LA.eigh(noise_train_cov)
# Clip tiny/negative eigenvalues produced by numerical error so the inverse
# square root below cannot blow up or become NaN.
D = np.maximum(D, np.finfo(float).eps)
# Whitening transform: W = diag(1/sqrt(lambda_i)) @ V.T, so that W @ x has
# decorrelated, unit-variance components.
D_sqrt_inv = np.diag(D ** (-0.5))
W = np.dot(D_sqrt_inv, V.T)
# Now W can be used to whiten the input signal by multiplying it with the signal
#!/bin/bash
# Start the TrainLoc application in dev mode as a named Erlang node.
source ~/.dev.keys                 # load dev API keys/secrets into the environment
mkdir -p ../log                    # ensure the log directory exists before redirecting
# Fetch dev-only deps, force a full recompile, then run without halting,
# appending stdout to the dev log (stderr still reaches the terminal).
MIX_ENV=dev elixir --sname trainloc-dev -S mix do deps.get --only dev, compile --force, run --no-halt >> ../log/trainloc-dev.log
|
def top_three(lst, n=3):
    """Return the n largest elements of lst in descending order.

    Generalized with a backward-compatible ``n`` parameter (default 3, the
    original behavior). If lst has fewer than n elements, all of them are
    returned, sorted descending.
    """
    return sorted(lst, reverse=True)[:n]

print(top_three([10, 8, 3, 11, 7, 14, 13]))
<reponame>polakowski/js-core-ext-array-reject<filename>lib/array-reject.js
// Array#reject — the logical inverse of Array#filter: keeps only the
// elements for which the predicate returns a falsy value. Any extra
// arguments (e.g. filter's thisArg) are forwarded unchanged. A missing or
// falsy predicate is passed straight through so filter raises its own
// TypeError, exactly as it would without this wrapper.
Array.prototype.reject = function() {
  var passed = Array.from(arguments);
  var predicate = passed.shift();
  var inverted = predicate && function() {
    return !predicate.apply(this, arguments);
  };
  return this.filter.apply(this, [inverted].concat(passed));
};
|
<reponame>blushft/strana<gh_stars>1-10
// Code generated by entc, DO NOT EDIT.
package ent
import (
"context"
"database/sql/driver"
"errors"
"fmt"
"math"
"github.com/blushft/strana/modules/sink/reporter/store/ent/event"
"github.com/blushft/strana/modules/sink/reporter/store/ent/oscontext"
"github.com/blushft/strana/modules/sink/reporter/store/ent/predicate"
"github.com/facebook/ent/dialect/sql"
"github.com/facebook/ent/dialect/sql/sqlgraph"
"github.com/facebook/ent/schema/field"
)
// OSContextQuery is the builder for querying OSContext entities.
// OSContextQuery is the builder for querying OSContext entities.
//
// NOTE: this file is generated by entc (see the file header) — prefer
// regenerating over hand-editing.
type OSContextQuery struct {
	config
	limit      *int
	offset     *int
	order      []OrderFunc
	unique     []string
	predicates []predicate.OSContext
	// eager-loading edges.
	withEvents *EventQuery
	// intermediate query (i.e. traversal path).
	sql  *sql.Selector
	path func(context.Context) (*sql.Selector, error)
}
// Where adds a new predicate for the builder.
func (ocq *OSContextQuery) Where(ps ...predicate.OSContext) *OSContextQuery {
ocq.predicates = append(ocq.predicates, ps...)
return ocq
}
// Limit adds a limit step to the query.
func (ocq *OSContextQuery) Limit(limit int) *OSContextQuery {
ocq.limit = &limit
return ocq
}
// Offset adds an offset step to the query.
func (ocq *OSContextQuery) Offset(offset int) *OSContextQuery {
ocq.offset = &offset
return ocq
}
// Order adds an order step to the query.
func (ocq *OSContextQuery) Order(o ...OrderFunc) *OSContextQuery {
ocq.order = append(ocq.order, o...)
return ocq
}
// QueryEvents chains the current query on the events edge.
func (ocq *OSContextQuery) QueryEvents() *EventQuery {
query := &EventQuery{config: ocq.config}
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
if err := ocq.prepareQuery(ctx); err != nil {
return nil, err
}
step := sqlgraph.NewStep(
sqlgraph.From(oscontext.Table, oscontext.FieldID, ocq.sqlQuery()),
sqlgraph.To(event.Table, event.FieldID),
sqlgraph.Edge(sqlgraph.O2M, true, oscontext.EventsTable, oscontext.EventsColumn),
)
fromU = sqlgraph.SetNeighbors(ocq.driver.Dialect(), step)
return fromU, nil
}
return query
}
// First returns the first OSContext entity in the query. Returns *NotFoundError when no oscontext was found.
func (ocq *OSContextQuery) First(ctx context.Context) (*OSContext, error) {
ocs, err := ocq.Limit(1).All(ctx)
if err != nil {
return nil, err
}
if len(ocs) == 0 {
return nil, &NotFoundError{oscontext.Label}
}
return ocs[0], nil
}
// FirstX is like First, but panics if an error occurs.
func (ocq *OSContextQuery) FirstX(ctx context.Context) *OSContext {
oc, err := ocq.First(ctx)
if err != nil && !IsNotFound(err) {
panic(err)
}
return oc
}
// FirstID returns the first OSContext id in the query. Returns *NotFoundError when no id was found.
func (ocq *OSContextQuery) FirstID(ctx context.Context) (id int, err error) {
var ids []int
if ids, err = ocq.Limit(1).IDs(ctx); err != nil {
return
}
if len(ids) == 0 {
err = &NotFoundError{oscontext.Label}
return
}
return ids[0], nil
}
// FirstXID is like FirstID, but panics if an error occurs.
func (ocq *OSContextQuery) FirstXID(ctx context.Context) int {
id, err := ocq.FirstID(ctx)
if err != nil && !IsNotFound(err) {
panic(err)
}
return id
}
// Only returns the only OSContext entity in the query, returns an error if not exactly one entity was returned.
func (ocq *OSContextQuery) Only(ctx context.Context) (*OSContext, error) {
ocs, err := ocq.Limit(2).All(ctx)
if err != nil {
return nil, err
}
switch len(ocs) {
case 1:
return ocs[0], nil
case 0:
return nil, &NotFoundError{oscontext.Label}
default:
return nil, &NotSingularError{oscontext.Label}
}
}
// OnlyX is like Only, but panics if an error occurs.
func (ocq *OSContextQuery) OnlyX(ctx context.Context) *OSContext {
oc, err := ocq.Only(ctx)
if err != nil {
panic(err)
}
return oc
}
// OnlyID returns the only OSContext id in the query, returns an error if not exactly one id was returned.
func (ocq *OSContextQuery) OnlyID(ctx context.Context) (id int, err error) {
var ids []int
if ids, err = ocq.Limit(2).IDs(ctx); err != nil {
return
}
switch len(ids) {
case 1:
id = ids[0]
case 0:
err = &NotFoundError{oscontext.Label}
default:
err = &NotSingularError{oscontext.Label}
}
return
}
// OnlyIDX is like OnlyID, but panics if an error occurs.
func (ocq *OSContextQuery) OnlyIDX(ctx context.Context) int {
id, err := ocq.OnlyID(ctx)
if err != nil {
panic(err)
}
return id
}
// All executes the query and returns a list of OSContexts.
func (ocq *OSContextQuery) All(ctx context.Context) ([]*OSContext, error) {
if err := ocq.prepareQuery(ctx); err != nil {
return nil, err
}
return ocq.sqlAll(ctx)
}
// AllX is like All, but panics if an error occurs.
func (ocq *OSContextQuery) AllX(ctx context.Context) []*OSContext {
ocs, err := ocq.All(ctx)
if err != nil {
panic(err)
}
return ocs
}
// IDs executes the query and returns a list of OSContext ids.
func (ocq *OSContextQuery) IDs(ctx context.Context) ([]int, error) {
var ids []int
if err := ocq.Select(oscontext.FieldID).Scan(ctx, &ids); err != nil {
return nil, err
}
return ids, nil
}
// IDsX is like IDs, but panics if an error occurs.
func (ocq *OSContextQuery) IDsX(ctx context.Context) []int {
ids, err := ocq.IDs(ctx)
if err != nil {
panic(err)
}
return ids
}
// Count returns the count of the given query.
func (ocq *OSContextQuery) Count(ctx context.Context) (int, error) {
if err := ocq.prepareQuery(ctx); err != nil {
return 0, err
}
return ocq.sqlCount(ctx)
}
// CountX is like Count, but panics if an error occurs.
func (ocq *OSContextQuery) CountX(ctx context.Context) int {
count, err := ocq.Count(ctx)
if err != nil {
panic(err)
}
return count
}
// Exist returns true if the query has elements in the graph.
func (ocq *OSContextQuery) Exist(ctx context.Context) (bool, error) {
if err := ocq.prepareQuery(ctx); err != nil {
return false, err
}
return ocq.sqlExist(ctx)
}
// ExistX is like Exist, but panics if an error occurs.
func (ocq *OSContextQuery) ExistX(ctx context.Context) bool {
exist, err := ocq.Exist(ctx)
if err != nil {
panic(err)
}
return exist
}
// Clone returns a duplicate of the query builder, including all associated steps. It can be
// used to prepare common query builders and use them differently after the clone is made.
// Clone returns a duplicate of the query builder, including all associated steps. It can be
// used to prepare common query builders and use them differently after the clone is made.
//
// The order/unique/predicates slices are copied so the clone can diverge
// safely; the eager-loading option (withEvents) is not copied.
// NOTE(review): ocq.sql may be nil here — this relies on (*sql.Selector).Clone
// handling a nil receiver; confirm against the ent/dialect/sql version in use.
func (ocq *OSContextQuery) Clone() *OSContextQuery {
	return &OSContextQuery{
		config:     ocq.config,
		limit:      ocq.limit,
		offset:     ocq.offset,
		order:      append([]OrderFunc{}, ocq.order...),
		unique:     append([]string{}, ocq.unique...),
		predicates: append([]predicate.OSContext{}, ocq.predicates...),
		// clone intermediate query.
		sql:  ocq.sql.Clone(),
		path: ocq.path,
	}
}
// WithEvents tells the query-builder to eager-loads the nodes that are connected to
// the "events" edge. The optional arguments used to configure the query builder of the edge.
func (ocq *OSContextQuery) WithEvents(opts ...func(*EventQuery)) *OSContextQuery {
query := &EventQuery{config: ocq.config}
for _, opt := range opts {
opt(query)
}
ocq.withEvents = query
return ocq
}
// GroupBy used to group vertices by one or more fields/columns.
// It is often used with aggregate functions, like: count, max, mean, min, sum.
//
// Example:
//
// var v []struct {
// Name string `json:"name,omitempty"`
// Count int `json:"count,omitempty"`
// }
//
// client.OSContext.Query().
// GroupBy(oscontext.FieldName).
// Aggregate(ent.Count()).
// Scan(ctx, &v)
//
func (ocq *OSContextQuery) GroupBy(field string, fields ...string) *OSContextGroupBy {
group := &OSContextGroupBy{config: ocq.config}
group.fields = append([]string{field}, fields...)
group.path = func(ctx context.Context) (prev *sql.Selector, err error) {
if err := ocq.prepareQuery(ctx); err != nil {
return nil, err
}
return ocq.sqlQuery(), nil
}
return group
}
// Select one or more fields from the given query.
//
// Example:
//
// var v []struct {
// Name string `json:"name,omitempty"`
// }
//
// client.OSContext.Query().
// Select(oscontext.FieldName).
// Scan(ctx, &v)
//
func (ocq *OSContextQuery) Select(field string, fields ...string) *OSContextSelect {
selector := &OSContextSelect{config: ocq.config}
selector.fields = append([]string{field}, fields...)
selector.path = func(ctx context.Context) (prev *sql.Selector, err error) {
if err := ocq.prepareQuery(ctx); err != nil {
return nil, err
}
return ocq.sqlQuery(), nil
}
return selector
}
// prepareQuery materializes the deferred traversal path (if any) into the
// builder's SQL selector so subsequent steps can extend it.
func (ocq *OSContextQuery) prepareQuery(ctx context.Context) error {
	if ocq.path != nil {
		prev, err := ocq.path(ctx)
		if err != nil {
			return err
		}
		ocq.sql = prev
	}
	return nil
}
// sqlAll executes the query, scans all matching OSContext rows, and then
// eager-loads the "events" edge (when requested) with one additional query.
func (ocq *OSContextQuery) sqlAll(ctx context.Context) ([]*OSContext, error) {
	var (
		nodes = []*OSContext{}
		_spec = ocq.querySpec()
		// loadedTypes records which edges were eager-loaded so the Edges
		// accessors can distinguish "not loaded" from "loaded but empty".
		loadedTypes = [1]bool{
			ocq.withEvents != nil,
		}
	)
	// ScanValues allocates one node per row and exposes its column buffers.
	_spec.ScanValues = func() []interface{} {
		node := &OSContext{config: ocq.config}
		nodes = append(nodes, node)
		values := node.scanValues()
		return values
	}
	// Assign copies the scanned column values into the most recently
	// allocated node.
	_spec.Assign = func(values ...interface{}) error {
		if len(nodes) == 0 {
			return fmt.Errorf("ent: Assign called without calling ScanValues")
		}
		node := nodes[len(nodes)-1]
		node.Edges.loadedTypes = loadedTypes
		return node.assignValues(values...)
	}
	if err := sqlgraph.QueryNodes(ctx, ocq.driver, _spec); err != nil {
		return nil, err
	}
	if len(nodes) == 0 {
		return nodes, nil
	}
	if query := ocq.withEvents; query != nil {
		// Collect all parent ids, fetch every child event with a single
		// IN (...) query, then attach each event to its parent through the
		// event_os foreign key.
		fks := make([]driver.Value, 0, len(nodes))
		nodeids := make(map[int]*OSContext)
		for i := range nodes {
			fks = append(fks, nodes[i].ID)
			nodeids[nodes[i].ID] = nodes[i]
		}
		query.withFKs = true
		query.Where(predicate.Event(func(s *sql.Selector) {
			s.Where(sql.InValues(oscontext.EventsColumn, fks...))
		}))
		neighbors, err := query.All(ctx)
		if err != nil {
			return nil, err
		}
		for _, n := range neighbors {
			fk := n.event_os
			if fk == nil {
				return nil, fmt.Errorf(`foreign-key "event_os" is nil for node %v`, n.ID)
			}
			node, ok := nodeids[*fk]
			if !ok {
				return nil, fmt.Errorf(`unexpected foreign-key "event_os" returned %v for node %v`, *fk, n.ID)
			}
			node.Edges.Events = append(node.Edges.Events, n)
		}
	}
	return nodes, nil
}
func (ocq *OSContextQuery) sqlCount(ctx context.Context) (int, error) {
_spec := ocq.querySpec()
return sqlgraph.CountNodes(ctx, ocq.driver, _spec)
}
func (ocq *OSContextQuery) sqlExist(ctx context.Context) (bool, error) {
n, err := ocq.sqlCount(ctx)
if err != nil {
return false, fmt.Errorf("ent: check existence: %v", err)
}
return n > 0, nil
}
// querySpec assembles the sqlgraph.QuerySpec (table, columns, predicates,
// ordering and limit/offset) that describes this query to the sqlgraph
// executor.
func (ocq *OSContextQuery) querySpec() *sqlgraph.QuerySpec {
	_spec := &sqlgraph.QuerySpec{
		Node: &sqlgraph.NodeSpec{
			Table:   oscontext.Table,
			Columns: oscontext.Columns,
			ID: &sqlgraph.FieldSpec{
				Type:   field.TypeInt,
				Column: oscontext.FieldID,
			},
		},
		From:   ocq.sql,
		Unique: true,
	}
	// Fold every stored predicate into a single WHERE-builder callback.
	if ps := ocq.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	if limit := ocq.limit; limit != nil {
		_spec.Limit = *limit
	}
	if offset := ocq.offset; offset != nil {
		_spec.Offset = *offset
	}
	// Same folding for the ordering functions.
	if ps := ocq.order; len(ps) > 0 {
		_spec.Order = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	return _spec
}
// sqlQuery builds the raw SELECT for this query. When an intermediate
// selector exists (set by a graph traversal via prepareQuery) it is extended
// instead of starting from the bare table.
func (ocq *OSContextQuery) sqlQuery() *sql.Selector {
	builder := sql.Dialect(ocq.driver.Dialect())
	t1 := builder.Table(oscontext.Table)
	selector := builder.Select(t1.Columns(oscontext.Columns...)...).From(t1)
	if ocq.sql != nil {
		selector = ocq.sql
		selector.Select(selector.Columns(oscontext.Columns...)...)
	}
	for _, p := range ocq.predicates {
		p(selector)
	}
	for _, p := range ocq.order {
		p(selector)
	}
	if offset := ocq.offset; offset != nil {
		// limit is mandatory for offset clause. We start
		// with default value, and override it below if needed.
		selector.Offset(*offset).Limit(math.MaxInt32)
	}
	if limit := ocq.limit; limit != nil {
		selector.Limit(*limit)
	}
	return selector
}
// OSContextGroupBy is the builder for group-by OSContext entities.
type OSContextGroupBy struct {
config
fields []string
fns []AggregateFunc
// intermediate query (i.e. traversal path).
sql *sql.Selector
path func(context.Context) (*sql.Selector, error)
}
// Aggregate adds the given aggregation functions to the group-by query.
func (ocgb *OSContextGroupBy) Aggregate(fns ...AggregateFunc) *OSContextGroupBy {
ocgb.fns = append(ocgb.fns, fns...)
return ocgb
}
// Scan applies the group-by query and scan the result into the given value.
func (ocgb *OSContextGroupBy) Scan(ctx context.Context, v interface{}) error {
query, err := ocgb.path(ctx)
if err != nil {
return err
}
ocgb.sql = query
return ocgb.sqlScan(ctx, v)
}
// ScanX is like Scan, but panics if an error occurs.
func (ocgb *OSContextGroupBy) ScanX(ctx context.Context, v interface{}) {
if err := ocgb.Scan(ctx, v); err != nil {
panic(err)
}
}
// Strings returns list of strings from group-by. It is only allowed when querying group-by with one field.
func (ocgb *OSContextGroupBy) Strings(ctx context.Context) ([]string, error) {
if len(ocgb.fields) > 1 {
return nil, errors.New("ent: OSContextGroupBy.Strings is not achievable when grouping more than 1 field")
}
var v []string
if err := ocgb.Scan(ctx, &v); err != nil {
return nil, err
}
return v, nil
}
// StringsX is like Strings, but panics if an error occurs.
func (ocgb *OSContextGroupBy) StringsX(ctx context.Context) []string {
v, err := ocgb.Strings(ctx)
if err != nil {
panic(err)
}
return v
}
// String returns a single string from group-by. It is only allowed when querying group-by with one field.
func (ocgb *OSContextGroupBy) String(ctx context.Context) (_ string, err error) {
var v []string
if v, err = ocgb.Strings(ctx); err != nil {
return
}
switch len(v) {
case 1:
return v[0], nil
case 0:
err = &NotFoundError{oscontext.Label}
default:
err = fmt.Errorf("ent: OSContextGroupBy.Strings returned %d results when one was expected", len(v))
}
return
}
// StringX is like String, but panics if an error occurs.
func (ocgb *OSContextGroupBy) StringX(ctx context.Context) string {
v, err := ocgb.String(ctx)
if err != nil {
panic(err)
}
return v
}
// Ints returns list of ints from group-by. It is only allowed when querying group-by with one field.
func (ocgb *OSContextGroupBy) Ints(ctx context.Context) ([]int, error) {
if len(ocgb.fields) > 1 {
return nil, errors.New("ent: OSContextGroupBy.Ints is not achievable when grouping more than 1 field")
}
var v []int
if err := ocgb.Scan(ctx, &v); err != nil {
return nil, err
}
return v, nil
}
// IntsX is like Ints, but panics if an error occurs.
func (ocgb *OSContextGroupBy) IntsX(ctx context.Context) []int {
v, err := ocgb.Ints(ctx)
if err != nil {
panic(err)
}
return v
}
// Int returns a single int from group-by. It is only allowed when querying group-by with one field.
func (ocgb *OSContextGroupBy) Int(ctx context.Context) (_ int, err error) {
var v []int
if v, err = ocgb.Ints(ctx); err != nil {
return
}
switch len(v) {
case 1:
return v[0], nil
case 0:
err = &NotFoundError{oscontext.Label}
default:
err = fmt.Errorf("ent: OSContextGroupBy.Ints returned %d results when one was expected", len(v))
}
return
}
// IntX is like Int, but panics if an error occurs.
func (ocgb *OSContextGroupBy) IntX(ctx context.Context) int {
v, err := ocgb.Int(ctx)
if err != nil {
panic(err)
}
return v
}
// Float64s returns list of float64s from group-by. It is only allowed when querying group-by with one field.
func (ocgb *OSContextGroupBy) Float64s(ctx context.Context) ([]float64, error) {
if len(ocgb.fields) > 1 {
return nil, errors.New("ent: OSContextGroupBy.Float64s is not achievable when grouping more than 1 field")
}
var v []float64
if err := ocgb.Scan(ctx, &v); err != nil {
return nil, err
}
return v, nil
}
// Float64sX is like Float64s, but panics if an error occurs.
func (ocgb *OSContextGroupBy) Float64sX(ctx context.Context) []float64 {
v, err := ocgb.Float64s(ctx)
if err != nil {
panic(err)
}
return v
}
// Float64 returns a single float64 from group-by. It is only allowed when querying group-by with one field.
func (ocgb *OSContextGroupBy) Float64(ctx context.Context) (_ float64, err error) {
var v []float64
if v, err = ocgb.Float64s(ctx); err != nil {
return
}
switch len(v) {
case 1:
return v[0], nil
case 0:
err = &NotFoundError{oscontext.Label}
default:
err = fmt.Errorf("ent: OSContextGroupBy.Float64s returned %d results when one was expected", len(v))
}
return
}
// Float64X is like Float64, but panics if an error occurs.
func (ocgb *OSContextGroupBy) Float64X(ctx context.Context) float64 {
v, err := ocgb.Float64(ctx)
if err != nil {
panic(err)
}
return v
}
// Bools returns list of bools from group-by. It is only allowed when querying group-by with one field.
func (ocgb *OSContextGroupBy) Bools(ctx context.Context) ([]bool, error) {
if len(ocgb.fields) > 1 {
return nil, errors.New("ent: OSContextGroupBy.Bools is not achievable when grouping more than 1 field")
}
var v []bool
if err := ocgb.Scan(ctx, &v); err != nil {
return nil, err
}
return v, nil
}
// BoolsX is like Bools, but panics if an error occurs.
func (ocgb *OSContextGroupBy) BoolsX(ctx context.Context) []bool {
v, err := ocgb.Bools(ctx)
if err != nil {
panic(err)
}
return v
}
// Bool returns a single bool from group-by. It is only allowed when querying group-by with one field.
func (ocgb *OSContextGroupBy) Bool(ctx context.Context) (_ bool, err error) {
var v []bool
if v, err = ocgb.Bools(ctx); err != nil {
return
}
switch len(v) {
case 1:
return v[0], nil
case 0:
err = &NotFoundError{oscontext.Label}
default:
err = fmt.Errorf("ent: OSContextGroupBy.Bools returned %d results when one was expected", len(v))
}
return
}
// BoolX is like Bool, but panics if an error occurs.
func (ocgb *OSContextGroupBy) BoolX(ctx context.Context) bool {
v, err := ocgb.Bool(ctx)
if err != nil {
panic(err)
}
return v
}
// sqlScan executes the group-by query and scans the result set into v.
func (ocgb *OSContextGroupBy) sqlScan(ctx context.Context, v interface{}) error {
	rows := &sql.Rows{}
	query, args := ocgb.sqlQuery().Query()
	if err := ocgb.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	// ScanSlice maps each result row onto an element of the slice pointed to by v.
	return sql.ScanSlice(rows, v)
}
// sqlQuery builds the SELECT ... GROUP BY statement from the configured
// group-by fields and aggregation functions.
func (ocgb *OSContextGroupBy) sqlQuery() *sql.Selector {
	selector := ocgb.sql
	// Selected columns are the group-by fields followed by the aggregations.
	columns := make([]string, 0, len(ocgb.fields)+len(ocgb.fns))
	columns = append(columns, ocgb.fields...)
	for _, fn := range ocgb.fns {
		columns = append(columns, fn(selector))
	}
	return selector.Select(columns...).GroupBy(ocgb.fields...)
}
// OSContextSelect is the builder for select fields of OSContext entities.
type OSContextSelect struct {
config
fields []string
// intermediate query (i.e. traversal path).
sql *sql.Selector
path func(context.Context) (*sql.Selector, error)
}
// Scan applies the selector query and scan the result into the given value.
func (ocs *OSContextSelect) Scan(ctx context.Context, v interface{}) error {
query, err := ocs.path(ctx)
if err != nil {
return err
}
ocs.sql = query
return ocs.sqlScan(ctx, v)
}
// ScanX is like Scan, but panics if an error occurs.
func (ocs *OSContextSelect) ScanX(ctx context.Context, v interface{}) {
if err := ocs.Scan(ctx, v); err != nil {
panic(err)
}
}
// Strings returns list of strings from selector. It is only allowed when selecting one field.
func (ocs *OSContextSelect) Strings(ctx context.Context) ([]string, error) {
if len(ocs.fields) > 1 {
return nil, errors.New("ent: OSContextSelect.Strings is not achievable when selecting more than 1 field")
}
var v []string
if err := ocs.Scan(ctx, &v); err != nil {
return nil, err
}
return v, nil
}
// StringsX is like Strings, but panics if an error occurs.
func (ocs *OSContextSelect) StringsX(ctx context.Context) []string {
v, err := ocs.Strings(ctx)
if err != nil {
panic(err)
}
return v
}
// String returns a single string from selector. It is only allowed when selecting one field.
func (ocs *OSContextSelect) String(ctx context.Context) (_ string, err error) {
var v []string
if v, err = ocs.Strings(ctx); err != nil {
return
}
switch len(v) {
case 1:
return v[0], nil
case 0:
err = &NotFoundError{oscontext.Label}
default:
err = fmt.Errorf("ent: OSContextSelect.Strings returned %d results when one was expected", len(v))
}
return
}
// StringX is like String, but panics if an error occurs.
func (ocs *OSContextSelect) StringX(ctx context.Context) string {
v, err := ocs.String(ctx)
if err != nil {
panic(err)
}
return v
}
// Ints returns list of ints from selector. It is only allowed when selecting one field.
func (ocs *OSContextSelect) Ints(ctx context.Context) ([]int, error) {
if len(ocs.fields) > 1 {
return nil, errors.New("ent: OSContextSelect.Ints is not achievable when selecting more than 1 field")
}
var v []int
if err := ocs.Scan(ctx, &v); err != nil {
return nil, err
}
return v, nil
}
// IntsX is like Ints, but panics if an error occurs.
func (ocs *OSContextSelect) IntsX(ctx context.Context) []int {
v, err := ocs.Ints(ctx)
if err != nil {
panic(err)
}
return v
}
// Int returns a single int from selector. It is only allowed when selecting one field.
func (ocs *OSContextSelect) Int(ctx context.Context) (_ int, err error) {
var v []int
if v, err = ocs.Ints(ctx); err != nil {
return
}
switch len(v) {
case 1:
return v[0], nil
case 0:
err = &NotFoundError{oscontext.Label}
default:
err = fmt.Errorf("ent: OSContextSelect.Ints returned %d results when one was expected", len(v))
}
return
}
// IntX is like Int, but panics if an error occurs.
func (ocs *OSContextSelect) IntX(ctx context.Context) int {
v, err := ocs.Int(ctx)
if err != nil {
panic(err)
}
return v
}
// Float64s returns list of float64s from selector. It is only allowed when selecting one field.
func (ocs *OSContextSelect) Float64s(ctx context.Context) ([]float64, error) {
if len(ocs.fields) > 1 {
return nil, errors.New("ent: OSContextSelect.Float64s is not achievable when selecting more than 1 field")
}
var v []float64
if err := ocs.Scan(ctx, &v); err != nil {
return nil, err
}
return v, nil
}
// Float64sX is like Float64s, but panics if an error occurs.
func (ocs *OSContextSelect) Float64sX(ctx context.Context) []float64 {
v, err := ocs.Float64s(ctx)
if err != nil {
panic(err)
}
return v
}
// Float64 returns a single float64 from selector. It is only allowed when selecting one field.
func (ocs *OSContextSelect) Float64(ctx context.Context) (_ float64, err error) {
var v []float64
if v, err = ocs.Float64s(ctx); err != nil {
return
}
switch len(v) {
case 1:
return v[0], nil
case 0:
err = &NotFoundError{oscontext.Label}
default:
err = fmt.Errorf("ent: OSContextSelect.Float64s returned %d results when one was expected", len(v))
}
return
}
// Float64X is like Float64, but panics if an error occurs.
func (ocs *OSContextSelect) Float64X(ctx context.Context) float64 {
v, err := ocs.Float64(ctx)
if err != nil {
panic(err)
}
return v
}
// Bools returns list of bools from selector. It is only allowed when selecting one field.
func (ocs *OSContextSelect) Bools(ctx context.Context) ([]bool, error) {
if len(ocs.fields) > 1 {
return nil, errors.New("ent: OSContextSelect.Bools is not achievable when selecting more than 1 field")
}
var v []bool
if err := ocs.Scan(ctx, &v); err != nil {
return nil, err
}
return v, nil
}
// BoolsX is like Bools, but panics if an error occurs.
func (ocs *OSContextSelect) BoolsX(ctx context.Context) []bool {
v, err := ocs.Bools(ctx)
if err != nil {
panic(err)
}
return v
}
// Bool returns a single bool from selector. It is only allowed when selecting one field.
func (ocs *OSContextSelect) Bool(ctx context.Context) (_ bool, err error) {
var v []bool
if v, err = ocs.Bools(ctx); err != nil {
return
}
switch len(v) {
case 1:
return v[0], nil
case 0:
err = &NotFoundError{oscontext.Label}
default:
err = fmt.Errorf("ent: OSContextSelect.Bools returned %d results when one was expected", len(v))
}
return
}
// BoolX is like Bool, but panics if an error occurs.
func (ocs *OSContextSelect) BoolX(ctx context.Context) bool {
v, err := ocs.Bool(ctx)
if err != nil {
panic(err)
}
return v
}
func (ocs *OSContextSelect) sqlScan(ctx context.Context, v interface{}) error {
rows := &sql.Rows{}
query, args := ocs.sqlQuery().Query()
if err := ocs.driver.Query(ctx, query, args, rows); err != nil {
return err
}
defer rows.Close()
return sql.ScanSlice(rows, v)
}
// sqlQuery restricts the underlying selector to the requested fields.
func (ocs *OSContextSelect) sqlQuery() sql.Querier {
	selector := ocs.sql
	selector.Select(selector.Columns(ocs.fields...)...)
	return selector
}
|
# Read two integers, one per line, from standard input.
read X
read Y
# Print the four basic arithmetic results, one per line:
# sum, difference, product, and integer quotient (errors if Y is 0).
echo "$(( X + Y ))"
echo "$(( X - Y ))"
echo "$(( X * Y ))"
echo "$(( X / Y ))"
|
<gh_stars>1000+
// Code generated by go-swagger; DO NOT EDIT.
package service
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"net/http"
"github.com/go-openapi/runtime"
"github.com/keptn/keptn/configuration-service/models"
)
// DeleteProjectProjectNameStageStageNameServiceServiceNameNoContentCode is the HTTP code returned for type DeleteProjectProjectNameStageStageNameServiceServiceNameNoContent
const DeleteProjectProjectNameStageStageNameServiceServiceNameNoContentCode int = 204
/*DeleteProjectProjectNameStageStageNameServiceServiceNameNoContent Success. Service has been deleted. Response does not have a body.
swagger:response deleteProjectProjectNameStageStageNameServiceServiceNameNoContent
*/
type DeleteProjectProjectNameStageStageNameServiceServiceNameNoContent struct {
}
// NewDeleteProjectProjectNameStageStageNameServiceServiceNameNoContent creates DeleteProjectProjectNameStageStageNameServiceServiceNameNoContent with default headers values
func NewDeleteProjectProjectNameStageStageNameServiceServiceNameNoContent() *DeleteProjectProjectNameStageStageNameServiceServiceNameNoContent {
return &DeleteProjectProjectNameStageStageNameServiceServiceNameNoContent{}
}
// WriteResponse to the client
func (o *DeleteProjectProjectNameStageStageNameServiceServiceNameNoContent) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {
	rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses
	// 204 responses carry no body, so nothing is produced.
	rw.WriteHeader(204)
}
// DeleteProjectProjectNameStageStageNameServiceServiceNameBadRequestCode is the HTTP code returned for type DeleteProjectProjectNameStageStageNameServiceServiceNameBadRequest
const DeleteProjectProjectNameStageStageNameServiceServiceNameBadRequestCode int = 400
/*DeleteProjectProjectNameStageStageNameServiceServiceNameBadRequest Failed. Service could not be deleted.
swagger:response deleteProjectProjectNameStageStageNameServiceServiceNameBadRequest
*/
type DeleteProjectProjectNameStageStageNameServiceServiceNameBadRequest struct {
/*
In: Body
*/
Payload *models.Error `json:"body,omitempty"`
}
// NewDeleteProjectProjectNameStageStageNameServiceServiceNameBadRequest creates DeleteProjectProjectNameStageStageNameServiceServiceNameBadRequest with default headers values
func NewDeleteProjectProjectNameStageStageNameServiceServiceNameBadRequest() *DeleteProjectProjectNameStageStageNameServiceServiceNameBadRequest {
return &DeleteProjectProjectNameStageStageNameServiceServiceNameBadRequest{}
}
// WithPayload adds the payload to the delete project project name stage stage name service service name bad request response
func (o *DeleteProjectProjectNameStageStageNameServiceServiceNameBadRequest) WithPayload(payload *models.Error) *DeleteProjectProjectNameStageStageNameServiceServiceNameBadRequest {
o.Payload = payload
return o
}
// SetPayload sets the payload to the delete project project name stage stage name service service name bad request response
func (o *DeleteProjectProjectNameStageStageNameServiceServiceNameBadRequest) SetPayload(payload *models.Error) {
o.Payload = payload
}
// WriteResponse to the client
func (o *DeleteProjectProjectNameStageStageNameServiceServiceNameBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {
rw.WriteHeader(400)
if o.Payload != nil {
payload := o.Payload
if err := producer.Produce(rw, payload); err != nil {
panic(err) // let the recovery middleware deal with this
}
}
}
/*DeleteProjectProjectNameStageStageNameServiceServiceNameDefault Error
swagger:response deleteProjectProjectNameStageStageNameServiceServiceNameDefault
*/
type DeleteProjectProjectNameStageStageNameServiceServiceNameDefault struct {
_statusCode int
/*
In: Body
*/
Payload *models.Error `json:"body,omitempty"`
}
// NewDeleteProjectProjectNameStageStageNameServiceServiceNameDefault creates DeleteProjectProjectNameStageStageNameServiceServiceNameDefault with default headers values
func NewDeleteProjectProjectNameStageStageNameServiceServiceNameDefault(code int) *DeleteProjectProjectNameStageStageNameServiceServiceNameDefault {
if code <= 0 {
code = 500
}
return &DeleteProjectProjectNameStageStageNameServiceServiceNameDefault{
_statusCode: code,
}
}
// WithStatusCode adds the status to the delete project project name stage stage name service service name default response
func (o *DeleteProjectProjectNameStageStageNameServiceServiceNameDefault) WithStatusCode(code int) *DeleteProjectProjectNameStageStageNameServiceServiceNameDefault {
o._statusCode = code
return o
}
// SetStatusCode sets the status to the delete project project name stage stage name service service name default response
func (o *DeleteProjectProjectNameStageStageNameServiceServiceNameDefault) SetStatusCode(code int) {
o._statusCode = code
}
// WithPayload adds the payload to the delete project project name stage stage name service service name default response
func (o *DeleteProjectProjectNameStageStageNameServiceServiceNameDefault) WithPayload(payload *models.Error) *DeleteProjectProjectNameStageStageNameServiceServiceNameDefault {
o.Payload = payload
return o
}
// SetPayload sets the payload to the delete project project name stage stage name service service name default response
func (o *DeleteProjectProjectNameStageStageNameServiceServiceNameDefault) SetPayload(payload *models.Error) {
o.Payload = payload
}
// WriteResponse to the client
func (o *DeleteProjectProjectNameStageStageNameServiceServiceNameDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {
rw.WriteHeader(o._statusCode)
if o.Payload != nil {
payload := o.Payload
if err := producer.Produce(rw, payload); err != nil {
panic(err) // let the recovery middleware deal with this
}
}
}
|
<reponame>iFlameing/WebAssemblyStudio
/* Any copyright is dedicated to the Public Domain.
* http://creativecommons.org/publicdomain/zero/1.0/ */
import "jest-enzyme";
import * as React from "react";
import { shallow } from "enzyme";
jest.mock("../../../src/utils/fetchTemplates", () => {
return {
default: async () =>
JSON.parse(require("fs").readFileSync(__dirname + "/templates.json").toString()),
};
});
jest.mock("../../../src/service", () => {
return {
Service: {
compileMarkdownToHtml(md) { return `<pre>${md}</pre>`; },
},
};
});
jest.mock("../../../src/config", () => {
return {
default: async () => {
return {
serviceUrl: "",
clang: "",
rustc: "",
templates: ""
};
},
};
});
import { NewProjectDialog, Template } from "../../../src/components/NewProjectDialog";
const createButtonIndex = 1;
const cancelButtonIndex = 0;
// Yield to the microtask queue `count` times so queued promise callbacks
// (template loading, markdown rendering) get a chance to settle.
async function promiseWait(count) {
  if (count <= 0) {
    return false;
  }
  const next = await Promise.resolve(count - 1);
  return promiseWait(next);
}
describe("Tests for NewProjectDialog component", () => {
// Shallow-render the dialog with no-op callbacks unless overridden.
const setup = (params: {
  onCreate?: (template: Template) => void;
  onCancel?: () => void;
}) => {
  // tslint:disable-next-line
  const nop = () => {};
  return shallow(<NewProjectDialog
    isOpen={true}
    onCreate={params.onCreate || nop}
    onCancel={params.onCancel || nop}
  />);
};
it("NewProjectDialog renders correctly", () => {
const dialog = setup({});
expect(dialog.find("ListBox")).toExist();
const buttons = dialog.find("Button");
expect(buttons.length).toBe(2);
expect(buttons.at(createButtonIndex)).toHaveProp("label", "Create");
expect(buttons.at(createButtonIndex)).toHaveProp("title", "Create");
expect(buttons.at(cancelButtonIndex)).toHaveProp("label", "Cancel");
expect(buttons.at(cancelButtonIndex)).toHaveProp("title", "Cancel");
});
it("NewProjectDialog calls back onCreate", async () => {
let chosenTemplate = null;
const dialog = setup({
onCreate(template) { chosenTemplate = template; },
});
{
const createButton = dialog.find("Button").at(createButtonIndex);
expect(createButton).toHaveProp("isDisabled", true);
}
await promiseWait(3); // wait on templates loading and md-to-html
dialog.update();
{
const createButton = dialog.find("Button").at(createButtonIndex);
expect(createButton).toHaveProp("isDisabled", false);
createButton.simulate("click");
expect(chosenTemplate).toBeTruthy();
}
});
it("NewProjectDialog calls back onCancel", () => {
let cancelCalled = false;
const dialog = setup({
onCancel() { cancelCalled = true; },
});
dialog.find("Button").at(cancelButtonIndex).simulate("click");
expect(cancelCalled).toBeTruthy();
});
});
|
#!/bin/sh
set -e
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
# Used as a return value for each invocation of `strip_invalid_archs` function.
STRIP_BINARY_RETVAL=0
# This protects against multiple targets copying the same framework dependency at the same time. The solution
# was originally proposed here: https://lists.samba.org/archive/rsync/2008-February/020158.html
RSYNC_PROTECT_TMP_FILES=(--filter "P .*.??????")
# Copies and strips a vendored framework
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
# Use filter instead of exclude so missing patterns don't throw errors.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Copies and strips a vendored dSYM
install_dsym() {
local source="$1"
if [ -r "$source" ]; then
# Copy the dSYM into a the targets temp dir.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${DERIVED_FILES_DIR}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${DERIVED_FILES_DIR}"
local basename
basename="$(basename -s .framework.dSYM "$source")"
binary="${DERIVED_FILES_DIR}/${basename}.framework.dSYM/Contents/Resources/DWARF/${basename}"
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"Mach-O dSYM companion"* ]]; then
strip_invalid_archs "$binary"
fi
if [[ $STRIP_BINARY_RETVAL == 1 ]]; then
# Move the stripped file into its final destination.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${DERIVED_FILES_DIR}/${basename}.framework.dSYM\" \"${DWARF_DSYM_FOLDER_PATH}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${DERIVED_FILES_DIR}/${basename}.framework.dSYM" "${DWARF_DSYM_FOLDER_PATH}"
else
# The dSYM was not stripped at all, in this case touch a fake folder so the input/output paths from Xcode do not reexecute this script because the file is missing.
touch "${DWARF_DSYM_FOLDER_PATH}/${basename}.framework.dSYM"
fi
fi
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
  # Only sign when an identity is configured and signing is not explicitly disabled.
  if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
    # Use the current code_sign_identitiy
    echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
    local code_sign_cmd="/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements '$1'"
    if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
      # Background the codesign call so parallel signing can proceed;
      # the top-level script `wait`s for these jobs at the end.
      code_sign_cmd="$code_sign_cmd &"
    fi
    echo "$code_sign_cmd"
    eval "$code_sign_cmd"
  fi
}
# Strip invalid architectures
strip_invalid_archs() {
  binary="$1"
  # Get architectures for current target binary
  binary_archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | awk '{$1=$1;print}' | rev)"
  # Intersect them with the architectures we are building for
  intersected_archs="$(echo ${ARCHS[@]} ${binary_archs[@]} | tr ' ' '\n' | sort | uniq -d)"
  # If there are no archs supported by this binary then warn the user
  if [[ -z "$intersected_archs" ]]; then
    echo "warning: [CP] Vendored binary '$binary' contains architectures ($binary_archs) none of which match the current build architectures ($ARCHS)."
    # Signal "nothing usable / nothing stripped" to the caller via the global flag.
    STRIP_BINARY_RETVAL=0
    return
  fi
  stripped=""
  for arch in $binary_archs; do
    if ! [[ "${ARCHS}" == *"$arch"* ]]; then
      # Strip non-valid architectures in-place
      lipo -remove "$arch" -output "$binary" "$binary" || exit 1
      stripped="$stripped $arch"
    fi
  done
  if [[ "$stripped" ]]; then
    echo "Stripped $binary of architectures:$stripped"
  fi
  # Signal success; install_dsym keys off this flag to decide where to copy.
  STRIP_BINARY_RETVAL=1
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework "${BUILT_PRODUCTS_DIR}/Alamofire/Alamofire.framework"
install_framework "${BUILT_PRODUCTS_DIR}/KeyHolder/KeyHolder.framework"
install_framework "${BUILT_PRODUCTS_DIR}/Log/Log.framework"
install_framework "${BUILT_PRODUCTS_DIR}/Magnet/Magnet.framework"
install_framework "${BUILT_PRODUCTS_DIR}/SwiftProtobuf/SwiftProtobuf.framework"
install_framework "${BUILT_PRODUCTS_DIR}/SwiftyJSON/SwiftyJSON.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework "${BUILT_PRODUCTS_DIR}/Alamofire/Alamofire.framework"
install_framework "${BUILT_PRODUCTS_DIR}/KeyHolder/KeyHolder.framework"
install_framework "${BUILT_PRODUCTS_DIR}/Log/Log.framework"
install_framework "${BUILT_PRODUCTS_DIR}/Magnet/Magnet.framework"
install_framework "${BUILT_PRODUCTS_DIR}/SwiftProtobuf/SwiftProtobuf.framework"
install_framework "${BUILT_PRODUCTS_DIR}/SwiftyJSON/SwiftyJSON.framework"
fi
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
wait
fi
|
#!/bin/bash
#
# Copyright IBM Corp. All Rights Reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# import utils
. scripts/envVar.sh
# fetchChannelConfig <org> <channel_id> <output_json>
# Writes the current channel config for a given channel to a JSON file
# NOTE: this must be run in a CLI container since it requires configtxlator
fetchChannelConfig() {
  # Positional args: org number, channel name, output JSON path.
  ORG=$1
  CHANNEL=$2
  OUTPUT=$3
  # Point the peer CLI at the requested org's MSP/TLS material.
  setGlobals $ORG
  infoln "Fetching the most recent configuration block for the channel"
  set -x
  peer channel fetch config config_block.pb -o orderer.example.com:7050 --ordererTLSHostnameOverride orderer.example.com -c $CHANNEL --tls --cafile "$ORDERER_CA"
  { set +x; } 2>/dev/null
  infoln "Decoding config block to JSON and isolating config to ${OUTPUT}"
  set -x
  # Strip the block wrapper so only the channel config object remains.
  configtxlator proto_decode --input config_block.pb --type common.Block | jq .data.data[0].payload.data.config >"${OUTPUT}"
  { set +x; } 2>/dev/null
}
# createConfigUpdate <channel_id> <original_config.json> <modified_config.json> <output.pb>
# Takes an original and modified config, and produces the config update tx
# which transitions between the two
# NOTE: this must be run in a CLI container since it requires configtxlator
createConfigUpdate() {
  # Positional args: channel, original config JSON, modified config JSON,
  # output envelope (.pb) path.
  CHANNEL=$1
  ORIGINAL=$2
  MODIFIED=$3
  OUTPUT=$4
  set -x
  # Re-encode both configs to protobuf so configtxlator can diff them.
  configtxlator proto_encode --input "${ORIGINAL}" --type common.Config >original_config.pb
  configtxlator proto_encode --input "${MODIFIED}" --type common.Config >modified_config.pb
  configtxlator compute_update --channel_id "${CHANNEL}" --original original_config.pb --updated modified_config.pb >config_update.pb
  configtxlator proto_decode --input config_update.pb --type common.ConfigUpdate >config_update.json
  # Wrap the raw update in a CONFIG_UPDATE (type 2) envelope payload.
  echo '{"payload":{"header":{"channel_header":{"channel_id":"'$CHANNEL'", "type":2}},"data":{"config_update":'$(cat config_update.json)'}}}' | jq . >config_update_in_envelope.json
  configtxlator proto_encode --input config_update_in_envelope.json --type common.Envelope >"${OUTPUT}"
  { set +x; } 2>/dev/null
}
# signConfigtxAsPeerOrg <org> <configtx.pb>
# Set the peerOrg admin of an org and sign the config update
signConfigtxAsPeerOrg() {
  # Positional args: org number, config transaction file to sign.
  ORG=$1
  CONFIGTXFILE=$2
  # Switch the CLI identity to the org's admin before signing.
  setGlobals $ORG
  set -x
  peer channel signconfigtx -f "${CONFIGTXFILE}"
  { set +x; } 2>/dev/null
}
import xml.etree.ElementTree as ET
from typing import List, Dict, Union
def process_articles(xml_data: str) -> List[Dict[str, Union[str, List[str]]]]:
    """Parse an Atom XML document and extract each article's URL and authors.

    Args:
        xml_data: XML string whose elements live in the Atom namespace.

    Returns:
        One dict per ``article`` element with keys ``url`` (the text of the
        ``url`` child, or ``None`` when the element is absent) and
        ``authors`` (list of author name strings).

    Raises:
        xml.etree.ElementTree.ParseError: If ``xml_data`` is not well-formed.
    """
    ns = {'ns': 'http://www.w3.org/2005/Atom'}  # Namespace for XML elements
    root = ET.fromstring(xml_data)
    articles = []
    for article in root.findall('ns:article', ns):
        # Guard against articles missing a <url> element; the original
        # crashed with AttributeError on find() returning None.
        url_el = article.find('ns:url', ns)
        url = url_el.text if url_el is not None else None
        # Likewise skip authors whose <name> element is missing.
        authors = [
            name.text
            for author in article.findall('ns:authors/ns:author', ns)
            if (name := author.find('ns:name', ns)) is not None
        ]
        articles.append({'url': url, 'authors': authors})
    return articles
# Load the bicycle usage dataset (expects columns `usage` and `temperature`).
data <- read.csv('bicycle_usage.csv')
# Fit a simple linear regression of usage on temperature.
model <- lm(usage ~ temperature, data=data)
# Print coefficient estimates, residuals, and fit statistics.
summary(model)
# Predict bicycle usage at a temperature of 60 degrees.
predictedUsage <- predict(model, newdata=data.frame(temperature=60))
# Print the prediction.
print(predictedUsage)
<reponame>cosmouser/gp3csv
package ugd
import (
"testing"
"os"
"strings"
"path"
"encoding/gob"
)
// openGob loads the gob-encoded test fixture into a map keyed by archive path.
func openGob() (store map[string][]byte, err error) {
	f, err := os.Open(path.Join("..", "TAESC.gp3.gob"))
	if err != nil {
		return nil, err
	}
	// Close on every path; the original leaked the handle when decoding failed.
	defer f.Close()
	if err = gob.NewDecoder(f).Decode(&store); err != nil {
		return nil, err
	}
	return store, nil
}
// TestMakeRecords verifies that unit and weapon records are produced for
// every parsed node, plus a header row.
func TestMakeRecords(t *testing.T) {
	// Setup failures are fatal: continuing with a nil db/nodes would only
	// produce cascading nil-deref panics (the original used t.Error).
	db, err := openGob()
	if err != nil {
		t.Fatal(err)
	}
	nodes, err := loadTdfDataDir(db, escUnitsDir)
	if err != nil {
		t.Fatal(err)
	}
	downloadNodes, err := loadTdfDataDir(db, escDownloadsDir)
	if err != nil {
		t.Fatal(err)
	}
	// Link units to the structures that build them before rendering records.
	addBuildRelationships(nodes, downloadNodes)
	unitRecords, err := makeUnitRecords(nodes)
	if err != nil {
		t.Fatal(err)
	}
	// One record per node plus a header row.
	if len(unitRecords) != len(nodes)+1 {
		t.Fatal("some unit records were not gathered")
	}
	if len(unitRecords[0]) < 2 {
		t.Error("no fields were created")
	}
	weapNodes, err := loadTdfDataDir(db, escWeaponsDir)
	if err != nil {
		t.Fatal(err)
	}
	weaponRecords, err := makeWeaponRecords(weapNodes)
	if err != nil {
		t.Fatal(err)
	}
	if len(weaponRecords) != len(weapNodes)+1 {
		t.Error("some weapon records were not gathered")
	}
}
// TestEncodeUnitsCSV checks that the unit CSV export runs end to end.
func TestEncodeUnitsCSV(t *testing.T) {
	db, err := openGob()
	if err != nil {
		// Fatal: encoding a nil db would just panic (original used t.Error).
		t.Fatal(err)
	}
	outFilePath := path.Join("..", "tmp_units.csv")
	unitsFile, err := os.Create(outFilePath)
	if err != nil {
		t.Fatal(err)
	}
	// Always remove the scratch file, even when encoding fails.
	defer os.Remove(outFilePath)
	if err := EncodeUnitsCSV(db, unitsFile); err != nil {
		t.Error(err)
	}
	if err := unitsFile.Close(); err != nil {
		t.Error(err)
	}
}
// TestEncodeWeaponsCSV checks that the weapon CSV export runs end to end.
func TestEncodeWeaponsCSV(t *testing.T) {
	db, err := openGob()
	if err != nil {
		// Fatal: encoding a nil db would just panic (original used t.Error).
		t.Fatal(err)
	}
	outFilePath := path.Join("..", "tmp_weapons.csv")
	weaponsFile, err := os.Create(outFilePath)
	if err != nil {
		t.Fatal(err)
	}
	// Always remove the scratch file, even when encoding fails.
	defer os.Remove(outFilePath)
	if err := EncodeWeaponsCSV(db, weaponsFile); err != nil {
		t.Error(err)
	}
	if err := weaponsFile.Close(); err != nil {
		t.Error(err)
	}
}
// TestGatherUnitPics checks that unit pictures are collected and can be
// written out as a zip archive.
func TestGatherUnitPics(t *testing.T) {
	db, err := openGob()
	if err != nil {
		t.Fatal(err)
	}
	pics, err := GatherUnitPics(db)
	if err != nil {
		t.Fatal(err)
	}
	if len(pics) == 0 {
		t.Error("pics slice is empty")
	}
	outFilePath := path.Join("..", "tmp_pics.zip")
	picsArchive, err := os.Create(outFilePath)
	if err != nil {
		// The original overwrote this error without checking it and then
		// wrote into a nil *os.File.
		t.Fatal(err)
	}
	// Always remove the scratch archive, even when exporting fails.
	defer os.Remove(outFilePath)
	if err := ExportPicsToZip(pics, picsArchive); err != nil {
		t.Error(err)
	}
	if err := picsArchive.Close(); err != nil {
		t.Error(err)
	}
}
// TestUnitData spot-checks specific units in the decoded archive.
func TestUnitData(t *testing.T) {
	// Setup failures are fatal: the assertions below dereference the
	// results (the original used t.Error and fell through).
	db, err := openGob()
	if err != nil {
		t.Fatal(err)
	}
	// Sanity-check a file that must exist in the archive.
	if len(db["/unitsE/ARMCOM.fbi"]) == 0 {
		t.Fatal("missing file")
	}
	nodes, err := loadTdfDataDir(db, escUnitsDir)
	if err != nil {
		t.Fatal(err)
	}
	if len(nodes) == 0 {
		t.Fatal("got 0 for unit count, expected at least a couple hundred")
	}
	downloadNodes, err := loadTdfDataDir(db, escDownloadsDir)
	if err != nil {
		t.Fatal(err)
	}
	addBuildRelationships(nodes, downloadNodes)
	for _, v := range nodes {
		if v.Fields["unitname"] == "CORAK" {
			if v.Fields["copyright"] != "Copyright 1997 Humongous Entertainment. All rights reserved." {
				t.Errorf("%v is missing copyright, has %v", v.Fields["unitname"], v.Fields["copyright"])
			}
			// strings.Contains reads better than Index(...) < 0.
			if !strings.Contains(v.Fields["builtby"], "CORLAB") {
				t.Error("builtby info for CORAK is missing CORLAB")
			}
		}
		if v.Fields["unitname"] == "CORCK" {
			if !strings.Contains(v.Fields["canbuild"], "CORRL") {
				t.Error("canbuild info for CORCK is missing CORRL")
			}
		}
	}
}
|
#!/bin/bash
# Build the cidr2range binary for all platforms for version $1.
set -e

# Refuse to run without a version argument instead of silently producing
# an unversioned build (the original passed an empty string through).
if [ -z "$1" ]; then
	echo "usage: $0 <version>" >&2
	exit 1
fi

# $() instead of legacy backticks; quote expansions so paths with spaces work.
DIR=$(dirname "$0")
ROOT="$DIR/.."
VSN="$1"

"$ROOT/scripts/build-all-platforms.sh" "cidr2range" "$VSN"
|
/**
 * Returns the length of the longest common subsequence of the two strings
 * using bottom-up dynamic programming in O(m*n) time and space.
 */
public static int longestCommonSubsequence(String str1, String str2) {
    int rows = str1.length();
    int cols = str2.length();
    // table[i][j] holds the LCS length of str1[0..i) and str2[0..j).
    // Row 0 and column 0 stay 0 (Java zero-initializes arrays): the LCS
    // with an empty prefix is empty.
    int[][] table = new int[rows + 1][cols + 1];
    for (int i = 1; i <= rows; i++) {
        for (int j = 1; j <= cols; j++) {
            if (str1.charAt(i - 1) == str2.charAt(j - 1)) {
                // Matching tail characters extend the diagonal LCS by one.
                table[i][j] = table[i - 1][j - 1] + 1;
            } else {
                // Otherwise drop one character from either string.
                table[i][j] = Math.max(table[i - 1][j], table[i][j - 1]);
            }
        }
    }
    return table[rows][cols];
}
// Demo: compute and print the LCS length of two sample strings.
String str1 = "ABCBDAB";
String str2 = "BDCABA";
int result = longestCommonSubsequence(str1, str2);
System.out.println("Length of LCS is: "+ result); // Output: Length of LCS is: 4
import scrapy
def extract_and_join_url(response):
    """Resolve the repository link on the page to an absolute URL.

    Args:
        response: A scrapy ``Response``-like object exposing ``css`` and
            ``urljoin``.

    Returns:
        The absolute URL of the first anchor inside the ``repository`` div.
    """
    # Chain the selectors: container first, then the anchor's href attribute.
    href = response.css('.repository').css('a::attr(href)').extract_first()
    # Resolve relative links against the response's base URL.
    return response.urljoin(href)
<filename>test/foo/foo.go
package foo
import (
"context"
"encoding/json"
"fmt"
"github.com/fatih/color"
"github.com/gosuri/uitable"
)
// Foo holds the options for the foo command.
type Foo struct {
	// List selects the list action in Do.
	List bool
	// Output selects the list format in DoList: "json" for a JSON
	// array, anything else for a plain table.
	Output string
}
// Do runs the foo command. Currently only the list action is
// implemented; when List is not set it is a no-op.
func (f *Foo) Do(ctx context.Context) error {
	if !f.List {
		return nil
	}
	return f.DoList(ctx)
}
// DoList prints the fixed demo list of foos, either as a JSON array
// when Output is "json", or as a two-column index/value table written
// to color.Output otherwise.
func (f *Foo) DoList(ctx context.Context) error {
	foos := []string{"foo1", "foo2", "foo3"}

	if f.Output == "json" {
		encoded, err := json.Marshal(foos)
		if err != nil {
			return err
		}
		fmt.Println(string(encoded))
		return nil
	}

	// Default: render an aligned table.
	table := uitable.New()
	table.Separator = " "
	table.AddRow("Index", "Value")
	for i, name := range foos {
		table.AddRow(i, name)
	}
	// Write error deliberately ignored, matching best-effort console output.
	_, _ = fmt.Fprintln(color.Output, table)
	return nil
}
|
#!/usr/bin/env bats

# Shared helpers for this test suite.
source "${BATS_TEST_DIRNAME}/test_helpers.sh"

# Verifies the expected mongod build is installed on the image.
@test "It should install mongod 3.4.17" {
run mongod --version
# bats' `run` captures command output in $output; match the version banner.
[[ "$output" =~ "db version v3.4.17" ]]
}
|
#!/bin/bash
# Print a short banner pointing at the Packer homepage.
printf '%s\n' "Environment Variables"
printf '%s\n' "http://packer.io"
|
#!/bin/bash
# Syntax-check every .cpp file (top level and renik/) with $CXX
# (default clang++). Object output is discarded (-o /dev/null); this
# only verifies the sources compile.
CXX=${CXX:-clang++}
for x in *.cpp renik/*.cpp; do
    echo $CXX "$x"
    # "$x" is quoted so filenames containing spaces survive; $CXX stays
    # unquoted on purpose so it may carry extra words (e.g. "ccache clang++").
    $CXX -std=c++11 -Ipolyfill -Iglm -c "$x" -o /dev/null -Wno-return-type -Wno-writable-strings -Wno-macro-redefined -fno-exceptions -Wno-logical-op-parentheses
done
|
#!/usr/bin/env bash
# Copyright 2021, Oath Inc.
# Licensed under the terms of the Apache 2.0 license. See the LICENSE file in the project root for terms
# Bootstrap python
# Skip the screwdrivercd-specific parts of the bootstrap; only the env file is needed here.
export PYTHON_BOOTSTRAP_SKIP_SCREWDRIVERCD="True"
sd-cmd exec python-2104/python_bootstrap@pre
# The bootstrap step writes BASE_PYTHON_BIN (and related vars) into this env file.
. /tmp/python_bootstrap.env
# Default the pip-compile input to requirements.in when the caller did not
# set GEN_REQUIREMENTS_SRC_FILES and that file exists.
if [ "${GEN_REQUIREMENTS_SRC_FILES}" = "" ]; then
if [ -e "requirements.in" ]; then
GEN_REQUIREMENTS_SRC_FILES="requirements.in"
fi
fi
# Pin every dependency (with hashes) into requirements.txt.
# NOTE(review): ${GEN_REQUIREMENTS_SRC_FILES} is left unquoted, presumably so a
# space-separated list of source files word-splits into multiple arguments — confirm.
${BASE_PYTHON_BIN}/pypirun pip-tools pip-compile -o requirements.txt --generate-hashes --allow-unsafe ${GEN_REQUIREMENTS_SRC_FILES}
# Archive the generated pin file with the build artifacts.
mkdir -p "${SD_ARTIFACTS_DIR}/config"
cp requirements.txt "${SD_ARTIFACTS_DIR}/config"
|
#if defined(TEMPEST_BUILD_DIRECTX12)
#include "dxframebuffer.h"
#include "dxdevice.h"
#include "dxswapchain.h"
#include "dxtexture.h"
#include "guid.h"
using namespace Tempest;
using namespace Tempest::Detail;
// Layout an attachment is expected to be in outside of rendering:
// swapchain images go back to Present; depth formats stay as a depth
// attachment (no readable depth for now); everything else is sampleable.
TextureLayout DxFramebuffer::Attach::defaultLayout() {
  if(isSwImage)
    return TextureLayout::Present;
  return Detail::nativeIsDepthFormat(format) ? TextureLayout::DepthAttach
                                             : TextureLayout::Sampler;
  }
// Layout used while rendering into the attachment: swapchain images
// always render as color; otherwise depth formats render as depth and
// anything else as color.
TextureLayout DxFramebuffer::Attach::renderLayout() {
  if(isSwImage)
    return TextureLayout::ColorAttach;
  return Detail::nativeIsDepthFormat(format) ? TextureLayout::DepthAttach
                                             : TextureLayout::ColorAttach;
  }
// Raw backing resource of this attachment, exposed as an opaque handle.
void* DxFramebuffer::Attach::nativeHandle() {
  return res;
  }
// Builds a framebuffer over `cnt` color attachments plus an optional depth
// buffer. Each color slot i is either a regular texture (cl[i]!=nullptr) or
// image imgId[i] of swapchain i (cl[i]==nullptr); swapchain-backed slots are
// flagged with isSwImage after the views are created, which makes their
// defaultLayout() resolve to Present.
// NOTE(review): res[] is fixed at 256 entries and cnt is not range-checked —
// presumably callers guarantee cnt<=256; confirm upstream.
DxFramebuffer::DxFramebuffer(DxDevice& dev, DxFboLayout& lay, uint32_t cnt,
                             DxSwapchain** swapchain, DxTexture** cl, const uint32_t* imgId, DxTexture* zbuf)
  :viewsCount(cnt), lay(&lay) {
  ID3D12Resource* res[256] = {};
  for(size_t i=0; i<cnt; ++i) {
    if(cl[i]==nullptr) {
      // Swapchain-backed slot: take the swapchain's image for this frame id.
      res[i] = swapchain[i]->views[imgId[i]].get();
      } else {
      res[i] = cl[i]->impl.get();
      }
    }
  setupViews(*dev.device, res, cnt, zbuf==nullptr ? nullptr : zbuf->impl.get());
  // Mark swapchain-backed views after setupViews has allocated `views`.
  for(size_t i=0; i<cnt; ++i)
    if(cl[i]==nullptr)
      views[i].isSwImage = true;
  }
// Intentionally empty — NOTE(review): rtvHeap/dsvHeap and the views array
// are presumably released by their smart-pointer members; confirm in the header.
DxFramebuffer::~DxFramebuffer() {
  }
// Creates the RTV descriptor heap (and a one-entry DSV heap when `ds` is
// given), records each attachment's resource and format in `views`, and
// writes one render-target view per attachment at consecutive heap slots,
// plus a depth-stencil view for `ds` when present.
void DxFramebuffer::setupViews(ID3D12Device& device,
                               ID3D12Resource** res, size_t cnt, ID3D12Resource* ds) {
  // descriptor heap
  D3D12_DESCRIPTOR_HEAP_DESC rtvHeapDesc = {};
  rtvHeapDesc.Type = D3D12_DESCRIPTOR_HEAP_TYPE_RTV;
  rtvHeapDesc.NumDescriptors = UINT(cnt);
  rtvHeapDesc.Flags = D3D12_DESCRIPTOR_HEAP_FLAG_NONE;
  dxAssert(device.CreateDescriptorHeap(&rtvHeapDesc, uuid<ID3D12DescriptorHeap>(), reinterpret_cast<void**>(&rtvHeap)));
  // frame resources.
  views.reset(new Attach[viewsCount]);
  for(size_t i=0;i<viewsCount;++i) {
    views[i].res = res[i];
    auto desc = views[i].res->GetDesc();
    views[i].format = desc.Format;
    }
  if(ds!=nullptr) {
    // Separate single-entry heap for the depth-stencil view.
    D3D12_DESCRIPTOR_HEAP_DESC dsHeapDesc = {};
    dsHeapDesc.Type = D3D12_DESCRIPTOR_HEAP_TYPE_DSV;
    dsHeapDesc.NumDescriptors = 1;//UINT(cnt + (ds==nullptr ? 0 : 1));
    dsHeapDesc.Flags = D3D12_DESCRIPTOR_HEAP_FLAG_NONE;
    dxAssert(device.CreateDescriptorHeap(&dsHeapDesc, uuid<ID3D12DescriptorHeap>(), reinterpret_cast<void**>(&dsvHeap)));
    auto desc = ds->GetDesc();
    depth.res = ds;
    depth.format = desc.Format;
    }
  // Write one RTV per attachment, advancing by the device's RTV stride.
  D3D12_CPU_DESCRIPTOR_HANDLE rtvHandle = rtvHeap->GetCPUDescriptorHandleForHeapStart();
  rtvHeapInc = device.GetDescriptorHandleIncrementSize(D3D12_DESCRIPTOR_HEAP_TYPE_RTV);
  for(size_t i=0;i<viewsCount;++i) {
    device.CreateRenderTargetView(views[i].res, nullptr, rtvHandle);
    rtvHandle.ptr+=rtvHeapInc;
    }
  if(ds!=nullptr) {
    D3D12_CPU_DESCRIPTOR_HANDLE dsvHandle = dsvHeap->GetCPUDescriptorHandleForHeapStart();
    device.CreateDepthStencilView(depth.res, nullptr, dsvHandle);
    }
  }
#endif
|
#!/bin/bash
# Package the SqlServer plugin build output for version $1 into a tarball
# under ./output.
#
# Usage: <script> <version>
VER="$1"
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
WORKROOT="$DIR/work-plugin-sql-server"
WORK="$WORKROOT/Plugins/SqlServer"
ROOT="$DIR/../.."
BUILD="$ROOT/VirtualRadar/bin/x86/Release/Plugins/SqlServer"
OUTPUT="$DIR/output"

# Start from a clean working tree; quoting guards rm against paths with spaces.
if [ -d "$WORKROOT" ]; then
    rm -r "$WORKROOT"
fi
if [ ! -d "$OUTPUT" ]; then
    mkdir "$OUTPUT"
fi

# Stage the build output under Plugins/SqlServer so the tarball unpacks
# into the layout the application expects.
mkdir -p "$WORK"
cp -r "$BUILD"/* "$WORK"

# Bail out if the cd fails so tar never runs from the wrong directory.
cd "$WORKROOT" || exit 1
tar -czf "$OUTPUT/Plugin-SqlServer-$VER.tar.gz" *
echo "Created $OUTPUT/Plugin-SqlServer-$VER.tar.gz"
|
<filename>src/test/java/models/SiteTest.java<gh_stars>0
package models;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import java.time.LocalDateTime;
import static org.junit.Assert.*;
public class SiteTest {
@Before
public void setUp() throws Exception {
}
@After
public void tearDown() throws Exception {
}
@Test
public void NewSiteObjectGetsCorrectlyCreated_true() throws Exception {
Site task = setupNewSite();
assertEquals(true, task instanceof Site);
}
@Test
public void SiteInstantiatesWithDescription_true() throws Exception {
Site site = setupNewSite();
assertEquals("Nairobi Gospel", site.getDescription());
}
@Test
public void isCompletedPropertyIsFalseAfterInstantiation() throws Exception {
Site site = setupNewSite();
assertEquals(false, site.getCompleted()); //should never start as completed
}
@Test
public void getCreatedAtInstantiatesWithCurrentTimeToday() throws Exception {
Site site = setupNewSite();
assertEquals(LocalDateTime.now().getDayOfWeek(), site.getCreatedAt().getDayOfWeek());
}
//helper methods
public Site setupNewSite(){
return new Site("Nairobi Gospel", 1);
}
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.