text stringlengths 1 1.05M |
|---|
sudo apt update && sudo apt upgrade -y
sudo apt install git curl wget python3
clear |
-- Names of students that are enrolled in at least one course.
SELECT DISTINCT s.name
FROM student AS s
WHERE EXISTS (
    SELECT 1
    FROM student_course AS sc
    WHERE sc.student_name = s.name
);
import { isDefined } from '@collectable/core';
import { SortedMapStructure, getFirstItem } from '../internals';
/**
 * Returns the key of the first (smallest) entry in the sorted map,
 * or undefined when the map is empty.
 */
export function firstKey<K, V, U> (map: SortedMapStructure<K, V, U>): K|undefined {
  const first = getFirstItem(map._sorted);
  if (!isDefined(first)) {
    return undefined;
  }
  return first[0];
}
|
<reponame>privy-open-source/gitenak
import { FileStatusResult } from 'simple-git'
import {
blue,
green,
grey,
magenta,
red,
yellow,
} from 'kleur'
/**
 * Strips a leading "[...]" tag (plus an optional colon and whitespace)
 * from an issue/MR title and trims surrounding whitespace.
 */
export function formatTitle (title: string): string {
  const withoutTag = title.replace(/\[.*]\s*:?\s/, '')
  return withoutTag.trim()
}
/**
 * Loosely validates an http(s) URL: optional protocol, a domain name or
 * IPv4 address, then optional port, path, query string and fragment.
 */
export function validURL (string: string): boolean {
  const protocol = '^(https?:\\/\\/)?'
  const host = '((([a-z\\d]([a-z\\d-]*[a-z\\d])*)\\.)+[a-z]{2,}|((\\d{1,3}\\.){3}\\d{1,3}))'
  const portAndPath = '(\\:\\d+)?(\\/[-a-z\\d%_.~+]*)*'
  const query = '(\\?[;&a-z\\d%_.~+=-]*)?'
  const fragment = '(\\#[-a-z\\d_]*)?$'
  const pattern = new RegExp(protocol + host + portAndPath + query + fragment, 'i')
  return pattern.test(string)
}
/**
 * Renders issue labels as "[Label]" chips, colored by severity/stage,
 * and concatenates them into one string. Unknown labels stay uncolored.
 */
export function renderLabel (labels: string[]): string {
  const palette = new Map<string, (text: string) => string>([
    ['High', red],
    ['Bug', red],
    ['To Do', yellow],
    ['Doing', green],
    ['Feature', green],
    ['QA', blue],
    ['In Review', blue],
    ['Revise', blue],
    ['Undeployed', magenta],
    ['Low', grey],
  ])
  let output = ''
  for (const label of labels) {
    const paint = palette.get(label)
    const chip = `[${label}]`
    output += paint ? paint(chip) : chip
  }
  return output
}
/**
 * Renders a simple-git working-dir status code as a colored tag.
 * Codes: ' ' unmodified, M modified, A added, D deleted, R renamed,
 * C copied, U updated-but-unmerged, ? untracked.
 */
export function renderStatus (file: FileStatusResult): string {
  const tags = new Map<string, string>([
    ['?', green('[NEW]')],
    [' ', green('[NEW]')],
    ['A', green('[NEW]')],
    ['M', yellow('[MODIFIED]')],
    ['D', red('[DELETED]')],
    ['R', blue('[RENAMED]')],
    ['C', yellow('[COPIED]')],
  ])
  const tag = tags.get(file.working_dir)
  return tag !== undefined ? tag : `[${file.working_dir}]`
}
/**
 * Colors a workflow name: hotfix → red, feature → green, bugfix → yellow;
 * anything else is returned unchanged.
 * NOTE(review): "Worflow" looks like a typo for "Workflow", but the name
 * is part of the public API, so it is kept as-is.
 */
export function renderWorflow (workflow: string): string {
  switch (workflow) {
    case 'hotfix':
      return red(workflow)
    case 'feature':
      return green(workflow)
    case 'bugfix':
      return yellow(workflow)
    default:
      return workflow
  }
}
|
def convert_to_human_name(name: str) -> str:
    """Convert a camelCase identifier to a human-readable name.

    Each uppercase letter after the first character is replaced by a
    space followed by its lowercase form, e.g. ``"helloWorldFoo"`` ->
    ``"hello world foo"``.  The first character is kept as-is.

    Args:
        name: The identifier to convert.  May be empty.

    Returns:
        The human-readable form.  An empty input yields an empty string
        (the original implementation raised IndexError on ``""``).
    """
    if not name:
        return ""
    parts = [name[0]]  # first character is kept unchanged
    for char in name[1:]:
        parts.append(" " + char.lower() if char.isupper() else char)
    return "".join(parts)
package com.testproj.sportapp.service;
import com.testproj.sportapp.model.UserMeal;
import com.testproj.sportapp.repository.UserMealRepository;
import com.testproj.sportapp.util.exception.ExceptionUtil;
import java.time.LocalDateTime;
import java.util.Collection;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
// Service layer for user meals: delegates persistence to UserMealRepository
// and converts "not found" repository results into exceptions via
// ExceptionUtil.check (which throws when the first argument is null/false).
@Service
public class UserMealServiceImpl implements UserMealService {
@Autowired
private UserMealRepository repository;
// Returns the meal with the given id owned by userId; throws when absent.
@Override
public UserMeal get(int id, int userId) {
return ExceptionUtil.check(repository.get(id, userId), id);
}
// Deletes the meal; throws when it does not exist or belongs to another user.
@Override
public void delete(int id, int userId) {
ExceptionUtil.check(repository.delete(id, userId), id);
}
// Returns the user's meals within the [startDateTime, endDateTime] window.
@Override
public Collection<UserMeal> getBetweenDateTimes(LocalDateTime startDateTime, LocalDateTime endDateTime, int userId) {
return repository.getBetween(startDateTime, endDateTime, userId);
}
// Returns all meals for the given user.
@Override
public Collection<UserMeal> getAll(int userId) {
return repository.getAll(userId);
}
// Updates an existing meal; throws when the meal does not belong to userId.
@Override
public UserMeal update(UserMeal meal, int userId) {
return ExceptionUtil.check(repository.save(meal, userId), meal.getId());
}
// Persists a new meal; no existence check (save returns the stored entity).
@Override
public UserMeal save(UserMeal meal, int userId) {
return repository.save(meal, userId);
}
}
|
#!/bin/bash
#
# Copyright 2014-2016 The MathWorks, Inc.
#
# Extract a C++ ROS node archive (.tgz) generated from a Simulink model
# into a Catkin workspace and build it with catkin_make.
# Fixes over the original: all user-supplied paths are quoted (they may
# contain spaces), backticks replaced with $(), unreachable exits removed.

ARCHIVE="$1"
CATKIN_WS="$2"

# Print instructions for creating a Catkin workspace.
catkinWorkspaceHelp() {
    echo ""
    echo "You can create a Catkin workspace as follows:"
    echo " mkdir -p ~/catkin_ws/src"
    echo " cd ~/catkin_ws/src"
    echo " catkin_init_workspace"
}

# Print command-line usage.
commandUsage() {
    echo "Usage: $(basename "$0") ARCHIVE_NAME... CATKIN_WS..."
    echo "Extract and build a C++ ROS node generated from a Simulink model."
    echo "ARCHIVE_NAME is the name of the TGZ file generated from the Simulink model."
    echo "CATKIN_WS is the full path to your ROS Catkin workspace."
    echo ""
    echo "Example:"
    echo " ./$(basename "$0") simulinkmodel.tgz ~/catkin_ws"
}

# Print usage plus workspace help and stop (exits the script).
fullUsage() {
    commandUsage
    catkinWorkspaceHelp
    exit
}

# Lower-case a string; the argument is quoted so it is passed as one word.
toLowerCase() {
    echo "$1" | tr '[A-Z]' '[a-z]'
}

# Help requested, or no arguments at all?
# (Simplified from `[ -z "$1" ] || ([ ! -z "$1" ] && ... || ...)`, which was
# logically equivalent but harder to read; fullUsage already exits, so the
# old trailing `exit 0` was unreachable.)
if [ -z "$1" ] || [ "$1" = "-h" ] || [ "$1" = "--help" ] ; then
    fullUsage
fi

if [ ! $# -eq 2 ] ; then
    echo "Expected two input arguments. Got $#."
    fullUsage
fi

# Check Catkin workspace
if [ ! -d "$CATKIN_WS" ] ; then
    echo "The catkin workspace directory, $CATKIN_WS, does not exist."
    echo "Enter a valid catkin workspace directory."
    catkinWorkspaceHelp
    exit 1
fi

# Sanity check for CATKIN workspace
if [ ! -f "$CATKIN_WS/src/CMakeLists.txt" ] ; then
    echo "The Catkin workspace directory, $CATKIN_WS, is not a valid Catkin workspace."
    echo "Enter a valid Catkin workspace directory."
    catkinWorkspaceHelp
    exit 1
fi

# Check Simulink archive
if [ ! -f "$ARCHIVE" ] ; then
    echo "The archive, $ARCHIVE, does not exist."
    echo "Enter a valid Simulink model archive (.tgz file)."
    echo ""
    commandUsage
    exit 1
fi

# Enforce that $ARCHIVE ends with .tgz, since the model
# name is derived by stripping off the .tgz extension
if [ "${ARCHIVE: -4}" != ".tgz" ] ; then
    echo "The archive, $ARCHIVE, does not have a .tgz extension."
    echo "Enter a valid Simulink model archive (.tgz file)."
    echo ""
    commandUsage
    exit 1
fi

# Check if $ARCHIVE is a valid gzip file
if ! gzip -t "$ARCHIVE" 2> /dev/null ; then
    echo "The archive, $ARCHIVE, is not a valid .tgz (tar zip) file."
    echo ""
    commandUsage
    exit 1
fi

# Check for one of the standard files generated from Simulink
# (ert_main.cpp)
if ! tar ztf "$ARCHIVE" 2> /dev/null | grep -q ert_main.cpp ; then
    echo "The archive, $ARCHIVE, is not a valid Simulink model archive (.tgz file)."
    echo ""
    commandUsage
    exit 1
fi

# $ARCHIVE appears to be valid.
# Extract and build it
MODEL_NAME=$(toLowerCase "$(basename "$ARCHIVE" .tgz)")
PROJECT_DIR="$CATKIN_WS/src/$MODEL_NAME"
echo "Catkin project directory: $PROJECT_DIR"

# Extract files to catkin project directory.
# ${PROJECT_DIR:?} guards against an empty variable expanding to `rm -fr /*`.
mkdir -p "$PROJECT_DIR"
rm -fr "${PROJECT_DIR:?}"/*
tar -C "$PROJECT_DIR" -xf "$ARCHIVE"

# Ensure that catkin_make will rebuild the executable
touch "$PROJECT_DIR"/*.cpp

# Build the Simulink model as a catkin project
CURR_DIR=$(pwd)
cd "$CATKIN_WS"
catkin_make "${MODEL_NAME}_node"
cd "$CURR_DIR"
exit 0
|
/*
* Copyright 2014 Higher Frequency Trading
*
* http://www.higherfrequencytrading.com
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package demo;
import net.openhft.lang.io.Bytes;
import net.openhft.lang.io.serialization.BytesMarshallable;
import net.openhft.lang.model.constraints.NotNull;
/**
* A simple container class representing a Price.
* Note that it must be Serializable so that it can be stored as an object in Chronicle.
*/
public class Price implements BytesMarshallable {
// Instrument identifier; serialized with writeEnum/readEnum (string pooling).
public String symbol;
public double bidPrice, askPrice;
public long bidQuantity, askQuantity;
// Whether this quote has been confirmed by the counterparty feed.
public boolean confirmed;
// No-arg constructor required so instances can be created on deserialization.
public Price() {
}
// NOTE(review): parameter order interleaves bid/ask (bidPrice, bidQuantity,
// askPrice, askQuantity) unlike the field declarations -- callers must be
// careful to pass arguments in this order.
public Price(String symbol, double bidPrice, long bidQuantity,double askPrice, long askQuantity, boolean confirmed) {
this.symbol = symbol;
this.bidPrice = bidPrice;
this.askPrice = askPrice;
this.bidQuantity = bidQuantity;
this.askQuantity = askQuantity;
this.confirmed = confirmed;
}
// Writes fields in a fixed order; readMarshallable must mirror this order
// exactly or the stream is corrupted.
@Override
public void writeMarshallable(@NotNull Bytes out) {
out.writeEnum(symbol);
out.writeCompactDouble(bidPrice);
out.writeCompactDouble(askPrice);
out.writeCompactLong(bidQuantity);
out.writeCompactLong(askQuantity);
out.writeBoolean(confirmed);
}
// Reads fields in the same order written by writeMarshallable.
@Override
public void readMarshallable(@NotNull Bytes in) throws IllegalStateException {
symbol = in.readEnum(String.class);
bidPrice = in.readCompactDouble();
askPrice = in.readCompactDouble();
bidQuantity = in.readCompactLong();
askQuantity = in.readCompactLong();
confirmed = in.readBoolean();
}
public String getSymbol() {
return symbol;
}
public void setSymbol(String symbol) {
this.symbol = symbol;
}
public double getBidPrice() {
return bidPrice;
}
public void setBidPrice(double bidPrice) {
this.bidPrice = bidPrice;
}
public double getAskPrice() {
return askPrice;
}
public void setAskPrice(double askPrice) {
this.askPrice = askPrice;
}
public long getBidQuantity() {
return bidQuantity;
}
public void setBidQuantity(long bidQuantity) {
this.bidQuantity = bidQuantity;
}
public long getAskQuantity() {
return askQuantity;
}
public void setAskQuantity(long askQuantity) {
this.askQuantity = askQuantity;
}
public boolean isConfirmed() {
return confirmed;
}
public void setConfirmed(boolean confirmed) {
this.confirmed = confirmed;
}
}
<reponame>RusPosevkin/blockchain-server<gh_stars>0
module.exports = async function get (req, res) {
const StarBlockchain = require('../../../classes/star-blockchain');
// remove ':' symbol
const hash = req.param('hash').slice(1);
const starBlockchain = new StarBlockchain();
starBlockchain.getBlockByHash(hash)
.then((block) => {
if (block) {
return res.json(block);
}
starBlockchain.getBlockByHeight(-1, res)
.then(block => res.json(block))
.catch(() => res.status(400).json({
reason: 'Bad request',
details: 'Block was not found'
}));
});
} |
#!/usr/bin/env bash
# Apply the CQL schema file to the target Cassandra host.
# Quoting prevents word-splitting/globbing; ":?" makes the script fail fast
# with a clear message when the variables are unset instead of running a
# malformed cqlsh command.
cqlsh -f "${CQL_FILE:?CQL_FILE must be set}" "${CASSANDRA_HOST:?CASSANDRA_HOST must be set}"
<reponame>ardianchen/node-with-squelize
import jwt from 'jsonwebtoken'
// Signing key and token lifetime come from the environment and are read
// once at module load; .toString() deliberately throws early when unset.
const signingKey = process.env.TOKEN_KEY.toString()
const tokenLifetime = process.env.EX_TOKEN.toString()

// Verify a JWT and resolve with the whitelisted user fields of its payload.
// Rejects with a plain Error wrapping the jsonwebtoken error message.
const verify = (token) => new Promise((resolve, reject) => {
  jwt.verify(token, signingKey, (err, decode) => {
    if (err) {
      reject(new Error(err.message))
      return
    }
    resolve({
      _id: decode._id,
      username: decode.username,
      email: decode.email,
      authority: decode.authority,
      approved: decode.approved,
      active: decode.active
    })
  })
})

// Sign a payload into a JWT that expires after the configured lifetime.
const create = (doc) => new Promise((resolve, reject) => {
  jwt.sign(doc, signingKey, { expiresIn: tokenLifetime }, (err, token) => {
    if (err) {
      reject(new Error(err.message))
      return
    }
    resolve(token)
  })
})

export default {
  verify,
  create
}
|
<gh_stars>0
package org.rs2server.rs2.event.impl;
import org.rs2server.rs2.event.Event;
import org.rs2server.rs2.model.World;
// Recurring world event that broadcasts the current online player count.
public class PlayersOnlineTick extends Event {
// Fires every 120000 ms (2 minutes).
public PlayersOnlineTick() {
super(120000);
}
// Sends the player count as a world message with an icon and color markup.
@Override
public void execute() {
World.getWorld().sendWorldMessage("<img=32><col=42a4f4>News: There are " + World.getWorld().getPlayers().size() + " online.");
}
}
|
#!/bin/bash
# Block until RabbitMQ answers on its TCP port, then start the nameko service.
# Host/port expansions are quoted to guard against word-splitting (the
# original passed them unquoted to nc).
until nc -z "${RABBIT_HOST}" "${RABBIT_PORT}"; do
    echo "$(date) - INFO - Waiting for ${RABBIT_HOST}:${RABBIT_PORT}"
    sleep 3
done

# Run Service
nameko run --config config.yml offender.service
|
__version__ = '3.4.3' |
#!/bin/sh
#
# Release helper for the PayUIndia-CrashReporter pod: lint the podspec,
# create the release tag locally and remotely, then push to the CocoaPods
# trunk.  Tags are rolled back when a later step fails.
# Fix over the original: every call-site argument is quoted so names
# containing shell metacharacters cannot be word-split.
# (The echo-then-eval pattern is kept: it logs each command before running it.)

# Delete a tag from the local repository.
deleteLocalTag() {
    command="git tag -d $1"
    echo "Executing ---- ${command}"
    eval "$command"
}

# Delete a tag from origin, then from the local repository.
deleteRemoteTag() {
    command="git push --delete origin $1"
    echo "Executing ---- ${command}"
    eval "$command"
    deleteLocalTag "$1"
}

# Create a tag in the local repository.
createLocalTag() {
    command="git tag $1"
    echo "Executing ---- ${command}"
    eval "$command"
}

# Push the tag to origin; on failure, roll back the local tag.
createRemoteTag() {
    command="git push origin $1"
    echo "Executing ---- ${command}"
    eval "$command"
    if [ $? -eq 0 ]; then
        echo "Tag created remotely"
    else
        echo "Failed to create tag remotely"
        deleteLocalTag "$1"
    fi
}

# Lint the podspec before tagging/pushing anything.
podLibLint() {
    command="pod lib lint $1 --allow-warnings --verbose"
    echo "Executing ---- ${command}"
    eval "$command"
}

# Push the podspec to trunk; on failure, roll back the remote+local tag ($2).
podTrunkPush() {
    command="pod trunk push $1 --allow-warnings --verbose"
    echo "Executing ---- ${command}"
    eval "$command"
    if [ $? -eq 0 ]; then
        echo "Pod trunk successfully"
    else
        echo "Pod trunk failed"
        deleteRemoteTag "$2"
    fi
}

podName="PayUIndia-CrashReporter"
podVersion="1.0.0"
podSpec="${podName}.podspec"
tag="${podName}_${podVersion}"

# Each step only runs when the previous one succeeded.
podLibLint "${podSpec}" &&
createLocalTag "${tag}" &&
createRemoteTag "${tag}" &&
podTrunkPush "${podSpec}" "${tag}"
|
# Necessary imports
from django.db import models
import mptt.fields
# Model definition for the Category model
class Category(models.Model):
    """Hierarchical article category (MPTT tree node)."""
    name = models.CharField(max_length=100, verbose_name='Name')
    # BUG FIX: related_name was 'works', identical to Article.category's
    # related_name on the same target model, which produces a reverse-accessor
    # clash (Django system check fields.E304/E305) and prevents startup.
    # BUG FIX: default='' is not a valid ForeignKey default (FKs reference
    # rows, not strings); null=True is the standard way to allow a missing
    # parent.  NOTE(review): requires a migration -- confirm against the
    # existing schema before deploying.
    parent = mptt.fields.TreeForeignKey(
        blank=True,
        null=True,
        on_delete=models.deletion.CASCADE,
        related_name='children',
        to='article.Category',
        verbose_name='Parent Category'
    )

    # Method to retrieve all articles associated with a specific category
    def get_associated_articles(self):
        """Return a queryset of Articles whose category is exactly this one."""
        return Article.objects.filter(category=self)
# Model definition for the Article model (assuming it exists)
class Article(models.Model):
    """An article belonging to a (tree-structured) Category."""
    title = models.CharField(max_length=100, verbose_name='Title')
    # BUG FIX: related_name was 'works', duplicating Category.parent's
    # related_name on the same target model -- a reverse-accessor clash
    # (Django fields.E304/E305).  'articles' is the conventional reverse name.
    # BUG FIX: default='' is not a valid ForeignKey default; null=True allows
    # an article without a category.  NOTE(review): requires a migration.
    category = mptt.fields.TreeForeignKey(
        blank=True,
        null=True,
        on_delete=models.deletion.CASCADE,
        related_name='articles',
        to='article.Category',
        verbose_name='Category'
    )
    content = models.TextField(verbose_name='Content')
import torch
import math
from copy import deepcopy
from six import string_types
def eval_func(f, x):
    """Apply ``f`` to ``x``.

    ``f`` may be a callable, or a string containing a Python expression
    (typically a lambda source, as used in regime ``step_lambda`` /
    ``epoch_lambda`` entries) which is evaluated first.

    SECURITY NOTE: string inputs are passed to ``eval`` and must come from
    trusted configuration only -- never from user input.

    Note: the original tested ``six.string_types``; on Python 3 that is
    exactly ``(str,)``, so the dead py2 shim is dropped.
    """
    if isinstance(f, str):
        f = eval(f)  # trusted-config escape hatch; see security note
    return f(x)
class Regime(object):
    """Schedule of optimizer settings indexed by epoch or training step.

    ``update`` folds the regime entries that have started into a single
    settings dict (``self.setting``) and reports whether it changed.

    Examples for regime:

    1) "[{'epoch': 0, 'optimizer': 'Adam', 'lr': 1e-3},
         {'epoch': 2, 'optimizer': 'Adam', 'lr': 5e-4},
         {'epoch': 4, 'optimizer': 'Adam', 'lr': 1e-4},
         {'epoch': 8, 'optimizer': 'Adam', 'lr': 5e-5}
        ]"
    2)
    "[{'step_lambda':
        "lambda t: {
            'optimizer': 'Adam',
            'lr': 0.1 * min(t ** -0.5, t * 4000 ** -1.5),
            'betas': (0.9, 0.98), 'eps':1e-9}
        }]"
    """

    def __init__(self, regime, defaults=None):
        # BUG FIX: the default was a shared mutable dict (defaults={});
        # mutating one instance's defaults leaked into every other instance
        # created without an explicit ``defaults`` argument.
        if defaults is None:
            defaults = {}
        self.regime = regime
        self.defaults = defaults
        self.reset(regime, defaults)

    def reset(self, regime=None, defaults=None):
        """Re-arm the schedule, optionally replacing regime and/or defaults."""
        if regime is not None:
            self.regime = regime
        if defaults is not None:
            self.defaults = defaults
        self.current_regime_phase = None
        self.setting = self.defaults

    def update(self, epoch=None, train_steps=None):
        """adjusts according to current epoch or steps and regime.

        Returns True when the effective setting changed, False otherwise.
        """
        if self.regime is None:
            return False
        epoch = -1 if epoch is None else epoch
        train_steps = -1 if train_steps is None else train_steps
        setting = deepcopy(self.setting)
        if self.current_regime_phase is None:
            # Find the first phase that has already started; settings of the
            # phases we skip over are folded in along the way.
            for regime_phase, regime_setting in enumerate(self.regime):
                start_epoch = regime_setting.get('epoch', 0)
                start_step = regime_setting.get('step', 0)
                if epoch >= start_epoch or train_steps >= start_step:
                    self.current_regime_phase = regime_phase
                    break
                # each entry is updated from previous
                setting.update(regime_setting)
        if len(self.regime) > self.current_regime_phase + 1:
            next_phase = self.current_regime_phase + 1
            # Any more regime steps?
            start_epoch = self.regime[next_phase].get('epoch', float('inf'))
            start_step = self.regime[next_phase].get('step', float('inf'))
            if epoch >= start_epoch or train_steps >= start_step:
                self.current_regime_phase = next_phase
        setting.update(self.regime[self.current_regime_phase])
        if 'lr_decay_rate' in setting and 'lr' in setting:
            decay_steps = setting.pop('lr_decay_steps', 100)
            if train_steps % decay_steps == 0:
                decay_rate = setting.pop('lr_decay_rate')
                setting['lr'] *= decay_rate ** (train_steps / decay_steps)
        elif 'step_lambda' in setting:
            setting.update(eval_func(setting.pop('step_lambda'), train_steps))
        elif 'epoch_lambda' in setting:
            setting.update(eval_func(setting.pop('epoch_lambda'), epoch))
        if 'execute' in setting:
            setting.pop('execute')()
        if 'execute_once' in setting:
            setting.pop('execute_once')()
            # remove from regime, so won't happen again
            self.regime[self.current_regime_phase].pop('execute_once', None)
        if setting == self.setting:
            return False
        else:
            self.setting = setting
            return True

    def __repr__(self):
        return 'Current: %s\n Regime:%s' % (self.setting, self.regime)
|
# Connection configuration and security-group name consumed by the child
# script below (exported so the child process inherits them).
export CONN_CONFIG=openstack-config01
export SG_NAME=sg-01
./securitygroup-unregister.sh
|
#!/usr/bin/env bash
# stops the execution if a command or pipeline has an error
set -euxo pipefail
# Tinkerbell stack Linux setup script
#
# See https://tinkerbell.org/setup for the installation steps.
# file to hold all environment variables
ENV_FILE=.env
# Temporary working directory, removed on exit by the `finish` trap below.
SCRATCH=$(mktemp -d -t tmp.XXXXXXXXXX)
readonly SCRATCH
function finish() (
rm -rf "$SCRATCH"
)
trap finish EXIT
# Deployment assets live under ./deploy; generated runtime state under state/.
DEPLOYDIR=$(pwd)/deploy
readonly DEPLOYDIR
readonly STATEDIR=$DEPLOYDIR/state
# Only use colors when the terminal supports them (tput present and working).
if command -v tput >/dev/null && tput setaf 1 >/dev/null 2>&1; then
# color codes
RED="$(tput setaf 1)"
GREEN="$(tput setaf 2)"
YELLOW="$(tput setaf 3)"
RESET="$(tput sgr0)"
fi
# Log-message prefixes; ${VAR:-} keeps them safe under `set -u` when the
# color variables were never assigned above.
INFO="${GREEN:-}INFO:${RESET:-}"
ERR="${RED:-}ERROR:${RESET:-}"
WARN="${YELLOW:-}WARNING:${RESET:-}"
BLANK="      "
NEXT="${GREEN:-}NEXT:${RESET:-}"
# Print the lowercased distro ID from /etc/os-release ("" when unavailable).
get_distribution() (
local lsb_dist=""
# Every system that we officially support has /etc/os-release
if [[ -r /etc/os-release ]]; then
# shellcheck disable=SC1091
lsb_dist="$(. /etc/os-release && echo "$ID")"
fi
# Returning an empty string here should be alright since the
# case statements don't act unless you provide an actual value
echo "$lsb_dist" | tr '[:upper:]' '[:lower:]'
)
# Print the distro VERSION_ID from /etc/os-release ("0" when unavailable).
get_distro_version() (
local lsb_version="0"
# Every system that we officially support has /etc/os-release
if [[ -r /etc/os-release ]]; then
# shellcheck disable=SC1091
lsb_version="$(. /etc/os-release && echo "$VERSION_ID")"
fi
echo "$lsb_version"
)
# Succeeds when the provisioner interface already carries the host IP.
is_network_configured() (
# Require the provisioner interface have the host IP
if ! ip addr show "$TINKERBELL_NETWORK_INTERFACE" |
grep -q "$TINKERBELL_HOST_IP"; then
return 1
fi
return 0
)
# Print the name of the setup_networking_* function to use for this
# distro/version (jq does the numeric version comparison for ubuntu).
identify_network_strategy() (
local distro=$1
local version=$2
case "$distro" in
ubuntu)
if jq -n --exit-status '$distro_version >= 17.10' --argjson distro_version "$version" >/dev/null 2>&1; then
echo "setup_networking_netplan"
else
echo "setup_networking_ubuntu_legacy"
fi
;;
centos)
echo "setup_networking_centos"
;;
*)
echo "setup_networking_manually"
;;
esac
)
# Configure the provisioner interface using the distro-specific strategy,
# enable IP forwarding, and (optionally) set up NAT via .nat_interface.
setup_networking() (
local distro=$1
local version=$2
setup_network_forwarding
if is_network_configured; then
echo "$INFO tinkerbell network interface is already configured"
return 0
fi
local strategy
strategy=$(identify_network_strategy "$distro" "$version")
"${strategy}" "$distro" "$version" # execute the strategy
if is_network_configured; then
echo "$INFO tinkerbell network interface configured successfully"
else
echo "$ERR tinkerbell network interface configuration failed"
fi
NAT_INTERFACE=""
if [[ -r .nat_interface ]]; then
NAT_INTERFACE=$(cat .nat_interface)
fi
if [[ -n $NAT_INTERFACE ]] && ip addr show "$NAT_INTERFACE" &>/dev/null; then
# TODO(nshalman) the terraform code would just run these commands as-is once
# but it would be nice to make these more persistent based on OS
iptables -A FORWARD -i "$TINKERBELL_NETWORK_INTERFACE" -o "$NAT_INTERFACE" -j ACCEPT
iptables -A FORWARD -i "$NAT_INTERFACE" -o "$TINKERBELL_NETWORK_INTERFACE" -m state --state ESTABLISHED,RELATED -j ACCEPT
iptables -t nat -A POSTROUTING -o "$NAT_INTERFACE" -j MASQUERADE
fi
)
# Fallback strategy for unsupported distros: explain and abort.
setup_networking_manually() (
local distro=$1
local version=$2
echo "$ERR this setup script cannot configure $distro ($version)"
echo "$BLANK please read this script's source and configure it manually."
exit 1
)
# Enable IPv4 forwarding (needed by docker) and persist it when possible.
setup_network_forwarding() (
# enable IP forwarding for docker
if (($(sysctl -n net.ipv4.ip_forward) != 1)); then
if [[ -d /etc/sysctl.d ]]; then
echo "net.ipv4.ip_forward=1" >/etc/sysctl.d/99-tinkerbell.conf
elif [[ -f /etc/sysctl.conf ]]; then
echo "net.ipv4.ip_forward=1" >>/etc/sysctl.conf
fi
sysctl net.ipv4.ip_forward=1
fi
)
# Ubuntu >= 17.10: write a netplan YAML for the provisioner interface
# (rendered with jq from the env vars) and apply it.
setup_networking_netplan() (
jq -n \
--arg interface "$TINKERBELL_NETWORK_INTERFACE" \
--arg cidr "$TINKERBELL_CIDR" \
--arg host_ip "$TINKERBELL_HOST_IP" \
'{
network: {
renderer: "networkd",
ethernets: {
($interface): {
addresses: [
"\($host_ip)/\($cidr)"
]
}
}
}
}' >"/etc/netplan/${TINKERBELL_NETWORK_INTERFACE}.yaml"
ip link set "$TINKERBELL_NETWORK_INTERFACE" nomaster
netplan apply
echo "$INFO waiting for the network configuration to be applied by systemd-networkd"
sleep 3
)
# Older Ubuntu: append an ifupdown stanza to /etc/network/interfaces,
# unless one already exists (in that case print manual instructions).
setup_networking_ubuntu_legacy() (
if ! [[ -f /etc/network/interfaces ]]; then
echo "$ERR file /etc/network/interfaces not found"
exit 1
fi
if grep -q "$TINKERBELL_NETWORK_INTERFACE" /etc/network/interfaces; then
echo "$ERR /etc/network/interfaces already has an entry for $TINKERBELL_NETWORK_INTERFACE."
echo "$BLANK To prevent breaking your network, please edit /etc/network/interfaces"
echo "$BLANK and configure $TINKERBELL_NETWORK_INTERFACE as follows:"
generate_iface_config
echo ""
echo "$BLANK Then run the following commands:"
echo "$BLANK ip link set $TINKERBELL_NETWORK_INTERFACE nomaster"
echo "$BLANK ifdown $TINKERBELL_NETWORK_INTERFACE"
echo "$BLANK ifup $TINKERBELL_NETWORK_INTERFACE"
exit 1
else
generate_iface_config >>/etc/network/interfaces
ip link set "$TINKERBELL_NETWORK_INTERFACE" nomaster
ifdown "$TINKERBELL_NETWORK_INTERFACE"
ifup "$TINKERBELL_NETWORK_INTERFACE"
fi
)
# Emit the ifupdown stanza for the provisioner interface (heredoc content
# is output verbatim, so no comments may be added inside it).
generate_iface_config() (
cat <<EOF
auto $TINKERBELL_NETWORK_INTERFACE
iface $TINKERBELL_NETWORK_INTERFACE inet static
address $TINKERBELL_HOST_IP/$TINKERBELL_CIDR
pre-up sleep 4
EOF
)
# CentOS: write an ifcfg file for the provisioner interface and bring it up.
# When a config already exists, print manual instructions (but note the file
# is overwritten afterwards regardless).
setup_networking_centos() (
local HWADDRESS
local content
HWADDRESS=$(ip addr show "$TINKERBELL_NETWORK_INTERFACE" | grep ether | awk -F 'ether' '{print $2}' | cut -d" " -f2)
content=$(
cat <<EOF
DEVICE=$TINKERBELL_NETWORK_INTERFACE
ONBOOT=yes
HWADDR=$HWADDRESS
BOOTPROTO=static
IPADDR=$TINKERBELL_HOST_IP
PREFIX=$TINKERBELL_CIDR
EOF
)
local cfgfile="/etc/sysconfig/network-scripts/ifcfg-$TINKERBELL_NETWORK_INTERFACE"
if [[ -f $cfgfile ]]; then
echo "$ERR network config already exists: $cfgfile"
echo "$BLANK Please update it to match this configuration:"
echo "$content"
echo ""
echo "$BLANK Then, run the following commands:"
echo "ip link set $TINKERBELL_NETWORK_INTERFACE nomaster"
echo "ifup $TINKERBELL_NETWORK_INTERFACE"
fi
echo "$content" >"$cfgfile"
ip link set "$TINKERBELL_NETWORK_INTERFACE" nomaster
ifup "$TINKERBELL_NETWORK_INTERFACE"
)
# Download (or unpack a pre-fetched tarball of) the OSIE installer into the
# webroot, and stage the workflow helper scripts; skipped when already done.
setup_osie() (
mkdir -p "$STATEDIR/webroot"
local osie_current=$STATEDIR/webroot/misc/osie/current
local tink_workflow=$STATEDIR/webroot/workflow/
if [[ ! -d $osie_current ]] || [[ ! -d $tink_workflow ]]; then
mkdir -p "$osie_current"
mkdir -p "$tink_workflow"
pushd "$SCRATCH"
# TB_OSIE_TAR lets an operator supply a local tarball instead of downloading.
if [[ -z ${TB_OSIE_TAR:-} ]]; then
curl "${OSIE_DOWNLOAD_LINK}" -o ./osie.tar.gz
tar -zxf osie.tar.gz
else
tar -zxf "$TB_OSIE_TAR"
fi
if pushd osie*/; then
if mv workflow-helper.sh workflow-helper-rc "$tink_workflow"; then
cp -r ./* "$osie_current"
else
echo "$ERR failed to move 'workflow-helper.sh' and 'workflow-helper-rc'"
exit 1
fi
popd
fi
else
echo "$INFO found existing osie files, skipping osie setup"
fi
)
# Wait until the named docker-compose service reports a healthy healthcheck;
# exits the script when the container is (or becomes) unhealthy.
check_container_status() (
local container_name="$1"
local container_id
container_id=$(docker-compose -f "$DEPLOYDIR/docker-compose.yml" ps -q "$container_name")
local start_moment
local current_status
start_moment=$(docker inspect "${container_id}" --format '{{ .State.StartedAt }}')
current_status=$(docker inspect "${container_id}" --format '{{ .State.Health.Status }}')
case "$current_status" in
starting)
: # move on to the events check
;;
healthy)
return 0
;;
unhealthy)
echo "$ERR $container_name is already running but not healthy. status: $current_status"
exit 1
;;
*)
echo "$ERR $container_name is already running but its state is a mystery. status: $current_status"
exit 1
;;
esac
local status
# Block on the docker event stream until the first health_status event.
read -r status < <(docker events \
--since "$start_moment" \
--filter "container=$container_id" \
--filter "event=health_status" \
--format '{{.Status}}')
if [[ $status != "health_status: healthy" ]]; then
echo "$ERR $container_name is not healthy. status: $status"
exit 1
fi
)
# Generate TLS certificates for the registry (via the tinkerbell-certs
# container) and install the CA cert into the webroot (for workers) and the
# host's docker certs.d directory.
generate_certificates() (
mkdir -p "$STATEDIR/certs"
if ! [[ -f "$STATEDIR/certs/ca.json" ]]; then
jq \
'.
| .names[0].L = $facility
' \
"$DEPLOYDIR/tls/ca.in.json" \
--arg ip "$TINKERBELL_HOST_IP" \
--arg facility "$FACILITY" \
>"$STATEDIR/certs/ca.json"
fi
if ! [[ -f "$STATEDIR/certs/server-csr.json" ]]; then
jq \
'.
| .hosts += [ $ip, "tinkerbell.\($facility).packet.net" ]
| .names[0].L = $facility
| .hosts = (.hosts | sort | unique)
' \
"$DEPLOYDIR/tls/server-csr.in.json" \
--arg ip "$TINKERBELL_HOST_IP" \
--arg facility "$FACILITY" \
>"$STATEDIR/certs/server-csr.json"
fi
docker build --tag "tinkerbell-certs" "$DEPLOYDIR/tls"
docker run --rm \
--volume "$STATEDIR/certs:/certs" \
--user "$UID:$(id -g)" \
tinkerbell-certs
local certs_dir="/etc/docker/certs.d/$TINKERBELL_HOST_IP"
# copy public key to NGINX for workers
if ! cmp --quiet "$STATEDIR/certs/ca.pem" "$STATEDIR/webroot/workflow/ca.pem"; then
cp "$STATEDIR/certs/ca.pem" "$STATEDIR/webroot/workflow/ca.pem"
fi
# update host to trust registry certificate
if ! cmp --quiet "$STATEDIR/certs/ca.pem" "$certs_dir/tinkerbell.crt"; then
if ! [[ -d "$certs_dir/" ]]; then
# The user will be told to create the directory
# in the next block, if copying the certs there
# fails.
# BUG FIX: the original `mkdir -p "$certs_dir" || true >/dev/null 2>&1`
# bound the redirection to `true`, not to mkdir, so permission errors
# still printed to stderr; redirect and guard mkdir itself.
mkdir -p "$certs_dir" >/dev/null 2>&1 || true
fi
if ! cp "$STATEDIR/certs/ca.pem" "$certs_dir/tinkerbell.crt"; then
echo "$ERR please copy $STATEDIR/certs/ca.pem to $certs_dir/tinkerbell.crt"
echo "$BLANK and run $0 again:"
if ! [[ -d $certs_dir ]]; then
echo "sudo mkdir -p '$certs_dir'"
fi
echo "sudo cp '$STATEDIR/certs/ca.pem' '$certs_dir/tinkerbell.crt'"
exit 1
fi
fi
)
# Log docker in to the local tinkerbell registry (password via stdin so it
# does not show up in the process list).
docker_login() (
echo -n "$TINKERBELL_REGISTRY_PASSWORD" | docker login -u="$TINKERBELL_REGISTRY_USERNAME" --password-stdin "$TINKERBELL_HOST_IP"
)
# This function takes an image specified as first parameter and it tags and
# push it using the second one. useful to proxy images from a repository to
# another.
docker_mirror_image() (
local from=$1
local to=$2
docker pull "$from"
docker tag "$from" "$to"
docker push "$to"
)
# Bring up the registry service and wait for it to become healthy.
start_registry() (
docker-compose -f "$DEPLOYDIR/docker-compose.yml" up --build -d registry
check_container_status "registry"
)
# This function supposes that the registry is up and running.
# It configures with the required dependencies.
bootstrap_docker_registry() (
docker_login
# osie looks for tink-worker:latest, so we have to play with it a bit
# https://github.com/tinkerbell/osie/blob/master/apps/workflow-helper.sh#L66
docker_mirror_image "${TINKERBELL_TINK_WORKER_IMAGE}" "${TINKERBELL_HOST_IP}/tink-worker:latest"
)
# Create the registry state directory, start the registry, and seed it.
setup_docker_registry() (
local registry_images="$STATEDIR/registry"
if ! [[ -d $registry_images ]]; then
mkdir -p "$registry_images"
fi
start_registry
bootstrap_docker_registry
)
# Start the remaining stack services one by one, waiting for each
# healthcheck before moving on.
start_components() (
local components=(db hegel tink-server boots tink-cli nginx)
for comp in "${components[@]}"; do
docker-compose -f "$DEPLOYDIR/docker-compose.yml" up --build -d "$comp"
sleep 3
check_container_status "$comp"
done
)
# Succeeds when the given command exists on PATH.
command_exists() (
command -v "$@" >/dev/null 2>&1
)
# Verify that a prerequisite command exists on PATH and is not an empty file.
# Returns non-zero (and prints an error) when the check fails.
check_command() (
if ! command_exists "$1"; then
echo "$ERR Prerequisite executable command not found: $1"
return 1
fi
if ! [[ -s "$(which "$1")" ]]; then
echo "$ERR Prerequisite command is an empty file: $1"
# BUG FIX: the original fell through after printing the error above and
# still reported success ("Found prerequisite" + return 0).
return 1
fi
echo "$BLANK Found prerequisite: $1"
return 0
)
# Verify all host prerequisites (docker, compose, ip, jq, plus whatever the
# distro's networking strategy needs); aborts when any are missing.
check_prerequisites() (
distro=$1
version=$2
echo "$INFO verifying prerequisites for $distro ($version)"
failed=0
check_command docker || failed=1
check_command docker-compose || failed=1
check_command ip || failed=1
check_command jq || failed=1
strategy=$(identify_network_strategy "$distro" "$version")
case "$strategy" in
"setup_networking_netplan")
check_command netplan || failed=1
;;
"setup_networking_ubuntu_legacy")
check_command ifdown || failed=1
check_command ifup || failed=1
;;
"setup_networking_centos")
check_command ifdown || failed=1
check_command ifup || failed=1
;;
"setup_networking_manually")
echo "$WARN this script cannot automatically configure your network."
;;
*)
echo "$ERR bug: unhandled network strategy: $strategy"
exit 1
;;
esac
if ((failed == 1)); then
echo "$ERR Prerequisites not met. Please install the missing commands and re-run $0."
exit 1
fi
)
# Print the post-setup instructions for the operator.
whats_next() (
echo "$NEXT  1. Enter /deploy and run: source ../.env; docker-compose up -d"
echo "$BLANK 2. Try executing your first workflow."
echo "$BLANK    Follow the steps described in https://tinkerbell.org/examples/hello-world/ to say 'Hello World!' with a workflow."
)
# Top-level orchestration: detect the platform, check prerequisites, load the
# generated .env, configure networking (unless skipped), then stage OSIE,
# certificates and the docker registry.
do_setup() (
# perform some very rudimentary platform detection
lsb_dist=$(get_distribution)
lsb_version=$(get_distro_version)
echo "$INFO starting tinkerbell stack setup"
check_prerequisites "$lsb_dist" "$lsb_version"
if ! [[ -f $ENV_FILE ]]; then
echo "$ERR Run './generate-env.sh network-interface > \"$ENV_FILE\"' before continuing."
exit 1
fi
# shellcheck disable=SC1090
source "$ENV_FILE"
# BUG FIX: under `set -u` (enabled at the top of this script) an unset
# TINKERBELL_SKIP_NETWORKING aborted the script here with an unbound
# variable error; default it to the empty string instead.
if [[ -z ${TINKERBELL_SKIP_NETWORKING:-} ]]; then
setup_networking "$lsb_dist" "$lsb_version"
fi
setup_osie
generate_certificates
setup_docker_registry
echo "$INFO tinkerbell stack setup completed successfully on $lsb_dist server"
whats_next | tee /tmp/post-setup-message
)
# wrapped up in a function so that we have some protection against only getting
# half the file during "curl | sh"
do_setup
|
import random
def generateRandomList(length):
    """Return a list of ``length`` random integers drawn uniformly from 1..10."""
    return [random.randint(1, 10) for _ in range(length)]


if __name__ == '__main__':
    print(generateRandomList(5))
<reponame>AlessandroPdaSilva/Atividades_Estacio<filename>C++/Atividade 3/questao 4 dv7.cpp
#include <iostream>
#include <locale.h>
using namespace std;
// Reads sex ('f'/'m') and age for exactly 10 people, then reports how many
// answers were female and how many people were adults (age >= 18).
// Prompts and output are intentionally in Portuguese (runtime strings).
int main()
{
// idade: age entered each iteration; idadec: running count of adults;
// sexof: running count of females; contador: loop counter (1..10).
int idade,idadec,sexof,contador;
char sexo;
contador=1;
sexof=0;
idadec=0;
do{
cout << " qual o sexo da pessoa, f ou m (f = feminino ou m = masculino) "<<endl;
cin>> sexo;
cout << " qual a idade da pessoa "<<endl;
cin>>idade;
// count adults (18 or older)
if(idade>=18) {
idadec++;
}
// NOTE(review): only lowercase 'f' is counted; an uppercase 'F' answer is
// silently ignored -- confirm whether that is intended.
if(sexo=='f'){
sexof++;
}
contador++;
}while(contador<=10);
cout<< " foram digitados " << sexof << " do sexo feminino \n";
cout << " foram digitados "<< idadec << " maiores de idade \n";
return 0;
}
|
<reponame>hongdongni/swt-bling
package com.readytalk.swt.util;
import org.eclipse.swt.graphics.Color;
import org.eclipse.swt.graphics.DeviceData;
import org.eclipse.swt.graphics.RGB;
import org.eclipse.swt.widgets.Display;
import org.eclipse.swt.widgets.Shell;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
// Tests for ColorFactory's color caching/disposal, using SWT DeviceData
// resource tracking (data.tracking = true) to count live Color objects.
public class ColorFactoryTest {
private Shell shell;
private Display display;
@Before
public void setup() throws Exception {
// SWT allows only one Display per thread; drop any leftover one first.
if (Display.getCurrent() != null) Display.getCurrent().dispose();
DeviceData data = new DeviceData();
data.tracking = true;
display = new Display(data);
shell = new Shell(display);
}
@After
public void tearDown() throws Exception {
if (display != null) {
display.dispose();
}
ColorFactory.disposeAll();
}
// A newly created color shows up as exactly one tracked device object.
@Test
public void getColor_CreatesColor_OneObjectAddedToDeviceDataObjects() {
int numberOfItemsBefore = shell.getDisplay().getDeviceData().objects.length;
ColorFactory.getColor(shell.getDisplay(), 23, 34, 45);
Assert.assertEquals(numberOfItemsBefore+1, shell.getDisplay().getDeviceData().objects.length);
}
// disposeAll releases the tracked color again.
@Test
public void getColor_CreateAndDisposeColor_NoAdditionalObjectInoDeviceDataObjects() {
int numberOfItemsBefore = shell.getDisplay().getDeviceData().objects.length;
ColorFactory.getColor(shell.getDisplay(), 23, 34, 45);
ColorFactory.disposeAll();
Assert.assertEquals(shell.getDisplay().getDeviceData().objects.length, numberOfItemsBefore);
}
// Requesting the same RGB twice must not allocate a second native color.
@Test
public void getColor_CreateTwoSameColor_OnlyOneObjectAddedToDeviceDataObjects() {
ColorFactory.getColor(shell.getDisplay(), 23, 34, 45);
int numberOfItemsBefore = shell.getDisplay().getDeviceData().objects.length;
ColorFactory.getColor(shell.getDisplay(), 23, 34, 45);
Assert.assertEquals(shell.getDisplay().getDeviceData().objects.length, numberOfItemsBefore);
}
// The cache must return the identical instance for equal RGB values.
@Test
public void getColor_CreateTwoSameColor_GetSameObjectFromColorMap() {
Color a = ColorFactory.getColor(shell.getDisplay(), 23, 34, 45);
Color b = ColorFactory.getColor(shell.getDisplay(), 23, 34, 45);
// Equals may compare RGB values in some cases; so we look at the original object hashcode
Assert.assertEquals(System.identityHashCode(a), System.identityHashCode(b));
}
// Distinct RGB values each get their own cache entry.
@Test
public void getColor_CreateThreeDifferentColors_ColorMapSizeIs3() {
ColorFactory.getColor(shell.getDisplay(), 23, 34, 45);
ColorFactory.getColor(shell.getDisplay(), 255, 214, 55);
ColorFactory.getColor(shell.getDisplay(), 255, 94, 55);
Assert.assertEquals(3, ColorFactory.colorMap.size());
}
// disposeAll also empties the cache map.
@Test
public void getColor_CreateAndDisposeColor_ColorMapSizeIs0() {
ColorFactory.getColor(shell.getDisplay(), 23, 34, 45);
ColorFactory.disposeAll();
Assert.assertEquals(0, ColorFactory.colorMap.size());
}
// Overload: Device + ints produces the requested RGB.
@Test
public void getColor_CreateColorViaDeviceAndInts_ReturnsExpectedColor() {
Color c = ColorFactory.getColor(shell.getDisplay(), 23, 34, 45);
Assert.assertEquals(c.getRGB(), new RGB(23, 34, 45));
}
// Overload: Device + RGB produces the requested RGB.
@Test
public void getColor_CreateColorViaRGB_ReturnsExpectedColor() {
RGB rgb = new RGB(23, 34, 45);
Color c = ColorFactory.getColor(shell.getDisplay(),rgb);
Assert.assertEquals(c.getRGB(), rgb);
}
// Overload: ints only (factory supplies the display) produces the RGB.
@Test
public void getColor_CreateColorViaInts_ReturnsExpectedColor() {
Color c = ColorFactory.getColor(33, 34, 45);
Assert.assertEquals(c.getRGB(), new RGB(33, 34, 45));
}
// Negative component values must be rejected.
@Test(expected = IllegalArgumentException.class)
public void getColor_CreateColorViaNegativeInts_ThrowsIllegalArgumentException() {
ColorFactory.getColor(-33, -34, -45);
}
}
|
<filename>linked_list/singly_linked_list_test.go
package linked_list
import "testing"
// TestNewNode checks that a fresh node stores the given value and has no successor.
func TestNewNode(t *testing.T) {
	node := NewNode(10)
	if node.value != 10 || node.next != nil {
		t.Error(`TestNewNode failed`)
	}
}
// TestNewSingleLinkedList checks that a new list starts empty.
func TestNewSingleLinkedList(t *testing.T) {
	list := NewSingleLinkedList()
	if list.length != 0 || list.head != nil {
		t.Error(`TestNewSingleLinkedList failed`)
	}
}
// TestInsertToHead checks that each head insertion becomes the new head.
func TestInsertToHead(t *testing.T) {
	list := NewSingleLinkedList()

	list.InsertToHead(1)
	if list.head == nil || list.head.value != 1 || list.length != 1 {
		t.Error(`TestInsertToHead failed`)
	}

	list.InsertToHead(2)
	if list.head == nil || list.head.value != 2 || list.length != 2 {
		t.Error(`TestInsertToHead failed`)
	}
}
// TestInsertToTail checks that tail insertions leave the head unchanged.
func TestInsertToTail(t *testing.T) {
	list := NewSingleLinkedList()

	list.InsertToTail(1)
	if list.head == nil || list.head.value != 1 || list.length != 1 {
		t.Error(`TestInsertToTail failed`)
	}

	list.InsertToTail(2)
	if list.head == nil || list.head.value != 1 || list.length != 2 {
		t.Error(`TestInsertToTail failed`, list.length, list.head.value)
	}
}
// TestGetTailNode checks that the tail is the first value pushed to the head.
func TestGetTailNode(t *testing.T) {
	list := NewSingleLinkedList()
	list.InsertToHead(1)
	list.InsertToHead(2)

	tail := list.getTailNode()
	if tail == nil || tail.value != 1 {
		t.Error(`TestGetTailNode failed`)
	}
}
// TestFindByIndex checks zero-based lookup from the head of the list.
func TestFindByIndex(t *testing.T) {
	list := NewSingleLinkedList()
	list.InsertToHead(1)
	list.InsertToHead(2)

	// Index 0 is the most recently inserted head value.
	if node := list.FindByIndex(0); node == nil || node.value != 2 {
		t.Error(`TestFindByIndex failed`)
	}
	if node := list.FindByIndex(1); node == nil || node.value != 1 {
		t.Error(`TestFindByIndex failed`)
	}
}
// TestInsertBefore checks insertion before an existing node, and that
// inserting before a node not in the list reports failure.
func TestInsertBefore(t *testing.T) {
	list := NewSingleLinkedList()
	list.InsertToTail(1)
	list.InsertToTail(2)

	anchor := list.FindByIndex(1)
	list.InsertBefore(anchor, 3)
	if list.FindByIndex(1).value != 3 {
		t.Error(`TestInsertBefore failed`)
	}

	outsider := NewNode(10)
	if list.InsertBefore(outsider, 3) {
		t.Error(`TestInsertBefore failed`)
	}
}
// TestInsertAfter checks insertion after an existing node.
func TestInsertAfter(t *testing.T) {
	list := NewSingleLinkedList()
	list.InsertToTail(1)
	list.InsertToTail(2)

	anchor := list.FindByIndex(1)
	list.InsertAfter(anchor, 3)
	if list.FindByIndex(2).value != 3 {
		t.Error(`TestInsertAfter failed`)
	}
}
// TestLen checks that Len reports the number of inserted elements.
func TestLen(t *testing.T) {
	list := NewSingleLinkedList()
	list.InsertToTail(1)
	list.InsertToTail(2)

	if list.Len() != 2 {
		t.Error(`TestLen failed`)
	}
}
// TestDeleteNode checks deletion of an interior node and of the head,
// down to the empty list.
func TestDeleteNode(t *testing.T) {
	list := NewSingleLinkedList()
	list.InsertToTail(1)
	list.InsertToTail(2)

	list.DeleteNode(list.FindByIndex(1))
	if list.length != 1 || list.head.value != 1 {
		t.Error(`TestDeleteNode failed`)
	}

	list.DeleteNode(list.head)
	if list.length != 0 || list.head != nil {
		t.Error(`TestDeleteNode failed`)
	}
}
// TestString checks the human-readable rendering of the list.
func TestString(t *testing.T) {
	list := NewSingleLinkedList()
	list.InsertToTail(1)
	list.InsertToTail(2)

	want := "SingleLinkedList: size = 2\n[1,2]"
	if got := list.String(); got != want {
		t.Error(`TestString failed`)
	}
}
|
def process_data(results):
    """Flatten the nested ``results['invocation']['module_args']['data']`` field.

    A list becomes a comma-joined string of its items; a dict becomes a
    comma-joined string of ``key:value`` pairs. Any other type is left
    untouched. ``results`` is modified in place and also returned.
    """
    module_args = results['invocation']['module_args']
    data = module_args['data']
    if isinstance(data, list):
        module_args['data'] = ','.join(map(str, data))
    elif isinstance(data, dict):
        module_args['data'] = ','.join(f"{key}:{value}" for key, value in data.items())
    return results
#!/bin/sh
# Wrapper: runs Arkime's addUser.js with the bundled Node runtime and the
# installation's config.ini, forwarding all command-line arguments.
# NOTE(review): BUILD_ARKIME_INSTALL_DIR looks like a placeholder that is
# presumably substituted at package-build time — confirm in the build scripts.
BUILD_ARKIME_INSTALL_DIR/bin/node BUILD_ARKIME_INSTALL_DIR/viewer/addUser.js -c BUILD_ARKIME_INSTALL_DIR/etc/config.ini "$@"
<reponame>melkishengue/cpachecker
/*
* CPAchecker is a tool for configurable software verification.
* This file is part of CPAchecker.
*
* Copyright (C) 2007-2014 <NAME>
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
* CPAchecker web page:
* http://cpachecker.sosy-lab.org
*/
package org.sosy_lab.cpachecker.cpa.livevar;
import com.google.common.base.Equivalence.Wrapper;
import com.google.common.collect.Multimap;
import org.sosy_lab.common.configuration.Configuration;
import org.sosy_lab.common.configuration.InvalidConfigurationException;
import org.sosy_lab.common.configuration.Option;
import org.sosy_lab.common.configuration.Options;
import org.sosy_lab.common.log.LogManager;
import org.sosy_lab.cpachecker.cfa.CFA;
import org.sosy_lab.cpachecker.cfa.ast.ASimpleDeclaration;
import org.sosy_lab.cpachecker.cfa.model.CFANode;
import org.sosy_lab.cpachecker.core.defaults.AbstractCPA;
import org.sosy_lab.cpachecker.core.defaults.AutomaticCPAFactory;
import org.sosy_lab.cpachecker.core.defaults.DelegateAbstractDomain;
import org.sosy_lab.cpachecker.core.interfaces.AbstractState;
import org.sosy_lab.cpachecker.core.interfaces.CPAFactory;
import org.sosy_lab.cpachecker.core.interfaces.MergeOperator;
import org.sosy_lab.cpachecker.core.interfaces.StateSpacePartition;
import org.sosy_lab.cpachecker.core.interfaces.StopOperator;
/**
 * CPA for live-variable analysis. This class is a thin configuration shell:
 * the actual transfer logic lives in {@code LiveVariablesTransferRelation},
 * which is constructed in the constructor and queried for initial states and
 * the final result.
 */
@Options
public class LiveVariablesCPA extends AbstractCPA {
  @Option(secure=true, name = "merge", toUppercase = true, values = { "SEP", "JOIN" },
      description = "which merge operator to use for LiveVariablesCPA")
  private String mergeType = "JOIN";
  @Option(secure=true, name = "stop", toUppercase = true, values = { "SEP", "JOIN", "NEVER" },
      description = "which stop operator to use for LiveVariablesCPA")
  private String stopType = "SEP";
  /** Factory hook used by CPAchecker's reflective CPA instantiation. */
  public static CPAFactory factory() {
    return AutomaticCPAFactory.forType(LiveVariablesCPA.class);
  }
  /**
   * Builds the CPA with a delegating abstract domain and a transfer relation
   * parameterized by the CFA's variable classification and language.
   *
   * @throws InvalidConfigurationException if option injection fails
   */
  private LiveVariablesCPA(final Configuration pConfig,
      final LogManager pLogger,
      final CFA cfa) throws InvalidConfigurationException {
    super(
        DelegateAbstractDomain.getInstance(),
        new LiveVariablesTransferRelation(
            cfa.getVarClassification(), pConfig, cfa.getLanguage(), cfa, pLogger));
    // Inject the merge/stop options declared on this class.
    pConfig.inject(this, LiveVariablesCPA.class);
  }
  @Override
  public MergeOperator getMergeOperator() {
    return buildMergeOperator(mergeType);
  }
  @Override
  public StopOperator getStopOperator() {
    return buildStopOperator(stopType);
  }
  @Override
  public AbstractState getInitialState(CFANode pNode, StateSpacePartition pPartition) {
    // Initial state depends on the entry node; delegate to the transfer relation.
    return ((LiveVariablesTransferRelation) getTransferRelation()).getInitialState(pNode);
  }
  /**
   * Returns the liveVariables that are currently computed. Calling this method
   * makes only sense if the analysis was completed
   * @return a Multimap containing the variables that are live at each location
   */
  public Multimap<CFANode, Wrapper<ASimpleDeclaration>> getLiveVariables() {
    return ((LiveVariablesTransferRelation) getTransferRelation()).getLiveVariables();
  }
}
|
<table>
<tr>
<th>Name</th>
<th>City</th>
<th>Country</th>
</tr>
<tr>
<td>John Doe</td>
<td>New York</td>
<td>USA</td>
</tr>
<tr>
<td>Jane Doe</td>
<td>London</td>
<td>UK</td>
</tr>
</table> |
/**
******************************************************************************
* @file stm32f0xx_hal_conf.h
* @author MCD Application Team
* @version V1.7.0
* @date 04-November-2016
* @brief HAL configuration file.
******************************************************************************
* @attention
*
* <h2><center>© COPYRIGHT(c) 2016 STMicroelectronics</center></h2>
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
/* Define to prevent recursive inclusion -------------------------------------*/
#ifndef __STM32F0xx_HAL_CONF_H
#define __STM32F0xx_HAL_CONF_H

#ifdef __cplusplus
extern "C" {
#endif

/* Module selection: each enabled module has its header included below. */
#define HAL_MODULE_ENABLED
#define HAL_CORTEX_MODULE_ENABLED
#define HAL_FLASH_MODULE_ENABLED
#define HAL_GPIO_MODULE_ENABLED
#define HAL_PCD_MODULE_ENABLED
#define HAL_PWR_MODULE_ENABLED
#define HAL_RCC_MODULE_ENABLED

/* Oscillator values and start-up timeouts. Every value is overridable:
   define it before this header is included to match the board's clocks. */
#if !defined (HSE_VALUE)
#define HSE_VALUE ((uint32_t)8000000) /*!< Value of the External oscillator in Hz */
#endif /* HSE_VALUE */

#if !defined (HSE_STARTUP_TIMEOUT)
#define HSE_STARTUP_TIMEOUT ((uint32_t)100) /*!< Time out for HSE start up, in ms */
#endif /* HSE_STARTUP_TIMEOUT */

#if !defined (HSI_VALUE)
#define HSI_VALUE ((uint32_t)8000000) /*!< Value of the Internal oscillator in Hz*/
#endif /* HSI_VALUE */

#if !defined (HSI_STARTUP_TIMEOUT)
#define HSI_STARTUP_TIMEOUT ((uint32_t)5000) /*!< Time out for HSI start up */
#endif /* HSI_STARTUP_TIMEOUT */

#if !defined (HSI14_VALUE)
#define HSI14_VALUE ((uint32_t)14000000) /*!< Value of the Internal High Speed oscillator for ADC in Hz.
                                              The real value may vary depending on the variations
                                              in voltage and temperature. */
#endif /* HSI14_VALUE */

#if !defined (HSI48_VALUE)
#define HSI48_VALUE ((uint32_t)48000000) /*!< Value of the Internal High Speed oscillator for USB in Hz.
                                              The real value may vary depending on the variations
                                              in voltage and temperature. */
#endif /* HSI48_VALUE */

#if !defined (LSI_VALUE)
#define LSI_VALUE ((uint32_t)40000) /*!< Value of the Internal Low Speed oscillator in Hz.
                                         The real value may vary depending on the variations
                                         in voltage and temperature. */
#endif /* LSI_VALUE */

#if !defined (LSE_VALUE)
#define LSE_VALUE ((uint32_t)32768) /*!< Value of the External Low Speed oscillator in Hz */
#endif /* LSE_VALUE */

/* NOTE(review): an identical, unreachable second LSE_STARTUP_TIMEOUT guard
   (its #endif mislabelled "HSE_STARTUP_TIMEOUT") was removed here. */
#if !defined (LSE_STARTUP_TIMEOUT)
#define LSE_STARTUP_TIMEOUT ((uint32_t)5000) /*!< Time out for LSE start up, in ms */
#endif /* LSE_STARTUP_TIMEOUT */

#define VDD_VALUE ((uint32_t)3300) /*!< Value of VDD in mv */
#define TICK_INT_PRIORITY 0 /*!< tick interrupt priority (lowest by default) */
/* Warning: Must be set to higher priority for HAL_Delay() */
/* and HAL_GetTick() usage under interrupt context */
#define USE_RTOS 0
#define PREFETCH_ENABLE 1
#define INSTRUCTION_CACHE_ENABLE 0
#define DATA_CACHE_ENABLE 0

/* Headers for the modules enabled above. */
#include "stm32f0xx_hal_rcc.h"
#include "stm32f0xx_hal_gpio.h"
#include "stm32f0xx_hal_cortex.h"
#include "stm32f0xx_hal_flash.h"
#include "stm32f0xx_hal_pcd.h"
#include "stm32f0xx_hal_pwr.h"

/* Parameter asserts are disabled (expand to nothing). */
#define assert_param(expr) ((void)0U)

#ifdef __cplusplus
}
#endif

#endif /* __STM32F0xx_HAL_CONF_H */
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
#!/usr/bin/env zsh
# -------------------------------------------------------------------------------------------------
# Copyright (c) 2015 zsh-syntax-highlighting contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted
# provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this list of conditions
# and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of
# conditions and the following disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the zsh-syntax-highlighting contributors nor the names of its contributors
# may be used to endorse or promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -------------------------------------------------------------------------------------------------
# -*- mode: zsh; sh-indentation: 2; indent-tabs-mode: nil; sh-basic-offset: 2; -*-
# vim: ft=zsh sw=2 ts=2 et
# -------------------------------------------------------------------------------------------------
# An unterminated double-quoted string: the buffer is `echo "foo1` plus a newline.
BUFFER=$'echo "foo1\n'
expected_region_highlight=(
"6 10 $ZSH_HIGHLIGHT_STYLES[double-quoted-argument]" # '"foo1' — the open double-quoted argument (columns 6-10)
)
|
def find_sum(numbers, target):
    """Return the first pair ``[a, b]`` from ``numbers`` with ``a + b == target``.

    Pairs are scanned in order of the first element's index, then the
    second's, so ``a`` always appears before ``b`` in ``numbers``.
    Returns ``None`` when no such pair exists.
    """
    for i in range(len(numbers)):
        first = numbers[i]
        for j in range(i + 1, len(numbers)):
            if first + numbers[j] == target:
                return [first, numbers[j]]
    return None
#!/bin/bash
# Root check
# Re-executes this script under sudo when not running as root (Docker needs it).
RootCheck () {
  if [ $(id -u) != "0" ]; then
    echo -e "Docker needs root-privileges"
    echo -e "https://unix.stackexchange.com/questions/156938/why-does-docker-need-root-privileges"
    # NOTE(review): inside a function, "$@" expands to the FUNCTION's arguments,
    # not the script's — callers must invoke `RootCheck "$@"` for the re-exec to
    # keep the original CLI arguments. Confirm against the call site.
    sudo "$0" "$@"
    # Propagate the re-executed script's exit status and stop this copy.
    exit $?
  fi
}
# GPU Check
# Succeeds (status 0) when an NVIDIA GPU is listed by lspci, fails (1) otherwise.
# Prints the matching controller line when a GPU is found.
CheckNvidiaGpu () {
  # Nvidia check: look for NVIDIA among the VGA/3D/2D display controllers.
  GPU=$(lspci | grep -i --color 'vga\|3d\|2d')
  if [[ $GPU == *"NVIDIA"* ]]; then
    echo -e "CheckNvidiaGpu: $GPU"
    # 'return' only accepts a numeric exit status; 'return true' is an error.
    return 0
  else
    return 1
  fi
}
# Get Type docker to build
# Sets TYPE to "gpu" when an NVIDIA GPU is detected, "cpu" otherwise.
GetDockerType () {
  # Use the function's exit status directly: the original
  # `["$(CheckNvidiaGpu)" = true]` was broken ('[' needs surrounding spaces,
  # and the function reports via status, not by printing "true").
  # stdout is discarded so this stays as silent as the old command substitution.
  if CheckNvidiaGpu > /dev/null; then
    TYPE=gpu
  else
    TYPE=cpu
  fi
}
# Get Docker Image
# Prints the Dockerfile path for a given type ($1: cpu|gpu) and model ($2).
# Usage: path=$(GetPathDockerImage "$TYPE" "$MODEL")
GetPathDockerImage() {
  # 'return' only accepts a numeric exit status in shell; a string result
  # must be written to stdout for callers to capture with $(...).
  echo "../images/${1}/${2}/Dockerfile"
}
# Build Docker image
# Build Docker image
# $1: path to the Dockerfile, $2: image tag.
DockerBuild () {
  # Quote both arguments so paths/tags containing spaces survive word splitting.
  docker build -f "${1}" . --tag "${2}"
}
|
<filename>core/src/main/scala/spark/scheduler/cluster/ClusterTaskSetManager.scala
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package spark.scheduler.cluster
import java.nio.ByteBuffer
import java.util.{Arrays, NoSuchElementException}
import scala.collection.mutable.ArrayBuffer
import scala.collection.mutable.HashMap
import scala.collection.mutable.HashSet
import scala.math.max
import scala.math.min
import spark.{FetchFailed, Logging, Resubmitted, SparkEnv, Success, TaskEndReason, TaskState, Utils}
import spark.{ExceptionFailure, SparkException, TaskResultTooBigFailure}
import spark.TaskState.TaskState
import spark.scheduler.{ShuffleMapTask, Task, TaskResult, TaskSet}
private[spark] object TaskLocality
  extends Enumeration("PROCESS_LOCAL", "NODE_LOCAL", "RACK_LOCAL", "ANY") with Logging {
  // PROCESS_LOCAL is reserved for internal use by the task set manager.
  val PROCESS_LOCAL, NODE_LOCAL, RACK_LOCAL, ANY = Value
  type TaskLocality = Value
  /** Is a task whose best locality is `condition` schedulable under `constraint`? */
  def isAllowed(constraint: TaskLocality, condition: TaskLocality): Boolean = {
    // PROCESS_LOCAL must never be passed as a constraint.
    assert (constraint != TaskLocality.PROCESS_LOCAL)
    if (constraint == TaskLocality.NODE_LOCAL) {
      condition == TaskLocality.NODE_LOCAL
    } else if (constraint == TaskLocality.RACK_LOCAL) {
      condition == TaskLocality.NODE_LOCAL || condition == TaskLocality.RACK_LOCAL
    } else {
      // Any other constraint admits every locality level.
      true
    }
  }
  /** Parse a locality name; unknown names fall back to NODE_LOCAL with a warning. */
  def parse(str: String): TaskLocality = {
    try {
      val parsed = TaskLocality.withName(str)
      // PROCESS_LOCAL may not be specified externally.
      assert (parsed != TaskLocality.PROCESS_LOCAL)
      parsed
    } catch {
      case nEx: NoSuchElementException => {
        logWarning("Invalid task locality specified '" + str + "', defaulting to NODE_LOCAL")
        NODE_LOCAL
      }
    }
  }
}
/**
* Schedules the tasks within a single TaskSet in the ClusterScheduler.
*/
private[spark] class ClusterTaskSetManager(sched: ClusterScheduler, val taskSet: TaskSet)
extends TaskSetManager with Logging {
  // Maximum time to wait to run a task in a preferred location (in ms)
  val LOCALITY_WAIT = System.getProperty("spark.locality.wait", "3000").toLong
  // CPUs to request per task
  val CPUS_PER_TASK = System.getProperty("spark.task.cpus", "1").toDouble
  // Maximum times a task is allowed to fail before failing the job
  val MAX_TASK_FAILURES = System.getProperty("spark.task.maxFailures", "4").toInt
  // Quantile of tasks at which to start speculation
  val SPECULATION_QUANTILE = System.getProperty("spark.speculation.quantile", "0.75").toDouble
  // Runtime multiplier (vs. other tasks) above which a task may be speculated
  val SPECULATION_MULTIPLIER = System.getProperty("spark.speculation.multiplier", "1.5").toDouble
  // Serializer for closures and tasks.
  val env = SparkEnv.get
  val ser = env.closureSerializer.newInstance()
  val tasks = taskSet.tasks
  val numTasks = tasks.length
  // Per-task state, all indexed by the task's index within this TaskSet:
  // number of running copies, completion flag, failure count, attempt history.
  val copiesRunning = new Array[Int](numTasks)
  val finished = new Array[Boolean](numTasks)
  val numFailures = new Array[Int](numTasks)
  val taskAttempts = Array.fill[List[TaskInfo]](numTasks)(Nil)
  var tasksFinished = 0
  // Scheduling-pool bookkeeping (weight/minShare are fair-scheduler inputs).
  var weight = 1
  var minShare = 0
  var runningTasks = 0
  var priority = taskSet.priority
  var stageId = taskSet.stageId
  var name = "TaskSet_"+taskSet.stageId.toString
  var parent: Schedulable = null
  // Last time when we launched a preferred task (for delay scheduling)
  var lastPreferredLaunchTime = System.currentTimeMillis
  // List of pending tasks for each node (process local to container).
  // These collections are actually
  // treated as stacks, in which new tasks are added to the end of the
  // ArrayBuffer and removed from the end. This makes it faster to detect
  // tasks that repeatedly fail because whenever a task failed, it is put
  // back at the head of the stack. They are also only cleaned up lazily;
  // when a task is launched, it remains in all the pending lists except
  // the one that it was launched from, but gets removed from them later.
  private val pendingTasksForHostPort = new HashMap[String, ArrayBuffer[Int]]
  // List of pending tasks for each node.
  // Essentially, similar to pendingTasksForHostPort, except at host level
  private val pendingTasksForHost = new HashMap[String, ArrayBuffer[Int]]
  // List of pending tasks for each node based on rack locality.
  // Essentially, similar to pendingTasksForHost, except at rack level
  private val pendingRackLocalTasksForHost = new HashMap[String, ArrayBuffer[Int]]
  // List containing pending tasks with no locality preferences
  val pendingTasksWithNoPrefs = new ArrayBuffer[Int]
  // List containing all pending tasks (also used as a stack, as above)
  val allPendingTasks = new ArrayBuffer[Int]
  // Tasks that can be speculated. Since these will be a small fraction of total
  // tasks, we'll just hold them in a HashSet.
  val speculatableTasks = new HashSet[Int]
  // Task index, start and finish time for each task attempt (indexed by task ID)
  val taskInfos = new HashMap[Long, TaskInfo]
  // Did the job fail?
  var failed = false
  var causeOfFailure = ""
  // How frequently to reprint duplicate exceptions in full, in milliseconds
  val EXCEPTION_PRINT_INTERVAL =
    System.getProperty("spark.logging.exceptionPrintInterval", "10000").toLong
  // Map of recent exceptions (identified by string representation and
  // top stack frame) to duplicate count (how many times the same
  // exception has appeared) and time the full exception was
  // printed. This should ideally be an LRU map that can drop old
  // exceptions automatically.
  val recentExceptions = HashMap[String, (Int, Long)]()
  // Figure out the current map output tracker generation and set it on all tasks
  val generation = sched.mapOutputTracker.getGeneration
  logDebug("Generation for " + taskSet.id + ": " + generation)
  for (t <- tasks) {
    t.generation = generation
  }
  // Add all our tasks to the pending lists. We do this in reverse order
  // of task index so that tasks with low indices get launched first.
  for (i <- (0 until numTasks).reverse) {
    addPendingTask(i)
  }
  // Returns the subset of _taskPreferredLocations that currently have a live
  // executor, expanded according to the requested locality level.
  // Note that it follows the hierarchy:
  // if we search for NODE_LOCAL, the output will include PROCESS_LOCAL and
  // if we search for RACK_LOCAL, it will include PROCESS_LOCAL & NODE_LOCAL.
  private def findPreferredLocations(
      _taskPreferredLocations: Seq[String],
      scheduler: ClusterScheduler,
      taskLocality: TaskLocality.TaskLocality): HashSet[String] =
  {
    if (TaskLocality.PROCESS_LOCAL == taskLocality) {
      // Process-local: keep only exact host:port entries with a live executor.
      val retval = new HashSet[String]()
      scheduler.synchronized {
        for (location <- _taskPreferredLocations) {
          if (scheduler.isExecutorAliveOnHostPort(location)) {
            retval += location
          }
        }
      }
      return retval
    }
    // For NODE_LOCAL use the preferred locations as-is; for RACK_LOCAL expand
    // each preferred host to every cached host known to share its rack.
    val taskPreferredLocations = {
      if (TaskLocality.NODE_LOCAL == taskLocality) {
        _taskPreferredLocations
      } else {
        assert (TaskLocality.RACK_LOCAL == taskLocality)
        // Expand set to include all 'seen' rack local hosts.
        // This works since container allocation/management happens within the
        // master, so any rack locality information is updated in the master.
        // Best-effort, and maybe sort of a kludge for now ... rework it later?
        val hosts = new HashSet[String]
        _taskPreferredLocations.foreach(h => {
          val rackOpt = scheduler.getRackForHost(h)
          if (rackOpt.isDefined) {
            val hostsOpt = scheduler.getCachedHostsForRack(rackOpt.get)
            if (hostsOpt.isDefined) {
              hosts ++= hostsOpt.get
            }
          }
          // Ensure that irrespective of what scheduler says, the host itself
          // is always included.
          hosts += h
        })
        hosts
      }
    }
    // Map each candidate location to the live executors on its host.
    val retval = new HashSet[String]
    scheduler.synchronized {
      for (prefLocation <- taskPreferredLocations) {
        val aliveLocationsOpt = scheduler.getExecutorsAliveOnHost(Utils.parseHostPort(prefLocation)._1)
        if (aliveLocationsOpt.isDefined) {
          retval ++= aliveLocationsOpt.get
        }
      }
    }
    retval
  }
  // Add a task to all the pending-task lists that it should be on: the
  // process-local, host-local and rack-local queues for each live preferred
  // location, or the no-preference queue when no rack-local location is alive.
  private def addPendingTask(index: Int) {
    // We can infer hostLocalLocations from rackLocalLocations by joining it against
    // tasks(index).preferredLocations (with appropriate hostPort <-> host conversion).
    // But not doing it for simplicity sake. If this becomes a performance issue, modify it.
    val locs = tasks(index).preferredLocations
    val processLocalLocations = findPreferredLocations(locs, sched, TaskLocality.PROCESS_LOCAL)
    val hostLocalLocations = findPreferredLocations(locs, sched, TaskLocality.NODE_LOCAL)
    val rackLocalLocations = findPreferredLocations(locs, sched, TaskLocality.RACK_LOCAL)
    if (rackLocalLocations.size == 0) {
      // Rack-local is the widest level, so if it is empty the narrower
      // levels must be empty too. Current impl ensures this.
      assert (processLocalLocations.size == 0)
      assert (hostLocalLocations.size == 0)
      pendingTasksWithNoPrefs += index
    } else {
      // process local locality
      for (hostPort <- processLocalLocations) {
        // DEBUG Code
        Utils.checkHostPort(hostPort)
        val hostPortList = pendingTasksForHostPort.getOrElseUpdate(hostPort, ArrayBuffer())
        hostPortList += index
      }
      // host locality (includes process local)
      for (hostPort <- hostLocalLocations) {
        // DEBUG Code
        Utils.checkHostPort(hostPort)
        val host = Utils.parseHostPort(hostPort)._1
        val hostList = pendingTasksForHost.getOrElseUpdate(host, ArrayBuffer())
        hostList += index
      }
      // rack locality (includes process local and host local)
      for (rackLocalHostPort <- rackLocalLocations) {
        // DEBUG Code
        Utils.checkHostPort(rackLocalHostPort)
        val rackLocalHost = Utils.parseHostPort(rackLocalHostPort)._1
        val list = pendingRackLocalTasksForHost.getOrElseUpdate(rackLocalHost, ArrayBuffer())
        list += index
      }
    }
    // Every task also goes on the global pending list.
    allPendingTasks += index
  }
// Return the pending tasks list for a given host port (process local), or an empty list if
// there is no map entry for that host
private def getPendingTasksForHostPort(hostPort: String): ArrayBuffer[Int] = {
// DEBUG Code
Utils.checkHostPort(hostPort)
pendingTasksForHostPort.getOrElse(hostPort, ArrayBuffer())
}
// Return the pending tasks list for a given host, or an empty list if
// there is no map entry for that host
private def getPendingTasksForHost(hostPort: String): ArrayBuffer[Int] = {
val host = Utils.parseHostPort(hostPort)._1
pendingTasksForHost.getOrElse(host, ArrayBuffer())
}
// Return the pending tasks (rack level) list for a given host, or an empty list if
// there is no map entry for that host
private def getRackLocalPendingTasksForHost(hostPort: String): ArrayBuffer[Int] = {
val host = Utils.parseHostPort(hostPort)._1
pendingRackLocalTasksForHost.getOrElse(host, ArrayBuffer())
}
// Number of pending tasks for a given host Port (which would be process local)
override def numPendingTasksForHostPort(hostPort: String): Int = {
getPendingTasksForHostPort(hostPort).count { index =>
copiesRunning(index) == 0 && !finished(index)
}
}
// Number of pending tasks for a given host (which would be data local)
override def numPendingTasksForHost(hostPort: String): Int = {
getPendingTasksForHost(hostPort).count { index =>
copiesRunning(index) == 0 && !finished(index)
}
}
// Number of pending rack local tasks for a given host
override def numRackLocalPendingTasksForHost(hostPort: String): Int = {
getRackLocalPendingTasksForHost(hostPort).count { index =>
copiesRunning(index) == 0 && !finished(index)
}
}
// Dequeue a pending task from the given list and return its index.
// Return None if the list is empty.
// This method also cleans up any tasks in the list that have already
// been launched, since we want that to happen lazily.
private def findTaskFromList(list: ArrayBuffer[Int]): Option[Int] = {
while (!list.isEmpty) {
val index = list.last
list.trimEnd(1)
if (copiesRunning(index) == 0 && !finished(index)) {
return Some(index)
}
}
return None
}
  // Return a speculative task for a given host if any are available. The task should not have an
  // attempt running on this host, in case the host is slow. In addition, if locality is set, the
  // task must have a preference for this host/rack/no preferred locations at all.
  // Checks node-local candidates first, then rack-local, then any, honoring
  // the locality constraint at each step.
  private def findSpeculativeTask(hostPort: String, locality: TaskLocality.TaskLocality): Option[Int] = {
    assert (TaskLocality.isAllowed(locality, TaskLocality.NODE_LOCAL))
    speculatableTasks.retain(index => !finished(index)) // Remove finished tasks from set
    if (speculatableTasks.size > 0) {
      // Node-local: task either prefers this host:port or has no live preference.
      val localTask = speculatableTasks.find { index =>
        val locations = findPreferredLocations(tasks(index).preferredLocations, sched,
          TaskLocality.NODE_LOCAL)
        val attemptLocs = taskAttempts(index).map(_.hostPort)
        (locations.size == 0 || locations.contains(hostPort)) && !attemptLocs.contains(hostPort)
      }
      if (localTask != None) {
        speculatableTasks -= localTask.get
        return localTask
      }
      // check for rack locality
      if (TaskLocality.isAllowed(locality, TaskLocality.RACK_LOCAL)) {
        val rackTask = speculatableTasks.find { index =>
          val locations = findPreferredLocations(tasks(index).preferredLocations, sched,
            TaskLocality.RACK_LOCAL)
          val attemptLocs = taskAttempts(index).map(_.hostPort)
          locations.contains(hostPort) && !attemptLocs.contains(hostPort)
        }
        if (rackTask != None) {
          speculatableTasks -= rackTask.get
          return rackTask
        }
      }
      // Any task that has not already attempted on this host:port.
      if (TaskLocality.isAllowed(locality, TaskLocality.ANY)) {
        // Check for attemptLocs also ?
        val nonLocalTask = speculatableTasks.find { i =>
          !taskAttempts(i).map(_.hostPort).contains(hostPort)
        }
        if (nonLocalTask != None) {
          speculatableTasks -= nonLocalTask.get
          return nonLocalTask
        }
      }
    }
    return None
  }
// Dequeue a pending task for a given node and return its index, trying the most
// local category first and widening as far as the given locality level allows.
private def findTask(hostPort: String, locality: TaskLocality.TaskLocality): Option[Int] = {
  // Option.orElse takes its argument by name, so each pending list is only
  // consulted when every more-local category came up empty — evaluation order
  // (and the dequeue side effects of findTaskFromList) is unchanged.
  findTaskFromList(getPendingTasksForHostPort(hostPort))
    .orElse(findTaskFromList(getPendingTasksForHost(hostPort)))
    .orElse {
      if (TaskLocality.isAllowed(locality, TaskLocality.RACK_LOCAL)) {
        findTaskFromList(getRackLocalPendingTasksForHost(hostPort))
      } else {
        None
      }
    }
    // Look for no-pref tasks AFTER rack local tasks - this has the side effect that
    // we get to failed tasks later rather than sooner.
    // TODO: That code path needs to be revisited (adding to no prefs list when host:port goes down).
    .orElse(findTaskFromList(pendingTasksWithNoPrefs))
    .orElse {
      if (TaskLocality.isAllowed(locality, TaskLocality.ANY)) {
        findTaskFromList(allPendingTasks)
      } else {
        None
      }
    }
    // Finally, if all else has failed, fall back to a speculative task.
    .orElse(findSpeculativeTask(hostPort, locality))
}
// A launch is process-local when the exact host:port appears among the task's
// preferred locations.
private def isProcessLocalLocation(task: Task[_], hostPort: String): Boolean = {
  // Process locality only makes sense for a full host:port pair.
  Utils.checkHostPort(hostPort)
  task.preferredLocations.contains(hostPort)
}
// A launch counts as host-local when the task has no preferences at all, or when
// any preferred location resides on the same host (ignoring the port).
private def isHostLocalLocation(task: Task[_], hostPort: String): Boolean = {
  val preferred = task.preferredLocations
  preferred.isEmpty || {
    val host = Utils.parseHostPort(hostPort)._1
    preferred.exists(loc => Utils.parseHostPort(loc)._1 == host)
  }
}
// Does a host count as a rack local preferred location for a task?
// (assumes host is NOT already a preferred location of the task)
private def isRackLocalLocation(task: Task[_], hostPort: String): Boolean = {
  // Racks of all preferred hosts; hosts with an unknown rack are skipped.
  val preferredRacks = task.preferredLocations.flatMap(sched.getRackForHost).toSet
  // Rack local iff this host's rack is known and is one of the preferred racks.
  // (An empty preferredRacks set makes exists(...) false, matching the original
  // early return.)
  sched.getRackForHost(hostPort).exists(preferredRacks.contains)
}
// Respond to an offer of a single slave from the scheduler by finding a task.
// Returns a serialized TaskDescription to launch on the offered executor, or None
// if no suitable task exists or the offer has too few CPUs. Side effects: updates
// copiesRunning, taskInfos, taskAttempts, lastPreferredLaunchTime and the running
// task count for the chosen task.
override def slaveOffer(
  execId: String,
  hostPort: String,
  availableCpus: Double,
  overrideLocality: TaskLocality.TaskLocality = null): Option[TaskDescription] =
{
  if (tasksFinished < numTasks && availableCpus >= CPUS_PER_TASK) {
    // If explicitly specified, use that
    val locality = if (overrideLocality != null) overrideLocality else {
      // expand only if we have waited for more than LOCALITY_WAIT for a host local task ...
      val time = System.currentTimeMillis
      if (time - lastPreferredLaunchTime < LOCALITY_WAIT) {
        TaskLocality.NODE_LOCAL
      } else {
        TaskLocality.ANY
      }
    }
    findTask(hostPort, locality) match {
      case Some(index) => {
        // Found a task; do some bookkeeping and return a Mesos task for it
        val task = tasks(index)
        val taskId = sched.newTaskId()
        // Figure out whether this should count as a preferred launch
        val taskLocality =
          if (isProcessLocalLocation(task, hostPort)) TaskLocality.PROCESS_LOCAL
          else if (isHostLocalLocation(task, hostPort)) TaskLocality.NODE_LOCAL
          else if (isRackLocalLocation(task, hostPort)) TaskLocality.RACK_LOCAL
          else TaskLocality.ANY
        val prefStr = taskLocality.toString
        logInfo("Starting task %s:%d as TID %s on slave %s: %s (%s)".format(
          taskSet.id, index, taskId, execId, hostPort, prefStr))
        // Do various bookkeeping
        copiesRunning(index) += 1
        val time = System.currentTimeMillis
        val info = new TaskInfo(taskId, index, time, execId, hostPort, taskLocality)
        taskInfos(taskId) = info
        // Prepend this attempt so taskAttempts(index).head is always the latest.
        taskAttempts(index) = info :: taskAttempts(index)
        // Only process/node-local launches reset the delay-scheduling clock.
        if (taskLocality == TaskLocality.PROCESS_LOCAL || taskLocality == TaskLocality.NODE_LOCAL) {
          lastPreferredLaunchTime = time
        }
        // Serialize and return the task
        val startTime = System.currentTimeMillis
        // We rely on the DAGScheduler to catch non-serializable closures and RDDs, so in here
        // we assume the task can be serialized without exceptions.
        val serializedTask = Task.serializeWithDependencies(
          task, sched.sc.addedFiles, sched.sc.addedJars, ser)
        val timeTaken = System.currentTimeMillis - startTime
        increaseRunningTasks(1)
        logInfo("Serialized task %s:%d as %d bytes in %d ms".format(
          taskSet.id, index, serializedTask.limit, timeTaken))
        val taskName = "task %s:%d".format(taskSet.id, index)
        // Only notify listeners on the first attempt of this task index.
        if (taskAttempts(index).size == 1)
          taskStarted(task,info)
        return Some(new TaskDescription(taskId, execId, taskName, serializedTask))
      }
      case _ =>
    }
  }
  return None
}
// Route a task status update from the scheduler backend to the matching handler.
override def statusUpdate(tid: Long, state: TaskState, serializedData: ByteBuffer) {
  SparkEnv.set(env)
  state match {
    case TaskState.FINISHED =>
      taskFinished(tid, state, serializedData)
    // LOST, FAILED and KILLED all take the task-lost path.
    case TaskState.LOST | TaskState.FAILED | TaskState.KILLED =>
      taskLost(tid, state, serializedData)
    case _ =>
  }
}
// Notify the DAG scheduler's listener that the given task attempt has started.
def taskStarted(task: Task[_], info: TaskInfo) {
  sched.listener.taskStarted(task, info)
}
// Handle a successful completion of the task with the given TID: mark it done,
// deserialize its result and forward it to the listener, and finish the task set
// once every task has completed. Duplicate/late messages are ignored.
def taskFinished(tid: Long, state: TaskState, serializedData: ByteBuffer) {
  val info = taskInfos(tid)
  if (info.failed) {
    // We might get two task-lost messages for the same task in coarse-grained Mesos mode,
    // or even from Mesos itself when acks get delayed.
    return
  }
  val index = info.index
  info.markSuccessful()
  decreaseRunningTasks(1)
  if (!finished(index)) {
    tasksFinished += 1
    logInfo("Finished TID %s in %d ms on %s (progress: %d/%d)".format(
      tid, info.duration, info.hostPort, tasksFinished, numTasks))
    // Deserialize task result and pass it to the scheduler
    try {
      val result = ser.deserialize[TaskResult[_]](serializedData)
      result.metrics.resultSize = serializedData.limit()
      sched.listener.taskEnded(
        tasks(index), Success, result.value, result.accumUpdates, info, result.metrics)
    } catch {
      // Wrap ClassNotFound with classloader context for easier debugging; rethrow
      // anything else unchanged.
      case cnf: ClassNotFoundException =>
        val loader = Thread.currentThread().getContextClassLoader
        throw new SparkException("ClassNotFound with classloader: " + loader, cnf)
      case ex => throw ex
    }
    // Mark finished and stop if we've finished all the tasks
    finished(index) = true
    if (tasksFinished == numTasks) {
      sched.taskSetFinished(this)
    }
  } else {
    // A speculative copy already finished this index; nothing further to do.
    logInfo("Ignoring task-finished event for TID " + tid +
      " because task " + index + " is already finished")
  }
}
// Handle a lost/failed/killed task attempt: classify the failure reason, notify
// the listener, re-enqueue the task for retry, and abort the whole task set when
// the failure is fatal (fetch failure, oversized result, or too many retries).
def taskLost(tid: Long, state: TaskState, serializedData: ByteBuffer) {
  val info = taskInfos(tid)
  if (info.failed) {
    // We might get two task-lost messages for the same task in coarse-grained Mesos mode,
    // or even from Mesos itself when acks get delayed.
    return
  }
  val index = info.index
  info.markFailed()
  decreaseRunningTasks(1)
  if (!finished(index)) {
    logInfo("Lost TID %s (task %s:%d)".format(tid, taskSet.id, index))
    copiesRunning(index) -= 1
    // Check if the problem is a map output fetch failure. In that case, this
    // task will never succeed on any node, so tell the scheduler about it.
    if (serializedData != null && serializedData.limit() > 0) {
      val reason = ser.deserialize[TaskEndReason](serializedData, getClass.getClassLoader)
      reason match {
        case fetchFailed: FetchFailed =>
          // A fetch failure ends the whole task set immediately.
          logInfo("Loss was due to fetch failure from " + fetchFailed.bmAddress)
          sched.listener.taskEnded(tasks(index), fetchFailed, null, null, info, null)
          finished(index) = true
          tasksFinished += 1
          sched.taskSetFinished(this)
          decreaseRunningTasks(runningTasks)
          return
        case taskResultTooBig: TaskResultTooBigFailure =>
          // The result can never be transferred, so retrying is pointless.
          logInfo("Loss was due to task %s result exceeding Akka frame size; aborting job".format(
            tid))
          abort("Task %s result exceeded Akka frame size".format(tid))
          return
        case ef: ExceptionFailure =>
          sched.listener.taskEnded(tasks(index), ef, null, null, info, ef.metrics.getOrElse(null))
          // Deduplicate repeated exception logging: print the full stack trace at
          // most once per EXCEPTION_PRINT_INTERVAL per distinct description.
          val key = ef.description
          val now = System.currentTimeMillis
          val (printFull, dupCount) = {
            if (recentExceptions.contains(key)) {
              val (dupCount, printTime) = recentExceptions(key)
              if (now - printTime > EXCEPTION_PRINT_INTERVAL) {
                recentExceptions(key) = (0, now)
                (true, 0)
              } else {
                recentExceptions(key) = (dupCount + 1, printTime)
                (false, dupCount + 1)
              }
            } else {
              recentExceptions(key) = (0, now)
              (true, 0)
            }
          }
          if (printFull) {
            val locs = ef.stackTrace.map(loc => "\tat %s".format(loc.toString))
            logInfo("Loss was due to %s\n%s\n%s".format(
              ef.className, ef.description, locs.mkString("\n")))
          } else {
            logInfo("Loss was due to %s [duplicate %d]".format(ef.description, dupCount))
          }
        case _ => {}
      }
    }
    // On non-fetch failures, re-enqueue the task as pending for a max number of retries
    addPendingTask(index)
    // Count failed attempts only on FAILED and LOST state (not on KILLED)
    if (state == TaskState.FAILED || state == TaskState.LOST) {
      numFailures(index) += 1
      if (numFailures(index) > MAX_TASK_FAILURES) {
        logError("Task %s:%d failed more than %d times; aborting job".format(
          taskSet.id, index, MAX_TASK_FAILURES))
        abort("Task %s:%d failed more than %d times".format(taskSet.id, index, MAX_TASK_FAILURES))
      }
    }
  } else {
    // Another copy already finished this index; the loss is irrelevant.
    logInfo("Ignoring task-lost event for TID " + tid +
      " because task " + index + " is already finished")
  }
}
// Called by the scheduler on a fatal scheduler-level error; aborts this task set.
override def error(message: String) {
  // Save the error message
  abort("Error: " + message)
}
// Mark the whole task set as failed, notify the listener, zero out the running
// task count, and deregister from the scheduler.
def abort(message: String) {
  failed = true
  causeOfFailure = message
  // TODO: Kill running tasks if we were not terminated due to a Mesos error
  sched.listener.taskSetFailed(taskSet, message)
  decreaseRunningTasks(runningTasks)
  sched.taskSetFinished(this)
}
// Bump the running-task counter and propagate the change up the pool hierarchy.
override def increaseRunningTasks(taskNum: Int) {
  runningTasks += taskNum
  // parent may be null for the root of the hierarchy.
  Option(parent).foreach(_.increaseRunningTasks(taskNum))
}
// Decrement the running-task counter and propagate the change up the pool hierarchy.
override def decreaseRunningTasks(taskNum: Int) {
  runningTasks -= taskNum
  // parent may be null for the root of the hierarchy.
  Option(parent).foreach(_.decreaseRunningTasks(taskNum))
}
// TODO(xiajunluan): for now we just find Pool not TaskSetManager
// we can extend this function in future if needed
// Leaf managers have no named children, so lookup always yields null.
override def getSchedulableByName(name: String): Schedulable = {
  return null
}
// Leaf of the scheduling tree: children cannot be added.
override def addSchedulable(schedulable:Schedulable) {
  // nothing
}
// Leaf of the scheduling tree: there are no children to remove.
override def removeSchedulable(schedulable:Schedulable) {
  // nothing
}
// A leaf TaskSetManager is its own single-element scheduling queue.
override def getSortedTaskSetQueue(): ArrayBuffer[TaskSetManager] = {
  ArrayBuffer[TaskSetManager](this)
}
// React to the loss of an executor: demote tasks whose only node-local preference
// was on the lost host, re-enqueue finished shuffle-map tasks whose output lived
// on that executor, and treat all of its still-running tasks as killed.
override def executorLost(execId: String, hostPort: String) {
  logInfo("Re-queueing tasks for " + execId + " from TaskSet " + taskSet.id)
  // If some task has preferred locations only on hostname, and there are no more executors there,
  // put it in the no-prefs list to avoid the wait from delay scheduling
  // host local tasks - should we push this to rack local or no pref list ? For now, preserving
  // behavior and moving to no prefs list. Note, this was done due to impliations related to
  // 'waiting' for data local tasks, etc.
  // Note: NOT checking process local list - since host local list is super set of that. We need
  // to ad to no prefs only if there is no host local node for the task (not if there is no
  // process local node for the task)
  for (index <- getPendingTasksForHost(Utils.parseHostPort(hostPort)._1)) {
    val newLocs = findPreferredLocations(
      tasks(index).preferredLocations, sched, TaskLocality.NODE_LOCAL)
    if (newLocs.isEmpty) {
      pendingTasksWithNoPrefs += index
    }
  }
  // Re-enqueue any tasks that ran on the failed executor if this is a shuffle map stage
  // (their map output was stored on that executor and is now gone).
  if (tasks(0).isInstanceOf[ShuffleMapTask]) {
    for ((tid, info) <- taskInfos if info.executorId == execId) {
      val index = taskInfos(tid).index
      if (finished(index)) {
        finished(index) = false
        copiesRunning(index) -= 1
        tasksFinished -= 1
        addPendingTask(index)
        // Tell the DAGScheduler that this task was resubmitted so that it doesn't think our
        // stage finishes when a total of tasks.size tasks finish.
        sched.listener.taskEnded(tasks(index), Resubmitted, null, null, info, null)
      }
    }
  }
  // Also re-enqueue any tasks that were running on the node
  for ((tid, info) <- taskInfos if info.running && info.executorId == execId) {
    taskLost(tid, TaskState.KILLED, null)
  }
}
/**
 * Check for tasks to be speculated and return true if there are any. This is called periodically
 * by the ClusterScheduler.
 *
 * TODO: To make this scale to large jobs, we need to maintain a list of running tasks, so that
 * we don't scan the whole task set. It might also help to make this sorted by launch time.
 */
override def checkSpeculatableTasks(): Boolean = {
  // Can't speculate if we only have one task, or if all tasks have finished.
  if (numTasks == 1 || tasksFinished == numTasks) {
    return false
  }
  var foundTasks = false
  // Speculation kicks in only once SPECULATION_QUANTILE of the tasks have finished.
  val minFinishedForSpeculation = (SPECULATION_QUANTILE * numTasks).floor.toInt
  logDebug("Checking for speculative tasks: minFinished = " + minFinishedForSpeculation)
  if (tasksFinished >= minFinishedForSpeculation) {
    val time = System.currentTimeMillis()
    val durations = taskInfos.values.filter(_.successful).map(_.duration).toArray
    Arrays.sort(durations)
    // NOTE(review): the median index is based on numTasks rather than durations.size;
    // with few finished tasks this clamps to the longest finished duration — confirm
    // this is the intended behavior.
    val medianDuration = durations(min((0.5 * numTasks).round.toInt, durations.size - 1))
    val threshold = max(SPECULATION_MULTIPLIER * medianDuration, 100)
    // TODO: Threshold should also look at standard deviation of task durations and have a lower
    // bound based on that.
    logDebug("Task length threshold for speculation: " + threshold)
    // Mark as speculatable every unfinished, single-copy task that has been running
    // longer than the threshold and is not already marked.
    for ((tid, info) <- taskInfos) {
      val index = info.index
      if (!finished(index) && copiesRunning(index) == 1 && info.timeRunning(time) > threshold &&
        !speculatableTasks.contains(index)) {
        logInfo(
          "Marking task %s:%d (on %s) as speculatable because it ran more than %.0f ms".format(
            taskSet.id, index, info.hostPort, threshold))
        speculatableTasks += index
        foundTasks = true
      }
    }
  }
  return foundTasks
}
// True while this manager still owns tasks that have not finished.
override def hasPendingTasks(): Boolean = {
  numTasks > 0 && tasksFinished < numTasks
}
}
|
<reponame>OSADP/C2C-RI<filename>C2CRIBuildDir/projects/C2C-RI/src/jameleon-test-suite-3_3-RC1-C2CRI/jameleon-core/tst/java/net/sf/jameleon/result/TestResultWithChildrenTest.java<gh_stars>0
/*
Jameleon - An automation testing tool..
Copyright (C) 2003-2007 <NAME> (<EMAIL>)
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
package net.sf.jameleon.result;
import junit.framework.Test;
import junit.framework.TestCase;
import junit.framework.TestSuite;
import net.sf.jameleon.bean.FunctionalPoint;
import net.sf.jameleon.util.XMLHelper;
/**
 * Unit tests for {@code TestResultWithChildren}: construction, parent/child
 * wiring, failure propagation up the result tree, and XML rendering.
 * <p>
 * Note that the mock result constructors register the new result with the
 * parent passed in, so the construction ORDER inside each test is significant.
 */
public class TestResultWithChildrenTest {
    private TestResultWithChildren trwc;
    // Shared across tests; see NOTE(review) in setUp().
    private static final FunctionalPoint tag = new FunctionalPoint();
    public static void main(String args[]) {
        junit.textui.TestRunner.run( suite() );
    }
    public static Test suite() {
        return new TestSuite( TestResultWithChildrenTest.class );
    }
    public TestResultWithChildrenTest( String name ) {
        super( name );
    }
    // NOTE(review): 'tag' is static, so every setUp() appends another
    // "some tag" name to the same shared instance — the tests appear tolerant
    // of this, but confirm it is intentional.
    public void setUp(){
        tag.addTagName("some tag");
        trwc = new MockTestResultWithChildren(tag);
    }
    public void testConstructor1(){
        assertNotNull("tag",trwc.getTag());
        assertEquals("tag", tag, trwc.getTag());
    }
    public void testConstructor2(){
        TestResultWithChildren tr = new MockTestResultWithChildren(tag);
        assertTrue(tr.tag == tag);
    }
    public void testConstructor3(){
        TestResultWithChildren tr = new MockTestResultWithChildren(tag, trwc);
        assertTrue(trwc == tr.parentResults);
        assertTrue(tr.tag == tag);
    }
    public void testIsParent(){
        assertTrue("Should be a parent", trwc.isParent());
    }
    // Only the deepest failed countable result should be reported, once, at
    // every level of the tree.
    public void testGetFailedCountableResults(){
        TestResultWithChildren res = new MockCountableResult(createTag("one"), trwc);
        TestResultWithChildren res2 = new MockCountableResult(createTag("two"), res);
        SessionResult res3 = new SessionResult(createTag("session"), res2);
        FunctionResult res4 = new FunctionResult(createTag("function"), res3);
        res4.setError(new RuntimeException());
        assertEquals(1, trwc.getFailedCountableResults().size());
        assertEquals(1, res.getFailedCountableResults().size());
        assertEquals(1, res2.getFailedCountableResults().size());
    }
    // Countable results are tallied transitively; non-countable nodes in the
    // chain (res5) do not add to the count but their countable children do.
    public void testGetCountableResults(){
        TestResultWithChildren res = new MockCountableResult(tag, trwc);
        assertEquals(1, res.getCountableResults().size());
        TestResultWithChildren res2 = new MockCountableResult(tag, res);
        TestResultWithChildren res3 = new MockCountableResult(tag, res2);
        TestResultWithChildren res4 = new MockCountableResult(tag, res3);
        assertEquals(4, trwc.getCountableResults().size());
        assertEquals(4, res.getCountableResults().size());
        MockTestResultWithChildren res5 = new MockTestResultWithChildren(tag, res4);
        assertEquals(4, res.getCountableResults().size());
        new MockCountableResult(tag, res5);
        assertEquals(5, res.getCountableResults().size());
        new CountableFunctionResult(tag, trwc);
        assertEquals(6, trwc.getCountableResults().size());
    }
    // Each level sees all descendants, so counts grow by one per ancestor hop.
    public void testGetAllChildrenResults(){
        TestResultWithChildren res = new MockTestResultWithChildren(tag, trwc);
        TestResultWithChildren res2 = new MockTestResultWithChildren(tag, res);
        TestResultWithChildren res3 = new MockTestResultWithChildren(tag, res2);
        TestResultWithChildren res4 = new MockTestResultWithChildren(tag, res3);
        assertEquals(0, res4.getAllChildrenResults().size());
        assertEquals(1, res3.getAllChildrenResults().size());
        assertEquals(2, res2.getAllChildrenResults().size());
        assertEquals(3, res.getAllChildrenResults().size());
        assertEquals(4, trwc.getAllChildrenResults().size());
        res4.addChildResult(new MockTestResultWithChildren(tag));
        assertEquals(5, trwc.getAllChildrenResults().size());
    }
    // Only failed LEAF results count; failed intermediate nodes (res, res2) do not.
    public void testGetAllFailedLeafChildrenResults(){
        TestResultWithChildren res = new MockTestResultWithChildren(tag, trwc);
        TestResultWithChildren res2 = new MockTestResultWithChildren(tag, res);
        FunctionResult res3 = new FunctionResult();
        FunctionResult res4 = new FunctionResult();
        res.setFailed();
        res2.setFailed();
        res4.setFailed();
        res2.addChildResult(res3);
        res2.addChildResult(res4);
        assertEquals(1, res2.getAllFailedLeafChildrenResults().size());
        assertEquals(1, res.getAllFailedLeafChildrenResults().size());
        assertEquals(1, trwc.getAllFailedLeafChildrenResults().size());
        assertSame(res4, res2.getAllFailedLeafChildrenResults().get(0));
        assertSame(res4, res.getAllFailedLeafChildrenResults().get(0));
        assertSame(res4, trwc.getAllFailedLeafChildrenResults().get(0));
        res3.setFailed();
        assertEquals(2, res2.getAllFailedLeafChildrenResults().size());
        assertEquals(2, res.getAllFailedLeafChildrenResults().size());
        assertEquals(2, trwc.getAllFailedLeafChildrenResults().size());
    }
    // A failed result added at one level must mark and be visible from every ancestor.
    public void testAddFailedResult(){
        TestResultWithChildren res = new MockTestResultWithChildren(tag, trwc);
        TestResultWithChildren res2 = new MockTestResultWithChildren(tag, res);
        TestResultWithChildren res3 = new MockTestResultWithChildren(tag);
        res2.addFailedResult(res3);
        assertTrue("should have failed", res2.failed());
        assertEquals("# of results", 1, res2.getFailedResults().size());
        assertTrue("should have failed", res.failed());
        assertEquals("# of results for parent", 1, res.getFailedResults().size());
        assertTrue("should have failed", trwc.failed());
        assertEquals("# of results for grandparent", 1, trwc.getFailedResults().size());
        assertEquals("failed result", res3, res2.getFailedResults().get(0));
        assertEquals("failed result for parent", res3, res.getFailedResults().get(0));
        assertEquals("failed result for parent", res3, trwc.getFailedResults().get(0));
    }
    public void testAddChildResult(){
        assertEquals("# of function results", 0, trwc.getChildrenResults().size());
        FunctionResult fr = new FunctionResult();
        trwc.addChildResult(fr);
        assertEquals("# of function results", 1, trwc.getChildrenResults().size());
        assertTrue("function result", fr == trwc.getChildrenResults().get(0));
    }
    // Children must appear in the XML in insertion order.
    public void testToXML(){
        FunctionalPoint tag2 = new FunctionalPoint();
        tag2.addTagName("another tag");
        TestResultWithChildren mtr = new MockTestResultWithChildren(tag2);
        TestResultWithChildren mtr2 = new MockTestResultWithChildren(tag);
        trwc.addChildResult(mtr);
        trwc.addChildResult(mtr2);
        String xml = trwc.toXML();
        XMLHelper xmlHelper = new XMLHelper(xml);
        assertEquals("another tag", xmlHelper.getValueFromXPath("/mock-tr/children-results/mock-tr[1]/functional-point-info/tag-name"));
        assertEquals("some tag", xmlHelper.getValueFromXPath("/mock-tr/children-results/mock-tr[2]/functional-point-info/tag-name"));
    }
    // Helper: builds a FunctionalPoint carrying a single tag name.
    protected FunctionalPoint createTag(String name){
        FunctionalPoint fp = new FunctionalPoint();
        fp.addTagName(name);
        return fp;
    }
}
<gh_stars>0
/*!
* REpresentacion del estado del problema del viajante de comercio
*/
package IA.probTSP;
import java.util.Random;
public class ProbTSPBoard {
/// String que describe el operador
public static String INTERCAMBIO = "Intercambio";
/// Nuemro de ciudades
private int ncities;
/// Orden entre las ciudades
private int [] path;
/// Distancias entre las ciudades
private int [][] dist;
/*!\brief Genera una instancia del problema del TSP
*
* Crea una nueva instancia del problema del viajante de comercion con nc ciudades
*
* @param [in] nc Numero de ciudades
*/
public ProbTSPBoard(int nc) {
Random myRandom=new Random();
int d;
path=new int[nc];
dist= new int[nc][nc];
ncities=nc;
for (int i = 0; i < nc; i++) path[i]=i;
for (int i = 0; i < nc; i++)
for (int j = i; j < nc; j++)
if (i==j) dist[i][j]=0;
else {
d= myRandom.nextInt(50)+10;
dist[i][j]=d;
dist[j][i]=d;
}
}
public ProbTSPBoard(int nc, int seed) {
Random myRandom=new Random();
int d;
myRandom.setSeed(seed);
path=new int[nc];
dist= new int[nc][nc];
ncities=nc;
for (int i = 0; i < nc; i++) path[i]=i;
for (int i = 0; i < nc; i++)
for (int j = i; j < nc; j++)
if (i==j) dist[i][j]=0;
else {
d= myRandom.nextInt(50)+10;
dist[i][j]=d;
dist[j][i]=d;
}
}
/*!\brief Genera una instancia del TSP con un camino inicial y una matriz de distancias
*
* Genera una instancia del problema del viajante de comercio recibiendo el numero de ciudades
* el camino inicial y la matriz de distancias
*
* @param[in] nc Numero de ciudades
* @param[in] p Camino inicial
* @param[in] d matriz de distancias
*
*/
public ProbTSPBoard(int nc, int [] p, int [][] d) {
path=new int[nc];
dist= new int[nc][nc];
ncities=nc;
for (int i = 0; i < nc; i++) {
path[i]=p[i];
}
for (int i = 0; i < nc; i++)
for (int j = 0; j < nc; j++)
dist[i][j]=d[i][j];
}
/*!\brief Retorna el numero de ciudades de la instancia
*
*/
public int getNCities(){return(ncities);}
/*!\brief Retorna el camino entre las ciudades
*
*/
public int [] getPath(){return(path);}
/*!\brief Retorna la matriz de distancias
*
*/
public int [][] getDists(){return(dist);}
/*!\brief Retorna la distancia entre la ciudad i y la siguiente ciudad en el camino
*
*/
public int distCities(int i){
if (i<ncities-1) return(dist[path[i]][path[i+1]]);
else return(dist[path[i]][path[0]]);
}
/*!\Brief Intercambia dos ciudades en el recorrido
*
* \pre los valores han de ser validos
*/
public void swapCities(int i, int j){
int tmp;
tmp=path[i];
path[i]=path[j];
path[j]=tmp;
}
/*!\brief Retorna un string indicando la diferencia entre los recorridos
*
*/
public String getDiff(ProbTSPBoard t){
int [] b;
String s="Intercambio ciudad ";
boolean primera=true;
b=t.getPath();
for (int i=0;i<ncities;i++){
if(b[i]!=path[i]){
if (primera) {
primera=false;
s=s+path[i]+" con ";
} else s=s+path[i];
}
}
return(s);
}
/*!\brief Retorna el coste del recorrido
*
*/
public int pathCost(){
int sum=0;
for(int i=0;i<ncities;i++) sum=sum+distCities(i);
return sum;
}
/*!\brief Retorna el recorrido como un string
*
*/
public String toString() {
String retVal = "|";
for (int i = 0; i < ncities; i++) {
retVal = retVal + path[i] + "|";
}
return retVal;
}
}
|
<filename>src/controllers/orders.controller.ts
import { Controller, Get, Post, Body, HttpException, HttpStatus, Query, Patch, Param, Delete, UseGuards } from '@nestjs/common';
import { ApiTags, ApiOperation, ApiOkResponse, ApiCreatedResponse, ApiNotFoundResponse, ApiBadRequestResponse, ApiQuery, ApiBearerAuth } from '@nestjs/swagger';
import { OrderDto, CreateOrderDto, StaffDto, CustomerDto, ServiceDto, UpdateOrderDto } from 'src/shared/dto';
import { RecordStatus, RecordStatusFinish } from 'src/shared/enums';
import { OrdersService, CustomersService, StaffService, ServicesService } from 'src/services';
import { ICustomerEntity, IOrderEntity } from 'src/shared/interfaces';
import { Utils } from 'src/shared/utils';
import { JwtAuthGuard } from 'src/services/auth';
@ApiTags('Orders')
@Controller('orders')
export class OrdersController {
  constructor(
    private readonly ordersService: OrdersService,
    private readonly customerService: CustomersService,
    private readonly staffService: StaffService,
    private readonly servicesService: ServicesService
  ) { }

  @Get()
  @ApiBearerAuth()
  @UseGuards(JwtAuthGuard)
  @ApiOperation({ summary: 'Возвращает онлайн-записи на услуги' })
  @ApiOkResponse({ type: [OrderDto] })
  @ApiQuery({ name: 'from', description: 'Дата начала периода дат посещения', required: false })
  @ApiQuery({ name: 'to', description: 'Дата конца периода дат посещения', required: false })
  @ApiQuery({ name: 'status', enum: RecordStatus, description: 'Статус заявки', required: false })
  @ApiQuery({ name: 'search', description: 'Поисковый запрос по ФИО клиента', required: false })
  // Lists orders filtered by customer name, status and visit-date range,
  // newest created first. Orders without a visit date pass the date filters.
  public getOrders(
    @Query('from') from: string,
    @Query('to') to: string,
    @Query('status') status: RecordStatus,
    @Query('search') search: string
  ): OrderDto[] {
    return this.ordersService
      .getAll()
      .map(order => this._getOrderDto(order))
      .filter(order => Utils.compare(order.customer.fullName, search))
      .filter(order => status ? order.status === status : true)
      .filter(order => {
        if (from && order.visitDate) {
          return Utils.getDateTime(order.visitDate) >= Utils.getDateTime(from);
        }
        return true;
      })
      .filter(order => {
        if (to && order.visitDate) {
          return Utils.getDateTime(order.visitDate) <= Utils.getDateTime(to);
        }
        return true;
      })
      .sort((a, b) => {
        return Utils.getDateTime(a.createdDate) < Utils.getDateTime(b.createdDate) ? 1 : -1
      });
  }

  @Post()
  @ApiOperation({ summary: 'Создаёт онлайн-запись на услугу' })
  @ApiBadRequestResponse({ description: 'Имя или номер клиента не заданы' })
  @ApiNotFoundResponse({ description: 'Мастер или услуга не найдены' })
  @ApiCreatedResponse({ description: 'Запись создана', type: OrderDto })
  // Creates an order; reuses an existing customer matched by phone, otherwise
  // registers a new one. Master/service ids are validated when provided.
  public createOrder(@Body() createOrderDto: CreateOrderDto): OrderDto {
    if (!createOrderDto.name) {
      throw new HttpException('Необходимо задать имя клиента', HttpStatus.BAD_REQUEST);
    }
    if (!createOrderDto.phone) {
      throw new HttpException('Необходимо задать номер телефона', HttpStatus.BAD_REQUEST);
    }
    let customer: ICustomerEntity;
    let foundCustomers = this.customerService.query(item => item.phone === createOrderDto.phone);
    if (!foundCustomers.length) {
      customer = this.customerService.create({
        firstName: createOrderDto.name,
        phone: createOrderDto.phone
      });
    } else {
      customer = foundCustomers[0];
    }
    if (createOrderDto.masterId && !this.staffService.get(createOrderDto.masterId)) {
      throw new HttpException(`Мастер по id ${createOrderDto.masterId} не найден`, HttpStatus.NOT_FOUND);
    }
    if (createOrderDto.serviceId && !this.servicesService.get(createOrderDto.serviceId)) {
      throw new HttpException(`Услуга по id ${createOrderDto.serviceId} не найдена`, HttpStatus.NOT_FOUND);
    }
    const createdOrder = this.ordersService.create({
      createdDate: new Date(),
      visitDate: createOrderDto.visitDate,
      status: RecordStatus.Opened,
      masterId: createOrderDto.masterId,
      serviceId: createOrderDto.serviceId,
      customerId: customer.id
    });
    return this._getOrderDto(createdOrder);
  }

  @Patch(':id')
  @ApiBearerAuth()
  @UseGuards(JwtAuthGuard)
  @ApiOkResponse({ description: 'Данные заявки изменены' })
  @ApiNotFoundResponse({ description: 'Заявка не найдена' })
  // Partially updates an order and returns the fresh state with nested DTOs.
  updateOrder(@Param('id') id: number, @Body() updateOrderDto: UpdateOrderDto) {
    const foundOrder = this.ordersService.get(+id);
    if (!foundOrder) {
      throw new HttpException('Заявка не найдена', HttpStatus.NOT_FOUND);
    }
    this.ordersService.update({ id, ...updateOrderDto });
    return this._getOrderDto(this.ordersService.get(+id));
  }

  @Patch('close/:id')
  @ApiBearerAuth()
  @UseGuards(JwtAuthGuard)
  @ApiQuery({ name: 'finishStatus', enum: RecordStatusFinish, description: 'Услуга оказана или нет' })
  @ApiNotFoundResponse({ description: 'Заявка не найдена' })
  // Closes an order with the given finish status.
  closeOrder(@Param('id') id: number, @Query('finishStatus') finishStatus: RecordStatusFinish) {
    const foundOrder = this.ordersService.get(+id);
    if (!foundOrder) {
      throw new HttpException('Заявка не найдена', HttpStatus.NOT_FOUND);
    }
    this.ordersService.update({ ...foundOrder, finishStatus, status: RecordStatus.Closed });
    // FIX: previously returned a bare `new OrderDto(...)` without the nested
    // master/customer/service DTOs, unlike every other endpoint; use the shared
    // mapper so the response shape is consistent.
    return this._getOrderDto(this.ordersService.get(+id));
  }

  @Delete(':id')
  @ApiBearerAuth()
  @UseGuards(JwtAuthGuard)
  @ApiOkResponse({ description: 'Заявка удалена' })
  @ApiNotFoundResponse({ description: 'Заявка не найдена' })
  removeOrder(@Param('id') id: number) {
    const foundOrder = this.ordersService.get(+id);
    if (!foundOrder) {
      throw new HttpException('Заявка не найдена', HttpStatus.NOT_FOUND);
    }
    this.ordersService.delete(+id);
    return;
  }

  // Maps an order entity to its DTO, resolving master, customer and service
  // references when the corresponding ids are set.
  private _getOrderDto(order: IOrderEntity) {
    const orderDto = new OrderDto(order);
    if (order.masterId) {
      orderDto.master = new StaffDto(this.staffService.get(order.masterId));
    }
    if (order.customerId) {
      orderDto.customer = new CustomerDto(this.customerService.get(order.customerId));
    }
    if (order.serviceId) {
      orderDto.service = new ServiceDto(this.servicesService.get(order.serviceId));
    }
    return orderDto;
  }
}
|
#include <iostream>
#include <string>
#include <map>
using namespace std;
// Lookup table from small integers to their English names.
map<int, string> numbers = {
    {1, "One"},
    {2, "Two"}
};

// Read an integer from stdin and print its name if it is in the table;
// unknown numbers print nothing.
int main(){
    int n;
    cin >> n;
    // find() keeps the lookup read-only (operator[] would insert on a miss).
    auto it = numbers.find(n);
    if (it != numbers.end()){
        cout << it->second << endl;
    }
    return 0;
}
def printFibonacciSeries(number):
    """Print the first `number` Fibonacci numbers, space-separated, to stdout.

    Fixes the original behavior of unconditionally printing "0 1 " even when
    `number` is 0 (nothing should be printed) or 1 (only "0" should be printed).

    Args:
        number: How many terms of the series to print. Non-positive values
            print nothing.
    """
    if number <= 0:
        return
    a, b = 0, 1
    print(a, end=" ")
    if number == 1:
        return
    print(b, end=" ")
    for _ in range(2, number):
        a, b = b, a + b
        print(b, end=" ")
#!/bin/bash
# Count non-listening (i.e. active/established) TCP connections whose netstat
# line mentions port 80 or 443.
# NOTE(review): the grep matches "80" or "443" anywhere in the line, so ports
# like 8080 or addresses containing those digits are counted too — anchor on
# the local-address column if exact port matching is required.
netstat -plant | grep '80\|443' | grep -v LISTEN | wc -l
import { AbstractPage, register } from '../components/AbstractPage'
import { log } from '../log'
// Render an epoch-milliseconds timestamp as a full ISO-8601 string (UTC).
function shortDate(timestamp) {
  return new Date(timestamp).toISOString()
}
// Page that renders the in-memory technical log as a simple list.
// Relies on AbstractPage providing `this.html` (template tag) and `this.render`,
// and on a global `T` — presumably an i18n/translation helper; confirm.
register("LogsPage", class LogsPage extends AbstractPage {
  constructor(id) {
    super(id)
  }
  // Called when the page becomes active: snapshot all log items and render them.
  enter() {
    let html = this.html
    let items = []
    for (let i = 0; i < log.num_items(); i++) {
      items.push(log.item(i))
    }
    let theHtml = html`
    <div class="container">
      <h2 class="mb-16 wball">${T("Displaying the technical logs")}</h2>
      <ul>
        ${items.map(
          ({timestamp, desc}, i) => html`<li class="bb-1 wball">${shortDate(timestamp)}-${desc}</li>`
        )}
      </ul>
    </div>`;
    this.render(theHtml)
  }
})
|
import { Dispatch } from 'redux';
import Api from 'store/api';
import { setAlert } from '../alert/alert.actions';
import { ADD_ANSWER, AnswerActions, ANSWER_ERROR, GET_ANSWERS, IAnswerCreate } from './answers.types';
// Thunk: fetches all answers for the post with the given id and stores them in
// the answers slice; dispatches ANSWER_ERROR with the raw error on failure.
export const getAnswers = (id: string) => async (dispatch: Dispatch<AnswerActions>) => {
  try {
    const { data } = await Api.get(`/posts/answers/${id}`);
    dispatch({
      type: GET_ANSWERS,
      payload: data,
    });
  } catch (error) {
    dispatch({
      type: ANSWER_ERROR,
      payload: error,
    });
  }
};
// Thunk: posts a new answer, then refreshes the full answer list for the post.
// On failure, shows an alert with the server-provided message (or a generic one)
// and records the error in the store.
export const addAnswer = (answer: IAnswerCreate) => async (dispatch: Dispatch<AnswerActions>) => {
  try {
    const { body, postId } = answer;
    const { data } = await Api.post(`/posts/answers/${postId}`, { body });
    dispatch({
      type: ADD_ANSWER,
      payload: data,
    });
    // Re-fetch so the store reflects server-side ordering and ids.
    dispatch<any>(getAnswers(postId));
  } catch (error) {
    // FIX: network failures have no `response`, so the original
    // `error.response.data.error` threw a TypeError inside the catch block.
    // Optional chaining with a generic fallback keeps the alert working.
    dispatch<any>(setAlert(error?.response?.data?.error ?? 'Something went wrong', 'error'));
    dispatch({
      type: ANSWER_ERROR,
      payload: error,
    });
  }
};
|
#!/bin/bash
# This script parses in the command line parameters from runCust,
# maps them to the correct command line parameters for DispNet training script and launches that task
# The last line of runCust should be: bash $CONFIG_FILE --data-dir $DATA_DIR --log-dir $LOG_DIR
# Parse the command line parameters
# that runCust will give out
DATA_DIR=NONE
LOG_DIR=NONE
CONFIG_DIR=NONE
MODEL_DIR=NONE
# Parsing command line arguments:
# FIX: use the arithmetic -gt comparison — inside [[ ]], `>` is a lexicographic
# string comparison, not a numeric one.
while [[ $# -gt 0 ]]
do
key="$1"
case $key in
    -h|--help)
    echo "Usage: run_dispnet_training_philly.sh [run_options]"
    echo "Options:"
    echo "  -d|--data-dir <path> - directory path to input data (default NONE)"
    echo "  -l|--log-dir <path> - directory path to save the log files (default NONE)"
    echo "  -p|--config-file-dir <path> - directory path to config file directory (default NONE)"
    echo "  -m|--model-dir <path> - directory path to output model file (default NONE)"
    exit 1
    ;;
    -d|--data-dir)
    DATA_DIR="$2"
    shift # pass argument
    ;;
    -p|--config-file-dir)
    CONFIG_DIR="$2"
    shift # pass argument
    ;;
    -m|--model-dir)
    MODEL_DIR="$2"
    shift # pass argument
    ;;
    -l|--log-dir)
    LOG_DIR="$2"
    shift
    ;;
    *)
    # FIX: "Unkown" typo in the user-facing message.
    echo Unknown option $key
    ;;
esac
shift # past argument or value
done
# Prints out the arguments that were passed into the script
echo "DATA_DIR=$DATA_DIR"
echo "LOG_DIR=$LOG_DIR"
echo "CONFIG_DIR=$CONFIG_DIR"
echo "MODEL_DIR=$MODEL_DIR"
# Run training on philly
# Add the root folder of the code to the PYTHONPATH
export PYTHONPATH=$PYTHONPATH:$CONFIG_DIR
# Run the actual job
python $CONFIG_DIR/anytime_models/examples/resnet-ann.py \
    --data_dir=$DATA_DIR \
    --log_dir=$LOG_DIR \
    --model_dir=$MODEL_DIR \
    --load=${MODEL_DIR}/checkpoint \
    -n=17 -c=32 -s=1 --opt_at=7 --ds_name=cifar100 --batch_size=64 --nr_gpu=1 -f=2 --samloss=0
|
<reponame>chryscloud/video-edge-ai-proxy<filename>server/mqtt/errors.go<gh_stars>10-100
package mqtt

import "errors"

var (
	// ErrNoMQTTSettings is returned when no MQTT settings are available.
	ErrNoMQTTSettings = errors.New("no mqtt settings")
)
|
<gh_stars>0
const { MessageEmbed } = require('discord.js');
const Event = require('../../base/Event');
module.exports = class extends Event {
async run(guild) {
if(!guild) return;
let defaultChannel = "";
msg.guild.channels.cache.forEach((channel) => {
if(channel.type === "text" && ["general-chat", "general", "public-chat"].includes(channel.name) && defaultChannel === "") {
if(channel.permissionsFor(msg.guild.me).has("SEND_MESSAGES")) {
defaultChannel = channel;
}
}
if(channel.type == "text" && defaultChannel == "") {
if(channel.permissionsFor(msg.guild.me).has("SEND_MESSAGES")) {
defaultChannel = channel;
}
}
});
let embed = new MessageEmbed()
.setColor("GREEN")
.setThumbnail(this.client.user.avatarURL())
.setDescription(`Thanks for inviting me!
The default prefix is ${this.client.config.prefix} and it is customizable
and the help command is ${this.client.config.prefix}help
If you need any help related bot join our support server.
`)
.setFooter("RED Bots")
defaultChannel.send("https://discord.gg/pVjMsBX", embed);
this.client.webhook.guild({
color: this.client.colors.success,
title: 'Guild Joined',
description: `Name: ${guild.name}\nGuild ID: ${guild.id}\nMember Count: ${guild.memberCount}\nTotal Guilds: ${this.client.guilds.cache.size}`
});
}
};
|
# Copyright (C) 2017 Verizon. All Rights Reserved.
#
# File: _envelopes.py
# Author: <NAME>, <NAME>
# Date: 2017-02-17
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import logging
import uuid
# Third Party
from lxml import etree
# Module
from dractor.exceptions import WSMANSOAPEnvelopeError
from dractor.types import CIM_Reference
from ._namespace import NS
LOGGER = logging.getLogger(__name__)
class IdentifyEnvelope(object):
    """
    Envelope for the WS-Management Identify operation.

    Deliberately not derived from WSMANSOAPEnvelope: per DSP0266 the Identify
    message is compatible with the basic SOAP specification only -- no
    WS-Addressing or WS-Management namespaces are involved, and the
    wsmid:Identify block in the body is the whole request.  The shared
    template machinery would therefore add nothing; this class is just a
    static template.
    """

    ENVELOPE_TEMPLATE = """<?xml version="1.0"?>
<s:Envelope xmlns:s="http://www.w3.org/2003/05/soap-envelope" xmlns:wsmid="http://schemas.dmtf.org/wbem/wsman/identity/1/wsmanidentity.xsd">
<s:Header></s:Header>
<s:Body>
<wsmid:Identify>
</wsmid:Identify>
</s:Body>
</s:Envelope>
"""

    @property
    def document(self):
        """Serialized XML document, round-tripped to validate the template."""
        parsed = etree.fromstring(self.ENVELOPE_TEMPLATE)
        return etree.tostring(parsed, pretty_print=True, encoding='unicode')
class WSMANSOAPEnvelope(object):
    """
    This is our basic message structure. It contains the necessary
    Addressing and WSMAN namespaces that are fundamental to the basic
    wsman calls.
    I use XPath to update the required addressing tags rather than adding them
    dynamically. I do this to make the basic required structure more clear, as
    far as xml can be clear, in the template itself.
    """

    # Skeleton with the mandatory WS-Addressing / WS-Management headers;
    # __init__ fills the empty header elements in via XPath.
    ENVELOPE_TEMPLATE = """<?xml version="1.0"?>
<s:Envelope xmlns:s="http://www.w3.org/2003/05/soap-envelope"
xmlns:wsa="http://schemas.xmlsoap.org/ws/2004/08/addressing"
xmlns:wsman="http://schemas.dmtf.org/wbem/wsman/1/wsman.xsd">
<s:Header>
<wsa:Action s:mustUnderstand="true"></wsa:Action>
<wsa:To s:mustUnderstand="true"></wsa:To>
<wsman:ResourceURI s:mustUnderstand="true"></wsman:ResourceURI>
<wsa:MessageID s:mustUnderstand="true"></wsa:MessageID>
<wsa:ReplyTo>
<wsa:Address>http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous</wsa:Address>
</wsa:ReplyTo>
</s:Header>
<s:Body>
</s:Body>
</s:Envelope>
"""

    def __init__(self, to_url, action_ns_prefix, action, resource_uri, additional_namespaces=None):
        """
        Build the common envelope skeleton and fill the required headers.

        Args:
            to_url: value for the wsa:To header (the WSMAN endpoint URL).
            action_ns_prefix: key into the namespace map; the mapped URI plus
                `action` forms the wsa:Action URI.
            action: action name appended to the namespace URI.
            resource_uri: value for the wsman:ResourceURI header.
            additional_namespaces: optional dict merged into (and able to
                override entries of) the default namespace map.
        """
        # Private copy so per-envelope additions never mutate the shared NS map.
        self._nsmap = copy.deepcopy(NS)
        if additional_namespaces:
            self._nsmap.update(additional_namespaces)
        # NS shortcuts
        self._action_ns_prefix = action_ns_prefix
        self._resource_uri = resource_uri
        # Use a WSMAN SOAP Template to save on the boiler plate
        self._root = etree.fromstring(self.ENVELOPE_TEMPLATE)
        # Update the To
        self._set_text("/s:Envelope/s:Header/wsa:To", to_url)
        # Set the action
        action_uri = "{}/{}".format(self._nsmap[action_ns_prefix], action)
        self._set_text("/s:Envelope/s:Header/wsa:Action", action_uri)
        # Set the Resource URI
        self._set_text("/s:Envelope/s:Header/wsman:ResourceURI", resource_uri)

    def _set_message_id(self):
        """ Set a UUID for each message """
        message_id = self._get_one_xpath("/s:Envelope/s:Header/wsa:MessageID")
        message_id.text = "uuid:{}".format(str(uuid.uuid4()))

    @property
    def document(self):
        """ Return as string for consumption """
        self._set_message_id()  # Make sure to generate a fresh UUID
        xml = etree.tostring(self._root, pretty_print=True, encoding='unicode')
        return xml

    def _get_one_xpath(self, path):
        """ Make sure our path exists and returns one element

        Raises:
            WSMANSOAPEnvelopeError: if `path` matches zero or multiple elements.
        """
        # Xpath returns an array of matches
        element = self._root.xpath(path, namespaces=self._nsmap)
        if not element:
            raise WSMANSOAPEnvelopeError("Xpath '{}' did not return element".format(path))
        if len(element) != 1:
            raise WSMANSOAPEnvelopeError("Xpath '{}' returned multiple elements".format(path))
        return element.pop()

    def _set_text(self, path, text):
        """ Set the text of the single element returned by path """
        element = self._get_one_xpath(path)
        element.text = text

    def _add_wsman_selectors(self, selectors):
        """ Add the selectors

        Appends a wsman:SelectorSet to the header with one wsman:Selector per
        (name, value) entry of the `selectors` dict.
        """
        header = self._get_one_xpath("/s:Envelope/s:Header")
        selectorset = etree.SubElement(header, "{{{wsman}}}SelectorSet".format(**self._nsmap))
        for key, value in selectors.items():
            selector = etree.SubElement(selectorset, "{{{wsman}}}Selector".format(**self._nsmap))
            selector.set("{{{wsman}}}Name".format(**self._nsmap), key)
            selector.text = value
class GetEnvelope(WSMANSOAPEnvelope):
    """SOAP envelope for a WS-Transfer Get request against a DCIM instance."""

    ACTION_NS_PREFIX = "wstransfer"  # Not used
    ACTION = "Get"

    def __init__(self, to_uri, dcim_class, selectors):
        """
        Build a Get envelope for one instance of a DCIM class.

        Args:
            to_uri: WSMAN endpoint URL.
            dcim_class: DCIM class name, e.g. 'DCIM_NICView'.
            selectors: dict of selector name -> value identifying the instance.
        """
        target_resource = "{}/{}".format(NS['dcim'], dcim_class)
        super(GetEnvelope, self).__init__(to_uri,
                                          self.ACTION_NS_PREFIX,
                                          self.ACTION,
                                          target_resource)
        # Selectors pick the specific instance the Get targets.
        self._add_wsman_selectors(selectors)
class EnumerationEnvelopes(WSMANSOAPEnvelope):
    """ This forms the basis of our two Enumeration calls, Enumerate and Pull """

    ACTION_NS_PREFIX = "wsen"
    # Concrete subclasses must override ACTION ("Enumerate" or "Pull").
    ACTION = None

    def _setup_body(self):
        # Hook for subclasses to populate s:Body; no-op in the base class.
        pass

    def __init__(self, to_uri, dcim_class):
        """ Setup an Enumeration for dcim_class, such as DCIM_NICView """
        resource_uri = "{}/{}".format(NS['dcim'], dcim_class)
        super(EnumerationEnvelopes, self).__init__(to_uri,
                                                   self.ACTION_NS_PREFIX,
                                                   self.ACTION,
                                                   resource_uri)
        # Called after the base envelope exists so _setup_body's XPath
        # lookups can find the body element.
        self._setup_body()
class EnumerateEnvelope(EnumerationEnvelopes):
    """Envelope for the WS-Enumeration Enumerate call (starts an enumeration)."""

    ACTION = "Enumerate"

    def _setup_body(self):
        """ Add the Enumeration element to the body """
        body = self._get_one_xpath("/s:Envelope/s:Body")
        # An empty wsen:Enumerate element is all this request requires.
        etree.SubElement(body, "{{{wsen}}}Enumerate".format(**self._nsmap))
class PullEnvelope(EnumerationEnvelopes):
    """Envelope for the WS-Enumeration Pull call (fetches enumeration results)."""

    ACTION = "Pull"

    def __init__(self, to_uri, dcim_class, context, max_elements=50):
        """
        Args:
            to_uri: WSMAN endpoint URL.
            dcim_class: DCIM class name being enumerated.
            context: enumeration context token from the Enumerate response.
            max_elements: maximum elements per Pull (coerced to int).
        """
        # Stash parameters first: the base __init__ calls _setup_body(),
        # which reads them.
        self._context = context
        self._max_elements = int(max_elements)
        super(PullEnvelope, self).__init__(to_uri, dcim_class)

    def _setup_body(self):
        # Build <wsen:Pull> carrying the enumeration context and, when more
        # than one element is requested, optimized-enumeration hints.
        body = self._get_one_xpath("/s:Envelope/s:Body")
        pull = etree.SubElement(body, "{{{wsen}}}Pull".format(**self._nsmap))
        context_xml = etree.SubElement(pull, "{{{wsen}}}EnumerationContext".format(**self._nsmap))
        context_xml.text = self._context
        if self._max_elements > 1:
            etree.SubElement(pull, "{{{wsman}}}OptimizeEnumeration".format(**self._nsmap))
            max_elements = etree.SubElement(pull, "{{{wsman}}}MaxElements".format(**self._nsmap))
            max_elements.text = str(self._max_elements)
class InvokeEnvelope(WSMANSOAPEnvelope):
    """SOAP envelope for invoking a method on a DCIM class instance."""

    def __init__(self, to_uri, dcim_class, method, selectors, properties):
        """
        Args:
            to_uri: WSMAN endpoint URL.
            dcim_class: DCIM class name, e.g. 'DCIM_JobService'.
            method: name of the method to invoke.
            selectors: dict of selector name -> value identifying the instance.
            properties: iterable of (name, value) pairs forming the method's
                input parameters.  NOTE(review): iterated directly (not via
                .items()), so callers appear to pass pairs, not a dict --
                confirm against call sites.
        """
        resource_uri = "{}/{}".format(NS['dcim'], dcim_class)
        # Map the class's resource URI under the 'dcim_class' prefix so the
        # wsa:Action URI becomes "<resource_uri>/<method>".
        additional_namespaces = {'dcim_class': resource_uri}
        super(InvokeEnvelope, self).__init__(to_uri, 'dcim_class', method, resource_uri,
                                             additional_namespaces)
        self._add_wsman_selectors(selectors)
        self._add_wsman_properties(method, properties)

    def _add_wsman_properties(self, method, properties):
        """Append the <METHOD>_INPUT element and one child per property."""
        body = self._get_one_xpath("/s:Envelope/s:Body")
        element_name = "{{{}}}{}_INPUT".format(self._resource_uri, method)
        input_element = etree.SubElement(body, element_name)
        for key, value in properties:
            prop_name = "{{{}}}{}".format(self._resource_uri, key)
            prop_element = etree.SubElement(input_element, prop_name)
            if isinstance(value, str):
                prop_element.text = value
            elif isinstance(value, CIM_Reference):
                # Construct a cim_reference (an endpoint reference)
                address = etree.SubElement(prop_element,
                                           "{{{wsa}}}Address".format(**self._nsmap))
                address.text = "http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous"
                ref_params = etree.SubElement(prop_element,
                                              "{{{wsa}}}ReferenceParameters".format(**self._nsmap))
                resource_uri = etree.SubElement(ref_params,
                                                "{{{wsman}}}ResourceURI".format(**self._nsmap))
                resource_uri.text = value.resource_uri
                selector_set = etree.SubElement(ref_params,
                                                "{{{wsman}}}SelectorSet".format(**self._nsmap))
                # BUG FIX: loop variables renamed so they no longer shadow
                # (and clobber) the outer 'value' mid-iteration.
                for sel_name, sel_value in value.selector_set.items():
                    selector = etree.SubElement(selector_set,
                                                "{{{wsman}}}Selector".format(**self._nsmap))
                    selector.set("Name", sel_name)
                    selector.text = sel_value
            else:
                # Typo fix in the error message: 'Unkown' -> 'Unknown'.
                message = ("Unknown value type for {}: {} ({})").format(key, type(value), value)
                raise WSMANSOAPEnvelopeError(message)
|
package elasta.module.impl;
import com.google.common.collect.ImmutableMap;
import elasta.commons.Utils;
import elasta.module.ExportScript;
import elasta.module.ModuleSystem;
import elasta.module.ModuleSystemBuilder;
import elasta.module.ModuleProvider;
import java.util.LinkedHashMap;
import java.util.Map;
/**
 * Default {@link ModuleSystemBuilder} implementation.
 * <p>
 * Registrations are kept in insertion order (LinkedHashMap) and frozen into
 * an immutable map when {@link #build()} is called; the shared
 * {@code ModuleMapProxy} lets providers resolve other modules through the
 * finished map.
 * <p>
 * Created by sohan on 5/14/2017.
 */
final public class ModuleSystemBuilderImpl implements ModuleSystemBuilder {

    final Map<ModuleSystemImpl.TypeAndNamePair, ModuleProvider> typeAndNamePairToModuleHolderMap = new LinkedHashMap<>();
    final ModuleMapProxy moduleMapProxy = new ModuleMapProxy();

    @Override
    public <T> ModuleSystemBuilderImpl export(Class<T> moduleClass, ExportScript<T> exportScript) {
        // Register (or overwrite) the unnamed binding for this type.
        final ModuleSystemImpl.TypeAndNamePair unnamedKey =
            ModuleSystemImpl.TypeAndNamePair.builder().type(moduleClass).build();
        put(unnamedKey, exportScript);
        return this;
    }

    @Override
    public <T> ModuleSystemBuilderImpl export(Class<T> moduleClass, String moduleName, ExportScript<T> exportScript) {
        final ModuleSystemImpl.TypeAndNamePair unnamedKey =
            ModuleSystemImpl.TypeAndNamePair.builder().type(moduleClass).build();
        // The first named export of a type also becomes its default
        // (unnamed) binding; later named exports leave the default alone.
        if (Utils.not(typeAndNamePairToModuleHolderMap.containsKey(unnamedKey))) {
            put(unnamedKey, exportScript);
        }
        final ModuleSystemImpl.TypeAndNamePair namedKey = ModuleSystemImpl.TypeAndNamePair.builder()
            .type(moduleClass)
            .name(moduleName)
            .build();
        put(namedKey, exportScript);
        return this;
    }

    @Override
    public ModuleSystem build() {
        // Freeze the registrations, then wire the proxy so already-created
        // providers can look modules up in the finished map.
        final ImmutableMap<ModuleSystemImpl.TypeAndNamePair, ModuleProvider> frozen =
            ImmutableMap.copyOf(typeAndNamePairToModuleHolderMap);
        moduleMapProxy.setMap(frozen);
        return new ModuleSystemImpl(frozen);
    }

    /** Wraps the script in a provider bound to the shared map proxy and stores it. */
    private <T> void put(ModuleSystemImpl.TypeAndNamePair typeAndNamePair, ExportScript<T> exportScript) {
        typeAndNamePairToModuleHolderMap.put(
            typeAndNamePair,
            new ModuleProviderImpl(exportScript, moduleMapProxy)
        );
    }
}
|
def lcm(x, y):
    """
    This function takes two integer numbers and returns the LCM.

    Uses the identity lcm(x, y) = |x * y| // gcd(x, y), replacing the
    original brute-force upward search (worst case O(x * y) iterations)
    with an O(log min(x, y)) computation.
    """
    from math import gcd  # local import keeps the module's import section untouched
    if x == 0 or y == 0:
        # lcm with zero is conventionally 0; the brute-force version could
        # not handle this input (division by zero / non-termination).
        return 0
    return abs(x * y) // gcd(x, y)
# Demo: compute and print the LCM of two sample values.
x = 12
y = 15
print("The LCM is : ",lcm(x, y))
<reponame>TovaCohen/atlasmap
/*
Copyright (C) 2017 Red Hat, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import { Component, ViewChild, OnInit, OnDestroy } from '@angular/core';
import { ConfigModel } from '../models/config.model';
import { ErrorHandlerService } from '../services/error-handler.service';
import { DocumentManagementService } from '../services/document-management.service';
import { MappingManagementService } from '../services/mapping-management.service';
import { InitializationService } from '../services/initialization.service';
import { DataMapperAppComponent } from './data-mapper-app.component';
import { environment } from '../../../../environments/environment';
import { Examples } from '../models/examples';
@Component({
  selector: 'data-mapper-example-host',
  template: '<data-mapper #dataMapperComponent></data-mapper>',
  providers: [MappingManagementService, ErrorHandlerService, DocumentManagementService],
})
export class DataMapperAppExampleHostComponent implements OnInit {

  @ViewChild('dataMapperComponent')
  dataMapperComponent: DataMapperAppComponent;

  constructor(private initializationService: InitializationService) { }

  /**
   * Populates the shared ConfigModel with environment-specific settings,
   * then starts the initialization service.
   */
  ngOnInit() {
    const cfg: ConfigModel = this.initializationService.cfg;

    // Backend service endpoints come from the Angular environment file.
    cfg.initCfg.baseJavaInspectionServiceUrl = environment.backendUrls.javaInspectionServiceUrl;
    cfg.initCfg.baseXMLInspectionServiceUrl = environment.backendUrls.xmlInspectionServiceUrl;
    cfg.initCfg.baseJSONInspectionServiceUrl = environment.backendUrls.jsonInspectionServiceUrl;
    cfg.initCfg.baseMappingServiceUrl = environment.backendUrls.atlasServiceUrl;

    // Optional XSRF configuration, only when the environment defines it.
    if (environment.xsrf) {
      cfg.initCfg.xsrfHeaderName = environment.xsrf.headerName;
      cfg.initCfg.xsrfCookieName = environment.xsrf.cookieName;
      cfg.initCfg.xsrfDefaultTokenValue = environment.xsrf.defaultTokenValue;
    }

    // Class-path resolution: a sample pom (quotes/newlines/tabs escaped) is
    // sent to the maven service unless an explicit classPath is provided.
    cfg.initCfg.pomPayload = Examples.pom;
    cfg.initCfg.classPathFetchTimeoutInMilliseconds = 30000;
    // null classPath means the maven pom resolution is NOT skipped.
    cfg.initCfg.classPath = null;

    // Stand-alone mode: keep mapping preview, navbar and import/export enabled.
    cfg.initCfg.disableMappingPreviewMode = false;
    cfg.initCfg.disableNavbar = false;

    this.initializationService.initialize();
  }
}
|
#!/bin/bash
# Program:
# This program install multiple web app
#
#Copyright (c) 2013 Intel Corporation.
#
#Redistribution and use in source and binary forms, with or without modification,
#are permitted provided that the following conditions are met:
#
#* Redistributions of works must retain the original copyright notice, this list
# of conditions and the following disclaimer.
#* Redistributions in binary form must reproduce the original copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#* Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
#AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
#IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
#ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
#INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
#OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
#NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
#EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author:
# IVAN CHEN <yufeix.chen@intel.com>
local_path=$(cd $(dirname $0);pwd)
source $local_path/Common
xpk_path=$local_path/../testapp

# install original xpk
# Remove any stale installation of the first test app before installing.
app_id1=`pkgcmd -l | grep "diffid_same_version_tests" | awk '{print $4}'`
# BUG FIX: the two lines below read the undefined variable $app_id in the
# original, so the stale-app cleanup always operated on an empty id.
app_id1=`echo $app_id1 | awk '{print $1}'`
app_id1=${app_id1:1:-1}   # strip the surrounding brackets pkgcmd prints
pkgcmd -u -n $app_id1 -q
pkgcmd -i -t xpk -p $xpk_path/diffid_same_version_tests.xpk -q
if [[ $? -eq 0 ]]; then
    echo "Install Pass"
else
    echo "Install Fail"
    exit 1
fi

# Re-read the id of the freshly installed app.
app_id1=`pkgcmd -l | grep "diffid_same_version_tests" | awk '{print $4}'`
app_id1=`echo $app_id1 | awk '{print $1}'`
app_id1=${app_id1:1:-1}

pkgcmd -i -t xpk -p $xpk_path/update_original_versionOne_tests.xpk -q
if [[ $? -eq 0 ]]; then
    echo "Install Pass"
else
    echo "Install Fail"
    exit 1
fi

app_id2=`pkgcmd -l | grep "update_original_versionOne_tests" | awk '{print $4}'`
app_id2=`echo $app_id2 | awk '{print $1}'`
app_id2=${app_id2:1:-1}

# Clean up both apps; any uninstall failure fails the test.
pkgcmd -u -n $app_id1 -q
if [[ $? -eq 0 ]]; then
    echo "Uninstall Pass"
else
    echo "Uninstall Fail"
    exit 1
fi
pkgcmd -u -n $app_id2 -q
if [[ $? -eq 0 ]]; then
    echo "Uninstall Pass"
    exit 0
else
    echo "Uninstall Fail"
    exit 1
fi
|
def add_one(data):
    """Increment every element of ``data`` in place by one."""
    for index, value in enumerate(data):
        data[index] = value + 1
#!/bin/bash
# Side note by adding the -y you are auto answering yes
# when program would usually ask if it should install or not
# General set up for linux 2 vm ---------------------------------
# Elevate to admin privileges to run everything as root user
sudo su
# update the yum
yum update -y
# install python 3.7
yum install -y python37
# install git to clone repos
yum install git -y
# Set up required to run pdffigures2 -----------------------------------------
# install java 8
yum install java-1.8.0-openjdk-devel -y
# install scala 2.13.0
wget http://downloads.typesafe.com/scala/2.13.0/scala-2.13.0.tgz
tar -xzvf scala-2.13.0.tgz
rm -rf scala-2.13.0.tgz
export SCALA_HOME=/home/ec2-user/scala-2.13.0
export PATH=$PATH:/home/ec2-user/scala-2.13.0/bin
# install sbt (adds the bintray sbt rpm repo first)
curl https://bintray.com/sbt/rpm/rpm | sudo tee /etc/yum.repos.d/bintray-sbt-rpm.repo
sudo yum install sbt -y
# Set up needed for deepfigures ---------------------------------------------
# install pip
curl -O https://bootstrap.pypa.io/get-pip.py
python3.7 get-pip.py --user
# install click module that is needed for manage.py in deepfigures
python3.7 -m pip install click
python3.7 -m pip install scikit-image
pip3 install scikit-image
pip3 install click
# install docker -- vm that runs neural network to extract figures
amazon-linux-extras install docker -y
service docker start
# may need to reboot instance to have this enabled
usermod -a -G docker ec2-user
# Getting deepfigures downloaded and ready to run -------------------------
# make a git directory and enter it
mkdir git # the name is subject to change......
cd git
# clone edited deepfigures into git
git clone https://github.com/Julen-Lujambio/deepfigures_open.git
cd deepfigures_open
# downloads the weights for the neural network
curl -O https://s3-us-west-2.amazonaws.com/ai2-s2-research-public/deepfigures/weights.tar.gz
tar -xvf weights.tar.gz
# compiling the .jar file for deepfigures
# NOTE(review): the jar is taken from target/scala-2.12 although Scala 2.13.0
# is installed above -- confirm the sbt build's scalaVersion matches.
git clone https://github.com/allenai/pdffigures2
cd pdffigures2
sbt assembly
mv target/scala-2.12/pdffigures2-assembly-0.1.0.jar ../bin
cd ..
rm -rf pdffigures2
# Install modules for web scraper
pip3 install pandas
pip3 install requests
pip3 install bs4
# SET UP is complete now and deepfigure should run -----------------------------
# Move the working tree to the ec2-user home directory and leave a marker file.
cd ../../
mv git home/ec2-user
cd home/ec2-user
touch DONE.txt
<filename>Client/license-checker/src/main/java/ir/doorbash/licensechecker/util/NetUtil.java<gh_stars>1-10
package ir.doorbash.licensechecker.util;
import android.content.Context;
import android.net.ConnectivityManager;
import android.net.NetworkInfo;
/**
* Created by <NAME> on 3/16/16.
*/
public class NetUtil {

    /**
     * Checks whether the device currently has an active Wi-Fi or mobile-data
     * connection.
     *
     * @param c context used to look up the connectivity service
     * @return {@code true} if Wi-Fi or mobile data is connected; also
     *         {@code true} when the check itself throws (fail-open --
     *         NOTE(review): confirm this optimistic default is intentional)
     */
    public static boolean isDeviceConnectedToInternet(Context c) {
        try {
            ConnectivityManager cm = (ConnectivityManager) c.getSystemService(Context.CONNECTIVITY_SERVICE);
            NetworkInfo wifiNetwork = cm.getNetworkInfo(ConnectivityManager.TYPE_WIFI);
            if (wifiNetwork != null && wifiNetwork.isConnected()) {
                return true;
            }
            NetworkInfo mobileNetwork = cm.getNetworkInfo(ConnectivityManager.TYPE_MOBILE);
            if (mobileNetwork != null && mobileNetwork.isConnected()) {
                return true;
            }
            return false;
        } catch (Throwable e) {
            // Swallowed deliberately: any failure falls through to the
            // optimistic 'return true' below.
        }
        return true;
    }
}
|
package io.github.rcarlosdasilva.weixin.api.weixin;
import java.util.Date;
import io.github.rcarlosdasilva.weixin.model.response.statistics.StatisticsGetInterfaceSummaryResponse;
import io.github.rcarlosdasilva.weixin.model.response.statistics.StatisticsGetMessageDistributedResponse;
import io.github.rcarlosdasilva.weixin.model.response.statistics.StatisticsGetMessageSummaryResponse;
import io.github.rcarlosdasilva.weixin.model.response.statistics.StatisticsGetNewsSummaryResponse;
import io.github.rcarlosdasilva.weixin.model.response.statistics.StatisticsGetUserSummaryResponse;
/**
 * WeChat official-account statistics APIs (user, article, message and
 * interface analytics).
 *
 * @author <a href="mailto:<EMAIL>"><NAME></a>
 */
public interface StatisticsApi {

  /**
   * Gets user increase/decrease data; maximum date span is 7 days.
   *
   * @param begin start date; (end - begin) must stay below the maximum span, otherwise the API reports an error
   * @param end   end date; the latest allowed value is yesterday
   * @return see {@link StatisticsGetUserSummaryResponse}
   * @see <a href="https://mp.weixin.qq.com/wiki?t=resource/res_main&id=mp1421141082&token=&lang=zh_CN">User analytics API</a>
   */
  StatisticsGetUserSummaryResponse getUserSummary(Date begin, Date end);

  /**
   * Gets cumulative user data; maximum date span is 7 days.
   *
   * @param begin start date; (end - begin) must stay below the maximum span, otherwise the API reports an error
   * @param end   end date; the latest allowed value is yesterday
   * @return see {@link StatisticsGetUserSummaryResponse}
   * @see <a href="https://mp.weixin.qq.com/wiki?t=resource/res_main&id=mp1421141082&token=&lang=zh_CN">User analytics API</a>
   */
  StatisticsGetUserSummaryResponse getUserCumulate(Date begin, Date end);

  /**
   * Gets daily article mass-send data; maximum date span is 1 day.
   *
   * @param begin start date; (end - begin) must stay below the maximum span, otherwise the API reports an error
   * @param end   end date; the latest allowed value is yesterday
   * @return see {@link StatisticsGetNewsSummaryResponse}
   * @see <a href="https://mp.weixin.qq.com/wiki?t=resource/res_main&id=mp1421141084&token=&lang=zh_CN">Article analytics API</a>
   */
  StatisticsGetNewsSummaryResponse getNewsSummary(Date begin, Date end);

  /**
   * Gets total article mass-send data; maximum date span is 1 day.
   *
   * @param begin start date; (end - begin) must stay below the maximum span, otherwise the API reports an error
   * @param end   end date; the latest allowed value is yesterday
   * @return see {@link StatisticsGetNewsSummaryResponse}
   * @see <a href="https://mp.weixin.qq.com/wiki?t=resource/res_main&id=mp1421141084&token=&lang=zh_CN">Article analytics API</a>
   */
  StatisticsGetNewsSummaryResponse getNewsTotal(Date begin, Date end);

  /**
   * Gets article reading statistics; maximum date span is 3 days.
   *
   * @param begin start date; (end - begin) must stay below the maximum span, otherwise the API reports an error
   * @param end   end date; the latest allowed value is yesterday
   * @return see {@link StatisticsGetNewsSummaryResponse}
   * @see <a href="https://mp.weixin.qq.com/wiki?t=resource/res_main&id=mp1421141084&token=&lang=zh_CN">Article analytics API</a>
   */
  StatisticsGetNewsSummaryResponse getNewsRead(Date begin, Date end);

  /**
   * Gets hourly article reading statistics; maximum date span is 1 day.
   *
   * @param begin start date; (end - begin) must stay below the maximum span, otherwise the API reports an error
   * @param end   end date; the latest allowed value is yesterday
   * @return see {@link StatisticsGetNewsSummaryResponse}
   * @see <a href="https://mp.weixin.qq.com/wiki?t=resource/res_main&id=mp1421141084&token=&lang=zh_CN">Article analytics API</a>
   */
  StatisticsGetNewsSummaryResponse getNewsReadHour(Date begin, Date end);

  /**
   * Gets article share/forward data; maximum date span is 7 days.
   *
   * @param begin start date; (end - begin) must stay below the maximum span, otherwise the API reports an error
   * @param end   end date; the latest allowed value is yesterday
   * @return see {@link StatisticsGetNewsSummaryResponse}
   * @see <a href="https://mp.weixin.qq.com/wiki?t=resource/res_main&id=mp1421141084&token=&lang=zh_CN">Article analytics API</a>
   */
  StatisticsGetNewsSummaryResponse getNewsShare(Date begin, Date end);

  /**
   * Gets hourly article share/forward data; maximum date span is 1 day.
   *
   * @param begin start date; (end - begin) must stay below the maximum span, otherwise the API reports an error
   * @param end   end date; the latest allowed value is yesterday
   * @return see {@link StatisticsGetNewsSummaryResponse}
   * @see <a href="https://mp.weixin.qq.com/wiki?t=resource/res_main&id=mp1421141084&token=&lang=zh_CN">Article analytics API</a>
   */
  StatisticsGetNewsSummaryResponse getNewsShareHour(Date begin, Date end);

  /**
   * Gets the message-sending overview; maximum date span is 7 days.
   * (Return type corrected in docs: this returns a message summary, not a
   * news summary.)
   *
   * @param begin start date; (end - begin) must stay below the maximum span, otherwise the API reports an error
   * @param end   end date; the latest allowed value is yesterday
   * @return see {@link StatisticsGetMessageSummaryResponse}
   * @see <a href="https://mp.weixin.qq.com/wiki?t=resource/res_main&id=mp1421141085&token=&lang=zh_CN">Message analytics API</a>
   */
  StatisticsGetMessageSummaryResponse getMessageSummary(Date begin, Date end);

  /**
   * Gets hourly message-sending data; maximum date span is 1 day.
   *
   * @param begin start date; (end - begin) must stay below the maximum span, otherwise the API reports an error
   * @param end   end date; the latest allowed value is yesterday
   * @return see {@link StatisticsGetMessageSummaryResponse}
   * @see <a href="https://mp.weixin.qq.com/wiki?t=resource/res_main&id=mp1421141085&token=&lang=zh_CN">Message analytics API</a>
   */
  StatisticsGetMessageSummaryResponse getMessageSummaryHour(Date begin, Date end);

  /**
   * Gets weekly message-sending data; maximum date span is 30 days.
   *
   * @param begin start date; (end - begin) must stay below the maximum span, otherwise the API reports an error
   * @param end   end date; the latest allowed value is yesterday
   * @return see {@link StatisticsGetMessageSummaryResponse}
   * @see <a href="https://mp.weixin.qq.com/wiki?t=resource/res_main&id=mp1421141085&token=&lang=zh_CN">Message analytics API</a>
   */
  StatisticsGetMessageSummaryResponse getMessageSummaryWeek(Date begin, Date end);

  /**
   * Gets monthly message-sending data; maximum date span is 30 days.
   *
   * @param begin start date; (end - begin) must stay below the maximum span, otherwise the API reports an error
   * @param end   end date; the latest allowed value is yesterday
   * @return see {@link StatisticsGetMessageSummaryResponse}
   * @see <a href="https://mp.weixin.qq.com/wiki?t=resource/res_main&id=mp1421141085&token=&lang=zh_CN">Message analytics API</a>
   */
  StatisticsGetMessageSummaryResponse getMessageSummaryMonth(Date begin, Date end);

  /**
   * Gets message-sending distribution data; maximum date span is 15 days.
   *
   * @param begin start date; (end - begin) must stay below the maximum span, otherwise the API reports an error
   * @param end   end date; the latest allowed value is yesterday
   * @return see {@link StatisticsGetMessageDistributedResponse}
   * @see <a href="https://mp.weixin.qq.com/wiki?t=resource/res_main&id=mp1421141085&token=&lang=zh_CN">Message analytics API</a>
   */
  StatisticsGetMessageDistributedResponse getMessageDistributed(Date begin, Date end);

  /**
   * Gets weekly message-sending distribution data; maximum date span is 30 days.
   *
   * @param begin start date; (end - begin) must stay below the maximum span, otherwise the API reports an error
   * @param end   end date; the latest allowed value is yesterday
   * @return see {@link StatisticsGetMessageDistributedResponse}
   * @see <a href="https://mp.weixin.qq.com/wiki?t=resource/res_main&id=mp1421141085&token=&lang=zh_CN">Message analytics API</a>
   */
  StatisticsGetMessageDistributedResponse getMessageDistributedWeek(Date begin, Date end);

  /**
   * Gets monthly message-sending distribution data; maximum date span is 30 days.
   *
   * @param begin start date; (end - begin) must stay below the maximum span, otherwise the API reports an error
   * @param end   end date; the latest allowed value is yesterday
   * @return see {@link StatisticsGetMessageDistributedResponse}
   * @see <a href="https://mp.weixin.qq.com/wiki?t=resource/res_main&id=mp1421141085&token=&lang=zh_CN">Message analytics API</a>
   */
  StatisticsGetMessageDistributedResponse getMessageDistributedMonth(Date begin, Date end);

  /**
   * Gets interface (API call) analytics; maximum date span is 30 days.
   *
   * @param begin start date; (end - begin) must stay below the maximum span, otherwise the API reports an error
   * @param end   end date; the latest allowed value is yesterday
   * @return see {@link StatisticsGetInterfaceSummaryResponse}
   * @see <a href="https://mp.weixin.qq.com/wiki?t=resource/res_main&id=mp1421141086&token=&lang=zh_CN">Interface analytics API</a>
   */
  StatisticsGetInterfaceSummaryResponse getInterfaceSummary(Date begin, Date end);

  /**
   * Gets hourly interface (API call) analytics; maximum date span is 1 day.
   *
   * @param begin start date; (end - begin) must stay below the maximum span, otherwise the API reports an error
   * @param end   end date; the latest allowed value is yesterday
   * @return see {@link StatisticsGetInterfaceSummaryResponse}
   * @see <a href="https://mp.weixin.qq.com/wiki?t=resource/res_main&id=mp1421141086&token=&lang=zh_CN">Interface analytics API</a>
   */
  StatisticsGetInterfaceSummaryResponse getInterfaceSummaryHour(Date begin, Date end);
}
|
<filename>curtow/string_test.go
package curtow
import (
"errors"
"math"
"strings"
"testing"
"github.com/gammban/numtow/lang/en"
"github.com/gammban/numtow/curtow/cur"
"github.com/gammban/numtow/internal/ds"
"github.com/gammban/numtow/lang/kz"
"github.com/gammban/numtow/lang/ru"
"github.com/gammban/numtow/lang"
)
//nolint:gochecknoglobals
// testCases is the shared table for TestString and TestMustString: each entry
// pairs one amount (as string and float64) with the language, conversion
// options, and the expected spelled-out currency text or expected error.
var testCases = []struct {
	giveAmountString  string        // amount as passed to String/MustString
	giveAmountFloat64 float64       // same amount as float64 (used by sibling float tests)
	giveLang          lang.Lang     // target language (RU, KZ, EN, or invalid)
	giveOpts          []interface{} // per-language options: currency, convert/ignore minor units
	wantAmount        string        // expected spelled-out amount (compared case-insensitively)
	wantErr           error         // expected error, nil on success
}{
	{
		giveAmountString: "0", giveAmountFloat64: 0, giveLang: lang.RU, giveOpts: []interface{}{ru.WithCur(cur.USD), ru.WithCurConvMU(false), ru.WithCurIgnoreMU(false)},
		wantAmount: "ноль долларов США 00 центов",
	},
	{
		giveAmountString: "0.22", giveAmountFloat64: 0.22, giveLang: lang.RU, giveOpts: []interface{}{ru.WithCur(cur.USD), ru.WithCurConvMU(false), ru.WithCurIgnoreMU(false)},
		wantAmount: "Ноль долларов США 22 цента",
	},
	{
		giveAmountString: "0.22", giveAmountFloat64: 0.22, giveLang: lang.RU, giveOpts: []interface{}{ru.WithCur(cur.USD), ru.WithCurConvMU(false), ru.WithCurIgnoreMU(true)},
		wantAmount: "Ноль долларов США",
	},
	{
		giveAmountString: "0.22", giveAmountFloat64: 0.22, giveLang: lang.RU, giveOpts: []interface{}{ru.WithCur(cur.USD), ru.WithCurConvMU(true), ru.WithCurIgnoreMU(false)},
		wantAmount: "Ноль долларов США двадцать два цента",
	},
	// Extra fractional digits beyond the minor unit are truncated, not rounded.
	{
		giveAmountString: "0.223", giveAmountFloat64: 0.223, giveLang: lang.RU, giveOpts: []interface{}{ru.WithCur(cur.USD), ru.WithCurConvMU(true), ru.WithCurIgnoreMU(false)},
		wantAmount: "Ноль долларов США двадцать два цента",
	},
	{
		giveAmountString: "0.229", giveAmountFloat64: 0.229, giveLang: lang.RU, giveOpts: []interface{}{ru.WithCur(cur.USD), ru.WithCurConvMU(true), ru.WithCurIgnoreMU(false)},
		wantAmount: "Ноль долларов США двадцать два цента",
	},
	{
		giveAmountString: "1.1", giveAmountFloat64: 1.1, giveLang: lang.RU, giveOpts: []interface{}{ru.WithCur(cur.USD), ru.WithCurConvMU(true), ru.WithCurIgnoreMU(false)},
		wantAmount: "Один доллар США десять центов",
	},
	{
		giveAmountString: "1.10", giveAmountFloat64: 1.10, giveLang: lang.RU, giveOpts: []interface{}{ru.WithCur(cur.USD), ru.WithCurConvMU(true), ru.WithCurIgnoreMU(false)},
		wantAmount: "Один доллар США десять центов",
	},
	// Unparseable input must surface ds.ErrParse.
	{
		giveAmountString: "bad", giveAmountFloat64: math.NaN(), giveLang: lang.RU, giveOpts: []interface{}{ru.WithCur(cur.USD), ru.WithCurConvMU(true), ru.WithCurIgnoreMU(false)},
		wantAmount: "", wantErr: ds.ErrParse,
	},
	{
		giveAmountString: "-35.42", giveAmountFloat64: -35.42, giveLang: lang.RU, giveOpts: []interface{}{ru.WithCur(cur.USD), ru.WithCurConvMU(true), ru.WithCurIgnoreMU(false)},
		wantAmount: "Минус тридцать пять долларов США сорок два цента",
	},
	{
		giveAmountString: "-35.42", giveAmountFloat64: -35.42, giveLang: lang.RU, giveOpts: []interface{}{ru.WithCur(cur.USD), ru.WithCurConvMU(false), ru.WithCurIgnoreMU(false)},
		wantAmount: "Минус тридцать пять долларов США 42 цента",
	},
	{
		giveAmountString: "1000000", giveAmountFloat64: 1000000, giveLang: lang.RU, giveOpts: []interface{}{ru.WithCur(cur.USD), ru.WithCurConvMU(false), ru.WithCurIgnoreMU(false)},
		wantAmount: "Один миллион долларов США 00 центов",
	},
	{
		giveAmountString: "1000000", giveAmountFloat64: 1000000, giveLang: lang.RU, giveOpts: []interface{}{ru.WithCur(cur.USD), ru.WithCurConvMU(false), ru.WithCurIgnoreMU(true)},
		wantAmount: "Один миллион долларов США",
	},
	{
		giveAmountString: "1999999.99", giveAmountFloat64: 1999999.99, giveLang: lang.RU, giveOpts: []interface{}{ru.WithCur(cur.USD), ru.WithCurConvMU(true), ru.WithCurIgnoreMU(false)},
		wantAmount: "Один миллион девятьсот девяносто девять тысяч девятьсот девяносто девять долларов США девяносто девять центов",
	},
	{
		giveAmountString: "235.75", giveAmountFloat64: 235.75, giveLang: lang.RU, giveOpts: []interface{}{ru.WithCur(cur.EUR), ru.WithCurConvMU(true), ru.WithCurIgnoreMU(false)},
		wantAmount: "Двести тридцать пять евро семьдесят пять евроцентов",
	},
	{
		giveAmountString: "-181.02", giveAmountFloat64: -181.02, giveLang: lang.RU, giveOpts: []interface{}{ru.WithCur(cur.EUR), ru.WithCurConvMU(true), ru.WithCurIgnoreMU(false)},
		wantAmount: "Минус сто восемьдесят один евро два евроцента",
	},
	{
		giveAmountString: "4541782354.87", giveAmountFloat64: 4541782354.87, giveLang: lang.RU, giveOpts: []interface{}{ru.WithCur(cur.EUR), ru.WithCurConvMU(true), ru.WithCurIgnoreMU(false)},
		wantAmount: "Четыре миллиарда пятьсот сорок один миллион семьсот восемьдесят две тысячи триста пятьдесят четыре евро восемьдесят семь евроцентов",
	},
	{
		giveAmountString: "450000", giveAmountFloat64: 450000, giveLang: lang.RU, giveOpts: []interface{}{ru.WithCur(cur.EUR), ru.WithCurConvMU(false), ru.WithCurIgnoreMU(false)},
		wantAmount: "Четыреста пятьдесят тысяч евро 00 евроцентов",
	},
	// RUB cases exercise Russian gender/number agreement for kopecks.
	{
		giveAmountString: "1.02", giveAmountFloat64: 1.02, giveLang: lang.RU, giveOpts: []interface{}{ru.WithCur(cur.RUB), ru.WithCurConvMU(true), ru.WithCurIgnoreMU(false)},
		wantAmount: "Один рубль две копейки",
	},
	{
		giveAmountString: "2.01", giveAmountFloat64: 2.01, giveLang: lang.RU, giveOpts: []interface{}{ru.WithCur(cur.RUB), ru.WithCurConvMU(true), ru.WithCurIgnoreMU(false)},
		wantAmount: "Два рубля одна копейка",
	},
	{
		giveAmountString: "3.04", giveAmountFloat64: 3.04, giveLang: lang.RU, giveOpts: []interface{}{ru.WithCur(cur.RUB), ru.WithCurConvMU(true), ru.WithCurIgnoreMU(false)},
		wantAmount: "Три рубля четыре копейки",
	},
	{
		giveAmountString: "4.5", giveAmountFloat64: 4.5, giveLang: lang.RU, giveOpts: []interface{}{ru.WithCur(cur.RUB), ru.WithCurConvMU(true), ru.WithCurIgnoreMU(false)},
		wantAmount: "Четыре рубля пятьдесят копеек",
	},
	{
		giveAmountString: "5.69", giveAmountFloat64: 5.69, giveLang: lang.RU, giveOpts: []interface{}{ru.WithCur(cur.RUB), ru.WithCurConvMU(true), ru.WithCurIgnoreMU(false)},
		wantAmount: "Пять рублей шестьдесят девять копеек",
	},
	{
		giveAmountString: "-10.11", giveAmountFloat64: -10.11, giveLang: lang.RU, giveOpts: []interface{}{ru.WithCur(cur.RUB), ru.WithCurConvMU(true), ru.WithCurIgnoreMU(false)},
		wantAmount: "Минус десять рублей одиннадцать копеек",
	},
	{
		giveAmountString: "12.45", giveAmountFloat64: 12.45, giveLang: lang.RU, giveOpts: []interface{}{ru.WithCur(cur.RUB), ru.WithCurConvMU(true), ru.WithCurIgnoreMU(true)},
		wantAmount: "Двенадцать рублей",
	},
	{
		giveAmountString: "13.14", giveAmountFloat64: 13.14, giveLang: lang.RU, giveOpts: []interface{}{ru.WithCur(cur.RUB), ru.WithCurConvMU(true), ru.WithCurIgnoreMU(false)},
		wantAmount: "Тринадцать рублей четырнадцать копеек",
	},
	{
		giveAmountString: "315.16", giveAmountFloat64: 315.16, giveLang: lang.RU, giveOpts: []interface{}{ru.WithCur(cur.RUB), ru.WithCurConvMU(true), ru.WithCurIgnoreMU(false)},
		wantAmount: "Триста пятнадцать рублей шестнадцать копеек",
	},
	{
		giveAmountString: "5617.18", giveAmountFloat64: 5617.18, giveLang: lang.RU, giveOpts: []interface{}{ru.WithCur(cur.RUB), ru.WithCurConvMU(true), ru.WithCurIgnoreMU(false)},
		wantAmount: "Пять тысяч шестьсот семнадцать рублей восемнадцать копеек",
	},
	{
		giveAmountString: "100", giveAmountFloat64: 100, giveLang: lang.RU, giveOpts: []interface{}{ru.WithCur(cur.RUB), ru.WithCurConvMU(false), ru.WithCurIgnoreMU(false)},
		wantAmount: "Сто рублей 00 копеек",
	},
	{
		giveAmountString: "100", giveAmountFloat64: 100, giveLang: lang.RU, giveOpts: []interface{}{ru.WithCur(cur.RUB), ru.WithCurConvMU(true), ru.WithCurIgnoreMU(false)},
		wantAmount: "Сто рублей ноль копеек",
	},
	{
		giveAmountString: "10.00000", giveAmountFloat64: 10.00000, giveLang: lang.RU, giveOpts: []interface{}{ru.WithCur(cur.RUB), ru.WithCurConvMU(true), ru.WithCurIgnoreMU(false)},
		wantAmount: "Десять рублей ноль копеек",
	},
	{
		giveAmountString: "1000", giveAmountFloat64: 1000, giveLang: lang.RU, giveOpts: []interface{}{ru.WithCur(cur.RUB), ru.WithCurConvMU(true), ru.WithCurIgnoreMU(true)},
		wantAmount: "Одна тысяча рублей",
	},
	{
		giveAmountString: "10000", giveAmountFloat64: 10000, giveLang: lang.RU, giveOpts: []interface{}{ru.WithCur(cur.RUB), ru.WithCurConvMU(false), ru.WithCurIgnoreMU(false)},
		wantAmount: "Десять тысяч рублей 00 копеек",
	},
	{
		giveAmountString: "100000", giveAmountFloat64: 100000, giveLang: lang.RU, giveOpts: []interface{}{ru.WithCur(cur.RUB), ru.WithCurConvMU(true), ru.WithCurIgnoreMU(false)},
		wantAmount: "Сто тысяч рублей ноль копеек",
	},
	{
		giveAmountString: "10000000", giveAmountFloat64: 10000000, giveLang: lang.RU, giveOpts: []interface{}{ru.WithCur(cur.RUB), ru.WithCurConvMU(true), ru.WithCurIgnoreMU(false)},
		wantAmount: "Десять миллионов рублей ноль копеек",
	},
	{
		giveAmountString: "100000000", giveAmountFloat64: 100000000, giveLang: lang.RU, giveOpts: []interface{}{ru.WithCur(cur.RUB), ru.WithCurConvMU(true), ru.WithCurIgnoreMU(false)},
		wantAmount: "Сто миллионов рублей ноль копеек",
	},
	{
		giveAmountString: "1000000000", giveAmountFloat64: 1000000000, giveLang: lang.RU, giveOpts: []interface{}{ru.WithCur(cur.RUB), ru.WithCurConvMU(true), ru.WithCurIgnoreMU(false)},
		wantAmount: "Один миллиард рублей ноль копеек",
	},
	{
		giveAmountString: "2000000000", giveAmountFloat64: 2000000000, giveLang: lang.RU, giveOpts: []interface{}{ru.WithCur(cur.RUB), ru.WithCurConvMU(true), ru.WithCurIgnoreMU(false)},
		wantAmount: "Два миллиарда рублей ноль копеек",
	},
	{
		giveAmountString: "1.01", giveAmountFloat64: 1.01, giveLang: lang.RU, giveOpts: []interface{}{ru.WithCur(cur.KZT), ru.WithCurConvMU(true), ru.WithCurIgnoreMU(false)},
		wantAmount: "Одна тенге одна тиын",
	},
	{
		giveAmountString: "3.02", giveAmountFloat64: 3.02, giveLang: lang.RU, giveOpts: []interface{}{ru.WithCur(cur.KZT), ru.WithCurConvMU(true), ru.WithCurIgnoreMU(false)},
		wantAmount: "Три тенге две тиын",
	},
	{
		giveAmountString: "2.45", giveAmountFloat64: 2.45, giveLang: lang.RU, giveOpts: []interface{}{ru.WithCur(cur.KZT), ru.WithCurConvMU(true), ru.WithCurIgnoreMU(false)},
		wantAmount: "Две тенге сорок пять тиын",
	},
	{
		giveAmountString: "125545215.45", giveAmountFloat64: 125545215.45, giveLang: lang.RU, giveOpts: []interface{}{ru.WithCur(cur.KZT), ru.WithCurConvMU(false), ru.WithCurIgnoreMU(false)},
		wantAmount: "Сто двадцать пять миллионов пятьсот сорок пять тысяч двести пятнадцать тенге 45 тиын",
	},
	// Kazakh-language cases.
	{
		giveAmountString: "187.51", giveAmountFloat64: 187.51, giveLang: lang.KZ, giveOpts: []interface{}{kz.WithCur(cur.KZT), kz.WithCurConvMU(true), kz.WithCurIgnoreMU(false)},
		wantAmount: "жүз сексен жеті теңге елу бір тиын",
	},
	{
		giveAmountString: "-1.05", giveAmountFloat64: -1.05, giveLang: lang.KZ, giveOpts: []interface{}{kz.WithCur(cur.KZT), kz.WithCurConvMU(true), kz.WithCurIgnoreMU(false)},
		wantAmount: "минус бір теңге бес тиын",
	},
	{
		giveAmountString: "92.5059", giveAmountFloat64: 92.5059, giveLang: lang.KZ, giveOpts: []interface{}{kz.WithCur(cur.KZT), kz.WithCurConvMU(true), kz.WithCurIgnoreMU(false)},
		wantAmount: "тоқсан екі теңге елу тиын",
	},
	{
		giveAmountString: "92.5059", giveAmountFloat64: 92.5059, giveLang: lang.KZ, giveOpts: []interface{}{kz.WithCur(cur.KZT), kz.WithCurConvMU(true), kz.WithCurIgnoreMU(false)},
		wantAmount: "тоқсан екі теңге елу тиын",
	},
	{
		giveAmountString: "bad", giveAmountFloat64: math.Inf(0), giveLang: lang.KZ, giveOpts: []interface{}{kz.WithCur(cur.KZT), kz.WithCurConvMU(true), kz.WithCurIgnoreMU(false)},
		wantAmount: "", wantErr: ds.ErrParse,
	},
	{
		giveAmountString: "100", giveAmountFloat64: 100, giveLang: lang.KZ, giveOpts: []interface{}{kz.WithCur(cur.KZT), kz.WithCurConvMU(true), kz.WithCurIgnoreMU(false)},
		wantAmount: "жүз теңге нөл тиын",
	},
	{
		giveAmountString: "1000", giveAmountFloat64: 1000, giveLang: lang.KZ, giveOpts: []interface{}{kz.WithCur(cur.KZT), kz.WithCurConvMU(true), kz.WithCurIgnoreMU(true)},
		wantAmount: "бір мың теңге",
	},
	{
		giveAmountString: "10000", giveAmountFloat64: 10000, giveLang: lang.KZ, giveOpts: []interface{}{kz.WithCur(cur.KZT), kz.WithCurConvMU(true), kz.WithCurIgnoreMU(true)},
		wantAmount: "он мың теңге",
	},
	{
		giveAmountString: "10000", giveAmountFloat64: 10000, giveLang: lang.KZ, giveOpts: []interface{}{kz.WithCur(cur.USD), kz.WithCurConvMU(true), kz.WithCurIgnoreMU(true)},
		wantAmount: "он мың АҚШ доллары",
	},
	{
		giveAmountString: "5956.5", giveAmountFloat64: 5956.5, giveLang: lang.KZ, giveOpts: []interface{}{kz.WithCur(cur.USD), kz.WithCurConvMU(true), kz.WithCurIgnoreMU(false)},
		wantAmount: "бес мың тоғыз жүз елу алты АҚШ доллары елу цент",
	},
	{
		giveAmountString: "964913.39", giveAmountFloat64: 964913.39, giveLang: lang.KZ, giveOpts: []interface{}{kz.WithCur(cur.EUR), kz.WithCurConvMU(true), kz.WithCurIgnoreMU(false)},
		wantAmount: "тоғыз жүз алпыс төрт мың тоғыз жүз он үш еуро отыз тоғыз евроцент",
	},
	{
		giveAmountString: "4.39", giveAmountFloat64: 4.39, giveLang: lang.KZ, giveOpts: []interface{}{kz.WithCur(cur.RUB), kz.WithCurConvMU(true), kz.WithCurIgnoreMU(false)},
		wantAmount: "төрт рубль отыз тоғыз тиын",
	},
	// Invalid language values must surface lang.ErrBadLanguage.
	{
		giveAmountString: "4.39", giveAmountFloat64: 4.39, giveLang: lang.Unknown, giveOpts: []interface{}{kz.WithCur(cur.RUB), kz.WithCurConvMU(true), kz.WithCurIgnoreMU(false)},
		wantAmount: "", wantErr: lang.ErrBadLanguage,
	},
	{
		giveAmountString: "4.39", giveAmountFloat64: 4.39, giveLang: lang.Lang(10), giveOpts: []interface{}{kz.WithCur(cur.RUB), kz.WithCurConvMU(true), kz.WithCurIgnoreMU(false)},
		wantAmount: "", wantErr: lang.ErrBadLanguage,
	},
	{
		giveAmountString: "4", giveAmountFloat64: 4, giveLang: lang.EN, giveOpts: []interface{}{en.WithCur(cur.USD)},
		wantAmount: "four dollars and 00 cents",
	},
}
// TestString checks String against every entry of the shared testCases table:
// the returned error must match wantErr, and on success the spelled-out
// amount must equal wantAmount (compared case-insensitively).
func TestString(t *testing.T) {
	for _, tc := range testCases {
		amount, err := String(tc.giveAmountString, tc.giveLang, tc.giveOpts...)
		if !errors.Is(err, tc.wantErr) {
			t.Errorf("%s: \nexp: '%s' \ngot: '%s'", tc.giveAmountString, tc.wantErr, err)
			return
		}
		if !strings.EqualFold(amount, tc.wantAmount) {
			t.Errorf("%s: \nexp: '%s' \ngot: '%s'", tc.giveAmountString, tc.wantAmount, amount)
		}
	}
}
// TestMustString checks MustString against the shared testCases table: error
// rows must yield an empty string, and every row's output must equal
// wantAmount (compared case-insensitively; error rows expect "").
func TestMustString(t *testing.T) {
	for _, tc := range testCases {
		amount := MustString(tc.giveAmountString, tc.giveLang, tc.giveOpts...)
		if tc.wantErr != nil && amount != "" {
			t.Errorf("expected empty string got %s", amount)
			return
		}
		if !strings.EqualFold(amount, tc.wantAmount) {
			t.Errorf("%s: \nexp: '%s' \ngot: '%s'", tc.giveAmountString, tc.wantAmount, amount)
		}
	}
}
|
<filename>TIOJ/tioj 1603.cpp
// By KRT girl xiplus
#include <bits/stdc++.h>
#define endl '\n'
using namespace std;
int n;
struct T{
int l,r;
long long minn,maxn;
}v[400000];
// Builds the tree structure for leaf range [l, r]: stores each node's range
// and resets its aggregates (min = 1e18 sentinel, max = 0) before inserts.
void init(int p, int l, int r) {
    v[p].l = l;
    v[p].r = r;
    v[p].minn = 1e18;
    v[p].maxn = 0;
    if (l == r) {
        return;
    }
    const int mid = (l + r) / 2;
    init(2 * p, l, mid);
    init(2 * p + 1, mid + 1, r);
}
// Point insert: records val at leaf i, folding it into the min/max of every
// node on the root-to-leaf path (top-down, instead of a bottom-up pull).
void add(int p, int i, long long val) {
    if (val < v[p].minn) v[p].minn = val;
    if (val > v[p].maxn) v[p].maxn = val;
    if (v[p].l == v[p].r) {
        return;
    }
    const int mid = (v[p].l + v[p].r) / 2;
    if (i <= mid) {
        add(2 * p, i, val);
    } else {
        add(2 * p + 1, i, val);
    }
}
// Query accumulators: the caller must reset ansmin to 1e18 and ansmax to 0
// before each call to ans(); results are folded into them, not returned.
long long ansmin,ansmax;
// Range query over [l, r]. Precondition: [l, r] lies within node p's range
// (callers start at the root with 1 <= l <= r <= n).
void ans(int p,int l,int r){
    // Exact match: fold this node's aggregates and stop descending.
    if(v[p].l==l&&v[p].r==r){
        ansmin=min(ansmin,v[p].minn);
        ansmax=max(ansmax,v[p].maxn);
        return ;
    }
    int mid=(v[p].l+v[p].r)/2;
    // Recurse into whichever children the query range overlaps, clipping
    // the range to each child's bounds.
    if(l<=mid){
        ans(p*2,l,min(r,mid));
    }
    if(r>mid){
        ans(p*2+1,max(mid+1,l),r);
    }
}
// Reads n values and answers m range queries: for each query [a, b] prints
// max - min over that range (TIOJ 1603).
int main(){
    // ios::sync_with_stdio(false);
    // cin.tie(0);
    int m;
    cin>>n>>m;
    init(1,1,n);
    long long t;
    // Insert the n array values as tree leaves 1..n.
    for(int q=1;q<=n;q++){
        cin>>t;
        add(1,q,t);
    }
    int a,b;
    // Each query: reset the global accumulators, fold [a, b], print the spread.
    while(m--){
        cin>>a>>b;
        ansmin=1e18;
        ansmax=0;
        ans(1,a,b);
        cout<<ansmax-ansmin<<endl;
    }
}
|
import React, { Component } from 'react';
import { MdClose } from 'react-icons/lib/md';
import { Query, Mutation } from "react-apollo";
import { BrowserRouter as Router, Route, Link } from "react-router-dom";
import { Form, Checkbox } from 'semantic-ui-react';
export default class extends Component {
constructor(props) {
super(props);
}
render() {
return (
<div className="modal">
modal here
</div>
)
}
} |
/**
 * Minimal mutable 2D vector with components `i` and `j`.
 * The `_i`/`_j` backing fields are part of the original shape and are kept
 * so existing code that touches them directly keeps working.
 */
export default class Vector {
    constructor(i, j) {
        this._i = i;
        this._j = j;
    }
    /** Horizontal component. */
    get i() { return this._i; }
    set i(value) { this._i = value; }
    /** Vertical component. */
    get j() { return this._j; }
    set j(value) { this._j = value; }
}
//# sourceMappingURL=vector.js.map |
<reponame>duongnguyensv/tiktok-videos
package main
import (
"fmt"
"os"
"strings"
checkErr "github.com/pikami/tiktok-dl/utils/checkErr"
fileio "github.com/pikami/tiktok-dl/utils/fileio"
)
// resource describes one generated Go source file: the package it belongs
// to, its output file name, and the string constants (name -> value) to emit
// as package-level variables.
type resource struct {
	Package  string
	FileName string
	Values   map[string]string
}
// generate writes the resource as a Go source file into outputDir: a
// machine-generated-file header followed by one exported string variable per
// entry in r.Values (newlines/carriage returns escaped for the literal).
// Note: Go map iteration order is random, so variable order varies per run.
func (r resource) generate() {
	path := fmt.Sprintf("%s/%s", outputDir, r.FileName)
	out, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
	checkErr.CheckErr(err)
	defer out.Close()

	// Header warning readers that the file must not be edited by hand.
	_, err = out.WriteString(fmt.Sprintf(
		"// Package %s - This file is automatically generated.\n"+
			"// Do not edit this file manually.\n"+
			"// Check `/generator/resources.go` to change generated content\n"+
			"package %s\n", r.Package, r.Package))
	checkErr.CheckErr(err)

	// One variable declaration per resource value.
	for name, val := range r.Values {
		val = strings.ReplaceAll(val, "\n", "\\n")
		val = strings.ReplaceAll(val, "\r", "\\r")
		_, err = out.WriteString(fmt.Sprintf("\n//%s -\nvar %s = \"%s\"\n", name, name, val))
		checkErr.CheckErr(err)
	}
}
// fileContentsOrDefault reads the file and returns its contents escaped via
// safeString. If ReadFileToString panics (e.g. the file is missing), the
// deferred recover swallows the panic, prints a message, and the function
// returns the zero value "" — that empty string is the "default".
func fileContentsOrDefault(file string) string {
	defer func() {
		if r := recover(); r != nil {
			fmt.Printf("Failed to load file: %s\n", file)
		}
	}()
	return safeString(fileio.ReadFileToString(file))
}
// safeString makes str safe to embed inside a double-quoted Go string
// literal: double quotes are backslash-escaped and newlines are removed
// entirely. Both substitutions are independent, so a single-pass Replacer
// yields the same result as two sequential ReplaceAll calls.
func safeString(str string) string {
	return strings.NewReplacer("\"", "\\\"", "\n", "").Replace(str)
}
// Entry point: renders every registered resource (the package-level `res`
// slice) to its output file.
func main() {
	for _, item := range res {
		item.generate()
	}
}
|
#!/bin/bash
set -e # everything must succeed: abort on the first failing command.

# Scratch directory for the downloaded article and its converted output.
temp_dir=./.temp
mkdir -p "$temp_dir"

# Download one eLife PDF and convert it to XML via ScienceBeam.
./sciencebeam-download-and-convert.sh \
  "https://cdn.elifesciences.org/articles/32671/elife-32671-v2.pdf" \
  "$temp_dir/elife-32671-v2.xml"
|
echo "####################################################################"
echo "## VPC Test Scripts for CB-Spider IID Working Version "
echo "## VPC: Create -> List -> Get"
echo "####################################################################"
$CBSPIDER_ROOT/interface/spider vpc create --config $CBSPIDER_ROOT/interface/grpc_conf.yaml -i json -d \
'{
"ConnectionName":"'${CONN_CONFIG}'",
"ReqInfo": {
"Name": "vpc-01",
"IPv4_CIDR": "'${IPv4_CIDR}'",
"SubnetInfoList": [
{
"Name": "subnet-01",
"IPv4_CIDR": "'${IPv4_CIDR}'"
}
]
}
}'
$CBSPIDER_ROOT/interface/spider vpc list --config $CBSPIDER_ROOT/interface/grpc_conf.yaml --cname "${CONN_CONFIG}"
$CBSPIDER_ROOT/interface/spider vpc get --config $CBSPIDER_ROOT/interface/grpc_conf.yaml --cname "${CONN_CONFIG}" -n vpc-01
|
#!/bin/bash
# SLURM ChIP-seq pipeline for sample SRR8591889:
# QC -> adapter trimming -> Bowtie2 paired-end alignment -> sort/dedup ->
# blacklist filtering -> cleanup of intermediates.
#SBATCH --nodes=1
#SBATCH --ntasks=16
#SBATCH --mem=8gb

module load fastqc
module load trimgalore
module load samtools
module load bowtie2
module load bedtools

### Fastqc for untrimmed files
cd /gpfs/group/pipkin/hdiao/T_Cell_ChIP/0_fastq
fastq_untrimmed_1=/gpfs/group/pipkin/hdiao/T_Cell_ChIP/0_fastq/SRR8591889_1.fastq
fastq_untrimmed_2=/gpfs/group/pipkin/hdiao/T_Cell_ChIP/0_fastq/SRR8591889_2.fastq
fastqc $fastq_untrimmed_1
fastqc $fastq_untrimmed_2

### Trim Galore
# Paired-end trimming; reads shorter than 24 bp after trimming are dropped.
trim_galore --paired --length 24 --stringency 3 $fastq_untrimmed_1 $fastq_untrimmed_2
trim_fastq_end1=/gpfs/group/pipkin/hdiao/T_Cell_ChIP/0_fastq/SRR8591889_1_val_1.fq
trim_fastq_end2=/gpfs/group/pipkin/hdiao/T_Cell_ChIP/0_fastq/SRR8591889_2_val_2.fq

### Fastqc for trimmed files
fastqc $trim_fastq_end1
fastqc $trim_fastq_end2

### Test if trimming is successful
# Fallback: if the trimmed mate-2 file is suspiciously small (<10 kB),
# assume trimming failed and align the untrimmed reads instead.
filesize=$(stat -c%s $trim_fastq_end2)
if (( filesize < 10000 ))
then
    trim_fastq_end1=$fastq_untrimmed_1
    trim_fastq_end2=$fastq_untrimmed_2
fi

### Bowtie2 alignment
# -X 1000 --fr: proper pairs up to 1 kb insert, forward/reverse orientation.
cd /gpfs/group/pipkin/hdiao/T_Cell_ChIP/1_bowtie2
bowtie2_index=/gpfs/group/pipkin/hdiao/ref_resources/mm/release102/GRCm38
sam_name=SRR8591889.sam
bowtie2 -p 16 -x $bowtie2_index -X 1000 --fr -1 $trim_fastq_end1 -2 $trim_fastq_end2 -S $sam_name

### Convert/sort/filter
# NOTE(review): several variable names and extensions disagree with their
# contents ($bam_name_srt ends in ".sam", $sam_name_srt_dupr is fed to
# "view -bS" as SAM). Newer samtools versions infer output format from the
# "-o" extension, so confirm the intended SAM/BAM format at each step before
# renaming anything.
bam_name=SRR8591889.bam
bam_name_srt=SRR8591889_srt.sam
sam_name_srt_dupr=SRR8591889_srt_dupr.sam
bam_name_srt_dupr=SRR8591889_srt_dupr.bam
flb_bam_name=SRR8591889_srt_dupr_flb.bam
blacklist_bed=/gpfs/group/pipkin/hdiao/ref_resources/mm/mm10_blacklisted_2016_nochr.bed
samtools view -bS $sam_name > $bam_name
samtools sort $bam_name -o $bam_name_srt
samtools rmdup -S $bam_name_srt $sam_name_srt_dupr
samtools view -bS $sam_name_srt_dupr > $bam_name_srt_dupr
# Drop reads overlapping the mm10 blacklist (-v keeps non-overlapping reads).
bedtools intersect -abam $bam_name_srt_dupr -b $blacklist_bed -v > $flb_bam_name

### Remove intermediate files
# Only delete intermediates if the filtered BAM looks non-trivially sized.
filesize_preFlb=$(stat -c%s $bam_name_srt_dupr)
filesize=$(stat -c%s $flb_bam_name)
echo $filesize_preFlb $filesize >> SRR8591889_bamSizes_pre_post_flb.txt
if (( filesize > 10000 ))
then
    rm $sam_name
    rm $bam_name
    rm $bam_name_srt
    rm $sam_name_srt_dupr
    rm $bam_name_srt_dupr
    rm $trim_fastq_end1
    rm $trim_fastq_end2
fi
|
#!/bin/bash
# Environment setup and simulation chain for CLAS6 analysis:
# event generation (aao_rad) -> detector simulation (gsim) -> smearing (gpp).

### Detector/analysis environment
export CLAS_PARMS=/data/parms
export ROOTSYS=/usr/local/root
export MYSQLINC=/usr/include/mysql
export MYSQLLIB=/usr/lib64/mysql
export CLAS6=/usr/local/clas-software/build
export PATH=$CLAS6/bin:$PATH

### CERNLIB
export CERN=/usr/local/cernlib/x86_64_rhel6
export CERN_LEVEL=2005
export CERN_ROOT=$CERN/$CERN_LEVEL
export CVSCOSRC=$CERN/$CERN_LEVEL/src
export PATH=$CERN/$CERN_LEVEL/src:$PATH
export CERN_LIB=$CERN_ROOT/lib
export CERNLIB=$CERN_ROOT/lib
export CERN_BIN=$CERN_ROOT/bin

### ClasTool and ROOT
export CLAS_TOOL=/usr/local/clas-software/analysis/ClasTool
export PATH=$PATH:$CLAS_TOOL/bin/Linux
export LD_LIBRARY_PATH=$ROOTSYS/lib:$CLAS_TOOL/slib/Linux:$CLAS6/lib
source $ROOTSYS/bin/thisroot.sh

### Calibration database connection
export CLAS_CALDB_DBNAME="calib"
export CLAS_CALDB_PASS=""
export CLAS_CALDB_RUNINDEX="RunIndex"
export RECSIS_RUNTIME="/recsis"
#export CLAS_CALDB_HOST=172.21.139.25
export CLAS_CALDB_HOST=172.21.139.204
#export CLAS_CALDB_HOST=71.68.137.124
export CLAS_CALDB_USER=root

# Despite its name, this echoes to BOTH stdout and stderr so the marker lines
# land in every log stream.
echoerr() { printf "%s\n" "$*" >&1; printf "%s\n" "$*" >&2; }
#set -e

STARTTIME=$(date +%s)

# Radiative event generator, configured via aao_rad.inp.
echoerr "============ aao_rad ============"
aao_rad < aao_rad.inp
echoerr "============ aao_rad ============"

# GEANT-based detector simulation of the generated events.
echoerr "============ gsim_bat ============"
#gsim_bat -nomcdata -ffread gsim.inp -mcin aao_rad.evt -bosout gsim.bos
gsim_bat -ffread gsim.inp -mcin aao_rad.evt -bosout gsim.bos
#cp gsim.bos gsim_no_gpp.bos
echoerr "============ gsim_bat ============"

# Post-processing/smearing; -R selects the run number for calibration lookup.
echoerr "============ gpp ============"
gpp -ouncooked.bos -a2.35 -b2.35 -c2.35 -f0.97 -P0x1b -R23500 gsim.bos
#gpp -ouncooked.bos -R23500 gsim.bos
echoerr "============ gpp ============"

# Reconstruction step is currently disabled.
echoerr "============ user_ana ============"
#user_ana -t user_ana.tcl
#user_ana -t user_ana.tcl | grep -v HFITGA | grep -v HFITH | grep -v HFNT
echoerr "============ user_ana ============"
touch all.root
#h10maker -rpm cooked.bos all.root

ENDTIME=$(date +%s)
echo "Time for $HOSTNAME: $(($ENDTIME-$STARTTIME))"
|
<reponame>dzh/coca<filename>coca-co/src/test/java/coca/co/TestBasicCo.java
/**
*
*/
package coca.co;
import coca.co.ins.AckCoIns;
import coca.co.ins.InsConst;
import coca.co.util.IDUtil;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.UnsupportedEncodingException;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
/**
* @author dzh
* @date Sep 2, 2017 8:39:11 PM
* @since 0.0.1
*/
/**
 * Scratch/exploratory tests around Co identifiers and instructions.
 * NOTE(review): only {@link #testIns()} carries {@code @Test}; the other
 * methods never run under JUnit — presumably kept as manual experiments.
 * Confirm whether they should be annotated or deleted.
 */
public class TestBasicCo {
    static Logger LOG = LoggerFactory.getLogger(TestBasicCo.class);
    // Prints byte lengths of a few strings; not a JUnit test (no @Test).
    public void testCoId() throws UnsupportedEncodingException {
        // BasicCo co = new BasicCo();
        // System.out.println(co.id());
        // System.out.println(co.id().length());
        String a = "";
        System.out.println(a.getBytes("utf-8").length);
        System.out.println("a".getBytes().length);
        System.out.println("z".getBytes().length);
        System.out.println((byte) 127);
    }
    /** Logs the JOIN constant and a freshly built ACK instruction. */
    @Test
    public void testIns() {
        LOG.info(InsConst.JOIN.toString());
        LOG.info(new AckCoIns(InsConst.ACK).id(IDUtil.uuid()).toString());
    }
    // Prints the 16 raw MD5 digest bytes of "coca"; not a JUnit test (no @Test).
    public void testMD5() throws NoSuchAlgorithmException {
        MessageDigest md = MessageDigest.getInstance("MD5");
        byte[] md5 = md.digest("coca".getBytes());
        for (byte b : md5)
            System.out.println(b);
        System.out.println(md5.length);
    }
    // Prints magic numbers and their top byte as a char; not a JUnit test (no @Test).
    public void testMagic() {
        int magic = 0x43834361;// CoCa
        System.out.println(magic);
        System.out.println((char) (magic >>> 24));
        magic = 0x63836361;// coca
        System.out.println(magic);
        System.out.println((char) (magic >>> 24));
    }
}
|
#!/bin/sh
# Rebuild and run the agent container from scratch.
# Remove any previous container (no set -e, so a missing container is harmless).
docker rm -f agent1
docker build --no-cache --rm=true -t agent .
# Interactive run: HTTP on 8888, debug/pprof on 6060; container removed on exit.
docker run --rm=true --name agent1 -h agent_dev -it -p 8888:8888 -p 6060:6060 -e SERVICE_ID=agent1 agent
|
<reponame>andreapatri/cms_journal
var log = require('../utils/log')
module.exports = (encoding) => ({options, res, body, raw}) => {
raw = body
body = Buffer.from(body).toString(encoding)
log({string: {res, body}})
return {options, res, body, raw}
}
|
-- Lookup table of phone-number types (e.g. home, mobile, work).
CREATE TABLE [dbo].[PhoneType] (
    [ID]   INT          IDENTITY (1, 1) NOT NULL, -- auto-incrementing surrogate key
    [Type] VARCHAR (50) NULL,                     -- human-readable type label
    CONSTRAINT [PK_PhoneType] PRIMARY KEY CLUSTERED ([ID] ASC)
);
|
// Generated by script, don't edit it please.
import createSvgIcon from '../../createSvgIcon';
import CogSvg from '@rsuite/icon-font/lib/legacy/Cog';

// Legacy "cog" icon component: wraps the raw SVG glyph with rsuite's icon
// wrapper (aria label, category, display name).
const Cog = createSvgIcon({
  as: CogSvg,
  ariaLabel: 'cog',
  category: 'legacy',
  displayName: 'Cog'
});

export default Cog;
|
package no4
import (
"testing"
)
// TestIsPalindrome runs isPalindrome over a table that mixes ASCII and
// multi-byte (CJK) strings; the CJK cases only pass if the implementation
// compares runes rather than raw bytes.
func TestIsPalindrome(t *testing.T) {
	cases := []struct {
		In   string
		Want bool
	}{
		{"madam", true},
		{"madae", false},
		{"abccba", true},
		{"我是我", true},
		{"我我是我我", true},
		{"我我是妮妮", false},
	}
	for i, tc := range cases {
		if got := isPalindrome(tc.In); got != tc.Want {
			t.Fatalf("No.%d: %v != %v\n", i+1, got, tc.Want)
		}
	}
}
// BenchmarkIsPalindrome measures isPalindrome on a short ASCII palindrome.
func BenchmarkIsPalindrome(b *testing.B) {
	for i := 0; i < b.N; i++ {
		isPalindrome("madam")
	}
}
|
#!/usr/bin/env bash
# CI entry point: run the Rails test suite, then audit dependency licenses.
# Rails Tests
bundle exec bin/rails test
# License Finder
bundle exec license_finder
|
<gh_stars>0
package co.com.bancolombia.jms.sample.app.config;
import co.com.bancolombia.commons.jms.utils.ReactiveReplyRouter;
import co.com.bancolombia.jms.sample.domain.model.Result;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.ibm.mq.spring.boot.MQConnectionFactoryCustomizer;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import javax.jms.JMSException;
import javax.jms.JMSRuntimeException;
import static com.ibm.msg.client.wmq.common.CommonConstants.WMQ_CLIENT_RECONNECT;
/**
 * Spring configuration for the JMS sample: JSON mapper, IBM MQ connection
 * customization, and the reactive reply router used to correlate replies.
 */
@Configuration
public class Config {
    /** Shared Jackson mapper for (de)serializing message payloads. */
    @Bean
    public ObjectMapper objectMapper() {
        return new ObjectMapper();
    }

    // Sample connection factory customization
    /**
     * Enables MQ client auto-reconnect on the connection factory; the checked
     * JMSException is rethrown as the unchecked JMSRuntimeException because
     * the customizer interface does not declare checked exceptions.
     */
    @Bean
    public MQConnectionFactoryCustomizer cfCustomizer() {
        return mqConnectionFactory -> {
            try {
                mqConnectionFactory.setClientReconnectOptions(WMQ_CLIENT_RECONNECT);
            } catch (JMSException e) {
                throw new JMSRuntimeException(e.getErrorCode(), e.getMessage(), e);
            }
        };
    }

    /** Router that matches asynchronous {@code Result} replies to waiting requests. */
    @Bean
    public ReactiveReplyRouter<Result> resultReactiveReplyRouter() {
        return new ReactiveReplyRouter<>();
    }
}
|
import { is, check } from './utils';
import proc from './proc';
import { emitter } from './channel';
// Factory for the redux-saga middleware (transpiled ES5 output).
// Validates options, wires an action emitter between the store and running
// sagas, and exposes `run` for starting sagas after the store is created.
export default function sagaMiddlewareFactory() {
  var options = arguments.length <= 0 || arguments[0] === undefined ? {} : arguments[0];
  var runSagaDynamically = void 0;

  // Passing a generator directly was removed in 0.10.0; fail loudly
  // (terse message in production, verbose migration hint in development).
  if (is.func(options)) {
    if (process.env.NODE_ENV === 'production') {
      throw new Error('Saga middleware no longer accept Generator functions. Use sagaMiddleware.run instead');
    } else {
      throw new Error('You passed a function to the Saga middleware. You are likely trying to start a Saga by directly passing it to the middleware. This is no longer possible starting from 0.10.0. To run a Saga, you must do it dynamically AFTER mounting the middleware into the store.\n Example:\n import createSagaMiddleware from \'redux-saga\'\n ... other imports\n\n const sagaMiddleware = createSagaMiddleware()\n const store = createStore(reducer, applyMiddleware(sagaMiddleware))\n sagaMiddleware.run(saga, ...args)\n ');
    }
  }

  if (options.logger && !is.func(options.logger)) {
    throw new Error('`options.logger` passed to the Saga middleware is not a function!');
  }

  if (options.onerror && !is.func(options.onerror)) {
    throw new Error('`options.onerror` passed to the Saga middleware is not a function!');
  }

  // The actual redux middleware: captures getState/dispatch and exposes
  // runSaga through the closed-over runSagaDynamically slot.
  function sagaMiddleware(_ref) {
    var getState = _ref.getState;
    var dispatch = _ref.dispatch;
    runSagaDynamically = runSaga;
    var sagaEmitter = emitter();

    // Starts one saga: proc() drives the generator, subscribed to store actions.
    function runSaga(saga) {
      for (var _len = arguments.length, args = Array(_len > 1 ? _len - 1 : 0), _key = 1; _key < _len; _key++) {
        args[_key - 1] = arguments[_key];
      }

      return proc(saga.apply(undefined, args), sagaEmitter.subscribe, dispatch, getState, options, 0, saga.name);
    }

    return function (next) {
      return function (action) {
        var result = next(action); // hit reducers
        // Reducers see the action first, then sagas are notified.
        sagaEmitter.emit(action);
        return result;
      };
    };
  }

  // Public entry point; only valid after applyMiddleware has mounted the
  // middleware (which populates runSagaDynamically).
  sagaMiddleware.run = function (saga) {
    for (var _len2 = arguments.length, args = Array(_len2 > 1 ? _len2 - 1 : 0), _key2 = 1; _key2 < _len2; _key2++) {
      args[_key2 - 1] = arguments[_key2];
    }

    check(runSagaDynamically, is.notUndef, 'Before running a Saga, you must mount the Saga middleware on the Store using applyMiddleware');
    check(saga, is.func, 'sagaMiddleware.run(saga, ...args): saga argument must be a Generator function!');
    return runSagaDynamically.apply(undefined, [saga].concat(args));
  };

  return sagaMiddleware;
}
// Problem 10164: Paths on a Grid
// 2019.05.22
// Dynamic programming
#include<iostream>
using namespace std;
int d[16][16]; // d[i][j]: number of paths from the start cell to cell (i, j)
// Counts monotone (right/down) lattice paths on an n x m grid; if k != 0 the
// path must pass through the cell numbered k (cells numbered row-major from 1).
int main()
{
    int n, m, k;
    cin >> n >> m >> k;
    // First column and first row are reachable in exactly one way.
    for (int i = 1; i <= n; i++)
    {
        d[i][1] = 1;
    }
    for (int i = 1; i <= m; i++)
    {
        d[1][i] = 1;
    }
    // Standard DP: paths to (i, j) come from above or from the left.
    for (int i = 2; i <= n; i++)
    {
        for (int j = 2; j <= m; j++)
        {
            d[i][j] = d[i - 1][j] + d[i][j - 1];
        }
    }
    if (k == 0)
    {
        cout << d[n][m] << endl;
    }
    else
    {
        // Convert the 1-based cell number k into its row x and column y.
        int x = k / m + (k%m > 0 ? 1 : 0); // 1-based, so round the division up
        int y = k - (x - 1)*m;
        // Paths through (x, y) = paths start->(x,y) times paths (x,y)->(n,m);
        // the second factor reuses the table on the mirrored sub-grid.
        cout << d[x][y] * d[n - x + 1][m - y + 1] << endl;
    }
    return 0;
}
|
#!/usr/bin/env bash
# Sync these dotfiles into $HOME (excluding repo metadata) and reload the shell profile.
cd "$(dirname "${BASH_SOURCE}")"
rsync --exclude ".git/" --exclude ".DS_Store" --exclude "bootstrap.sh" \
    --exclude "README.md" --exclude "LICENSE-MIT.txt" -avh --no-perms . ~
source ~/.bash_profile
|
use std::ops::{Deref, DerefMut};
/// Singly linked list node: one value plus an owning pointer to the next node.
struct Node<T> {
    value: T,
    next: Option<Box<Node<T>>>,
}
impl<T> DerefMut for Node<T> {
fn deref_mut(&mut self) -> &mut T {
&mut self.value
}
}
/// Singly linked list holding an owning pointer to its first node
/// (`None` when the list is empty).
struct LinkedList<T> {
    head: Option<Box<Node<T>>>,
}
impl<T> LinkedList<T> {
fn new() -> Self {
LinkedList { head: None }
}
fn push_front(&mut self, value: T) {
let new_node = Node {
value,
next: self.head.take(),
};
self.head = Some(Box::new(new_node));
}
fn pop_front(&mut self) -> Option<T> {
self.head.take().map(|mut node| {
self.head = node.next.take();
node.value
})
}
fn front(&self) -> Option<&T> {
self.head.as_ref().map(|node| &node.value)
}
fn front_mut(&mut self) -> Option<&mut T> {
self.head.as_mut().map(|node| &mut node.value)
}
} |
// Performs a JSON GET request against $url with cURL and echoes either the
// raw response body or the transport error.
$curl = curl_init();

curl_setopt_array($curl, [
    CURLOPT_URL => $url,
    CURLOPT_RETURNTRANSFER => true,   // return body from curl_exec instead of printing
    CURLOPT_ENCODING => "",           // accept any encoding curl supports
    CURLOPT_MAXREDIRS => 10,
    CURLOPT_TIMEOUT => 30,
    CURLOPT_HTTP_VERSION => CURL_HTTP_VERSION_1_1,
    CURLOPT_CUSTOMREQUEST => "GET",
    CURLOPT_HTTPHEADER => [
        "accept: application/json",
        "content-type: application/json"
    ],
]);

$response = curl_exec($curl);
$err = curl_error($curl);

curl_close($curl);

if ($err) {
    echo "cURL Error #:" . $err;
} else {
    echo $response;
}
<reponame>kv-zuiwanyuan/kudu
/*
* Copyright (C) 2010-2012 The Async HBase Authors. All rights reserved.
* Portions copyright 2014 Cloudera, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* - Redistributions of source code must retain the aabove copyright notice,
* this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of the StumbleUpon nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package org.kududb.client;
import com.google.protobuf.ByteString;
import com.google.protobuf.ZeroCopyLiteralByteString;
import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.buffer.ChannelBuffers;
import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.Channels;
import org.kududb.annotations.InterfaceAudience;
import org.kududb.rpc.RpcHeader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.security.auth.callback.Callback;
import javax.security.auth.callback.CallbackHandler;
import javax.security.auth.callback.NameCallback;
import javax.security.auth.callback.PasswordCallback;
import javax.security.auth.callback.UnsupportedCallbackException;
import javax.security.sasl.RealmCallback;
import javax.security.sasl.RealmChoiceCallback;
import javax.security.sasl.Sasl;
import javax.security.sasl.SaslClient;
import javax.security.sasl.SaslException;
import java.util.Map;
import java.util.TreeMap;
@InterfaceAudience.Private
public class SecureRpcHelper {
public static final Logger LOG = LoggerFactory.getLogger(TabletClient.class);
private final TabletClient client;
private SaslClient saslClient;
public static final String SASL_DEFAULT_REALM = "default";
public static final Map<String, String> SASL_PROPS =
new TreeMap<String, String>();
private static final int SASL_CALL_ID = -33;
private volatile boolean negoUnderway = true;
private boolean useWrap = false; // no QOP at the moment
public static final String USER_AND_PASSWORD = "<PASSWORD>";
public SecureRpcHelper(TabletClient client) {
this.client = client;
try {
saslClient = Sasl.createSaslClient(new String[]{"PLAIN"
}, null, null, SASL_DEFAULT_REALM,
SASL_PROPS, new SaslClientCallbackHandler(USER_AND_PASSWORD, USER_AND_PASSWORD));
} catch (SaslException e) {
throw new RuntimeException("Could not create the SASL client", e);
}
}
public void sendHello(Channel channel) {
sendNegotiateMessage(channel);
}
private void sendNegotiateMessage(Channel channel) {
RpcHeader.SaslMessagePB.Builder builder = RpcHeader.SaslMessagePB.newBuilder();
builder.setState(RpcHeader.SaslMessagePB.SaslState.NEGOTIATE);
sendSaslMessage(channel, builder.build());
}
private void sendSaslMessage(Channel channel, RpcHeader.SaslMessagePB msg) {
RpcHeader.RequestHeader.Builder builder = RpcHeader.RequestHeader.newBuilder();
builder.setCallId(SASL_CALL_ID);
RpcHeader.RequestHeader header = builder.build();
ChannelBuffer buffer = KuduRpc.toChannelBuffer(header, msg);
Channels.write(channel, buffer);
}
public ChannelBuffer handleResponse(ChannelBuffer buf, Channel chan) throws SaslException {
if (!saslClient.isComplete() || negoUnderway) {
RpcHeader.SaslMessagePB response = parseSaslMsgResponse(buf);
switch (response.getState()) {
case NEGOTIATE:
handleNegotiateResponse(chan, response);
break;
case CHALLENGE:
handleChallengeResponse(chan, response);
break;
case SUCCESS:
handleSuccessResponse(chan, response);
break;
default:
System.out.println("Wrong sasl state");
}
return null;
}
return unwrap(buf);
}
/**
* When QOP of auth-int or auth-conf is selected
* This is used to unwrap the contents from the passed
* buffer payload.
*/
public ChannelBuffer unwrap(ChannelBuffer payload) {
if(!useWrap) {
return payload;
}
int len = payload.readInt();
try {
payload =
ChannelBuffers.wrappedBuffer(saslClient.unwrap(payload.readBytes(len).array(), 0, len));
return payload;
} catch (SaslException e) {
throw new IllegalStateException("Failed to unwrap payload", e);
}
}
/**
* When QOP of auth-int or auth-conf is selected
* This is used to wrap the contents
* into the proper payload (ie encryption, signature, etc)
*/
public ChannelBuffer wrap(ChannelBuffer content) {
if(!useWrap) {
return content;
}
try {
byte[] payload = new byte[content.writerIndex()];
content.readBytes(payload);
byte[] wrapped = saslClient.wrap(payload, 0, payload.length);
ChannelBuffer ret = ChannelBuffers.wrappedBuffer(new byte[4 + wrapped.length]);
ret.clear();
ret.writeInt(wrapped.length);
ret.writeBytes(wrapped);
if (LOG.isDebugEnabled()) {
LOG.debug("Wrapped payload: "+Bytes.pretty(ret));
}
return ret;
} catch (SaslException e) {
throw new IllegalStateException("Failed to wrap payload", e);
}
}
private RpcHeader.SaslMessagePB parseSaslMsgResponse(ChannelBuffer buf) {
CallResponse response = new CallResponse(buf);
RpcHeader.ResponseHeader responseHeader = response.getHeader();
int id = responseHeader.getCallId();
if (id != SASL_CALL_ID) {
throw new IllegalStateException("Received a call that wasn't for SASL");
}
RpcHeader.SaslMessagePB.Builder saslBuilder = RpcHeader.SaslMessagePB.newBuilder();
KuduRpc.readProtobuf(response.getPBMessage(), saslBuilder);
return saslBuilder.build();
}
private void handleNegotiateResponse(Channel chan, RpcHeader.SaslMessagePB response) throws
SaslException {
RpcHeader.SaslMessagePB.SaslAuth negotiatedAuth = null;
for (RpcHeader.SaslMessagePB.SaslAuth auth : response.getAuthsList()) {
negotiatedAuth = auth;
}
byte[] saslToken = new byte[0];
if (saslClient.hasInitialResponse())
saslToken = saslClient.evaluateChallenge(saslToken);
RpcHeader.SaslMessagePB.Builder builder = RpcHeader.SaslMessagePB.newBuilder();
if (saslToken != null) {
builder.setToken(ZeroCopyLiteralByteString.wrap(saslToken));
}
builder.setState(RpcHeader.SaslMessagePB.SaslState.INITIATE);
builder.addAuths(negotiatedAuth);
sendSaslMessage(chan, builder.build());
}
private void handleChallengeResponse(Channel chan, RpcHeader.SaslMessagePB response) throws
SaslException {
ByteString bs = response.getToken();
byte[] saslToken = saslClient.evaluateChallenge(bs.toByteArray());
if (saslToken == null) {
throw new IllegalStateException("Not expecting an empty token");
}
RpcHeader.SaslMessagePB.Builder builder = RpcHeader.SaslMessagePB.newBuilder();
builder.setToken(ZeroCopyLiteralByteString.wrap(saslToken));
builder.setState(RpcHeader.SaslMessagePB.SaslState.RESPONSE);
sendSaslMessage(chan, builder.build());
}
private void handleSuccessResponse(Channel chan, RpcHeader.SaslMessagePB response) {
LOG.debug("nego finished");
negoUnderway = false;
client.sendContext(chan);
}
private static class SaslClientCallbackHandler implements CallbackHandler {
private final String userName;
private final char[] userPassword;
public SaslClientCallbackHandler(String user, String password) {
this.userName = user;
this.userPassword = <PASSWORD>();
}
public void handle(Callback[] callbacks)
throws UnsupportedCallbackException {
NameCallback nc = null;
PasswordCallback pc = null;
RealmCallback rc = null;
for (Callback callback : callbacks) {
if (callback instanceof RealmChoiceCallback) {
continue;
} else if (callback instanceof NameCallback) {
nc = (NameCallback) callback;
} else if (callback instanceof PasswordCallback) {
pc = (PasswordCallback) callback;
} else if (callback instanceof RealmCallback) {
rc = (RealmCallback) callback;
} else {
throw new UnsupportedCallbackException(callback,
"Unrecognized SASL client callback");
}
}
if (nc != null) {
nc.setName(userName);
}
if (pc != null) {
pc.setPassword(<PASSWORD>);
}
if (rc != null) {
rc.setText(rc.getDefaultText());
}
}
}
}
|
#!/bin/bash
## -----------------------------------------------------------------------------
## Linux Scripts.
## Run tests
##
## @package ojullien\bash\tests
## @license MIT <https://github.com/ojullien/bash-sys/blob/master/LICENSE>
## -----------------------------------------------------------------------------
#set -o errexit
set -o nounset
set -o pipefail

# The ${!array[*]} expansions below require Bash >= 4.
if [[ ${BASH_VERSINFO[0]} -lt 4 ]]; then
    echo "At least Bash version 4 is needed!" >&2
    exit 4
fi

## -----------------------------------------------------------------------------
## Shell scripts directory, eg: /root/work/Shell/tests
## -----------------------------------------------------------------------------
readonly m_DIR_REALPATH="$(realpath "$(dirname "$0")")"

## -----------------------------------------------------------------------------
## Load constants
## NOTE: constant.sh must be sourced before the includes below -- presumably it
## defines m_DIR_SYS and m_TEST_DIR_SYS used later; keep this ordering.
## -----------------------------------------------------------------------------
# shellcheck source=/dev/null
. "${m_DIR_REALPATH}/framework/constant.sh"

## -----------------------------------------------------------------------------
## Includes sources
## -----------------------------------------------------------------------------
# shellcheck source=/dev/null
. "${m_DIR_SYS}/string.sh"
# shellcheck source=/dev/null
. "${m_DIR_SYS}/filesystem.sh"
# shellcheck source=/dev/null
. "${m_DIR_SYS}/option.sh"
# shellcheck source=/dev/null
. "${m_DIR_REALPATH}/framework/library.sh"

## -----------------------------------------------------------------------------
## Trace
## -----------------------------------------------------------------------------
Test::Constant::trace

## -----------------------------------------------------------------------------
## Start
## aPackages[i] is the displayed suite name; aFiles[i] is the matching
## "<name>_test.sh" path fragment. The two arrays must stay index-aligned.
## -----------------------------------------------------------------------------
declare aPackages=("config" "filesystem" "string" "package" "service" "mysql" "mariadb" "ssh")
declare aFiles=("config" "filesystem" "string" "package" "service" "db/mysql" "db/mariadb" "ssh")
declare -i iChoice=-1
# Prompt until a valid index is typed. `read -N 1` consumes exactly one
# character, so this menu supports at most ten entries (choices 0..9).
while ((iChoice>=${#aPackages[*]})) || ((iChoice<0)); do
    String::separateLine
    declare -i iIndex=0
    echo "Packages list:"
    for iIndex in ${!aPackages[*]}
    do
        printf "%4d: %s\n" "$iIndex" "${aPackages[$iIndex]}"
    done
    echo -n "Enter your choice (0..$iIndex): "
    read -r -N 1 iChoice
    echo
done
String::separateLine
String::notice "Today is: $(date -R)"
String::notice "The PID for $(basename "$0") process is: $$"
Console::waitUser

# Source the chosen suite file and invoke its Test::<name>::main entry point.
# shellcheck source=/dev/null
. "${m_TEST_DIR_SYS}/${aFiles[$iChoice]}_test.sh"
Test::"${aPackages[$iChoice]}"::main
Console::waitUser

## -----------------------------------------------------------------------------
## END
## -----------------------------------------------------------------------------
String::notice "Now is: $(date -R)"
|
# Evaluate a fine-tuned language model (checkpoint 7 of the 512+0+512-rare run)
# on the WikiText-103 raw validation split, batch size 1, using the
# truncate_and_pad_first_half_full augmentation and last-element evaluation.
python transformers/examples/language-modeling/run_language_modeling.py --model_name_or_path train-outputs/512+0+512-rare/7-model --tokenizer_name model-configs/1024-config --eval_data_file ../data/wikitext-103-raw/wiki.valid.raw --output_dir eval-outputs/512+0+512-rare/7-512+0+512-pad-1 --do_eval --per_device_eval_batch_size 1 --dataloader_drop_last --augmented --augmentation_function truncate_and_pad_first_half_full --eval_function last_element_eval
<gh_stars>100-1000
module.exports = async ({ graphql, actions, reporter }) => {
const { createPage } = actions;
const result = await graphql(`
query ExamplesQuery {
allExample {
nodes {
id
example
example_id
scope {
name
path
default
}
exampleKnobs {
component
knobs {
defaultValue
options
name
type
}
}
exampleVariants {
name
code
}
}
}
}
`);
if (result.errors) {
reporter.panicOnBuild(`Error when querying examples.`);
return;
}
result.data.allExample.nodes.forEach(ctx => {
if (!ctx.example) return;
const { example_id, id } = ctx;
createPage({
path: `examples/${example_id.toLowerCase()}`,
component: `${process.cwd()}/src/templates/Sandbox/index.tsx`,
context: { ...ctx },
});
createPage({
path: `examples/${id}`,
component: `${process.cwd()}/src/templates/Sandbox/PureSandbox.tsx`,
context: { ...ctx },
});
});
};
|
#!/bin/bash
# Build and install nginx from source, then register it as a systemd service.

# Install the C compiler and supporting libraries first:
apt-get update -y
apt-get install build-essential libpcre3 libpcre3-dev zlib1g zlib1g-dev libssl-dev make -y

# Make sure the download link is up to date.
# get the latest link from here:
# https://nginx.org/en/download.html
wget https://nginx.org/download/nginx-1.21.5.tar.gz

# extract, cd, configure:
# the extract part should be updated according to nginx version.
tar -zxvf nginx-1.21.5.tar.gz
cd nginx-1.21.5
./configure --sbin-path=/usr/bin/nginx --conf-path=/etc/nginx/nginx.conf --error-log-path=/var/log/nginx/error.log --http-log-path=/var/log/nginx/access.log --with-pcre --pid-path=/var/run/nginx.pid --with-http_ssl_module

# Compile and install Nginx
make
make install
# checkinstall can be used instead of make install, so later on nginx can be uninstalled more easily.

# Write the nginx systemd unit file.
# BUGFIX: the original used `echo "..." >> file`, which (a) appended a
# duplicate unit on every re-run and (b) let the shell expand $MAINPID to an
# empty string at install time, producing a broken ExecStop= line. A quoted
# heredoc with `>` writes the unit verbatim and idempotently.
cat > /lib/systemd/system/nginx.service <<'EOF'
[Unit]
Description=The NGINX HTTP and reverse proxy server
After=syslog.target network-online.target remote-fs.target nss-lookup.target
Wants=network-online.target
[Service]
Type=forking
PIDFile=/var/run/nginx.pid
ExecStartPre=/usr/bin/nginx -t
ExecStart=/usr/bin/nginx
ExecReload=/usr/bin/nginx -s reload
ExecStop=/bin/kill -s QUIT $MAINPID
PrivateTmp=true
[Install]
WantedBy=multi-user.target
EOF

# reload the systemctl
systemctl daemon-reload
# Start and enable the service(auto-start on boot):
systemctl --now enable nginx
# To verify the installation:
nginx -v
|
import { async, ComponentFixture, TestBed } from '@angular/core/testing';
import { SketchSelectDemoFilesComponent } from './sketch-select-demo-files.component';
import { NO_ERRORS_SCHEMA } from '@angular/core';
import { NgxsModule } from '@ngxs/store';
import { SketchService } from './sketch.service';
import { HttpClientModule } from '@angular/common/http';
import { UiState } from '@app/core/state';
import { CodeGenState } from '@app/core/state/page.state';
import { MatSnackBarModule } from '@angular/material/snack-bar';
// Unit tests for SketchSelectDemoFilesComponent. NO_ERRORS_SCHEMA suppresses
// unknown-element errors so child components need not be declared.
describe('SketchSelectDemoFilesComponent', () => {
  let component: SketchSelectDemoFilesComponent;
  let fixture: ComponentFixture<SketchSelectDemoFilesComponent>;

  beforeEach(async(() => {
    TestBed.configureTestingModule({
      schemas: [NO_ERRORS_SCHEMA],
      imports: [
        NgxsModule.forRoot([UiState, CodeGenState]),
        MatSnackBarModule,
        HttpClientModule
      ],
      providers: [SketchService],
      declarations: [SketchSelectDemoFilesComponent]
    })
    .compileComponents();
  }));

  beforeEach(() => {
    fixture = TestBed.createComponent(SketchSelectDemoFilesComponent);
    component = fixture.componentInstance;
  });

  it('should be created', () => {
    expect(component).toBeTruthy();
  });

  it('should confirm select file', () => {
    fixture.detectChanges();
    const selectedDemoFile = 'some_file';
    // NOTE(review): confirmSelectedDemoFile() is called BEFORE subscribing to
    // the private 'changed' emitter, and `selectedDemoFile` is never set on
    // the component, so the expectation below most likely never executes and
    // the test passes vacuously. Confirm, then restructure: subscribe first,
    // set the selection on the component, then call confirm.
    component.confirmSelectedDemoFile();
    component['changed'].subscribe((selectedDemoFileEvent) => {
      expect(selectedDemoFileEvent).toBe(selectedDemoFile);
    });
  });
});
|
#pragma once

#include <std/types.h>

// Raw keyboard key value (one byte).
// NOTE(review): the name `key_t` collides with the POSIX IPC `key_t` from
// <sys/types.h> -- confirm this header is never mixed with POSIX headers.
typedef std::byte key_t;

// Interrupt handler invoked when there is a keyboard event.
void onKeyInterrupt();
|
def optimize(problem):
    """Search for the lowest-cost solution to ``problem``.

    NOTE(review): the helpers used here (``create_solution``,
    ``is_feasible``, ``calculate_cost``, ``create_new_solution``) are not
    defined in this module and are presumably supplied by the surrounding
    codebase -- confirm. Also confirm the loop condition: iterating *while
    the solution is infeasible* looks inverted for a typical search loop.

    :param problem: the problem instance (not referenced directly here --
        TODO confirm whether the helpers are expected to close over it).
    :return: the best (lowest-cost) solution seen, or ``None`` when the very
        first candidate is already feasible.
    """
    best_solution = None
    # BUGFIX: `infinity` was an undefined name; use the float infinity so the
    # first computed cost always wins the comparison below.
    best_cost = float("inf")

    # Create a solution
    solution = create_solution()
    while not is_feasible(solution):
        # Calculate cost of the solution
        cost = calculate_cost(solution)
        # Update best solution, if needed
        if cost < best_cost:
            best_solution = solution
            best_cost = cost
        # Create new solution and explore it
        solution = create_new_solution()

    # Return the best solution
    return best_solution
#!/bin/bash
# Resolves this script's directory, then runs helm/delete-all.sh and reports
# failure with a red error banner.

RED='\033[0;31m'    # Error color
YELLOW='\033[0;33m' # Warning color
NC='\033[0m'        # No Color

set -u

DIR="."

# Resolves the absolute directory of this script into $DIR.
# Falls back to a manual symlink walk on macOS, where `readlink -f` fails
# (see https://github.com/corda/corda-kubernetes-deployment/issues/4).
GetPathToCurrentlyExecutingScript () {
    # Absolute path of this script, e.g. /opt/corda/node/foo.sh
    set +e
    ABS_PATH=$(readlink -f "$0" 2>&1)
    if [ "$?" -ne "0" ]; then
        echo "Using macOS alternative to readlink -f command..."
        TARGET_FILE=$0
        cd "$(dirname "$TARGET_FILE")"
        TARGET_FILE=$(basename "$TARGET_FILE")
        ITERATIONS=0
        # Iterate down a (possible) chain of symlinks, bounded to avoid loops.
        while [ -L "$TARGET_FILE" ]
        do
            TARGET_FILE=$(readlink "$TARGET_FILE")
            cd "$(dirname "$TARGET_FILE")"
            TARGET_FILE=$(basename "$TARGET_FILE")
            ITERATIONS=$((ITERATIONS + 1))
            if [ "$ITERATIONS" -gt 1000 ]; then
                echo "symlink loop. Critical exit."
                exit 1
            fi
        done
        # Compute the canonicalized name by finding the physical path
        # for the directory we're in and appending the target file.
        PHYS_DIR=$(pwd -P)
        ABS_PATH=$PHYS_DIR/$TARGET_FILE
    fi
    # Absolute path of the directory this script is in, thus /opt/corda/node/
    DIR=$(dirname "$ABS_PATH")
}
GetPathToCurrentlyExecutingScript

set -eu

# Prints "." on success; on failure prints a red banner and exits 1.
checkStatus () {
    status=$1
    if [ $status -eq 0 ]
    then
        echo "."
    else
        echo -e "${RED}ERROR${NC}"
        echo "The previous step failed"
        exit 1
    fi
    return 0
}

# BUGFIX: under `set -e` a failing delete-all.sh aborted the script before
# checkStatus could run, so the error banner was dead code. Capture the exit
# status without triggering errexit, then report it.
status=0
"$DIR/helm/delete-all.sh" || status=$?
checkStatus $status
|
#!/bin/bash
# Run the Hadoop profile script unless the guard flag is already set
# (presumably exported by FromHadoopProf.sh itself -- confirm).
# The ${FROM_HADOOP_PROFILE:-} default keeps the test safe when the variable
# was never exported, including under `set -u` environments.
if [ "${FROM_HADOOP_PROFILE:-}" != "1" ]
then
    /bin/bash -x /etc/profile.d/FromHadoopProf.sh
fi
|
<filename>open-sphere-base/analysis/src/main/java/io/opensphere/analysis/export/controller/DataElementProvider.java
package io.opensphere.analysis.export.controller;
import java.awt.Color;
import java.io.Serializable;
import java.text.SimpleDateFormat;
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.PrimitiveIterator.OfInt;
import java.util.stream.IntStream;
import javax.swing.JTable;
import org.apache.commons.lang3.StringUtils;
import io.opensphere.analysis.export.model.DelegateDataElement;
import io.opensphere.analysis.export.model.DelegateMapDataElement;
import io.opensphere.analysis.export.model.ExportOptionsModel;
import io.opensphere.analysis.export.model.ExtraColumnsMetaDataProvider;
import io.opensphere.analysis.export.model.LatLonFormat;
import io.opensphere.analysis.table.model.MetaColumn;
import io.opensphere.analysis.table.model.MetaColumnsTableModel;
import io.opensphere.core.model.time.TimeSpan;
import io.opensphere.core.preferences.ListToolPreferences;
import io.opensphere.core.util.DateTimeFormats;
import io.opensphere.core.util.collections.New;
import io.opensphere.core.util.swing.table.JTableUtilities;
import io.opensphere.mantle.data.cache.DataElementCache;
import io.opensphere.mantle.data.element.DataElement;
import io.opensphere.mantle.data.element.MapDataElement;
import io.opensphere.mantle.data.element.MetaDataProvider;
import io.opensphere.mantle.data.element.impl.SimpleMetaDataProvider;
import io.opensphere.mantle.data.geom.MapGeometrySupport;
import io.opensphere.mantle.data.impl.specialkey.LatitudeKey;
import io.opensphere.mantle.data.impl.specialkey.LongitudeKey;
import io.opensphere.mantle.util.TimeSpanUtility;
/**
* This class will modify/filter {@link DataElement} based on user inputs
* contained within {@link ExportOptionsModel}.
*/
public class DataElementProvider
{
    /**
     * The column name for the separated date column.
     */
    private static final String ourDateColumnName = "DATE";

    /**
     * The value to append to any special lat lon columns.
     */
    private static final String ourLatLonAppend = " (DMS)";

    /**
     * The column name for the separated time column.
     */
    private static final String ourTimeColumnName = "TIME OF DAY";

    /**
     * The column name for the WKT values.
     */
    private static final String ourWKTColumnName = "WKT Geometry";

    /**
     * Used to format the color value to a string.
     */
    private final ColorFormatter myColorFormatter;

    /**
     * Contains the user's inputs selected for export.
     */
    private final ExportOptionsModel myExportModel;

    /**
     * Formats the latitude and longitude to the user specified format.
     */
    private final LatLonFormatter myLatLonFormatter;

    /**
     * Gets the wkt values if the user chooses to add those to the export.
     */
    private final WKTValueProvider myWktValueProvider;

    /**
     * Gets the data elements to export from the table.
     *
     * @param tableModel The table model to export.
     * @param table The table to export.
     * @param onlySelected whether to include only selected rows
     * @return the data elements
     */
    public static List<DataElement> getDataElementsToExport(MetaColumnsTableModel tableModel, JTable table, boolean onlySelected)
    {
        // Convenience overload: no meta columns, no color formatting.
        return getDataElementsToExport(tableModel, table, onlySelected, false, null);
    }

    /**
     * Create the appropriate {@link DelegateDataElement} or
     * {@link DelegateMapDataElement}.
     *
     * @param element The element to wrap.
     * @param provider The {@link MetaDataProvider} to use.
     * @return The new delegating data element.
     */
    private static DataElement createDelegateElement(DataElement element, MetaDataProvider provider)
    {
        DataElement copied = null;
        if (element instanceof MapDataElement)
        {
            copied = new DelegateMapDataElement((MapDataElement)element, provider);
        }
        else
        {
            copied = new DelegateDataElement(element, provider);
        }
        return copied;
    }

    /**
     * Gets the data element to export for the given row. The result is a copy
     * of the table's element whose meta data contains only the displayed
     * columns (plus meta columns when requested).
     *
     * @param row the view row index
     * @param tableModel The table model to export.
     * @param table The table to export.
     * @param columnNames the column names being displayed
     * @param includeMetaColumns whether to include meta columns
     * @param colorFormatter the color formatter
     * @return the data element
     */
    private static DataElement getDataElement(int row, MetaColumnsTableModel tableModel, JTable table, List<String> columnNames,
            boolean includeMetaColumns, ColorFormatter colorFormatter)
    {
        // The view may be sorted/filtered; map back to the model row.
        int modelRow = table.convertRowIndexToModel(row);
        DataElement element = tableModel.getDataAt(modelRow);
        // Get the normal columns
        Map<String, Serializable> metaData = new LinkedHashMap<>();
        for (String columnName : columnNames)
        {
            Object value = element.getMetaData().getValue(columnName);
            if (value instanceof Serializable)
            {
                metaData.put(columnName, (Serializable)value);
            }
        }
        MetaDataProvider provider = new SimpleMetaDataProvider(metaData);
        // Get the meta data columns
        if (includeMetaColumns && !tableModel.getMetaColumns().isEmpty())
        {
            Map<String, Object> extraColumns = New.map();
            for (MetaColumn<?> metaColumn : tableModel.getMetaColumns())
            {
                String columnName = metaColumn.getColumnIdentifier();
                int columnIndex = tableModel.findColumn(columnName);
                Object value = tableModel.getValueAt(modelRow, columnIndex);
                // Colors are not exportable as-is; render them to a string.
                if (value instanceof Color)
                {
                    value = colorFormatter.format(value);
                }
                extraColumns.put(columnName, value);
            }
            provider = new ExtraColumnsMetaDataProvider(provider, extraColumns);
        }
        // Return a copy of the data element with new meta data
        return createDelegateElement(element, provider);
    }

    /**
     * Gets the data elements to export from the table.
     *
     * @param tableModel The table model to export.
     * @param table The table to export.
     * @param onlySelected whether to include only selected rows
     * @param includeMetaColumns whether to include meta columns
     * @param colorFormatter the color formatter
     * @return the data elements
     */
    private static List<DataElement> getDataElementsToExport(MetaColumnsTableModel tableModel, JTable table, boolean onlySelected,
            boolean includeMetaColumns, ColorFormatter colorFormatter)
    {
        int size;
        IntStream rowStream;
        if (onlySelected)
        {
            int[] selRows = table.getSelectedRows();
            size = selRows.length;
            rowStream = Arrays.stream(selRows);
        }
        else
        {
            size = table.getRowCount();
            rowStream = IntStream.range(0, size);
        }
        List<DataElement> elements = New.list(size);
        List<String> columnNames = JTableUtilities.getColumnNames(table);
        for (OfInt iter = rowStream.iterator(); iter.hasNext();)
        {
            int row = iter.nextInt();
            DataElement element = getDataElement(row, tableModel, table, columnNames, includeMetaColumns, colorFormatter);
            elements.add(element);
        }
        return elements;
    }

    /**
     * Constructs a new element modifier.
     *
     * @param exportModel Contains the user's inputs selected for export.
     * @param elementCache Used to get the elements {@link MapGeometrySupport}.
     *        NOTE(review): this parameter is not stored or referenced anywhere
     *        in this class -- confirm whether it can be dropped from the
     *        constructor or should be passed to a collaborator.
     */
    public DataElementProvider(ExportOptionsModel exportModel, DataElementCache elementCache)
    {
        myExportModel = exportModel;
        myWktValueProvider = new WKTValueProvider(myExportModel);
        myLatLonFormatter = new LatLonFormatter(myExportModel);
        myColorFormatter = new ColorFormatter(myExportModel);
    }

    /**
     * Modifies and/or filters out the elements passed in based on the user
     * inputs contained in the {@link ExportOptionsModel}.
     *
     * @param tableModel The table model to export.
     * @param table The table to export.
     * @param timePrecision The precision to format the time to.
     * @return The elements.
     */
    public List<DataElement> provideElements(MetaColumnsTableModel tableModel, JTable table, int timePrecision)
    {
        List<DataElement> modified = New.list();
        // Fetch ALL rows (onlySelected=false); selection filtering is applied
        // manually below via each element's visualization state.
        List<DataElement> elements = getDataElementsToExport(tableModel, table, false, myExportModel.isIncludeMetaColumns(),
                myColorFormatter);
        for (DataElement element : elements)
        {
            if (element.getVisualizationState().isSelected() || !myExportModel.isSelectedRowsOnly())
            {
                Map<String, Object> extraValues = New.map();
                addWkt(element, extraValues);
                separateDateAndTime(element, extraValues, timePrecision);
                formatLatLon(element, extraValues);
                // Only re-wrap the element when an option actually added data.
                if (!extraValues.isEmpty())
                {
                    element = createDelegateElement(element, new ExtraColumnsMetaDataProvider(element.getMetaData(), extraValues));
                }
                modified.add(element);
            }
        }
        return modified;
    }

    /**
     * Adds a wkt value to the data to export if the user chooses to do so.
     *
     * @param element The element to add wkt value for.
     * @param extraValues The map to add the wkt value to.
     */
    private void addWkt(DataElement element, Map<String, Object> extraValues)
    {
        String wktValue = myWktValueProvider.getWKTValue(element);
        if (StringUtils.isNotEmpty(wktValue))
        {
            extraValues.put(ourWKTColumnName, wktValue);
        }
    }

    /**
     * Formats latitude and longitude values to the user specified format.
     * The formatted values go into new "&lt;key&gt; (DMS)" columns; the
     * original decimal columns are left untouched.
     *
     * @param element The element to format its latitude and longitude values
     *            for.
     * @param extraValues The map to add the latitude longitude values to.
     */
    private void formatLatLon(DataElement element, Map<String, Object> extraValues)
    {
        if (myExportModel.getSelectedLatLonFormat() != LatLonFormat.DECIMAL)
        {
            String latKey = element.getDataTypeInfo().getMetaDataInfo().getLatitudeKey();
            String lonKey = element.getDataTypeInfo().getMetaDataInfo().getLongitudeKey();
            Object latitude = element.getMetaData().getValue(latKey);
            Object longitude = element.getMetaData().getValue(lonKey);
            latitude = myLatLonFormatter.format(latitude, LatitudeKey.DEFAULT);
            longitude = myLatLonFormatter.format(longitude, LongitudeKey.DEFAULT);
            if (latitude instanceof Serializable)
            {
                extraValues.put(latKey + ourLatLonAppend, latitude);
            }
            if (longitude instanceof Serializable)
            {
                extraValues.put(lonKey + ourLatLonAppend, longitude);
            }
        }
    }

    /**
     * Separates the Date/Time into separate date and time columns if the user
     * chooses to do so.
     *
     * @param element The element to separate the date and time.
     * @param extraValues The map to add the separated date and time values to.
     * @param timePrecision The precision to format the time to.
     */
    private void separateDateAndTime(DataElement element, Map<String, Object> extraValues, int timePrecision)
    {
        if (myExportModel.isSeparateDateTimeColumns())
        {
            TimeSpan val = element.getTimeSpan();
            String date = TimeSpanUtility.formatTimeSpanSingleTimeOnly(new SimpleDateFormat(DateTimeFormats.DATE_FORMAT), val);
            // Add the time portion (added to the list below)
            SimpleDateFormat timeFormatter = ListToolPreferences.getSimpleTimeFormatForPrecision(timePrecision);
            String time = TimeSpanUtility.formatTimeSpanSingleTimeOnly(timeFormatter, val);
            extraValues.put(ourDateColumnName, date);
            extraValues.put(ourTimeColumnName, time);
        }
    }
}
|
import { ConfigurationUtil } from '@kumuluz/kumuluzee-config';
import DiscoveryUtil from 'common/DiscoveryUtil';
class KumuluzeeDiscovery {
async initialize({ extension, configPath }) {
ConfigurationUtil.initDefaultConfigurationSources({ configPath });
await DiscoveryUtil.initialize(extension);
}
async registerService(properties) {
const serviceName = await ConfigurationUtil.get('kumuluzee.name') || (properties && properties.value) || null;
if (!serviceName) {
console.error('Service name not provided!');
return;
}
const ttl = await ConfigurationUtil.get('kumuluzee.discovery.ttl') || (properties && properties.ttl) || 30;
const pingInterval = await ConfigurationUtil.get('kumuluzee.discovery.ping-interval') || (properties && properties.pingInterval) || 20;
const environment = await ConfigurationUtil.get('kumuluzee.env.name') || (properties && properties.environment) || 'dev';
const version = await ConfigurationUtil.get('kumuluzee.version') || (properties && properties.version) || '1.0.0';
const singleton = (properties && properties.singleton) || false;
console.info(`Registering service: ${serviceName}`);
DiscoveryUtil.register(serviceName, version, environment, ttl, pingInterval, singleton);
}
async deregisterService() {
await DiscoveryUtil.deregister();
}
discoverService({ value, version = '*', environment = 'dev', accessType = 'GATEWAY' }) {
return DiscoveryUtil.getServiceInstance(value, version, environment, accessType);
}
async disableServiceInstance({ value, version, environment, url }) {
await DiscoveryUtil.disableServiceInstance(value, version, environment, url);
}
}
export { DiscoveryUtil };
export default new KumuluzeeDiscovery();
|
#!/bin/sh
# Copyright Istio Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file will be fetched as: curl -L https://git.io/getLatestIstio | sh -
# so it should be pure bourne shell, not bash (and not reference other scripts)
#
# The script fetches the latest Istio release candidate and untars it.
# You can pass variables on the command line to download a specific version
# or to override the processor architecture. For example, to download
# Istio 1.6.8 for the x86_64 architecture,
# run curl -L https://istio.io/downloadIstio | ISTIO_VERSION=1.6.8 TARGET_ARCH=x86_64 sh -.
set -e

# Determines the operating system.
OS="$(uname)"
if [ "x${OS}" = "xDarwin" ] ; then
  OSEXT="osx"
else
  OSEXT="linux"
fi

# Determine the latest Istio version by version number ignoring alpha, beta, and rc versions.
if [ "x${ISTIO_VERSION}" = "x" ] ; then
  ISTIO_VERSION="$(curl -sL https://github.com/istio/istio/releases | \
                  grep -o 'releases/[0-9]*.[0-9]*.[0-9]*/' | sort --version-sort | \
                  tail -1 | awk -F'/' '{ print $2}')"
  ISTIO_VERSION="${ISTIO_VERSION##*/}"
fi

LOCAL_ARCH=$(uname -m)
if [ "${TARGET_ARCH}" ]; then
    LOCAL_ARCH=${TARGET_ARCH}
fi

# Map the machine architecture onto Istio's release-artifact naming.
case "${LOCAL_ARCH}" in
  x86_64)
    ISTIO_ARCH=amd64
    ;;
  armv8*)
    ISTIO_ARCH=arm64
    ;;
  aarch64*)
    ISTIO_ARCH=arm64
    ;;
  armv*)
    ISTIO_ARCH=armv7
    ;;
  amd64|arm64)
    ISTIO_ARCH=${LOCAL_ARCH}
    ;;
  *)
    echo "This system's architecture, ${LOCAL_ARCH}, isn't supported"
    exit 1
    ;;
esac

if [ "x${ISTIO_VERSION}" = "x" ] ; then
  printf "Unable to get latest Istio version. Set ISTIO_VERSION env var and re-run. For example: export ISTIO_VERSION=1.0.4"
  exit 1;
fi

NAME="istio-$ISTIO_VERSION"
URL="https://github.com/istio/istio/releases/download/${ISTIO_VERSION}/istio-${ISTIO_VERSION}-${OSEXT}.tar.gz"
ARCH_URL="https://github.com/istio/istio/releases/download/${ISTIO_VERSION}/istio-${ISTIO_VERSION}-${OSEXT}-${ISTIO_ARCH}.tar.gz"

with_arch() {
  printf "\nDownloading %s from %s ...\n" "$NAME" "$ARCH_URL"
  if ! curl -o /dev/null -sIf "$ARCH_URL"; then
    printf "\n%s is not found, please specify a valid ISTIO_VERSION and TARGET_ARCH\n" "$ARCH_URL"
    exit 1
  fi
  curl -fsLO "$ARCH_URL"
  filename="istio-${ISTIO_VERSION}-${OSEXT}-${ISTIO_ARCH}.tar.gz"
  # BUGFIX: extract and remove the archive by its real name; these two lines
  # had been corrupted to the undefined command substitution "$(unknown)".
  tar -xzf "${filename}"
  rm "${filename}"
}

without_arch() {
  printf "\nDownloading %s from %s ..." "$NAME" "$URL"
  if ! curl -o /dev/null -sIf "$URL"; then
    printf "\n%s is not found, please specify a valid ISTIO_VERSION\n" "$URL"
    exit 1
  fi
  curl -fsLO "$URL"
  filename="istio-${ISTIO_VERSION}-${OSEXT}.tar.gz"
  # BUGFIX: same "$(unknown)" corruption as in with_arch above.
  tar -xzf "${filename}"
  rm "${filename}"
}

# Istio 1.6 and above support arch
ARCH_SUPPORTED=$(echo "$ISTIO_VERSION" | awk -F'.' '{print $1"."$2}' )
# Istio 1.5 and below do not have arch support
ARCH_UNSUPPORTED="1.5"

if [ "${OS}" = "Linux" ] ; then
  # This checks if 1.6 <= 1.5 or 1.4 <= 1.5
  if [ "$(expr "${ARCH_SUPPORTED}" \<= "${ARCH_UNSUPPORTED}")" -eq 1 ]; then
    without_arch
  else
    with_arch
  fi
elif [ "x${OS}" = "xDarwin" ] ; then
  without_arch
else
  printf "\n\n"
  printf "Unable to download Istio %s at this moment!\n" "$ISTIO_VERSION"
  printf "Please verify the version you are trying to download.\n\n"
  exit 1
fi

printf ""
printf "\nIstio %s Download Complete!\n" "$ISTIO_VERSION"
printf "\n"
printf "Istio has been successfully downloaded into the %s folder on your system.\n" "$NAME"
printf "\n"
BINDIR="$(cd "$NAME/bin" && pwd)"
printf "Next Steps:\n"
printf "See https://istio.io/latest/docs/setup/install/ to add Istio to your Kubernetes cluster.\n"
printf "\n"
printf "To configure the istioctl client tool for your workstation,\n"
printf "add the %s directory to your environment path variable with:\n" "$BINDIR"
printf "\t export PATH=\"\$PATH:%s\"\n" "$BINDIR"
printf "\n"
printf "Begin the Istio pre-installation check by running:\n"
printf "\t istioctl x precheck \n"
printf "\n"
printf "Need more information? Visit https://istio.io/latest/docs/setup/install/ \n"
|
# from db_utils.utils import *
from utils.utils import *
import pdb
from nltk.tokenize import word_tokenize
import pygtrie
import klepto
import random
# Template producing a quoted ILIKE pattern literal, e.g. "'%foo%'";
# filled via .format(ILIKE_PRED=...) in _gen_query_str.
ILIKE_PRED_FMT = "'%{ILIKE_PRED}%'"
class QueryGenerator():
    '''
    Generates sql queries based on a template.
    Each template entry describes predicate "groups"; placeholder keys in
    base_sql are substituted with concrete predicate strings sampled either
    from the database (group type "sql"/"sqls") or from a fixed option list
    (group type "list").
    TODO: explain rules etc.
    '''
    def __init__(self, query_template, user, db_host, port,
            pwd, db_name):
        # database connection parameters forwarded to cached_execute_query
        self.user = user
        # NOTE(review): "<PASSWORD>" is a credential-redaction placeholder left
        # by a scrubbing tool; this line should read `self.pwd = pwd`.
        self.pwd = <PASSWORD>
        self.db_host = db_host
        self.port = port
        self.db_name = db_name
        # self.query_template = query_template
        self.base_sql = query_template["base_sql"]["sql"]
        self.templates = query_template["templates"]
        # cache: hash(sampling sql) -> rows (or a CharTrie for ILIKE groups)
        self.sampling_outputs = {}
        # cache: hash(sampling sql) -> (trie keys, per-key counts) for ILIKE
        self.ilike_output_size = {}
        # sampling sqls that previously failed / were rejected; skipped on reuse
        self.bad_sqls = []
        # on-disk cache of token tries so tokenization happens once per sql
        self.trie_archive = klepto.archives.dir_archive("./qgen_tries/",
                cached=True, serialized=True)
        self.max_in_vals = 15

    def _update_preds_range(self, sql, column, key, pred_val):
        '''
        Substitutes pred_val for placeholder `key` in `sql` and returns it.
        NOTE(review): debug scaffolding (prints + pdb.set_trace) left in;
        `column` is unused here.
        '''
        print("key: ", key)
        print("pred_val: ", pred_val)
        pdb.set_trace()
        sql = sql.replace(key, pred_val)
        return sql

    def _generate_sql(self, pred_vals):
        '''
        Replaces every placeholder key in base_sql with its chosen predicate
        string and returns the completed query text.
        '''
        sql = self.base_sql
        # for each group, select appropriate predicates
        for key, val in pred_vals.items():
            if key not in sql:
                print("key not in sql!")
                print(key)
                # NOTE(review): debugger breakpoint left in
                pdb.set_trace()
            sql = sql.replace(key, val)
        return sql

    def _update_sql_ilike(self, ilike_filter, pred_group, pred_vals):
        # Builds "<col> ILIKE <filter>" for the group's single column and
        # stores it under the group's placeholder key.
        columns = pred_group["columns"]
        assert len(columns) == 1
        key = pred_group["keys"][0]
        pred_type = pred_group["pred_type"]
        pred_str = columns[0] + " " + pred_type + " " + ilike_filter
        pred_vals[key] = pred_str

    def _update_sql_in(self, samples, pred_group, pred_vals):
        '''
        @samples: ndarray, ith index correspond to possible values for ith
        index of pred_group["keys"], and last index is the count of the
        combined values.
        @pred_vals: all the predicates in the base sql string that have already
        been assigned a value. This will be updated after we assign values to
        the unspecified columns in the present pred_group.
        @pred_group: [[template.predicate]] section of the toml that this
        predicate corresponds to.
        @ret: updated sql string
        '''
        keys = pred_group["keys"]
        columns = pred_group["columns"]
        pred_type = pred_group["pred_type"]
        assert len(keys) == len(columns)
        for i, key in enumerate(keys):
            # key will be replaced with a predicate string
            pred_str = ""
            none_cond = None
            column = columns[i]
            vals = []
            # can have multiple values for IN statements, including None / NULL
            for s in samples:
                val = s[i]
                if val:
                    val = str(val)
                    # single quotes inside values are stripped, not escaped
                    vals.append("'{}'".format(val.replace("'","")))
                else:
                    # None value
                    none_cond = column + " IS NULL"
            # dedupe while keeping a list
            vals = [s for s in set(vals)]
            if len(vals) == 0:
                assert none_cond
                pred_str = none_cond
            else:
                # sorted so the generated sql text is deterministic
                vals.sort()
                new_pred_str = ",".join(vals)
                pred_str = column + " " + pred_type + " "
                pred_str += "(" + new_pred_str + ")"
                if none_cond:
                    # NOTE(review): un-parenthesized OR may bind incorrectly if
                    # the surrounding template ANDs this fragment — verify
                    pred_str += " OR " + none_cond
            pred_vals[key] = pred_str

    def _gen_query_str(self, templated_preds):
        '''
        @templated_preds
        Modifies the base sql to plug in newer values at all the unspecified
        values. Returns the generated query string, or None when no suitable
        predicate value could be found (caller retries).
        Handling of NULLs:
        '''
        # dictionary that is used to keep track of the column values that have
        # already been selected so far.
        pred_vals = {}
        # for each group, select appropriate predicates
        for pred_group in templated_preds:
            if "multi" in pred_group:
                # multiple predicate conditions, choose any one
                pred_group = random.choice(pred_group["multi"])
            if "sql" in pred_group["type"]:
                # cur_sql will be the sql used to sample for this predicate
                # value
                if pred_group["type"] == "sqls":
                    cur_sql = random.choice(pred_group["sqls"])
                else:
                    cur_sql = pred_group["sql"]
                if pred_group["dependencies"]:
                    # need to replace placeholders in cur_sql
                    for key, val in pred_vals.items():
                        cur_sql = cur_sql.replace(key, val)
                # get possible values to use
                cur_key = deterministic_hash(cur_sql)
                if cur_key in self.sampling_outputs:
                    output = self.sampling_outputs[cur_key]
                else:
                    if cur_sql in self.bad_sqls:
                        return None
                    # helpers presumably come from the wildcard utils import —
                    # TODO confirm (deterministic_hash / cached_execute_query)
                    output = cached_execute_query(cur_sql, self.user,
                            self.db_host, self.port, self.pwd, self.db_name,
                            100, "./.lc_cache/sql_outputs/", None)
                    if pred_group["pred_type"].lower() == "ilike":
                        cur_sql_key = deterministic_hash(cur_sql)
                        if cur_sql_key in self.trie_archive.archive:
                            print(cur_sql)
                            print("found in archive")
                            trie = self.trie_archive.archive[cur_sql_key]
                        else:
                            print("going to tokenize: ", cur_sql)
                            tokens = []
                            for out in output:
                                if out[0] is None:
                                    continue
                                cur_tokens = word_tokenize(out[0].lower())
                                # reject columns with pathologically long text
                                if len(cur_tokens) > 150:
                                    print("too many tokens in column: ",
                                            pred_group["columns"][0], pred_vals)
                                    self.bad_sqls.append(cur_sql)
                                    return None
                                tokens += cur_tokens
                            print("going to make a trie..")
                            # trie maps token -> occurrence count
                            trie = pygtrie.CharTrie()
                            for token in tokens:
                                if token in trie:
                                    trie[token] += 1
                                else:
                                    trie[token] = 1
                            self.trie_archive.archive[cur_sql_key] = trie
                        self.sampling_outputs[cur_key] = trie
                        output_keys = []
                        weights = []
                        for k,v in trie.items():
                            output_keys.append(k)
                            weights.append(v)
                        self.ilike_output_size[cur_key] = (output_keys, weights)
                        output = trie
                    else:
                        self.sampling_outputs[cur_key] = output
                if len(output) == 0:
                    # no point in doing shit
                    return None
                if pred_group["pred_type"].lower() == "in":
                    # now use one of the different sampling methods
                    num_samples = random.randint(pred_group["min_samples"],
                            pred_group["max_samples"])
                    if pred_group["sampling_method"] == "quantile":
                        # sample from one randomly chosen quantile chunk
                        num_quantiles = pred_group["num_quantiles"]
                        curp = random.randint(0, num_quantiles-1)
                        chunk_len = int(len(output) / num_quantiles)
                        tmp_output = output[curp*chunk_len: (curp+1)*chunk_len]
                        if len(tmp_output) == 0:
                            # really shouldn't be happenning right?
                            return None
                        if len(tmp_output) <= num_samples:
                            # sample WITH replacement when the chunk is small
                            samples = [random.choice(tmp_output) for _ in
                                    range(num_samples)]
                        else:
                            samples = random.sample(tmp_output, num_samples)
                        self._update_sql_in(samples,
                                pred_group, pred_vals)
                    else:
                        samples = [random.choice(output) for _ in
                                range(num_samples)]
                        self._update_sql_in(samples,
                                pred_group, pred_vals)
                elif pred_group["pred_type"].lower() == "ilike":
                    assert isinstance(output, pygtrie.CharTrie)
                    # Note: the trie will only provide a lower-bound on the
                    # number of matches, since ILIKE predicates would also
                    # consider substrings. But this seems to be enough for our
                    # purposes, as we will avoid queries that zero out
                    output_keys, weights = self.ilike_output_size[cur_key]
                    if len(output_keys) <= 1:
                        return None
                    # choose min_target, max_target for regex matches.
                    if "thresholds" in pred_group:
                        threshs = pred_group["thresholds"]
                        idx = random.randint(0, len(threshs)-1)
                        min_target = threshs[idx]
                        if idx+1 == len(threshs):
                            max_target = 100000000000
                        else:
                            max_target = threshs[idx+1]
                    else:
                        if random.random() > 0.5:
                            cur_partition = random.randint(0, pred_group["num_quantiles"]-1)
                            min_percentile = 100.0 / pred_group["num_quantiles"] * cur_partition
                            max_percentile = 100.0 / pred_group["num_quantiles"] * (cur_partition+1)
                            min_target = max(pred_group["min_count"],
                                    np.percentile(weights, min_percentile))
                            max_target = np.percentile(weights, max_percentile)
                        else:
                            num_rows = sum(weights)
                            partition_size = num_rows / pred_group["num_quantiles"]
                            cur_partition = random.randint(0, pred_group["num_quantiles"]-1)
                            min_target = max(pred_group["min_count"],
                                    cur_partition*partition_size)
                            max_target = (cur_partition+1)*partition_size
                        # NOTE(review): the block below unconditionally
                        # recomputes and overwrites min/max_target, making the
                        # if/else above dead — looks like leftover code; confirm
                        cur_partition = random.randint(0, pred_group["num_quantiles"]-1)
                        min_percentile = 100.0 / pred_group["num_quantiles"] * cur_partition
                        max_percentile = 100.0 / pred_group["num_quantiles"] * (cur_partition+1)
                        min_target = max(pred_group["min_count"],
                                np.percentile(weights, min_percentile))
                        max_target = np.percentile(weights, max_percentile)
                    if min_target > max_target:
                        print("min target {} > max target {}".format(min_target,
                            max_target))
                        return None
                    print("col: {}, min: {}, max: {}".format(pred_group["columns"],
                        min_target, max_target))
                    ilike_pred = None
                    # rejection-sample up to 1000 candidate prefixes
                    for i in range(1000):
                        # NOTE(review): incrementing the loop variable makes the
                        # "% 1000" progress print unreachable — likely leftover
                        i += 1
                        if i % 1000 == 0:
                            print(i)
                        if i % 2 == 0:
                            # just choose randomly, more likely to find
                            # relevant one
                            key = random.choice(output_keys)
                        else:
                            key = random.choices(population=output_keys,
                                    weights=weights, k=1)[0]
                        if len(key) < pred_group["min_chars"]:
                            continue
                        max_filter_len = min(len(key), pred_group["max_chars"])
                        num_chars = random.randint(pred_group["min_chars"],
                                max_filter_len)
                        ilike_pred = key[0:num_chars]
                        # prefix-sum over the trie: count of tokens starting
                        # with ilike_pred (lower bound on ILIKE matches)
                        est_size = sum(output[ilike_pred:])
                        if est_size > min_target and est_size < max_target:
                            break
                        else:
                            ilike_pred = None
                    if ilike_pred is None:
                        # print("did not find an appropriate predicate for ",
                        # pred_group["columns"])
                        return None
                    else:
                        print("col: {}, filter: {}, est size: {}".format(
                            pred_group["columns"][0], ilike_pred, est_size))
                        ilike_pred = ilike_pred.replace("'","")
                        ilike_filter = ILIKE_PRED_FMT.format(ILIKE_PRED = ilike_pred)
                        self._update_sql_ilike(ilike_filter, pred_group, pred_vals)
                else:
                    assert False
            elif pred_group["type"] == "list":
                ## assuming it is a single column
                columns = pred_group["columns"]
                assert len(columns) == 1
                if pred_group["sampling_method"] == "uniform":
                    if pred_group["pred_type"] == "range":
                        # range predicate: two keys, lower and upper bound
                        col = columns[0]
                        assert len(pred_group["keys"]) == 2
                        options = pred_group["options"]
                        pred_choice = random.choice(options)
                        assert len(pred_choice) == 2
                        lower_key = pred_group["keys"][0]
                        upper_key = pred_group["keys"][1]
                        lower_val = pred_choice[0]
                        upper_val = pred_choice[1]
                        assert len(pred_choice) == 2
                        if "numeric_col_type" in pred_group:
                            # text column holding numbers: cast before
                            # comparing, guarded by a numeric-format regex
                            col_type = pred_group["numeric_col_type"]
                            # add a chec for both conditions
                            float_regex = '^(?:[1-9]\d*|0)?(?:\.\d+)?$'
                            num_check_cond_tmp = "{col} ~ '{regex}' AND {cond}"
                            upper_cond = "{val} <= {col}::{col_type}".format(col=col,
                                    val=lower_val,
                                    col_type=col_type)
                            lower_cond = "{col}::{col_type} <= {val}".format(col=col,
                                    val=upper_val,
                                    col_type=col_type)
                            lower_cond = num_check_cond_tmp.format(col=col,
                                    cond = lower_cond,
                                    regex = float_regex)
                            upper_cond = num_check_cond_tmp.format(col=col,
                                    cond = upper_cond, regex =
                                    float_regex)
                        else:
                            lower_key = pred_group["keys"][0]
                            upper_key = pred_group["keys"][1]
                            lower_val = pred_choice[0]
                            upper_val = pred_choice[1]
                            lower_cond = "{} >= {}".format(col, lower_val)
                            upper_cond = "{} <= {}".format(col, upper_val)
                        pred_vals[lower_key] = lower_cond
                        pred_vals[upper_key] = upper_cond
                    else:
                        options = pred_group["options"]
                        pred_choice = random.choice(options)
                        if "replace" in pred_group:
                            # assert len(pred_choice) == 1
                            assert len(pred_group["keys"]) == 1
                            # cur_choice = pred_choice[0]
                            cur_key = pred_group["keys"][0]
                            pred_vals[cur_key] = pred_choice
                            # NOTE(review): debugger breakpoint left in
                            pdb.set_trace()
                        else:
                            # probably only deals with `=` ?
                            assert len(pred_group["keys"]) == 1
                            self._update_sql_in([[pred_choice]],
                                    pred_group, pred_vals)
                else:
                    assert False
        gen_sql = self._generate_sql(pred_vals)
        return gen_sql

    def gen_queries(self, num_samples, column_stats=None):
        '''
        Keeps instantiating every template until at least num_samples queries
        have been produced (may overshoot by up to len(self.templates)-1).
        @ret: [sql queries]
        NOTE(review): `time` and `np` are assumed to come from the wildcard
        utils import — confirm; `column_stats` is currently unused.
        '''
        print("going to generate ", num_samples)
        start = time.time()
        all_query_strs = []
        while len(all_query_strs) < num_samples:
            for template in self.templates:
                query_str = self._gen_query_str(template["predicates"])
                if query_str is not None:
                    all_query_strs.append(query_str)
                    print(query_str)
                    # pdb.set_trace()
                else:
                    pass
                    # print("query str was None")
        print("{} took {} seconds to generate".format(len(all_query_strs),
            time.time()-start))
        return all_query_strs
|
import React from "react";
function Input(props: any) {
function handleChange(event: any) {
props.onInputChange(event.target.value);
}
return (
<input
data-testid={props.testid}
placeholder={props.placeholder}
disabled={props.disabled}
value={props.value}
id="amount"
type="number"
onChange={handleChange}
/>
);
}
export default Input;
|
(function() {
    "use strict";
    /*global sap, window, setTimeout*/
    sap.ui.controller("c4c.details-accountcollection.local.view.vhelpdialog", {
        /**
         * Value-help dialog controller: shows a searchable table backed by an
         * OData collection and publishes the selected row on the
         * "dialog.event"/"value.changed" event-bus channel.
         * @memberOf view.vhelp
         */
        oDialog: null,
        onInit: function(data) {
            this.eventBus = data.eventBus;
            this.translationAnnotationModel = data.translationAnnotationModel;
            this.params = this.convertParams(data.params);
            this.staticModel = data.staticModel;
            // the JS fragment builds the dialog, table and cells used below
            this.oDialog = sap.ui.jsfragment("c4c.details-accountcollection.local.view.vhelpdialog", this);
        },
        // live-search handler: strips leading/trailing '*' wildcards, then filters
        initiateFilter: function initiateFilter(oEvt) {
            var value = oEvt.getSource().getValue();
            // Replace first and last char: *
            value = value.replace(/^\*|\*$/g, "");
            this.filterTable(value);
            /*Update the item count label at the top of the table*/
        },
        filterTable: function(value) {
            // NOTE(review): aBindings is a private model member — confirm this
            // still works before any UI5 upgrade
            var tableRowCount = this.oDialog.getModel().aBindings[0].getLength();
            this.oDialog.itemCount.$().text(tableRowCount + " Items");
            /*If there is value in the search field filter the results*/
            var filters = [];
            if (value) {
                var oFilter = new sap.ui.model.Filter(this.params.searchByField, sap.ui.model.FilterOperator.Contains, value);
                filters.push(oFilter);
            }
            // rebind the items aggregation with the (possibly empty) filter set
            this.oDialog.table.bindItems("/" + this.params.CollectionPath, new sap.m.ColumnListItem({
                cells: this.oDialog.cells
            }), null, filters);
        },
        // normalizes the raw dialog parameters into the shape the fragment expects
        convertParams: function convertParams(params) {
            var res = {
                    BusinessObject: params.BusinessObject,
                    CollectionPath: params.CollectionPath.indexOf("Collection") >= 0 ? params.CollectionPath : (params.CollectionPath + "Collection"), //TODO remove Collection
                    searchSupported: params.searchSupported,
                    fieldName: params.fieldName,
                    searchValue: params.searchValue,
                    columns: [],
                    titleKey: params.titleKey,
                    prefixKey: params.prefixKey
                },
                searchField,
                columnName,
                i;
            for (i in params.viewParams) {
                if (params.viewParams.hasOwnProperty(i)) {
                    columnName = this.getTranslatedTitle(params.prefixKey, i);
                    res.columns.push({
                        name: columnName,
                        path: i,
                        localsIds: params.viewParams[i]
                    });
                    // first local id marks the column that backs the search field
                    searchField = params.viewParams[i] && params.viewParams[i][0];
                    if (searchField === params.fieldName) {
                        res.searchField = i;
                    }
                }
            }
            if (params.filterable.length > 0) {
                res.searchByField = params.filterable[0];
            }
            return res;
        },
        // resolves "<prefix>.<key>" from the annotation resource bundle
        getTranslatedTitle: function(titlePrefix, key) {
            var annotationBundle = this.translationAnnotationModel.getResourceBundle();
            var titleKey = titlePrefix + "." + key;
            var translatedTitle = annotationBundle.getText(titleKey);
            return translatedTitle;
        },
        closeDialog: function closeDialog() {
            this.oDialog.close();
        },
        openDialog: function() {
            var dialog = this.oDialog;
            dialog.table.setBusy(true);
            dialog.open();
            // defer model creation so the dialog can paint its busy state first
            setTimeout(function() {
                var url = this.getServiceUrl(window.generalNameSpace.businessObject.oDataService);
                var oDataModel = new sap.ui.model.odata.ODataModel(url, false);
                oDataModel.attachRequestCompleted(null, function() {
                    dialog.table.setBusy(false);
                });
                dialog.setModel(oDataModel);
            }.bind(this), 100);
        },
        getServiceUrl: function(path) {
            return window.generalNameSpace.pathToDestination + path;
        },
        onDialogCloseButton: function onDialogCloseButton() {
            this.closeDialog();
            this.destroy();
        },
        // row click: resolve the clicked row into a plain object and broadcast it
        onRowSelected: function onRowSelected(oEvent) {
            var res = this.getSelectedRowObject(oEvent.getSource());
            this.closeDialog();
            this.eventBus.publish("dialog.event", "value.changed", res);
        },
        getSelectedRowObject: function getSelectedRowObject(table) {
            var res = {},
                data = this.oDialog.getModel("dataModel").getData(),
                columns = data.columns,
                selectedCells = table.getSelectedItem().getCells(),
                column,
                i;
            for (i = 0; i < selectedCells.length; i++) {
                column = columns[i];
                // only columns carrying local ids are part of the published payload
                if (column.localsIds && column.localsIds.length > 0) {
                    res[column.name] = {
                        value: selectedCells[i].getText(),
                        localsIds: column.localsIds
                    };
                }
            }
            return res;
        }
    });
}());
# Remove all generated output directories in one pass.
rm -rf judged expert independent distilled
|
'use strict';
const { getUnauthorizedError, getInvalidAuthProviderError } = require('./errors');
const fb = require('./fb');
module.exports = function sessionMiddleWare(req, res, next) {
const { authorization } = req.headers;
if (!authorization) {
return next(getUnauthorizedError());
}
const [provider, token] = authorization.split(' ');
if (!provider || !token) {
return next(getUnauthorizedError());
}
let promise;
switch (provider) {
case 'Facebook': promise = fb(req, token);
break;
default: return next(getInvalidAuthProviderError());
}
promise
.then(user => {
/*
* `user` should have at least the following properties:
* - id: a string value.
* */
req.user = user;
next();
})
.catch(err => {
if (err.status === 404) {
// The user has not been found in the system.
return next(getUnauthorizedError());
}
next(err);
});
};
|
package Chapter1_2Low;
import java.util.ArrayList;
//Exercise 1.2.2
public class Interval1D {
private double lo;
private double hi;
public Interval1D(double tlo, double thi) {
if (tlo > thi) {
double tempDouble = tlo;
tlo = thi;
thi = tempDouble;
}
this.hi = thi;
this.lo = tlo;
}
public double length() {
return Math.abs(lo - hi);
}
public boolean contains(double x) {
return (x > lo) && (x < hi);
}
/*
* 判断两个间隔是否相交
* @param that
* @return
* */
public boolean intersect(Interval1D that) {
if (this.hi < that.lo) {
return false;
} else if ((this.hi > that.lo) && (this.lo < that.hi)) { //一个间隔的上界大于另一个间隔的下界,并且该下界小于另一个间隔的上界
return true;
} else {
return false;
}
}
@Override
public String toString() {
return "( " + lo + ", " + hi + " )";
}
public static void main(String[] args) {
ArrayList<Interval1D> interval1ds = new ArrayList<>();
Interval1D interval1d1 = new Interval1D(3, 5);
Interval1D interval1d2 = new Interval1D(4, 5);
Interval1D interval1d3 = new Interval1D(1, 5);
Interval1D interval1d4 = new Interval1D(7, 9);
Interval1D interval1d5 = new Interval1D(1, 2);
interval1ds.add(interval1d1);
interval1ds.add(interval1d2);
interval1ds.add(interval1d3);
interval1ds.add(interval1d4);
interval1ds.add(interval1d5);
for (int i = 0; i < interval1ds.size(); i++) {
for (int j = i + 1; j < interval1ds.size(); j++) {
Interval1D tempInterval1d1 = interval1ds.get(i);
Interval1D tempInterval1d2 = interval1ds.get(j);
if (tempInterval1d1.intersect(tempInterval1d2)) {
System.out.println("intersect: " + tempInterval1d1 + " " + tempInterval1d2);
}
}
}
}
}
|
// gh_stars: 0
module.exports = {
  // only classes referenced in these files survive the production purge
  purge: ["./src/**/*.js", "./src/**/*.jsx", "./src/**/*.ts", "./src/**/*.tsx"],
  darkMode: "class", // or 'media' or 'class'
  theme: {
    // note: defined at the top level (not under extend), so this REPLACES
    // tailwind's default flex scale entirely
    flex: {
      '33': '1 0 33.333%',
      '66': '1 0 66.666%',
      '100': '1 0 100%'
    },
    // screens: {
    //   'sm': {'min': '320px', 'max': '767px'},
    //   'md': {'min': '768px', 'max': '1023px'},
    //   'lg': {'min': '1024px', 'max': '1279px'},
    //   'xl': {'min': '1280px', 'max': '1535px'},
    //   '2xl': {'min': '1536px'},
    // },
    extend: {
      colors: {
        "logo-pink-dot": "#FF4C60",
      },
      fontFamily: {
        heading: ["Source Sans Pro", "sans-serif"],
        body: [
          "Open Sans",
          "-apple-system",
          "BlinkMacSystemFont",
          "Segoe UI",
          "Roboto",
          "Helvetica Neue",
          "Arial",
          "sans-serif",
        ],
      },
      // @tailwindcss/typography prose styles; "dark" variant enabled below
      typography: theme => ({
        DEFAULT: {
          css: {
            h1: {
              color: theme("colors.gray.700"),
              fontFamily: theme("fontFamily.heading").join(", "),
            },
            h2: {
              color: theme("colors.gray.700"),
              fontFamily: theme("fontFamily.heading").join(", "),
            },
          },
        },
        dark: {
          css: {
            color: theme("colors.white"),
            "h1, h2, h3, h4, h5, h6": {
              color: theme("colors.white"),
              fontFamily: theme("fontFamily.heading").join(", "),
            },
            a: {
              // NOTE(review): bracket path — confirm theme() resolves
              // "colors['logo-pink-dot']"; the documented form is the dot
              // path "colors.logo-pink-dot"
              color: theme("colors['logo-pink-dot']"),
            },
            "strong, blockquote": {
              color: theme("colors.white"),
            },
          },
        },
      }),
    },
  },
  variants: {
    extend: {
      typography: ["dark"],
      flexGrow: ["last"]
    },
  },
  plugins: [require("@tailwindcss/typography")],
}
|
#!/usr/bin/env bash
# global variables populated by Functions::run (defined below)
declare -gx bash_framework_status    # exit status of the executed command
declare -gix bash_framework_duration # wall-clock duration in whole seconds
declare -gx bash_framework_output    # captured stdout of the command
# Public: check if command specified exists or exits
# with error and message if not
#
# **Arguments**:
# * $1 commandName on which existence must be checked
# * $2 helpIfNotExists a help command to display if the command does not exist
#
# **Exit**: code 1 if the command specified does not exist
Functions::checkCommandExists() {
  local cmd="$1"
  local helpMessage="$2"
  # abort the whole script when the required command is missing
  if ! command -v "${cmd}" >/dev/null 2>&1; then
    Log::displayError "${cmd} is not installed, please install it"
    if [[ -n "${helpMessage}" ]]; then
      Log::displayInfo "${helpMessage}"
    fi
    exit 1
  fi
}
# Public: determine if the script is executed under windows
# <pre>
# uname GitBash windows (with wsl) => MINGW64_NT-10.0 ZOXFL-6619QN2 2.10.0(0.325/5/3) 2018-06-13 23:34 x86_64 Msys
# uname GitBash windows (wo wsl) => MINGW64_NT-10.0 frsa02-j5cbkc2 2.9.0(0.318/5/3) 2018-01-12 23:37 x86_64 Msys
# uname wsl => Linux ZOXFL-6619QN2 4.4.0-17134-Microsoft #112-Microsoft Thu Jun 07 22:57:00 PST 2018 x86_64 x86_64 x86_64 GNU/Linux
# </pre>
#
# **Echo**: "1" if windows, else "0"
Functions::isWindows() {
  # GitBash/Msys reports "Msys" as operating system; everything else is
  # treated as non-windows (including WSL, which reports GNU/Linux)
  case "$(uname -o)" in
    Msys) echo "1" ;;
    *) echo "0" ;;
  esac
}
# Public: check if hostname exists by pinging it
# with error and message if not
#
# **Arguments**:
# * $1 is the dns hostname
#
# **Return**:
## * 0 if OK
## * 1 => fail to call ping
## * 2 => fail to call ipconfig/ifconfig
## * 3 => host doesn't resolve to local ip address
## * other ping error codes possible
Functions::checkDnsHostname() {
  local host="$1"
  if [[ -z "${host}" ]]; then
    return 1
  fi
  # check if host is reachable
  local returnCode=0
  if [[ "$(Functions::isWindows)" = "1" ]]; then
    # windows ping needs -4 (ipv4) and -n (count); linux uses -c
    COMMAND_OUTPUT=$(ping -4 -n 1 "${host}" 2>&1)
    returnCode=$?
  else
    COMMAND_OUTPUT=$(ping -c 1 "${host}" 2>&1)
    returnCode=$?
  fi
  if [[ "${returnCode}" = "0" ]]; then
    # extract the resolved ip from the ping banner line
    # under windows: Pinging willywonka.fchastanet.lan [127.0.0.1] with 32 bytes of data
    # under linux: PING willywonka.fchastanet.lan (127.0.1.1) 56(84) bytes of data.
    local ip
    ip=$(echo "${COMMAND_OUTPUT}" | grep -i ping | grep -Eo '[0-9.]{4,}' | head -1)
    # now we have to check if ip is bound to local ip address
    if [[ ${ip} != 127.0.* ]]; then
      # resolve to a non local address
      # check if ip resolve to our ips
      Log::displayInfo "check if ip(${ip}) associated to host(${host}) is listed in your network configuration"
      # `| cat` forces a 0 pipeline status when grep merely finds nothing
      if [[ "$(Functions::isWindows)" = "1" ]]; then
        COMMAND_OUTPUT=$(ipconfig 2>&1 | grep "${ip}" | cat )
        returnCode=$?
      else
        COMMAND_OUTPUT=$(ifconfig 2>&1 | grep "${ip}" | cat )
        returnCode=$?
      fi
      if [[ "${returnCode}" != "0" ]]; then
        # the ipconfig/ifconfig pipeline itself failed
        returnCode=2
      elif [[ -z "${COMMAND_OUTPUT}" ]]; then
        # host resolves to an ip that is not bound to any local interface
        returnCode=3
      fi
    fi
  fi
  return ${returnCode}
}
# Public: single-quote a string for safe reuse in shell
# escapes embedded single quotes by replacing each ' with '\''
# (close quote, escaped literal quote, reopen quote)
#
# **Arguments**:
# * $1 the string to quote
#
# **Output**: the string wrapped in single quotes
Functions::quote() {
  local quoted=${1//\'/\'\\\'\'};
  printf "'%s'" "$quoted"
}
# Public: list files of dir with given extension and display it as a list one by line
#
# **Arguments**:
# * $1 the directory to list
# * $2 the extension (default: sh)
# * $3 the indentation (' - ' by default) can be any string compatible with sed not containing any /
# **Output**: list of files without extension/directory
# eg:
# - default.local
# - default.remote
# - localhost-root
Functions::getList() {
  local DIR="$1"
  local EXT="${2:-sh}"
  local INDENT_STR="${3:- - }"
  local extension="${EXT}"
  # normalize extension so ".sh" and "sh" are both accepted
  if [[ -n "${EXT}" && "${EXT:0:1}" != "." ]]; then
    extension=".${EXT}"
  fi
  # subshell so the cd does not leak; strip "./" prefix and the extension,
  # sort, then prefix each entry with the indentation string
  (
    cd "${DIR}" && find . -type f -name "*${extension}" | sed 's#^./##g' | sed "s/\.${EXT}\$//g" | sort | sed "s/^/${INDENT_STR}/"
  )
}
# Public: get absolute file from name deduced using these rules
# * using absolute/relative <conf> file (ignores <confFolder> and <extension>
# * from home/.bash-tools/<confFolder>/<conf><extension> file
# * from framework conf/<conf><extension> file
#
# **Arguments**:
# * $1 confFolder to use below bash-tools conf folder
# * $2 conf file to use without extension
# * $3 file extension to use (default: sh)
#
# Returns 1 if file not found or error during file loading
Functions::loadConf() {
  local confFolder="$1"
  local conf="$2"
  local extension="${3:-sh}"
  local confFile=""
  # normalize extension so ".sh" and "sh" are both accepted
  if [[ -n "${extension}" && "${extension:0:1}" != "." ]]; then
    extension=".${extension}"
  fi
  # if conf is absolute
  if [[ "${conf}" == /* ]]; then
    # file contains /, consider it as absolute filename
    confFile="${conf}"
  else
    # user override in ~/.bash-tools wins over the framework default conf
    # shellcheck source=/conf/dsn/default.local.env
    confFile="${HOME}/.bash-tools/${confFolder}/${conf}${extension}"
    if [ ! -f "${confFile}" ]; then
      confFile="${__BASH_FRAMEWORK_VENDOR_PATH:?}/conf/${confFolder}/${conf}${extension}"
    fi
  fi
  if [ ! -f "${confFile}" ]; then
    return 1
  fi
  # sourcing executes the conf file in the caller's shell (side effects)
  # shellcheck disable=SC1090
  source "${confFile}"
}
# Public: list the conf files list available in bash-tools/conf/<conf> folder
# and those overriden in $HOME/.bash-tools/<conf> folder
# **Arguments**:
# * $1 confFolder the directory name (not the path) to list
# * $2 the extension (sh by default)
# * $3 the indentation (' - ' by default) can be any string compatible with sed not containing any /
#
# **Output**: list of files without extension/directory
# eg:
# - default.local
# - default.remote
# - localhost-root
Functions::getConfMergedList() {
  local confFolder="$1"
  local extension="${2:-sh}"
  local indentStr="${3:- - }"
  # intentionally not local: these globals match the framework's conventions
  DEFAULT_CONF_DIR="${__BASH_FRAMEWORK_VENDOR_PATH:?}/conf/${confFolder}"
  HOME_CONF_DIR="${HOME}/.bash-tools/${confFolder}"
  # merge framework defaults with user overrides, removing duplicates
  {
    Functions::getList "${DEFAULT_CONF_DIR}" "${extension}" "${indentStr}"
    Functions::getList "${HOME_CONF_DIR}" "${extension}" "${indentStr}"
  } | sort -u
}
# Public: get absolute conf file from specified conf folder deduced using these rules
# * from absolute file (ignores <confFolder> and <extension>)
# * relative to where script is executed (ignores <confFolder> and <extension>)
# * from home/.bash-tools/<confFolder>
# * from framework conf/<confFolder>
#
# **Arguments**:
# * $1 confFolder the directory name (not the path) to list
# * $2 conf file to use without extension
# * $3 the extension (sh by default)
#
# Returns absolute conf filename
Functions::getAbsoluteConfFile() {
  local confFolder="$1"
  local conf="$2"
  local extension="${3-.sh}"
  # inner helper: echoes the first matching candidate, returns 1 when none
  getAbs() {
    local absoluteConfFile=""
    # load conf from absolute file, then home folder, then bash framework conf folder
    absoluteConfFile="${conf}"
    if [[ "${absoluteConfFile:0:1}" = "/" && -f "${absoluteConfFile}" ]]; then
      # file contains /, consider it as absolute filename
      echo "${absoluteConfFile}"
      return 0
    fi
    # relative to where script is executed
    absoluteConfFile="$(realpath "${__BASH_FRAMEWORK_CALLING_SCRIPT}/${conf}" 2>/dev/null || echo "")"
    if [ -f "${absoluteConfFile}" ]; then
      echo "${absoluteConfFile}"
      return 0
    fi
    # take extension into account (normalize to begin with a dot)
    if [[ -n "${extension}" && "${extension:0:1}" != "." ]]; then
      extension=".${extension}"
    fi
    # user override in ~/.bash-tools
    # shellcheck source=/conf/dsn/default.local.env
    absoluteConfFile="${HOME}/.bash-tools/${confFolder}/${conf}${extension}"
    if [ -f "${absoluteConfFile}" ]; then
      echo "${absoluteConfFile}"
      return 0
    fi
    # finally the framework-provided conf
    absoluteConfFile="${__BASH_FRAMEWORK_VENDOR_PATH:?}/conf/${confFolder}/${conf}${extension}"
    if [ -f "${absoluteConfFile}" ]; then
      echo "${absoluteConfFile}"
      return 0
    fi
    return 1
  }
  local abs=""
  abs="$(getAbs)" || {
    # file not found
    Log::displayError "conf file '${conf}' not found"
    return 1
  }
  Log::displayDebug "conf file '${conf}' matching '${abs}' file"
  echo "${abs}"
  return 0
}
# appends a command to a trap
#
# - 1st arg: code to add
# - remaining args: names of traps to modify
#
Functions::trapAdd() {
  local trapAddCmd="$1"
  shift || Log::fatal "${FUNCNAME[0]} usage error"
  # helper fn to get existing trap command from output
  # of trap -p ("trap -- '<cmd>' <signal>" -> 3rd word is the command)
  extract_trap_cmd() { printf '%s\n' "$3"; }
  for trapAddName in "$@"; do
    # re-register the trap as: previous command(s) + newline + new command;
    # eval is needed because trap -p output is shell-quoted
    trap -- "$(
      # print existing trap command with newline
      eval "extract_trap_cmd $(trap -p "${trapAddName}")"
      # print the new trap command
      printf '%s\n' "${trapAddCmd}"
    )" "${trapAddName}" \
      || Log::fatal "unable to add to trap ${trapAddName}"
  done
}
# *Public*: run command and store data in following global variables :
# * bash_framework_status the exit status of the command
# * bash_framework_duration the duration of the command
# * bash_framework_output the output of the command
# redirecting error output to stdout is not supported, you can instead redirect stderr to a file if needed
# **Arguments**:
# * $@ command with arguments to execute
Functions::run() {
  # 'bash_framework_status', 'bash_framework_duration' are global variables
  local -i start end
  start=$(date +%s)
  bash_framework_status=0
  bash_framework_output=""
  # temporarily relax errexit/errtrace/functrace so a failing command is
  # captured in bash_framework_status instead of aborting the caller
  local origFlags="$-"
  set +eET
  local origIFS="$IFS"
  # execute command
  bash_framework_output="$("$@")"
  bash_framework_status="$?"
  # restore caller's IFS and shell flags
  IFS="$origIFS"
  set "-$origFlags"
  # calculate duration
  end=$(date +%s)
  # shellcheck disable=SC2034
  bash_framework_duration=$(( end - start ))
}
|
#!/usr/bin/env bash
# Run the local CLI's "cache" command with a pinned Node 12 binary.
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$DIR" || exit 1
# NOTE(review): hard-coded per-user nvm path; the v12* glob expands when
# $NODE is used unquoted below and breaks if several v12.x versions (or
# none) are installed — consider `nvm exec 12` or `command -v node` instead
NODE=/Users/szymon/.nvm/versions/node/v12*/bin/node
$NODE ../cli.js cache
|
// DirectoryTree/Scout — resources/js/loading-indicator.js
import * as Ladda from 'ladda';
/**
 * Thin wrapper around the Ladda button loading indicators.
 */
class LoadingIndicator {
    /**
     * Attaches Ladda to every submit button that has not opted out
     * via the `no-loading` class.
     */
    static bind() {
        Ladda.bind('button[type=submit]:not(.no-loading)');
    }

    /**
     * Stops every running Ladda indicator on the page.
     */
    static stopAll() {
        Ladda.stopAll();
    }
}

export default LoadingIndicator;
|
import getPort from 'get-port';
import {spawn, exec, ChildProcess} from 'child_process';
import rimraf from 'rimraf';
import path from 'path';
import puppeteer, {Browser, Page} from 'puppeteer';
import fs from 'fs';
// @ts-ignore
import wait from 'wait-on';
import kill from 'tree-kill';
/** Handle to a running gotify instance plus the puppeteer session driving it. */
export interface GotifyTest {
    /** Base URL of the spawned gotify server. */
    url: string;
    /** Shuts down the browser and the server, then removes the test binary. */
    close: () => Promise<void>;
    browser: Browser;
    page: Page;
}
// ".exe" suffix for built binaries on Windows, empty elsewhere
const windowsPrefix = process.platform === 'win32' ? '.exe' : '';
// path to gotify's main package, relative to this test helper
const appDotGo = path.join(__dirname, '..', '..', '..', 'app.go');
// all test artifacts (binaries, plugin dirs) are created below this folder
const testBuildPath = path.join(__dirname, 'build');
/**
 * Creates a fresh plugin directory and compiles each given Go plugin source
 * into it, one at a time. Resolves with the directory path.
 */
export const newPluginDir = async (plugins: string[]): Promise<string> => {
    const {dir, generator} = testPluginDir();
    for (let i = 0; i < plugins.length; i++) {
        // build sequentially; each plugin gets a fresh unique .so path
        await buildGoPlugin(generator(), plugins[i]);
    }
    return dir;
};
/**
 * Builds and boots a gotify server on a free port, opens a puppeteer page
 * on it and returns a handle that tears everything down.
 */
export const newTest = async (pluginsDir = ''): Promise<GotifyTest> => {
    const port = await getPort();
    const gotifyFile = testFilePath();
    await buildGoExecutable(gotifyFile);
    const gotifyInstance = startGotify(gotifyFile, port, pluginsDir);
    const gotifyURL = 'http://localhost:' + port;
    // block until the server answers (wait-on uses the http-get:// scheme)
    await waitForGotify('http-get://localhost:' + port);
    const browser = await puppeteer.launch({
        // headful locally for debugging, headless on CI
        headless: process.env.CI === 'true',
        args: [`--window-size=1920,1080`, '--no-sandbox'],
    });
    const page = await browser.newPage();
    await page.setViewport({width: 1920, height: 1080});
    await page.goto(gotifyURL);
    return {
        close: async () => {
            // close the browser and kill the whole gotify process tree in parallel
            await Promise.all([
                browser.close(),
                new Promise((resolve) => kill(gotifyInstance.pid, 'SIGKILL', () => resolve())),
            ]);
            rimraf.sync(gotifyFile, {maxBusyTries: 8});
        },
        url: gotifyURL,
        browser,
        page,
    };
};
/**
 * Allocates a unique plugin build directory under the shared test build
 * folder and returns it together with a generator of unique .so paths
 * inside that directory.
 */
const testPluginDir = (): {dir: string; generator: () => string} => {
    const randomSuffix = Math.random().toString(36).substring(2, 15);
    const dir = path.join(testBuildPath, `gotifyplugin_${randomSuffix}`);
    if (!fs.existsSync(dir)) {
        fs.mkdirSync(dir, 0o755);
    }
    const generator = (): string => {
        const name = Math.random().toString(36).substring(2, 15);
        return path.join(dir, `${name}.so`);
    };
    return {dir, generator};
};
/**
 * Returns a unique path for a gotify test binary inside the build folder,
 * honoring the platform's executable suffix.
 */
const testFilePath = (): string => {
    const token = Math.random().toString(36).substring(2, 15);
    return path.join(testBuildPath, `gotifytest_${token}${windowsPrefix}`);
};
/**
 * Resolves once the gotify HTTP endpoint answers; rejects (after logging)
 * when wait-on times out after 40 seconds.
 */
const waitForGotify = (url: string): Promise<void> =>
    new Promise((resolve, reject) => {
        wait({resources: [url], timeout: 40000}, (error: string) => {
            if (error) {
                console.log(error);
                reject(error);
            } else {
                resolve();
            }
        });
    });
/**
 * Compiles a Go plugin (buildmode=plugin) from `pluginPath` into `filename`.
 * Resolves when the build command finishes (build errors are not surfaced).
 */
const buildGoPlugin = (filename: string, pluginPath: string): Promise<void> => {
    process.stdout.write(`### Building Plugin ${pluginPath}\n`);
    // Fix: emit the artifact at `filename`; the previous "$(unknown)"
    // placeholder was passed to the shell verbatim as a command substitution.
    return new Promise((resolve) =>
        exec(`go build -o ${filename} -buildmode=plugin ${pluginPath}`, () => resolve())
    );
};
/**
 * Produces a gotify executable at `filename`: either copies the prebuilt
 * binary referenced by the GOTIFY_EXE env variable, or builds app.go in
 * prod mode. Resolves when the copy/build finishes.
 */
const buildGoExecutable = (filename: string): Promise<void> => {
    const envGotify = process.env.GOTIFY_EXE;
    if (envGotify) {
        if (!fs.existsSync(testBuildPath)) {
            fs.mkdirSync(testBuildPath);
        }
        fs.copyFileSync(envGotify, filename);
        // Fix: interpolate the real target path (was a "$(unknown)" placeholder)
        process.stdout.write(`### Copying ${envGotify} to ${filename}\n`);
        return Promise.resolve();
    } else {
        process.stdout.write(`### Building Gotify ${filename}\n`);
        // Fix: pass the real output path to `go build -o` (was "$(unknown)",
        // which the shell would try to execute as a command substitution)
        return new Promise((resolve) =>
            exec(`go build -ldflags="-X main.Mode=prod" -o ${filename} ${appDotGo}`, () =>
                resolve()
            )
        );
    }
};
/**
 * Spawns the gotify server binary on the given port with an in-memory
 * SQLite database and the provided plugin directory; its output is piped
 * through to this process.
 */
const startGotify = (filename: string, port: number, pluginDir: string): ChildProcess => {
    const env = {
        GOTIFY_SERVER_PORT: String(port),
        GOTIFY_DATABASE_CONNECTION: 'file::memory:?mode=memory&cache=shared',
        GOTIFY_PLUGINSDIR: pluginDir,
        NODE_ENV: process.env.NODE_ENV,
        PUBLIC_URL: process.env.PUBLIC_URL,
    };
    const child = spawn(filename, [], {env});
    child.stdout.pipe(process.stdout);
    child.stderr.pipe(process.stderr);
    return child;
};
|
# Derive the expected version from the fully-qualified package identifier:
# idents look like origin/name/version/release, so field 3 is the version.
TEST_PKG_VERSION="$(echo "${TEST_PKG_IDENT}" | cut -d/ -f3)"

@test "Version matches" {
  # `strace -V` prints a banner; the fourth word of its first line is the
  # version number, which must match the packaged version.
  result="$(hab pkg exec "${TEST_PKG_IDENT}" strace -V | head -1 | awk '{print $4}')"
  [ "$result" = "${TEST_PKG_VERSION}" ]
}

@test "Can strace" {
  # Smoke test: use strace to trace strace itself printing its help text.
  run hab pkg exec "${TEST_PKG_IDENT}" strace strace -h
  # Quote $status per shell best practice (bats sets it to the exit code).
  [ "$status" -eq 0 ]
}
|
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
require 'date'
# rubocop:disable Lint/UnneededCopDisableDirective, Metrics/LineLength
module OCI
  # References an OCI-managed protection capability. Checks if HTTP requests/responses are malicious.
  #
  # NOTE: auto-generated SDK model — the attribute_map/swagger_types pair
  # drives (de)serialization in build_from_hash and to_hash below.
  class Waf::Models::ProtectionCapability
    # **[Required]** Unique key of referenced protection capability.
    # @return [String]
    attr_accessor :key

    # **[Required]** Version of referenced protection capability.
    # @return [Integer]
    attr_accessor :version

    # @return [OCI::Waf::Models::ProtectionCapabilityExclusions]
    attr_accessor :exclusions

    # Override action to take if capability was triggered, defined in Protection Rule for this capability.
    # Only actions of type CHECK are allowed.
    #
    # @return [String]
    attr_accessor :action_name

    # The minimum sum of weights of associated collaborative protection capabilities that have triggered which
    # must be reached in order for _this_ capability to trigger.
    # This field is ignored for non-collaborative capabilities.
    #
    # @return [Integer]
    attr_accessor :collaborative_action_threshold

    # Explicit weight values to use for associated collaborative protection capabilities.
    #
    # @return [Array<OCI::Waf::Models::CollaborativeCapabilityWeightOverride>]
    attr_accessor :collaborative_weights

    # Attribute mapping from ruby-style variable name to JSON key.
    def self.attribute_map
      {
        # rubocop:disable Style/SymbolLiteral
        'key': :'key',
        'version': :'version',
        'exclusions': :'exclusions',
        'action_name': :'actionName',
        'collaborative_action_threshold': :'collaborativeActionThreshold',
        'collaborative_weights': :'collaborativeWeights'
        # rubocop:enable Style/SymbolLiteral
      }
    end

    # Attribute type mapping.
    def self.swagger_types
      {
        # rubocop:disable Style/SymbolLiteral
        'key': :'String',
        'version': :'Integer',
        'exclusions': :'OCI::Waf::Models::ProtectionCapabilityExclusions',
        'action_name': :'String',
        'collaborative_action_threshold': :'Integer',
        'collaborative_weights': :'Array<OCI::Waf::Models::CollaborativeCapabilityWeightOverride>'
        # rubocop:enable Style/SymbolLiteral
      }
    end

    # rubocop:disable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
    # rubocop:disable Metrics/MethodLength, Layout/EmptyLines, Style/SymbolLiteral

    # Initializes the object.
    # Accepts each attribute under either its camelCase JSON key or its
    # snake_case ruby name, but raises if a caller supplies both spellings.
    # @param [Hash] attributes Model attributes in the form of hash
    # @option attributes [String] :key The value to assign to the {#key} property
    # @option attributes [Integer] :version The value to assign to the {#version} property
    # @option attributes [OCI::Waf::Models::ProtectionCapabilityExclusions] :exclusions The value to assign to the {#exclusions} property
    # @option attributes [String] :action_name The value to assign to the {#action_name} property
    # @option attributes [Integer] :collaborative_action_threshold The value to assign to the {#collaborative_action_threshold} property
    # @option attributes [Array<OCI::Waf::Models::CollaborativeCapabilityWeightOverride>] :collaborative_weights The value to assign to the {#collaborative_weights} property
    def initialize(attributes = {})
      return unless attributes.is_a?(Hash)

      # convert string to symbol for hash key
      attributes = attributes.each_with_object({}) { |(k, v), h| h[k.to_sym] = v }

      self.key = attributes[:'key'] if attributes[:'key']

      self.version = attributes[:'version'] if attributes[:'version']

      self.exclusions = attributes[:'exclusions'] if attributes[:'exclusions']

      # camelCase key is read first; the snake_case assignment below wins when
      # only the snake_case key is present.
      self.action_name = attributes[:'actionName'] if attributes[:'actionName']

      raise 'You cannot provide both :actionName and :action_name' if attributes.key?(:'actionName') && attributes.key?(:'action_name')

      self.action_name = attributes[:'action_name'] if attributes[:'action_name']

      self.collaborative_action_threshold = attributes[:'collaborativeActionThreshold'] if attributes[:'collaborativeActionThreshold']

      raise 'You cannot provide both :collaborativeActionThreshold and :collaborative_action_threshold' if attributes.key?(:'collaborativeActionThreshold') && attributes.key?(:'collaborative_action_threshold')

      self.collaborative_action_threshold = attributes[:'collaborative_action_threshold'] if attributes[:'collaborative_action_threshold']

      self.collaborative_weights = attributes[:'collaborativeWeights'] if attributes[:'collaborativeWeights']

      raise 'You cannot provide both :collaborativeWeights and :collaborative_weights' if attributes.key?(:'collaborativeWeights') && attributes.key?(:'collaborative_weights')

      self.collaborative_weights = attributes[:'collaborative_weights'] if attributes[:'collaborative_weights']
    end
    # rubocop:enable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
    # rubocop:enable Metrics/MethodLength, Layout/EmptyLines, Style/SymbolLiteral

    # rubocop:disable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity, Layout/EmptyLines

    # Checks equality by comparing each attribute.
    # @param [Object] other the other object to be compared
    def ==(other)
      return true if equal?(other)

      self.class == other.class &&
        key == other.key &&
        version == other.version &&
        exclusions == other.exclusions &&
        action_name == other.action_name &&
        collaborative_action_threshold == other.collaborative_action_threshold &&
        collaborative_weights == other.collaborative_weights
    end
    # rubocop:enable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity, Layout/EmptyLines

    # @see the `==` method
    # @param [Object] other the other object to be compared
    def eql?(other)
      self == other
    end

    # rubocop:disable Metrics/AbcSize, Layout/EmptyLines

    # Calculates hash code according to all attributes.
    # Must stay consistent with `==` so instances behave correctly as Hash keys.
    # @return [Fixnum] Hash code
    def hash
      [key, version, exclusions, action_name, collaborative_action_threshold, collaborative_weights].hash
    end
    # rubocop:enable Metrics/AbcSize, Layout/EmptyLines

    # rubocop:disable Metrics/AbcSize, Layout/EmptyLines

    # Builds the object from hash
    # @param [Hash] attributes Model attributes in the form of hash
    # @return [Object] Returns the model itself
    def build_from_hash(attributes)
      return nil unless attributes.is_a?(Hash)

      self.class.swagger_types.each_pair do |key, type|
        if type =~ /^Array<(.*)>/i
          # check to ensure the input is an array given that the attribute
          # is documented as an array but the input is not
          if attributes[self.class.attribute_map[key]].is_a?(Array)
            public_method("#{key}=").call(
              attributes[self.class.attribute_map[key]]
                .map { |v| OCI::Internal::Util.convert_to_type(Regexp.last_match(1), v) }
            )
          end
        elsif !attributes[self.class.attribute_map[key]].nil?
          public_method("#{key}=").call(
            OCI::Internal::Util.convert_to_type(type, attributes[self.class.attribute_map[key]])
          )
        end
        # or else data not found in attributes(hash), not an issue as the data can be optional
      end

      self
    end
    # rubocop:enable Metrics/AbcSize, Layout/EmptyLines

    # Returns the string representation of the object
    # @return [String] String presentation of the object
    def to_s
      to_hash.to_s
    end

    # Returns the object in the form of hash
    # @return [Hash] Returns the object in the form of hash
    def to_hash
      hash = {}
      self.class.attribute_map.each_pair do |attr, param|
        value = public_method(attr).call
        # skip attributes that were never set (nil AND ivar undefined)
        next if value.nil? && !instance_variable_defined?("@#{attr}")

        hash[param] = _to_hash(value)
      end
      hash
    end

    private

    # Outputs non-array value in the form of hash
    # For object, use to_hash. Otherwise, just return the value
    # @param [Object] value Any valid value
    # @return [Hash] Returns the value in the form of hash
    def _to_hash(value)
      if value.is_a?(Array)
        value.compact.map { |v| _to_hash(v) }
      elsif value.is_a?(Hash)
        {}.tap do |hash|
          value.each { |k, v| hash[k] = _to_hash(v) }
        end
      elsif value.respond_to? :to_hash
        value.to_hash
      else
        value
      end
    end
  end
end
# rubocop:enable Lint/UnneededCopDisableDirective, Metrics/LineLength
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.