text stringlengths 1 1.05M |
|---|
#!/usr/bin/env bash
# Smoke test: start the daytime UDP server on a random unprivileged port,
# run the client against it once, then shut the server down.
SRV_PORT=$(($RANDOM + 1024))
# Server listens on all interfaces; runs in the background so we can drive it.
./daytimeudpsrv_byname "0.0.0.0" $SRV_PORT &
SRV_PID=$!
# Give the server a moment to bind its socket before the client connects.
sleep 1
./daytimeudpcli_byname "127.0.0.1" $SRV_PORT
kill $SRV_PID
|
namespace TogglerService.Models
{
/// <summary>
/// Entity that, judging by its property names, records a service excluded
/// from a <see cref="GlobalToggle"/> (inferred from naming — confirm against
/// the data model / migrations).
/// </summary>
public class ExcludedService
{
/// <summary>Navigation property to the owning global toggle.</summary>
public GlobalToggle GlobalToggle { get; set; }
/// <summary>Identifier of the toggle this exclusion applies to.</summary>
public string ToggleId { get; set; }
/// <summary>Identifier of the excluded service.</summary>
public string ServiceId { get; set; }
}
} |
<gh_stars>1-10
/*
*
*/
package net.community.chest.db.sql.impl;
import java.sql.ClientInfoStatus;
import java.sql.Connection;
import java.sql.SQLClientInfoException;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
/**
* <P>Copyright 2008 as per GPLv2</P>
*
* <P>Provides some default implementations for {@link Connection} interface</P>
*
* @author <NAME>.
* @since Feb 11, 2009 11:24:44 AM
*/
/**
 * Skeletal {@link Connection} implementation: stores the configurable
 * connection attributes (auto-commit flag, catalog, client info, holdability,
 * transaction isolation level, type map, read-only flag) in instance fields
 * and exposes them through the standard getters/setters. The setters only
 * record the values; concrete subclasses are responsible for applying them
 * to an actual database connection.
 */
public abstract class AbstractConnection implements Connection {
    protected AbstractConnection ()
    {
        super();
    }

    /** Last value passed to {@link #setAutoCommit(boolean)}; defaults to false. */
    private boolean _autoCommit;
    /*
     * @see java.sql.Connection#getAutoCommit()
     */
    @Override
    public boolean getAutoCommit () throws SQLException
    {
        return _autoCommit;
    }
    /*
     * @see java.sql.Connection#setAutoCommit(boolean)
     */
    @Override
    public void setAutoCommit (boolean autoCommit) throws SQLException
    {
        _autoCommit = autoCommit;
    }

    /** Current catalog name; null until {@link #setCatalog(String)} is called. */
    private String _catalog;
    /*
     * @see java.sql.Connection#getCatalog()
     */
    @Override
    public String getCatalog () throws SQLException
    {
        return _catalog;
    }
    /*
     * @see java.sql.Connection#setCatalog(java.lang.String)
     */
    @Override
    public void setCatalog (String catalog) throws SQLException
    {
        _catalog = catalog;
    }

    /**
     * Client-info properties. Note: stored and returned by reference (no
     * defensive copy) — {@link #setClientInfo(String, String)} relies on
     * mutating the returned instance in place.
     */
    private Properties _clientInfo;
    /*
     * @see java.sql.Connection#getClientInfo()
     */
    @Override
    public Properties getClientInfo () throws SQLException
    {
        return _clientInfo;
    }
    /*
     * @see java.sql.Connection#setClientInfo(java.util.Properties)
     */
    @Override
    public void setClientInfo (Properties properties) throws SQLClientInfoException
    {
        _clientInfo = properties;
    }
    /*
     * @see java.sql.Connection#getClientInfo(java.lang.String)
     */
    @Override
    public String getClientInfo (String name) throws SQLException
    {
        // Null/empty names and an unset properties table simply yield null.
        if ((null == name) || name.isEmpty())
            return null;

        final Properties ci=getClientInfo();
        if (null == ci)
            return null;

        return ci.getProperty(name);
    }
    /*
     * @see java.sql.Connection#setClientInfo(java.lang.String, java.lang.String)
     */
    @Override
    public void setClientInfo (String name, String value)
            throws SQLClientInfoException
    {
        if ((null == name) || name.isEmpty())
        {
            final Map<String,ClientInfoStatus> pm=new HashMap<String,ClientInfoStatus>(2, 1.0f);
            pm.put(String.valueOf(name), ClientInfoStatus.REASON_UNKNOWN);
            throw new SQLClientInfoException(pm);
        }

        try
        {
            Properties ci=getClientInfo();
            if (null == ci)
            {
                ci = new Properties();
                setClientInfo(ci);
            }

            // A null value clears the property (JDBC contract); this class
            // additionally treats an empty string as a clear request.
            if ((value != null) && (!value.isEmpty()))
                ci.setProperty(name, value);    // setProperty keeps the table String-typed (put accepts any Object)
            else
                ci.remove(name);
        }
        catch(SQLException e)
        {
            final Map<String,ClientInfoStatus> pm=new HashMap<String,ClientInfoStatus>(2, 1.0f);
            pm.put(name, ClientInfoStatus.REASON_VALUE_INVALID);
            // Preserve the underlying failure as the cause instead of
            // silently discarding it (the original dropped the exception).
            throw new SQLClientInfoException(pm, e);
        }
    }

    /** Result-set holdability; -1 marks "not set yet". */
    private int _holdability=(-1);
    /*
     * @see java.sql.Connection#getHoldability()
     */
    @Override
    public int getHoldability () throws SQLException
    {
        return _holdability;
    }
    /*
     * @see java.sql.Connection#setHoldability(int)
     */
    @Override
    public void setHoldability (int holdability) throws SQLException
    {
        _holdability = holdability;
    }

    /** Transaction isolation level; -1 marks "not set yet". */
    private int _xactIsolation=(-1);
    /*
     * @see java.sql.Connection#getTransactionIsolation()
     */
    @Override
    public int getTransactionIsolation () throws SQLException
    {
        return _xactIsolation;
    }
    /*
     * @see java.sql.Connection#setTransactionIsolation(int)
     */
    @Override
    public void setTransactionIsolation (int level) throws SQLException
    {
        _xactIsolation = level;
    }

    /** UDT type map; stored by reference, null until set. */
    private Map<String,Class<?>> _typeMap;
    /*
     * @see java.sql.Connection#getTypeMap()
     */
    @Override
    public Map<String,Class<?>> getTypeMap () throws SQLException
    {
        return _typeMap;
    }
    /*
     * @see java.sql.Connection#setTypeMap(java.util.Map)
     */
    @Override
    public void setTypeMap (Map<String,Class<?>> map) throws SQLException
    {
        _typeMap = map;
    }

    /** Read-only hint; defaults to false. */
    private boolean _readOnly;
    /*
     * @see java.sql.Connection#isReadOnly()
     */
    @Override
    public boolean isReadOnly () throws SQLException
    {
        return _readOnly;
    }
    /*
     * @see java.sql.Connection#setReadOnly(boolean)
     */
    @Override
    public void setReadOnly (boolean readOnly) throws SQLException
    {
        _readOnly = readOnly;
    }
}
|
#!/usr/bin/env sh
set -e
# Ubuntu
#sudo apt-get update
#sudo apt-get install -y git docker #ansible
# CentOS/RHEL
#sudo yum install -y git docker ansible curl tar zip unzip
#ssh-copy-id
sudo yum install -y docker iptables-services
# Force the overlay2 storage driver (overriding the kernel-version check for
# older CentOS/RHEL kernels) before docker-storage-setup runs.
sudo sh -c 'echo EXTRA_STORAGE_OPTIONS=\"--storage-opt overlay2.override_kernel_check=true\">/etc/sysconfig/docker-storage-setup'
sudo sh -c 'echo STORAGE_DRIVER=\"overlay2\" >>/etc/sysconfig/docker-storage-setup'
sudo rm -f /etc/sysconfig/docker-storage || true
# Firewalld (and selinux) do not play well with k8s (and especially with kubeadm).
# NOTE: A machine reboot may be required if SELinux was enforced previously
# (sudo added: these systemctl/iptables calls ran unprivileged in the original
# while every other system-level command used sudo, so they silently failed
# for non-root users)
sudo systemctl stop firewalld || true
sudo systemctl disable firewalld || true
sudo systemctl mask firewalld || true
sudo systemctl start iptables
sudo systemctl enable iptables
sudo systemctl unmask iptables
sudo systemctl stop docker
sudo systemctl start docker-storage-setup
sudo systemctl restart docker
sudo systemctl enable docker
#sudo chown vagrant /var/run/docker.sock # optional
# SET Default Policies to ACCEPT
sudo iptables -P FORWARD ACCEPT
sudo iptables -P INPUT ACCEPT
sudo iptables -P OUTPUT ACCEPT
# Remove the Default REJECT rules, so it will hit the default Policy.
# "|| true" keeps `set -e` from aborting the whole script when the rule is
# already absent (e.g. on a re-run): iptables -D exits non-zero in that case.
sudo iptables -D INPUT -j REJECT --reject-with icmp-host-prohibited || true
sudo iptables -D FORWARD -j REJECT --reject-with icmp-host-prohibited || true
# If someone wants to enable only some ports (there will be many, and most of them dynamic), here is a start: 6443 (k8s api), 10250, etc. (maybe both tcp and udp...)
#sudo iptables -I INPUT -p tcp --dport 6443 -m conntrack --ctstate NEW,ESTABLISHED -j ACCEPT
#sudo iptables -I OUTPUT -p tcp --sport 6443 -m conntrack --ctstate ESTABLISHED -j ACCEPT
# DEBUG LIVE WITH:
# watch -n1 iptables -vnL
|
package events
import "github.com/DisgoOrg/disgo/api"
// NewEvent constructs a new GenericEvent with the provided Disgo instance.
// NOTE: the freshly-built event is dispatched to the Disgo EventManager as a
// side effect before being returned to the caller.
func NewEvent(disgo api.Disgo, sequenceNumber int) GenericEvent {
event := GenericEvent{disgo: disgo, sequenceNumber: sequenceNumber}
disgo.EventManager().Dispatch(event)
return event
}
// GenericEvent the base event structure: carries the Disgo instance and the
// gateway sequence number (presumably embedded/wrapped by concrete events —
// confirm against the rest of the events package).
type GenericEvent struct {
disgo api.Disgo
sequenceNumber int
}
// Disgo returns the Disgo instance for this event
func (d GenericEvent) Disgo() api.Disgo {
return d.disgo
}
// SequenceNumber returns the sequence number of the gateway event
func (d GenericEvent) SequenceNumber() int {
return d.sequenceNumber
}
|
#!/bin/bash
# NOTE(review): changed from #!/bin/sh — this script uses bash-only features
# (the `function` keyword, `set -o pipefail`, `[[ ]]`, and the
# RSYNC_PROTECT_TMP_FILES array below) that break under a POSIX sh such as
# dash. On macOS /bin/sh happens to be bash-in-sh-mode, which masked this.
set -e
set -u
set -o pipefail
function on_error {
  echo "$(realpath -mq "${0}"):$1: error: Unexpected failure"
}
trap 'on_error $LINENO' ERR
if [ -z ${FRAMEWORKS_FOLDER_PATH+x} ]; then
  # If FRAMEWORKS_FOLDER_PATH is not set, then there's nowhere for us to copy
  # frameworks to, so exit 0 (signalling the script phase was successful).
  exit 0
fi
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
COCOAPODS_PARALLEL_CODE_SIGN="${COCOAPODS_PARALLEL_CODE_SIGN:-false}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
# Used as a return value for each invocation of `strip_invalid_archs` function.
STRIP_BINARY_RETVAL=0
# This protects against multiple targets copying the same framework dependency at the same time. The solution
# was originally proposed here: https://lists.samba.org/archive/rsync/2008-February/020158.html
RSYNC_PROTECT_TMP_FILES=(--filter "P .*.??????")
# Copies and strips a vendored framework
# $1 is a framework path, resolved against BUILT_PRODUCTS_DIR (full path, then
# basename) before being tried literally. After copying, non-built
# architectures are stripped, the bundle is re-signed if required, and (only
# for Xcode < 7) linked Swift runtime dylibs are embedded alongside it.
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
# NOTE(review): if none of the branches above matched, "source" is unset and
# the -L test below aborts under `set -u` — verify callers always pass an
# existing framework path.
local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
# Use filter instead of exclude so missing patterns don't throw errors.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
# Fall back to a bare binary (no .framework wrapper), and resolve a symlinked
# destination binary to its real file before stripping.
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
elif [ -L "${binary}" ]; then
echo "Destination binary is symlinked..."
dirname="$(dirname "${binary}")"
binary="${dirname}/$(readlink "${binary}")"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u)
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Copies and strips a vendored dSYM
# $1 is the path to a .framework.dSYM bundle. It is copied into
# DERIVED_FILES_DIR, stripped of non-built architectures, and then either the
# stripped bundle is copied into DWARF_DSYM_FOLDER_PATH, or (when no stripping
# happened) a placeholder is touched so Xcode's input/output tracking does not
# rerun this script.
install_dsym() {
local source="$1"
if [ -r "$source" ]; then
# Copy the dSYM into a the targets temp dir.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${DERIVED_FILES_DIR}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${DERIVED_FILES_DIR}"
local basename
basename="$(basename -s .framework.dSYM "$source")"
binary="${DERIVED_FILES_DIR}/${basename}.framework.dSYM/Contents/Resources/DWARF/${basename}"
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"Mach-O "*"dSYM companion"* ]]; then
strip_invalid_archs "$binary"
fi
# STRIP_BINARY_RETVAL is set by strip_invalid_archs: 1 = binary was processed.
if [[ $STRIP_BINARY_RETVAL == 1 ]]; then
# Move the stripped file into its final destination.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${DERIVED_FILES_DIR}/${basename}.framework.dSYM\" \"${DWARF_DSYM_FOLDER_PATH}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${DERIVED_FILES_DIR}/${basename}.framework.dSYM" "${DWARF_DSYM_FOLDER_PATH}"
else
# The dSYM was not stripped at all, in this case touch a fake folder so the input/output paths from Xcode do not reexecute this script because the file is missing.
touch "${DWARF_DSYM_FOLDER_PATH}/${basename}.framework.dSYM"
fi
fi
}
# Copies the bcsymbolmap files of a vendored framework into BUILT_PRODUCTS_DIR.
# Fix: the echo line previously nested unescaped double quotes, so the printed
# command was mangled; the inner quotes are now escaped exactly as in the
# sibling install_framework/install_dsym functions.
install_bcsymbolmap() {
    local bcsymbolmap_path="$1"
    local destination="${BUILT_PRODUCTS_DIR}"
    echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${bcsymbolmap_path}\" \"${destination}\""
    rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${bcsymbolmap_path}" "${destination}"
}
# Signs a framework with the provided identity
# Skips signing when no identity is expanded or the build settings disable
# code signing. With COCOAPODS_PARALLEL_CODE_SIGN=true the codesign command is
# backgrounded ("&" appended) and reaped by the `wait` at the end of the script.
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY:-}" -a "${CODE_SIGNING_REQUIRED:-}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
# Use the current code_sign_identity
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
# Built as a string and eval'd so the optional trailing "&" takes effect.
local code_sign_cmd="/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS:-} --preserve-metadata=identifier,entitlements '$1'"
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
code_sign_cmd="$code_sign_cmd &"
fi
echo "$code_sign_cmd"
eval "$code_sign_cmd"
fi
}
# Strip invalid architectures
# Removes (via lipo) every architecture in $1 that is not in the build's
# ARCHS list. Communicates through the global STRIP_BINARY_RETVAL:
#   0 = nothing stripped (no overlap between binary archs and ARCHS)
#   1 = binary processed (possibly stripped)
strip_invalid_archs() {
binary="$1"
# Get architectures for current target binary
binary_archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | awk '{$1=$1;print}' | rev)"
# Intersect them with the architectures we are building for
intersected_archs="$(echo ${ARCHS[@]} ${binary_archs[@]} | tr ' ' '\n' | sort | uniq -d)"
# If there are no archs supported by this binary then warn the user
if [[ -z "$intersected_archs" ]]; then
echo "warning: [CP] Vendored binary '$binary' contains architectures ($binary_archs) none of which match the current build architectures ($ARCHS)."
STRIP_BINARY_RETVAL=0
return
fi
stripped=""
for arch in $binary_archs; do
if ! [[ "${ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary"
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
STRIP_BINARY_RETVAL=1
}
# Install the Pod frameworks for the active configuration (the lists are
# generated per-configuration by CocoaPods; here both happen to be identical).
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework "${BUILT_PRODUCTS_DIR}/Kingfisher/Kingfisher.framework"
install_framework "${BUILT_PRODUCTS_DIR}/SMPager/SMPager.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework "${BUILT_PRODUCTS_DIR}/Kingfisher/Kingfisher.framework"
install_framework "${BUILT_PRODUCTS_DIR}/SMPager/SMPager.framework"
fi
# Reap any codesign jobs that were backgrounded by code_sign_if_enabled.
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
wait
fi
|
const sanitizeInput = (userInput) => {
let sanitizedString = ""
for(let i = 0; i < userInput.length; i++){
let char = userInput.charAt(i);
if(char === "<"){
while(userInput.charAt(i) !== ">"){
i++;
}
continue;
}
sanitizedString += char;
}
return sanitizedString;
} |
#!/usr/bin/env bash
# Build Jam3D in Debug configuration and run the resulting binary.
# Fixes: abort on the first failing step (previously a failed cmake/make still
# launched a stale binary), and create bin/ so the script works from a clean
# checkout instead of failing on `cd bin`.
set -e
echo "Building with debug flag..."
mkdir -p bin
cd bin
cmake -DCMAKE_BUILD_TYPE=Debug ..
make
cd ..
echo "Running..."
./bin/src/Jam3D
# https://github.com/moteus/lua-travis-example
# Make the locally-installed Lua toolchain and LuaRocks binaries visible.
export PATH=${PATH}:$HOME/.lua:$HOME/.local/bin:${TRAVIS_BUILD_DIR}/install/luarocks/bin
# Install Lua + LuaRocks into $HOME/.lua via the helper script.
bash .travis/setup_lua.sh
# Load the LuaRocks module search paths (LUA_PATH/LUA_CPATH) into this shell.
eval `$HOME/.lua/luarocks path`
|
#!/bin/sh
#SBATCH --clusters=ub-hpc
#SBATCH --partition=largemem --qos=largemem
#SBATCH --time=72:00:00
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --output=slurm.out
cur_dir=$(pwd)
# Input/output files are taken from the environment, e.g.
#   export INFILE=inp_ut.inp
#   export OUTFILE=out_put.out
# Fix: the original contained the literal placeholder lines "infile here" and
# "outfile here", which are not valid shell (they would run as commands and
# fail). Fail fast with a clear message when the variables are unset instead.
INFILE=${INFILE:?"is not set - export INFILE=<name>.inp before submitting"}
OUTFILE=${OUTFILE:?"is not set - export OUTFILE=<name>.out before submitting"}
if [ ! -f $INFILE ]; then
echo "Error! Input file does not exist (${INFILE})"
echo "Aborting the job..."
exit 1
fi
BASE=`basename $INFILE .inp`
# a parallel version of the infile
PINFILE=${BASE}.pin
#construct nodefile, named as per Orca manual
export SLURM_NODEFILE=${BASE}.nodes
#use analogous naming convention for rankfile
export ORCA_RANKFILE=${BASE}.ranks
# wall-clock start timestamp (seconds since epoch)
tic=`date +%s`
echo "Start Time = "`date`
# load modules
echo "Loading modules ..."
module load openmpi/gcc-4.8.3/1.6.5
export NBOEXE=/util/academic/nbo/nbo6/bin/nbo6.i4.exe
module load orca
module load nbo/6
ulimit -s unlimited
module list
# disable PSM interface if the nodes have Mellanox hardware
bMLX=`/usr/sbin/ibstat | head -n1 | grep mlx | wc -l`
if [ "$bMLX" == "1" ]; then
export OMPI_MCA_mtl=^psm
fi
# change to working directory
#cd $SLURM_SUBMIT_DIR
# Rewrite the input so Orca's %pal block matches the SLURM allocation:
# prepend our own "%pal nprocs" line and drop any existing one from the input.
echo "%pal nprocs $SLURM_NPROCS end" > $PINFILE
cat $INFILE | grep -v "^%pal nprocs" >> $PINFILE
echo "Launching orca ..."
echo "SLURM job ID = "$SLURM_JOB_ID
echo "Working Dir = "$SLURM_SUBMIT_DIR
echo "Compute Nodes = "`nodeset -e $SLURM_NODELIST`
echo "Number of Processors = "$SLURM_NPROCS
echo "Number of Nodes = "$SLURM_NNODES
echo "mpirun command = "`which mpirun`
echo "orca nodefile = "$SLURM_NODEFILE
echo "orca rankfile = "$ORCA_RANKFILE
echo " "
echo "Input file"
cat $PINFILE
echo " "
# create rank file to explicitly bind cores
echo "creating hostfile and rankfile"
uid=`id -u`
jid=$SLURM_JOB_ID
nodes=`nodeset -e $SLURM_NODELIST`
# trigger creation of cpuset information and save to working dir
srun bash -c "cat /cgroup/cpuset/slurm/uid_${uid}/job_${jid}/cpuset.cpus > cpus.\`hostname\`.$SLURM_JOB_ID"
rm -f $ORCA_RANKFILE
rm -f $SLURM_NODEFILE
rank=0
# Build one "rank N=<node> slot=<cpu>" line per assigned cpu, stopping once
# SLURM_NPROCS ranks have been emitted (hence the duplicated break below:
# the inner one leaves the cpu loop, the outer one leaves the node loop).
for i in ${nodes}; do
# extract space-separated list of assigned cpus
cpus=`cat cpus.${i}.${SLURM_JOB_ID}`
cpus=`nodeset -Re $cpus`
# add cpu assignments to the rank file
for j in ${cpus}; do
echo "rank ${rank}=$i slot=$j" >> $ORCA_RANKFILE
echo "$i" >> $SLURM_NODEFILE
rank=`expr $rank + 1`
if [ "$rank" == "$SLURM_NPROCS" ]; then
break;
fi
done
if [ "$rank" == "$SLURM_NPROCS" ]; then
break;
fi
done
# use ssh instead of slurm as the launcher
# the rankfile that was just created will ensure cpusets are still honored.
export OMPI_MCA_plm=rsh
# launch application using mpirun
echo "Launching application using mpirun"
# launch application
$ORCA_PATH/orca $PINFILE >> $OUTFILE
echo "All Done!"
|
<reponame>yinfuquan/spring-boot-examples
package com.yin.springboot.mybatis.server;
import java.util.List;
import com.yin.springboot.mybatis.domain.UmsMemberRuleSetting;
/**
 * Single-table CRUD service contract for {@link UmsMemberRuleSetting}
 * records (MyBatis-generator style). All int-returning methods return the
 * number of affected rows.
 */
public interface UmsMemberRuleSettingService{
/** Deletes the row with the given primary key. */
int deleteByPrimaryKey(Long id);
/** Inserts a full record (all columns). */
int insert(UmsMemberRuleSetting record);
/** Inserts the record, or updates it if the key already exists. */
int insertOrUpdate(UmsMemberRuleSetting record);
/** Like insertOrUpdate, but only touches non-null fields. */
int insertOrUpdateSelective(UmsMemberRuleSetting record);
/** Inserts only the non-null fields of the record. */
int insertSelective(UmsMemberRuleSetting record);
/** Loads a record by primary key, or null if absent. */
UmsMemberRuleSetting selectByPrimaryKey(Long id);
/** Updates only the non-null fields of the record, matched by primary key. */
int updateByPrimaryKeySelective(UmsMemberRuleSetting record);
/** Updates all columns of the record, matched by primary key. */
int updateByPrimaryKey(UmsMemberRuleSetting record);
/** Batch-updates the given records. */
int updateBatch(List<UmsMemberRuleSetting> list);
/** Batch-inserts the given records. */
int batchInsert(List<UmsMemberRuleSetting> list);
}
|
#!/usr/bin/env bash
# CI entry point: environment variables (TEST/WHITESPACE/STYLE/DOCS/
# PUBLISH_PKG) select which job function at the bottom of the file runs.
set -ex
# Prefer user-local tools (e.g. `pip install --user` binaries) when present.
if [ -d "${HOME}/.local/bin" ]; then
export PATH="${HOME}/.local/bin:${PATH}"
fi
# Overridable defaults: source-tree root and the python interpreter to use.
SRC_ROOT=${SRC_ROOT:-"${PWD}"}
PYTHON=${PYTHON:-"python3"}
# Build an sdist and upload it to PyPI — but only when running inside GitHub
# Actions on a tag whose name differs from the version already on PyPI
# (makes re-runs of the release workflow idempotent).
function run_publish_pkg() {
    if [ "x${GITHUB_ACTIONS}" != "xtrue" ]; then
        echo "Did not detect github actions, exiting."
        exit 1
    fi
    if [[ "x${GITHUB_REF}" != "xrefs/tags/"* ]]; then
        echo "Did not detect TAG, got ${GITHUB_REF}."
        echo "exiting."
        exit 1
    fi
    # Start from a pristine tree so the sdist contains no stray files.
    git status
    git reset --hard HEAD
    git clean -xdf
    # Query PyPI for the currently published version.
    # Fix: use the configurable "${PYTHON}" (default python3, see the top of
    # this script and run_style) instead of a hard-coded `python`, which may
    # be missing or point at Python 2 on some runners.
    pypi_version=$("${PYTHON}" -c 'import json, urllib.request; print(json.loads(urllib.request.urlopen("https://pypi.org/pypi/tpm2-pytss/json").read())["info"]["version"])')
    tag=${GITHUB_REF/refs\/tags\//}
    if [ "x${tag}" == "x${pypi_version}" ]; then
        echo "Git Tag is same as PyPI version: ${tag} == ${pypi_version}"
        echo "Nothing to do, exiting."
        exit 0
    fi
    "${PYTHON}" setup.py sdist
    "${PYTHON}" -m twine upload dist/*
}
# Run the test suite inside the tpm2-tss-python docker image, mounting the
# checkout at /workspace/tpm2-pytss and running as the invoking user so
# artifacts are not root-owned.
function run_test() {
ci_env=""
if [ "$ENABLE_COVERAGE" == "true" ]; then
# The codecov helper prints the `-e VAR` docker flags needed to forward
# CI metadata into the container for coverage upload.
ci_env=$(bash <(curl -s https://codecov.io/env))
fi
docker run --rm \
-u $(id -u):$(id -g) \
-v "${PWD}:/workspace/tpm2-pytss" \
--env-file .ci/docker.env \
$ci_env \
tpm2software/tpm2-tss-python \
/bin/bash -c '/workspace/tpm2-pytss/.ci/docker.run'
}
# Fail the build if any .py file in the tree contains trailing whitespace.
function run_whitespace() {
    export whitespace=$(mktemp -u)
    function rmtempfile () {
        rm -f "$whitespace"
    }
    trap rmtempfile EXIT
    # Fix: redirect stdout to the file FIRST, then duplicate stderr into it,
    # so both streams are captured. The original `2>&1 > file` order pointed
    # stderr at the terminal and only stdout at the file.
    find . -type f -name '*.py' -exec grep -EHn " +$" {} \; > "$whitespace" 2>&1
    lines=$(wc -l < "$whitespace")
    if [ "$lines" -ne 0 ]; then
        echo "Trailing whitespace found" >&2
        cat "${whitespace}" >&2
        exit 1
    fi
}
# Check formatting with black in dry-run mode; a non-zero exit (unformatted
# files) fails the build via the script-level `set -e`.
function run_style() {
"${PYTHON}" -m black --diff --check "${SRC_ROOT}"
}
# Build the documentation inside the tpm2-tss-python docker image: sets up a
# virtualenv, installs the package with dev extras, then runs scripts/docs.sh.
function run_build_docs() {
docker run --rm \
-u $(id -u):$(id -g) \
-v "${PWD}:/workspace/tpm2-pytss" \
--env-file .ci/docker.env \
tpm2software/tpm2-tss-python \
/bin/bash -c 'virtualenv .venv && . .venv/bin/activate && . .ci/docker-prelude.sh && python3 -m pip install -e .[dev] && ./scripts/docs.sh '
}
# Dispatch: the first non-empty selector variable wins (the "x" prefix is the
# classic guard against empty-string operands in `[ ]` comparisons).
if [ "x${TEST}" != "x" ]; then
run_test
elif [ "x${WHITESPACE}" != "x" ]; then
run_whitespace
elif [ "x${STYLE}" != "x" ]; then
run_style
elif [ "x${DOCS}" != "x" ]; then
run_build_docs
elif [ "x${PUBLISH_PKG}" != "x" ]; then
run_publish_pkg
fi
|
<gh_stars>0
const sharp = require('sharp');
const { nanoid } = require('nanoid');
module.exports = async ({ rel, src: uncheckedSrc, options, debug }) => {
try {
src = uncheckedSrc;
// convert Array buffer if needed.
if (typeof uncheckedSrc !== 'string') {
src = Buffer.from(uncheckedSrc);
}
//Lighthouse makes it difficutl to tell which placeholders belong to which images, so this helps us know which one it belongs to
//much of the time, the id will actually be truncated in the Lighthouse report, but the important part is that you have enough to identify the image
const id_string = `id:${nanoid()};`;
const place = await sharp(src).resize(options.resize).jpeg(options.jpeg).toBuffer({ resolveWithObject: false });
if (debug) console.log({ rel, placeholder: `data:image/jpeg;${id_string}base64,${place.toString('base64')}`, error: null });
return { rel, placeholder: `data:image/jpeg;${id_string}base64,${place.toString('base64')}`, error: null };
} catch (e) {
return { error: e };
}
};
|
#!/bin/bash
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Deployment settings for the linkis-web front end.
#URL of the backend service of linkis-mg-gateway
linkis_url="http://127.0.0.1:9001"
#linkis ip address,the http access address for linkis-web will be http://${linkis_ipaddr}:${linkis_port}
linkis_ipaddr=127.0.0.1
linkis_port=8088
|
# Fix apt "https" errors: the https transport and CA certificates must be
# installed before any https mirror can be used.
apt install apt-transport-https ca-certificates
# Debian buster; mirror docs: https://mirrors.tuna.tsinghua.edu.cn/help/debian/
# Back up the stock source list first.
cp /etc/apt/sources.list /etc/apt/sources.list.bck
# Tsinghua (TUNA) mirror.
# FIX(review): the original used `cat >file<-eof ... eof`, which is NOT a
# heredoc — `<-eof` redirects stdin from a file literally named "-eof" and
# fails; `<<'EOF' ... EOF` is the correct form. deb-src entries stay
# commented out to speed up `apt update`.
cat >/etc/apt/sources.list <<'EOF'
deb https://mirrors.tuna.tsinghua.edu.cn/debian/ buster main contrib non-free
# deb-src https://mirrors.tuna.tsinghua.edu.cn/debian/ buster main contrib non-free
deb https://mirrors.tuna.tsinghua.edu.cn/debian/ buster-updates main contrib non-free
# deb-src https://mirrors.tuna.tsinghua.edu.cn/debian/ buster-updates main contrib non-free
deb https://mirrors.tuna.tsinghua.edu.cn/debian/ buster-backports main contrib non-free
# deb-src https://mirrors.tuna.tsinghua.edu.cn/debian/ buster-backports main contrib non-free
deb https://mirrors.tuna.tsinghua.edu.cn/debian-security buster/updates main contrib non-free
# deb-src https://mirrors.tuna.tsinghua.edu.cn/debian-security buster/updates main contrib non-free
EOF
# ---------------------------------------------------------------------------
# Alternative mirror, kept for reference but commented out: the original
# script wrote three source lists in a row (so only the last one survived),
# and its final "Aliyun https" block was actually a duplicate of the TUNA
# list above. Uncomment the block below to use Aliyun (http) instead.
#
# cat >/etc/apt/sources.list <<'EOF'
# deb http://mirrors.aliyun.com/debian/ buster main non-free contrib
# deb-src http://mirrors.aliyun.com/debian/ buster main non-free contrib
# deb http://mirrors.aliyun.com/debian-security buster/updates main
# deb-src http://mirrors.aliyun.com/debian-security buster/updates main
# deb http://mirrors.aliyun.com/debian/ buster-updates main non-free contrib
# deb-src http://mirrors.aliyun.com/debian/ buster-updates main non-free contrib
# deb http://mirrors.aliyun.com/debian/ buster-backports main non-free contrib
# deb-src http://mirrors.aliyun.com/debian/ buster-backports main non-free contrib
# EOF
|
<filename>qrutils/widgets/qRealMessageBox.cpp
/* Copyright 2017 CyberTech Labs Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. */
#include "qRealMessageBox.h"
#include <QtCore/QMap>
#include <QDebug>
// Drop-in replacement for QMessageBox::question() that routes every standard
// button's caption through tr(), so translations loaded for this class's
// context are applied (the stock static helper uses Qt's own translations).
// Returns the StandardButton the user activated.
QMessageBox::StandardButton utils::QRealMessageBox::question(QWidget *parent
, const QString &title
, const QString &text
, QMessageBox::StandardButtons buttons)
{
// Heap-allocated: when `parent` is non-null the dialog becomes a child of it
// and Qt's parent-child ownership frees it when the parent is destroyed.
// NOTE(review): until the parent dies the closed dialog lingers in memory;
// consider messageBox->setAttribute(Qt::WA_DeleteOnClose) — confirm no caller
// relies on the object surviving exec().
QMessageBox *messageBox = new QMessageBox(parent);
messageBox->setWindowTitle(title);
messageBox->setText(text);
messageBox->setStandardButtons(buttons);
// Relabel each requested standard button with a translated caption.
// NOTE(review): setButtonText() is deprecated in newer Qt (5.x+); the
// replacement is button(which)->setText(...) — confirm the Qt version range
// this project supports before migrating.
if (buttons & QMessageBox::Ok) { messageBox->setButtonText(QMessageBox::Ok, tr("Ok")); }
if (buttons & QMessageBox::Open) { messageBox->setButtonText(QMessageBox::Open, tr("Open")); }
if (buttons & QMessageBox::Save) { messageBox->setButtonText(QMessageBox::Save, tr("Save")); }
if (buttons & QMessageBox::Cancel) { messageBox->setButtonText(QMessageBox::Cancel, tr("Cancel")); }
if (buttons & QMessageBox::Close) { messageBox->setButtonText(QMessageBox::Close, tr("Close")); }
if (buttons & QMessageBox::Discard) { messageBox->setButtonText(QMessageBox::Discard, tr("Discard")); }
if (buttons & QMessageBox::Apply) { messageBox->setButtonText(QMessageBox::Apply, tr("Apply")); }
if (buttons & QMessageBox::Reset) { messageBox->setButtonText(QMessageBox::Reset, tr("Reset")); }
if (buttons & QMessageBox::Help) { messageBox->setButtonText(QMessageBox::Help, tr("Help")); }
if (buttons & QMessageBox::SaveAll) { messageBox->setButtonText(QMessageBox::SaveAll, tr("Save All")); }
if (buttons & QMessageBox::Yes) { messageBox->setButtonText(QMessageBox::Yes, tr("Yes")); }
if (buttons & QMessageBox::YesToAll) { messageBox->setButtonText(QMessageBox::YesToAll, tr("Yes To All")); }
if (buttons & QMessageBox::No) { messageBox->setButtonText(QMessageBox::No, tr("No")); }
if (buttons & QMessageBox::NoToAll) { messageBox->setButtonText(QMessageBox::NoToAll, tr("No To All")); }
if (buttons & QMessageBox::Abort) { messageBox->setButtonText(QMessageBox::Abort, tr("Abort")); }
if (buttons & QMessageBox::Retry) { messageBox->setButtonText(QMessageBox::Retry, tr("Retry")); }
if (buttons & QMessageBox::Ignore) { messageBox->setButtonText(QMessageBox::Ignore, tr("Ignore")); }
if (buttons & QMessageBox::NoButton) { messageBox->setButtonText(QMessageBox::NoButton, tr("NoButton")); }
if (buttons & QMessageBox::RestoreDefaults) {
messageBox->setButtonText(QMessageBox::RestoreDefaults, tr("Restore Defaults"));
}
auto result = static_cast<QMessageBox::StandardButton>(messageBox->exec());
// Without a parent nothing owns the box, so free it manually.
if (!parent) {
delete messageBox;
}
return result;
}
|
<reponame>makenosound/css-in-js-generator
const parseSelector = require("postcss-selector-parser");
/**
 * Collects the class-selector parts (e.g. ".foo") of a CSS selector string.
 * Returns ["root"] when the selector contains no class selectors at all.
 */
export function getSelectorScope(selector: string): string[] {
  const classes: string[] = [];
  const collectClasses = (nodes: any) => {
    for (const node of nodes.first.nodes) {
      if (node.type !== "class") {
        continue;
      }
      classes.push(node.toString());
    }
  };
  parseSelector(collectClasses).processSync(selector);
  if (classes.length === 0) {
    return ["root"];
  }
  return classes;
}
|
function toUTC(time) {
let hrs = time.substring(0,2);
let amPm = time.substring(6, 8);
if (amPm == 'PM') {
hrs = parseInt(hrs) + 12
}
return `${hrs.toString().padStart(2, '0')}:${time.substring(3, 5)}`
}
toUTC('3:00 PM'); //15:00 |
/*
* =============================================================================
*
* Copyright (c) 2011-2016, The THYMELEAF team (http://www.thymeleaf.org)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* =============================================================================
*/
package org.thymeleaf.templateengine.springintegration.context;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import org.springframework.validation.BindingResult;
import org.thymeleaf.testing.templateengine.exception.TestEngineExecutionException;
import org.thymeleaf.testing.templateengine.testable.ITest;
/**
 * A {@code SpringIntegrationWebProcessingContextBuilder} specialization that
 * reads a "bindingErrors" list from the test context variables and registers
 * the corresponding errors on the matching {@link BindingResult}.
 *
 * Each list entry is a map with keys:
 * <ul>
 *   <li>"binding" (required): name of the binding variable the errors apply to</li>
 *   <li>"field" (optional): field name; when absent, a global error is registered</li>
 *   <li>"message" (required): the error message text</li>
 * </ul>
 */
public class ErrorsSpringIntegrationWebProcessingContextBuilder
        extends SpringIntegrationWebProcessingContextBuilder {

    // Made final: these are lookup-key constants, and as mutable public
    // statics any code could previously have reassigned them.
    public static final String BINDING_ERRORS_CONTEXT_VARIABLE_NAME = "bindingErrors";
    public static final String BINDING_ERRORS_OBJECT_BINDING_NAME = "binding";
    public static final String BINDING_ERRORS_OBJECT_BINDING_FIELD_NAME = "field";
    public static final String BINDING_ERRORS_OBJECT_BINDING_MESSAGE_NAME = "message";

    public ErrorsSpringIntegrationWebProcessingContextBuilder() {
        super();
    }

    /**
     * After the superclass has set up the binding result, applies every
     * "bindingErrors" entry whose "binding" name matches
     * {@code bindingVariableName} as a field or global error.
     *
     * @throws TestEngineExecutionException if a matching entry lacks the
     *         mandatory "message" property
     */
    @Override
    protected void initBindingResult(final String bindingVariableName,
            final Object bindingObject, final ITest test, final BindingResult bindingResult,
            final Locale locale, final Map<String,Object> variables) {

        super.initBindingResult(bindingVariableName, bindingObject, test,
                bindingResult, locale, variables);

        @SuppressWarnings("unchecked")
        final List<Map<String,Object>> bindingErrorsList =
                (List<Map<String,Object>>) variables.get(BINDING_ERRORS_CONTEXT_VARIABLE_NAME);
        if (bindingErrorsList == null) {
            return;
        }

        for (final Map<String,Object> bindingErrors : bindingErrorsList) {
            final Object bindingObj = bindingErrors.get(BINDING_ERRORS_OBJECT_BINDING_NAME);
            // Skip entries that are missing a binding name or target a
            // different binding variable (guard clause replaces the original
            // nested ifs; behavior is identical).
            if (bindingObj == null || !bindingObj.toString().equals(bindingVariableName)) {
                continue;
            }

            final Object fieldObj = bindingErrors.get(BINDING_ERRORS_OBJECT_BINDING_FIELD_NAME);
            final Object messageObj = bindingErrors.get(BINDING_ERRORS_OBJECT_BINDING_MESSAGE_NAME);
            if (messageObj == null) {
                throw new TestEngineExecutionException(
                        "Error specification does not include property 'message', which is mandatory");
            }

            if (fieldObj != null) {
                // Field-level error
                bindingResult.rejectValue(fieldObj.toString(), "no_code", messageObj.toString());
            } else {
                // Global (object-level) error
                bindingResult.reject("no_code", messageObj.toString());
            }
        }
    }
}
|
# Demonstrates computing the intersection of two lists by converting them
# to sets and using the & operator.
Set1 = [1, 2, 3, 4, 5]
Set2 = [3, 4, 5, 6, 7]
# intersection
intersection_set = set(Set1) & set(Set2)
# Printing intersection
print("Intersection is :", intersection_set)
# Expected output (was a bare, un-commented line in the original, which is a
# SyntaxError and prevented the snippet from running at all):
# Intersection is : {3, 4, 5}
class OptionsManager:
    """Tracks per-file option dicts, each merged over a set of defaults.

    Fix over the original: ``get_options`` used to return ``self.default_options``
    itself for unknown files, so a caller mutating the returned dict silently
    corrupted the defaults for every subsequent lookup. Unknown files now get
    a copy. Stored per-file dicts are still returned by reference, as before.
    """

    def __init__(self, default_options):
        # Snapshot the caller's dict so later external mutation of it cannot
        # change our defaults behind our back.
        self.default_options = dict(default_options)
        # file name -> fully merged options dict
        self.file_options = {}

    def set_options(self, file_name, user_options):
        """Store options for *file_name*: defaults overlaid with *user_options*."""
        merged_options = self.default_options.copy()
        merged_options.update(user_options)
        self.file_options[file_name] = merged_options

    def get_options(self, file_name):
        """Return the merged options for *file_name*.

        Unknown files yield a fresh copy of the defaults, so callers may
        mutate the result freely.
        """
        if file_name in self.file_options:
            return self.file_options[file_name]
        return self.default_options.copy()
# Example usage: per-file options fall back to the shared defaults.
default_options = {"shared": False}
manager = OptionsManager(default_options)
manager.set_options("file1.txt", {"shared": True, "encryption": "AES"})
manager.set_options("file2.txt", {"compression": "zip"})
# Stored files return their merged options; unknown files get the defaults.
print(manager.get_options("file1.txt")) # Output: {'shared': True, 'encryption': 'AES'}
print(manager.get_options("file2.txt")) # Output: {'shared': False, 'compression': 'zip'}
print(manager.get_options("file3.txt")) # Output: {'shared': False}
#!/bin/sh
# Generate random test input by delegating to the Python helper script.
python random_input.py
|
<reponame>miluoshi/obsidian-advanced-slides
import { AttributeTransformer, Properties } from ".";
/**
 * Moves a `class` attribute's space-separated names onto the element via
 * addClass(), then removes the attribute itself.
 */
export class ClassTransformer implements AttributeTransformer {

    transform(element: Properties) {
        const clazz = element.getAttribute('class');
        // Loose != deliberately also skips null, matching original behavior.
        if (clazz != undefined) {
            clazz
                .split(" ")
                .map((value) => value.trim())
                // Consecutive/leading spaces produce empty strings from
                // split(); the original passed those to addClass(''). Drop them.
                .filter((value) => value.length > 0)
                .forEach((value) => {
                    element.addClass(value);
                });
            element.deleteAttribute('class');
        }
    }
}
|
# Prompt for a whole number and echo it back to the user.
raw_value = input("Enter an integer: ")
num = int(raw_value)
print("The number is:", num)
<filename>cmd/goatcounter/reindex.go
// Copyright © 2019 <NAME> – This file is part of GoatCounter and
// published under the terms of a slightly modified EUPL v1.2 license, which can
// be found in the LICENSE file or at https://license.goatcounter.com
package main
import (
"context"
"fmt"
"os"
"strings"
"time"
nnow "github.com/jinzhu/now"
"zgo.at/gadget"
"zgo.at/goatcounter"
"zgo.at/goatcounter/cron"
"zgo.at/zdb"
"zgo.at/zli"
"zgo.at/zlog"
"zgo.at/zvalidate"
)
// reindex
const usageReindex = `
GoatCounter keeps several *_stats and *_count tables so it's less expensive to
generate charts. These are normally updated automatically in the background.
This command recreates these tables. This is mostly for upgrades; you shouldn't
have to run this in normal usage.
This command may take a while to run on larger sites.
For SQLite you may want to stop the main GoatCounter process first, or you're
likely to get locking errors. For PostgreSQL this shouldn't be an issue.
Flags:
-db Database connection: "sqlite://<file>" or "postgres://<connect>"
See "goatcounter help db" for detailed documentation. Default:
sqlite://db/goatcounter.sqlite3?_busy_timeout=200&_journal_mode=wal&cache=shared
-debug Modules to debug, comma-separated or 'all' for all modules.
-pause Number of seconds to pause after each month, to give the server
some breathing room on large sites. Default: 0.
-since Reindex only statistics since this month instead of all of them;
as year-month in UTC.
-to Reindex only statistics up to and including this month; as
year-month in UTC. The default is the current month.
-table Which tables to reindex: hit_stats, hit_counts, browser_stats,
system_stats, location_stats, ref_counts, size_stats, or all
(default).
-useragents Redo the bot and browser/system detection on all User-Agent headrs.
-site Only reindex this site ID. Default is to reindex all.
-silent Don't print progress.
`
// TODO: re-do the way this works. Instead of operating on the database directly
// send a signal to goatcounter to reindex stuff. This makes it easier to deal
// with locking from the application level, especially for SQLite.
// reindex implements the "goatcounter reindex" subcommand: it parses the
// command-line flags, optionally redoes User-Agent detection, determines the
// month range to rebuild, and then rebuilds the stats tables site by site.
// Returns (exit code, error).
func reindex() (int, error) {
	dbConnect := flagDB()
	debug := flagDebug()
	since := CommandLine.String("since", "", "")
	to := CommandLine.String("to", "", "")
	table := CommandLine.String("table", "all", "")
	pause := CommandLine.Int("pause", 0, "")
	silent := CommandLine.Bool("silent", false, "")
	doUA := CommandLine.Bool("useragents", false, "")
	var site int64
	CommandLine.Int64Var(&site, "site", 0, "")
	err := CommandLine.Parse(os.Args[2:])
	if err != nil {
		return 1, err
	}
	tables := strings.Split(*table, ",")
	// Validate flag values before touching the database.
	v := zvalidate.New()
	firstDay := v.Date("-since", *since, "2006-01")
	lastDay := v.Date("-to", *to, "2006-01")
	for _, t := range tables {
		v.Include("-table", t, []string{"hit_stats", "hit_counts",
			"browser_stats", "system_stats", "location_stats",
			"ref_counts", "size_stats", "all", ""})
	}
	if v.HasErrors() {
		return 1, v
	}
	zlog.Config.SetDebug(*debug)
	db, err := connectDB(*dbConnect, nil, false)
	if err != nil {
		return 2, err
	}
	defer db.Close()
	ctx := zdb.WithDB(context.Background(), db)
	// Optionally redo bot/browser/system detection first.
	if *doUA {
		err = userAgents(ctx, *silent)
		if err != nil {
			return 1, err
		}
	}
	// Nothing else to do if no tables were requested.
	if len(tables) == 0 || (len(tables) == 1 && tables[0] == "") {
		return 0, nil
	}
	// Without -since, start from the first recorded hit (optionally scoped
	// to a single site).
	if *since == "" {
		w := ""
		if site > 0 {
			w = fmt.Sprintf(" where site_id=%d ", site)
		}
		var first string
		err := db.GetContext(ctx, &first, `select created_at from hits `+w+` order by created_at asc limit 1`)
		if err != nil {
			if zdb.ErrNoRows(err) {
				// No hits at all: nothing to reindex.
				return 0, nil
			}
			return 1, err
		}
		firstDay, err = time.Parse("2006-01", first[:7])
		if err != nil {
			return 1, err
		}
	}
	// Without -to, reindex up to the current month.
	if *to == "" {
		lastDay = time.Now().UTC()
	}
	var sites goatcounter.Sites
	err = sites.UnscopedList(ctx)
	if err != nil {
		return 1, err
	}
	// Snap the range to whole months.
	firstDay = nnow.New(firstDay).BeginningOfMonth()
	lastDay = nnow.New(lastDay).EndOfMonth()
	for i, s := range sites {
		// -site filters to a single site ID; 0 means all.
		if site > 0 && s.ID != site {
			continue
		}
		err := dosite(ctx, s, tables, *pause, firstDay, lastDay, *silent, len(sites), i+1)
		if err != nil {
			return 1, err
		}
	}
	if !*silent {
		fmt.Fprintln(stdout, "")
	}
	return 0, nil
}
// dosite rebuilds the stats tables for one site, processing one calendar
// month per transaction so progress can be reported and the DB gets
// breathing room. pause > 0 sleeps that many seconds between months.
// nsites/isite are only used for the progress display.
func dosite(
	ctx context.Context, site goatcounter.Site, tables []string,
	pause int, firstDay, lastDay time.Time, silent bool,
	nsites, isite int,
) error {
	siteID := site.ID
	// Never reindex before the site's first recorded hit.
	if firstDay.Before(site.FirstHitAt) {
		firstDay = site.FirstHitAt
	}
	now := goatcounter.Now()
	now = time.Date(now.Year(), now.Month(), now.Day(), 23, 59, 59, 0, time.UTC)
	// Build [start, end] month windows covering firstDay..lastDay.
	months := [][]time.Time{
		{firstDay, nnow.With(firstDay).EndOfMonth()},
	}
	// The 12h offset pushes safely past the month boundary before snapping
	// to the start of the next month.
	start := nnow.With(nnow.With(firstDay).EndOfMonth().Add(12 * time.Hour)).BeginningOfMonth()
	for {
		if start.After(now) {
			break
		}
		end := nnow.With(start).EndOfMonth()
		if end.After(lastDay) {
			months = append(months, []time.Time{start, lastDay})
			break
		}
		months = append(months, []time.Time{start, end})
		start = nnow.With(end.Add(12 * time.Hour)).BeginningOfMonth()
	}
	query := `select * from hits where site_id=$1 and bot=0 and created_at>=$2 and created_at<=$3`
	var pauses time.Duration
	if pause > 0 {
		pauses = time.Duration(pause) * time.Second
	}
	for _, month := range months {
		// One transaction per month: delete the old rows, regenerate from hits.
		err := zdb.TX(ctx, func(ctx context.Context) error {
			if zdb.PgSQL(ctx) {
				err := zdb.Exec(ctx, `lock table hits, hit_counts, hit_stats, size_stats, location_stats, browser_stats, system_stats
					in exclusive mode`)
				if err != nil {
					return err
				}
			}
			var hits []goatcounter.Hit
			err := zdb.Select(ctx, &hits, query, siteID, dayStart(month[0]), dayEnd(month[1]))
			if err != nil {
				return err
			}
			if !silent {
				fmt.Fprintf(stdout, "\r\x1b[0Ksite %d (%d/%d) %s → %d", siteID, isite, nsites, month[0].Format("2006-01"), len(hits))
			}
			clearMonth(ctx, tables, month[0].Format("2006-01"), siteID)
			return cron.ReindexStats(ctx, site, hits, tables)
		})
		if err != nil {
			return err
		}
		if pauses > 0 {
			time.Sleep(pauses)
		}
	}
	return nil
}
func must(err error) {
if err != nil {
panic(err)
}
}
// clearMonth deletes the pre-aggregated rows for one site and one month
// (formatted "2006-01") from the requested stats tables, so they can be
// regenerated. *_stats tables key on a "day" column; hit_counts/ref_counts
// key on an "hour" column and need their own LIKE pattern.
func clearMonth(ctx context.Context, tables []string, month string, siteID int64) {
	// Matches any day in the given month, e.g. day like '2020-01-__'.
	where := fmt.Sprintf(" where site_id=%d and cast(day as varchar) like '%s-__'", siteID, month)
	for _, t := range tables {
		switch t {
		case "hit_stats":
			must(zdb.Exec(ctx, `delete from hit_stats`+where))
		case "hit_counts":
			must(zdb.Exec(ctx, fmt.Sprintf(
				`delete from hit_counts where site_id=%d and cast(hour as varchar) like '%s-%%'`,
				siteID, month)))
		case "browser_stats":
			must(zdb.Exec(ctx, `delete from browser_stats`+where))
		case "system_stats":
			must(zdb.Exec(ctx, `delete from system_stats`+where))
		case "location_stats":
			must(zdb.Exec(ctx, `delete from location_stats`+where))
		case "ref_counts":
			must(zdb.Exec(ctx, fmt.Sprintf(
				`delete from ref_counts where site_id=%d and cast(hour as varchar) like '%s-%%'`,
				siteID, month)))
		case "size_stats":
			must(zdb.Exec(ctx, `delete from size_stats`+where))
		case "all":
			// Clear every table listed above.
			must(zdb.Exec(ctx, `delete from hit_stats`+where))
			must(zdb.Exec(ctx, `delete from browser_stats`+where))
			must(zdb.Exec(ctx, `delete from system_stats`+where))
			must(zdb.Exec(ctx, `delete from location_stats`+where))
			must(zdb.Exec(ctx, `delete from size_stats`+where))
			must(zdb.Exec(ctx, fmt.Sprintf(
				`delete from hit_counts where site_id=%d and cast(hour as varchar) like '%s-%%'`,
				siteID, month)))
			must(zdb.Exec(ctx, fmt.Sprintf(
				`delete from ref_counts where site_id=%d and cast(hour as varchar) like '%s-%%'`,
				siteID, month)))
		}
	}
}
func dayStart(t time.Time) string { return t.Format("2006-01-02") + " 00:00:00" }
func dayEnd(t time.Time) string { return t.Format("2006-01-02") + " 23:59:59" }
// userAgents re-runs bot and browser/system detection on every stored
// User-Agent row: each one is unshortened and saved again (Update redoes
// the detection). Progress is printed every 500 rows unless silent.
func userAgents(ctx context.Context, silent bool) error {
	var uas []goatcounter.UserAgent
	err := zdb.Select(ctx, &uas, `select * from user_agents`)
	if err != nil {
		return err
	}
	for i, ua := range uas {
		ua.UserAgent = gadget.Unshorten(ua.UserAgent)
		err := ua.Update(ctx)
		if err != nil {
			return err
		}
		if !silent {
			// Only refresh the progress line periodically to limit output.
			if i%500 == 0 {
				zli.ReplaceLinef("user_agent %d of %d", i, len(uas))
			}
		}
	}
	if !silent {
		fmt.Println()
	}
	return nil
}
|
#!/usr/bin/env bash
# Start a detached RabbitMQ broker with the management plugin enabled.
# Port 5672 = AMQP, port 15672 = management web UI.
docker run -d --name rabbitmq -p 5672:5672 -p 15672:15672 rabbitmq:3-management
<gh_stars>0
package gen
import (
"log"
"net/http"
"strings"
)
// router maps each HTTP method to a route trie and stores the handler
// registered for every method+pattern combination.
type router struct {
	roots map[string]*trieNode    // per-method route tries
	handlers map[string]HandlerFunc // keyed by "METHOD-pattern"
}
// roots key eg, roots['GET'] roots['POST']
// handlers key eg, handlers['GET-/p/:lang/doc'], handlers['POST-/p/book']
// newRouter returns an empty router ready for route registration.
func newRouter() *router {
	return &router{
		roots: make(map[string]*trieNode),
		handlers: make(map[string]HandlerFunc),
	}
}
// parsePattern splits a route pattern on "/" into its non-empty segments.
// Collection stops after the first wildcard segment ("*..."): anything
// following a wildcard cannot influence matching, so it is discarded.
func parsePattern(pattern string) []string {
	segments := make([]string, 0)
	for _, segment := range strings.Split(pattern, "/") {
		if segment == "" {
			continue
		}
		segments = append(segments, segment)
		if segment[0] == '*' {
			break
		}
	}
	return segments
}
// addRoute registers handler for the given method and pattern: it inserts
// the pattern into the method's trie (creating the trie on first use) and
// stores the handler under the "METHOD-pattern" key.
func (r *router) addRoute(method string, pattern string, handler HandlerFunc) {
	log.Printf("Route %4s - %s", method, pattern)
	parts := parsePattern(pattern)
	key := method + "-" + pattern
	if _, has := r.roots[method]; !has {
		r.roots[method] = &trieNode{}
	}
	r.roots[method].insert(pattern, parts, 0)
	r.handlers[key] = handler
}
// getRoute looks path up in the method's trie and, on a match, extracts the
// dynamic parameters: ":name" segments capture one path segment, "*name"
// captures the remaining path joined with "/". Returns (nil, nil) when the
// method or path has no registered route.
func (r *router) getRoute(method string, path string) (*trieNode, map[string]string) {
	searchParts := parsePattern(path)
	params := make(map[string]string)
	root, has := r.roots[method]
	if !has {
		return nil, nil
	}
	node := root.search(searchParts, 0)
	if node != nil {
		parts := parsePattern(node.pattern)
		for index, part := range parts {
			if part[0] == ':' {
				// ":lang" matched segment index of the request path;
				// trie matching keeps parts and searchParts aligned.
				params[part[1:]] = searchParts[index]
			}
			if part[0] == '*' {
				params[part[1:]] = strings.Join(searchParts[index:], "/")
				break
			}
		}
		return node, params
	}
	return nil, nil
}
// handle resolves the route for the request in c, appends the matched
// handler (or a 404 fallback) to the context's handler chain, and then runs
// the chain (middlewares first, matched handler last).
func (r *router) handle(c *Context) {
	node, params := r.getRoute(c.Method, c.Path)
	if node != nil {
		c.Params = params
		key := c.Method + "-" + node.pattern
		c.handlers = append(c.handlers, r.handlers[key])
	} else {
		c.handlers = append(c.handlers, func(c *Context) {
			// Fixed escape: the original wrote "/n" instead of "\n".
			c.String(http.StatusNotFound, "404 NOT FOUND: %s\n", c.Path)
		})
	}
	// do the middlewares and handlers
	c.Next()
}
|
#!/bin/bash
#
# Copyright (c) 2017 The Bitcoin Core developers
# Copyright (c) 2017 The Titancoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Check for new lines in diff that introduce trailing whitespace.
# We can't run this check unless we know the commit range for the PR.
# Bail out early with usage instructions when TRAVIS_COMMIT_RANGE is unset.
if [ -z "${TRAVIS_COMMIT_RANGE}" ]; then
  echo "Cannot run lint-whitespace.sh without commit range. To run locally, use:"
  echo "TRAVIS_COMMIT_RANGE='<commit range>' .lint-whitespace.sh"
  echo "For example:"
  echo "TRAVIS_COMMIT_RANGE='47ba2c3...ee50c9e' .lint-whitespace.sh"
  exit 1
fi
# Print the zero-context diff for the commit range over the whole tree,
# excluding vendored subtrees and release notes.
showdiff() {
  if ! git diff -U0 "${TRAVIS_COMMIT_RANGE}" -- "." ":(exclude)src/leveldb/" ":(exclude)src/secp256k1/" ":(exclude)src/univalue/" ":(exclude)doc/release-notes/"; then
    echo "Failed to get a diff"
    exit 1
  fi
}
# Like showdiff, but restricted to source/doc file types where tabs are
# disallowed (C++, headers, markdown, Python, shell).
showcodediff() {
  if ! git diff -U0 "${TRAVIS_COMMIT_RANGE}" -- *.cpp *.h *.md *.py *.sh ":(exclude)src/leveldb/" ":(exclude)src/secp256k1/" ":(exclude)src/univalue/" ":(exclude)doc/release-notes/"; then
    echo "Failed to get a diff"
    exit 1
  fi
}
RET=0
# Check if trailing whitespace was found in the diff.
if showdiff | grep -E -q '^\+.*\s+$'; then
  echo "This diff appears to have added new lines with trailing whitespace."
  echo "The following changes were suspected:"
  FILENAME=""
  SEEN=0
  # Walk the filtered diff: "diff --git" lines name the file, "@@" lines give
  # the hunk location, anything else is an offending added line.
  while read -r line; do
    if [[ "$line" =~ ^diff ]]; then
      FILENAME="$line"
      SEEN=0
    elif [[ "$line" =~ ^@@ ]]; then
      LINENUMBER="$line"
    else
      if [ "$SEEN" -eq 0 ]; then
        # The first time a file is seen with trailing whitespace, we print the
        # filename (preceded by a newline).
        echo
        echo "$FILENAME"
        echo "$LINENUMBER"
        SEEN=1
      fi
      echo "$line"
    fi
  done < <(showdiff | grep -E '^(diff --git |@@|\+.*\s+$)')
  RET=1
fi
# Check if tab characters were found in the diff.
if showcodediff | grep -P -q '^\+.*\t'; then
  echo "This diff appears to have added new lines with tab characters instead of spaces."
  echo "The following changes were suspected:"
  FILENAME=""
  SEEN=0
  while read -r line; do
    if [[ "$line" =~ ^diff ]]; then
      FILENAME="$line"
      SEEN=0
    elif [[ "$line" =~ ^@@ ]]; then
      LINENUMBER="$line"
    else
      if [ "$SEEN" -eq 0 ]; then
        # The first time a file is seen with a tab character, we print the
        # filename (preceded by a newline).
        echo
        echo "$FILENAME"
        echo "$LINENUMBER"
        SEEN=1
      fi
      echo "$line"
    fi
  done < <(showcodediff | grep -P '^(diff --git |@@|\+.*\t)')
  RET=1
fi
# Non-zero exit when either check flagged something.
exit $RET
|
import React from 'react';
import { CarPreview } from './CarPreview';
export const IndexPage = (props) => (
<div className="home">
<div className="cars-selector">
{props.cars.map(
carData => <CarPreview key={carData.id} {...carData} />,
)}
</div>
</div>
);
export default IndexPage;
|
<!-- NOTE(review): no <!DOCTYPE html> declaration precedes this tag; browsers
     will render in quirks mode — confirm whether that is intended. -->
<html>
  <head>
    <!-- Meta tags -->
    <meta charset="utf-8">
    <title>Hello world!</title>
    <!-- Stylesheet: flexbox centering of the body's content -->
    <style>
      body {
        display: flex;
        align-items: center;
        justify-content: center;
      }
    </style>
  </head>
  <body>
    <h1>Hello world!</h1>
  </body>
</html>
using System;
/// <summary>
/// Counts the vowels in a sample sentence and prints the total.
/// </summary>
public class Program
{
    static void Main(string[] args)
    {
        string sentence = "This is a sample sentence";
        int count = 0;
        foreach (char c in sentence)
        {
            // Lower-case first so upper-case vowels are counted too
            // (the original compared against lower-case letters only).
            char lower = char.ToLowerInvariant(c);
            if (lower == 'a' || lower == 'e' || lower == 'i' || lower == 'o' || lower == 'u')
            {
                count++;
            }
        }
        Console.WriteLine($"The sentence has {count} vowels");
    }
}
# Helper functions for bootstraping the M-Lab k8s cluster and adding new master
# nodes.
# create_master provisions one k8s master node in GCE:
#   $1 = zone suffix (e.g. "a"), combined with GCE_REGION
#   $2 = weekday on which this node auto-reboots
# It allocates a static IP and DNS record, creates the VM, installs the k8s
# components over ssh, then either bootstraps a new cluster or joins the
# existing one depending on ETCD_CLUSTER_STATE.
function create_master {
  local zone=$1
  local reboot_day=$2
  gce_zone="${GCE_REGION}-${zone}"
  gce_name="master-${GCE_BASE_NAME}-${gce_zone}"
  GCE_ARGS=("--zone=${gce_zone}" "${GCP_ARGS[@]}")
  # Indirect expansion: the machine type is read from the per-project
  # variable GCE_TYPE_<project> via ${!GCE_TYPE_VAR} below.
  GCE_TYPE_VAR="GCE_TYPE_${PROJECT//-/_}"
  # Create a static IP for the GCE instance, or use the one that already exists.
  EXISTING_IP=$(gcloud compute addresses list \
      --filter "name=${gce_name} AND region:${GCE_REGION}" \
      --format "value(address)" \
      "${GCP_ARGS[@]}" || true)
  if [[ -n "${EXISTING_IP}" ]]; then
    EXTERNAL_IP="${EXISTING_IP}"
  else
    gcloud compute addresses create "${gce_name}" \
        --region "${GCE_REGION}" \
        "${GCP_ARGS[@]}"
    EXTERNAL_IP=$(gcloud compute addresses list \
        --filter "name=${gce_name} AND region:${GCE_REGION}" \
        --format "value(address)" \
        "${GCP_ARGS[@]}")
  fi
  # Check the value of the existing IP address in DNS associated with this GCE
  # instance. If it's the same as the current/existing IP, then leave DNS alone,
  # else delete the existing DNS RR and create a new one.
  EXISTING_DNS_IP=$(gcloud dns record-sets list \
      --zone "${PROJECT}-measurementlab-net" \
      --name "${gce_name}.${PROJECT}.measurementlab.net." \
      --format "value(rrdatas[0])" \
      "${GCP_ARGS[@]}" || true)
  if [[ -z "${EXISTING_DNS_IP}" ]]; then
    # Add the record.
    gcloud dns record-sets transaction start \
        --zone "${PROJECT}-measurementlab-net" \
        "${GCP_ARGS[@]}"
    gcloud dns record-sets transaction add \
        --zone "${PROJECT}-measurementlab-net" \
        --name "${gce_name}.${PROJECT}.measurementlab.net." \
        --type A \
        --ttl 300 \
        "${EXTERNAL_IP}" \
        "${GCP_ARGS[@]}"
    gcloud dns record-sets transaction execute \
        --zone "${PROJECT}-measurementlab-net" \
        "${GCP_ARGS[@]}"
  elif [[ "${EXISTING_DNS_IP}" != "${EXTERNAL_IP}" ]]; then
    # Add the record, deleting the existing one first.
    gcloud dns record-sets transaction start \
        --zone "${PROJECT}-measurementlab-net" \
        "${GCP_ARGS[@]}"
    gcloud dns record-sets transaction remove \
        --zone "${PROJECT}-measurementlab-net" \
        --name "${gce_name}.${PROJECT}.measurementlab.net." \
        --type A \
        --ttl 300 \
        "${EXISTING_DNS_IP}" \
        "${GCP_ARGS[@]}"
    gcloud dns record-sets transaction add \
        --zone "${PROJECT}-measurementlab-net" \
        --name "${gce_name}.${PROJECT}.measurementlab.net." \
        --type A \
        --ttl 300 \
        "${EXTERNAL_IP}" \
        "${GCP_ARGS[@]}"
    gcloud dns record-sets transaction execute \
        --zone "${PROJECT}-measurementlab-net" \
        "${GCP_ARGS[@]}"
  fi
  # Create the GCE instance.
  #
  # TODO (kinkade): In its current form, the service account associated with
  # this GCE instance needs full access to a single GCS storage bucket for the
  # purposes of moving around CA cert files, etc. Currently the instance is
  # granted the "storage-full" scope, which is far more permissive than we
  # ultimately want.
  gcloud compute instances create "${gce_name}" \
      --image-family "${GCE_IMAGE_FAMILY}" \
      --image-project "${GCE_IMAGE_PROJECT}" \
      --boot-disk-size "${GCE_DISK_SIZE}" \
      --boot-disk-type "${GCE_DISK_TYPE}" \
      --boot-disk-device-name "${gce_name}" \
      --network "${GCE_NETWORK}" \
      --subnet "${GCE_K8S_SUBNET}" \
      --can-ip-forward \
      --tags "${GCE_NET_TAGS}" \
      --machine-type "${!GCE_TYPE_VAR}" \
      --address "${EXTERNAL_IP}" \
      --scopes "${GCE_API_SCOPES}" \
      --metadata-from-file "user-data=cloud-config_master.yml" \
      "${GCE_ARGS[@]}"
  # Give the instance time to appear. Make sure it appears twice - there have
  # been multiple instances of it connecting just once and then failing again for
  # a bit.
  until gcloud compute ssh "${gce_name}" --command true "${GCE_ARGS[@]}" --ssh-flag "-o PasswordAuthentication=no" && \
        sleep 10 && \
        gcloud compute ssh "${gce_name}" --command true "${GCE_ARGS[@]}" --ssh-flag "-o PasswordAuthentication=no"; do
    echo Waiting for "${gce_name}" to boot up.
    # Refresh keys in case they changed mid-boot. They change as part of the
    # GCE bootup process, and it is possible to ssh at the precise moment a
    # temporary key works, get that key put in your permanent storage, and have
    # all future communications register as a MITM attack.
    #
    # Same root cause as the need to ssh twice in the loop condition above.
    gcloud compute config-ssh "${GCP_ARGS[@]}"
  done
  # Get the instances internal IP address.
  INTERNAL_IP=$(gcloud compute instances list \
      --filter "name=${gce_name} AND zone:(${gce_zone})" \
      --format "value(networkInterfaces[0].networkIP)" \
      "${GCP_ARGS[@]}" || true)
  # Create an instance group for our load balancers, internal and external, add
  # this GCE instance to the group, then attach the instance group to our
  # various backend services.
  gcloud compute instance-groups unmanaged create "${gce_name}" \
      --zone "${gce_zone}" \
      "${GCP_ARGS[@]}"
  gcloud compute instance-groups unmanaged add-instances "${gce_name}" \
      --instances "${gce_name}" \
      --zone "${gce_zone}" \
      "${GCP_ARGS[@]}"
  # If this is the first instance being created, it must be added to the
  # backend service now, else creating the initial cluster will fail.
  # Subsequent instances will be added later in this process.
  if [[ "${ETCD_CLUSTER_STATE}" == "new" ]]; then
    gcloud compute backend-services add-backend "${GCE_BASE_NAME}" \
        --instance-group "${gce_name}" \
        --instance-group-zone "${gce_zone}" \
        --region "${GCE_REGION}" \
        "${GCP_ARGS[@]}"
  fi
  gcloud compute backend-services add-backend "${TOKEN_SERVER_BASE_NAME}" \
      --instance-group "${gce_name}" \
      --instance-group-zone "${gce_zone}" \
      --region "${GCE_REGION}" \
      "${GCP_ARGS[@]}"
  gcloud compute backend-services add-backend "${BMC_STORE_PASSWORD_BASE_NAME}" \
      --instance-group "${gce_name}" \
      --instance-group-zone "${gce_zone}" \
      --region "${GCE_REGION}" \
      "${GCP_ARGS[@]}"
  # Become root, install and configure all the k8s components.
  # NOTE: the heredocs below are expanded locally (unquoted delimiters), so
  # ${...} values are substituted before the script runs on the remote host;
  # \$ escapes defer expansion to the remote shell.
  gcloud compute ssh "${GCE_ARGS[@]}" "${gce_name}" <<-EOF
	set -euxo pipefail
	sudo --login
	# Bash options are not inherited by subshells. Reset them to exit on any error.
	set -euxo pipefail
	# Binaries will get installed in /opt/bin, put it in root's PATH, for both
	# interactive and non-interactve logins.
	echo -e "\nexport PATH=\$PATH:/opt/bin" >> /root/.profile
	echo -e "\nexport PATH=\$PATH:/opt/bin" >> /root/.bashrc
	# Adds /opt/bin to the end of the secure_path sudoers configuration.
	sed -i -e '/secure_path/ s|"$|:/opt/bin"|' /etc/sudoers
	# Write out the reboot day to a file in /etc. The reboot-node.service
	# systemd unit will read the contents of this file to determine when to
	# reboot the node.
	echo -n "${reboot_day}" > /etc/reboot-node-day
	# Enable and start the timer which periodically reboots master nodes.
	systemctl enable reboot-node.timer
	systemctl start reboot-node.timer
	# Install CNI plugins.
	mkdir -p /opt/cni/bin
	curl -L "https://github.com/containernetworking/plugins/releases/download/${K8S_CNI_VERSION}/cni-plugins-linux-amd64-${K8S_CNI_VERSION}.tgz" | tar -C /opt/cni/bin -xz
	# Install crictl.
	mkdir -p /opt/bin
	curl -L "https://github.com/kubernetes-incubator/cri-tools/releases/download/${K8S_CRICTL_VERSION}/crictl-${K8S_CRICTL_VERSION}-linux-amd64.tar.gz" | tar -C /opt/bin -xz
	# Install kubeadm, kubelet and kubectl.
	cd /opt/bin
	curl -L --remote-name-all https://storage.googleapis.com/kubernetes-release/release/${K8S_VERSION}/bin/linux/amd64/{kubeadm,kubelet,kubectl}
	chmod +x {kubeadm,kubelet,kubectl}
	# Install kubelet systemd service and enable it.
	curl -sSL "https://raw.githubusercontent.com/kubernetes/release/${K8S_TOOLING_VERSION}/cmd/kubepkg/templates/latest/deb/kubelet/lib/systemd/system/kubelet.service" \
	    | sed "s:/usr/bin:/opt/bin:g" > /etc/systemd/system/kubelet.service
	mkdir -p /etc/systemd/system/kubelet.service.d
	curl -sSL "https://raw.githubusercontent.com/kubernetes/release/${K8S_TOOLING_VERSION}/cmd/kubepkg/templates/latest/deb/kubeadm/10-kubeadm.conf" \
	    | sed "s:/usr/bin:/opt/bin:g" > /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
	# Install etcdctl
	curl --location https://github.com/etcd-io/etcd/releases/download/${ETCDCTL_VERSION}/etcd-${ETCDCTL_VERSION}-linux-amd64.tar.gz | tar -xz
	cp etcd-${ETCDCTL_VERSION}-linux-amd64/etcdctl /opt/bin
	rm -rf etcd-${ETCDCTL_VERSION}-linux-amd64
	# Enable and start the kubelet service
	systemctl enable --now kubelet.service
	systemctl daemon-reload
	systemctl restart kubelet
	EOF
  # Install gcsfuse and fusermount, then mount the repository's GCS bucket so we
  # can read and/or write the generated CA files to it to persist them in the
  # event we need to recreate a k8s master.
  gcloud compute ssh "${GCE_ARGS[@]}" "${gce_name}" <<EOF
set -euxo pipefail
sudo --login
# Bash options are not inherited by subshells. Reset them to exit on any error.
set -euxo pipefail
# Build the gcsfuse binary in a throwaway Docker container. The build
# artifact will end up directly in /opt/bin due to the volume mount. Also,
# while in there, install the fuse package so we can extract the fusermount
# binary
docker run --rm --volume /opt/bin:/tmp/go/bin --env "GOPATH=/tmp/go" \
    golang:1.15 \
    /bin/bash -c \
    "go get -u github.com/googlecloudplatform/gcsfuse &&
    apt-get update --quiet=2 &&
    apt-get install --yes fuse &&
    cp /bin/fusermount /tmp/go/bin"
# Create the mount point for the GCS bucket
mkdir -p ${K8S_PKI_DIR}
# Mount the GCS bucket. This bucket contains all of the k8s cluster
# certificate and key files for all components generated by the first
# master node.
/opt/bin/gcsfuse --implicit-dirs -o rw,allow_other \
    ${!GCS_BUCKET_K8S} ${K8S_PKI_DIR}
# Make sure that the necessary subdirectories exist. Separated into two
# steps due to limitations of gcsfuse.
# https://github.com/GoogleCloudPlatform/gcsfuse/blob/master/docs/semantics.md#implicit-directories
mkdir -p ${K8S_PKI_DIR}/pki/
mkdir -p ${K8S_PKI_DIR}/pki/etcd/
# If there are any files in the bucket's pki directory, copy them to
# /etc/kubernetes/pki, creating that directory first, if it didn't already
# exist.
mkdir -p /etc/kubernetes/pki
cp -a ${K8S_PKI_DIR}/pki/* /etc/kubernetes/pki
# Copy the admin KUBECONFIG file from the bucket, if it exists.
cp ${K8S_PKI_DIR}/admin.conf /etc/kubernetes/ 2> /dev/null || true
EOF
  # Copy all config template files to the server.
  gcloud compute scp *.template "${gce_name}":/tmp "${GCE_ARGS[@]}"
  # Evaluate the kubeadm config template with a beastly sed statement.
  gcloud compute ssh "${gce_name}" "${GCE_ARGS[@]}" <<EOF
set -euxo pipefail
sudo --login
# Bash options are not inherited by subshells. Reset them to exit on any error.
set -euxo pipefail
# Create the kubeadm config from the template
sed -e 's|{{PROJECT}}|${PROJECT}|g' \
    -e 's|{{INTERNAL_IP}}|${INTERNAL_IP}|g' \
    -e 's|{{MASTER_NAME}}|${gce_name}|g' \
    -e 's|{{LOAD_BALANCER_NAME}}|api-${GCE_BASE_NAME}|g' \
    -e 's|{{K8S_VERSION}}|${K8S_VERSION}|g' \
    -e 's|{{K8S_CLUSTER_CIDR}}|${K8S_CLUSTER_CIDR}|g' \
    -e 's|{{K8S_SERVICE_CIDR}}|${K8S_SERVICE_CIDR}|g' \
    /tmp/kubeadm-config.yml.template > \
    ./kubeadm-config.yml
EOF
  # Bootstrap a brand-new cluster, or join the existing one, depending on
  # whether this is the first master created in this run.
  if [[ "${ETCD_CLUSTER_STATE}" == "new" ]]; then
    gcloud compute ssh "${gce_name}" "${GCE_ARGS[@]}" <<EOF
set -euxo pipefail
sudo --login
# Bash options are not inherited by subshells. Reset them to exit on any error.
set -euxo pipefail
# The template variables {{TOKEN}} and {{CA_CERT_HASH}} are not used when
# creating the initial master node but kubeadm cannot parse the YAML with
# the variables in the file. Here we simply replace the variables with
# some meaningless text so that the YAML can be parse. These variables
# are in the JoinConfiguration section, which isn't used here so the
# values aren't used and don't matter.
sed -i -e 's|{{TOKEN}}|NOT_USED|' \
    -e 's|{{CA_CERT_HASH}}|NOT_USED|' \
    kubeadm-config.yml
kubeadm init --config kubeadm-config.yml
# Copy the admin KUBECONFIG file to the GCS bucket.
cp /etc/kubernetes/admin.conf ${K8S_PKI_DIR}
# Since we don't know which of the CA files already existed in the GCS
# bucket before creating this first instance (ETCD_CLUSTER_STATE=new),
# just copy them all back. If they already existed it will be a no-op,
# and if they didn't then they will now be persisted.
for f in ${K8S_CA_FILES}; do
  cp /etc/kubernetes/pki/\${f} ${K8S_PKI_DIR}/pki/\${f}
done
EOF
  else
    # Join the new master node to the existing cluster.
    gcloud compute ssh "${gce_name}" "${GCE_ARGS[@]}" <<EOF
set -euxo pipefail
sudo --login
# Bash options are not inherited by subshells. Reset them to exit on any error.
set -euxo pipefail
# Get the join command.
JOIN_COMMAND=\$(/opt/bin/kubeadm token create --print-join-command)
# Extract the token from the join command
TOKEN=\$(echo "\$JOIN_COMMAND" | egrep -o '[0-9a-z]{6}\.[0-9a-z]{16}')
CA_CERT_HASH=\$(echo "\$JOIN_COMMAND" | egrep -o 'sha256:[0-9a-z]+')
# Replace a few more variables in the config file.
sed -i -e "s|{{TOKEN}}|\${TOKEN}|" \
    -e "s|{{CA_CERT_HASH}}|\${CA_CERT_HASH}|" \
    ./kubeadm-config.yml
# Join the master node to the existing cluster.
kubeadm join --config kubeadm-config.yml
EOF
  fi
  # Modify a few of the generated static manifests to suit our needs.
  gcloud compute ssh "${gce_name}" "${GCE_ARGS[@]}" <<EOF
set -euxo pipefail
sudo --login
# Bash options are not inherited by subshells. Reset them to exit on any error.
set -euxo pipefail
# Modify the --advertise-address flag to point to the external IP,
# instead of the internal one that kubeadm populated. This is necessary
# because external nodes (and especially kube-proxy) need to know of the
# master node by its public IP, even though it is technically running in
# a private VPC.
sed -i -re 's|(advertise-address)=.+|\1=${EXTERNAL_IP}|' \
    /etc/kubernetes/manifests/kube-apiserver.yaml
# Modify the default --listen-metrics-urls flag to listen on the VPC internal
# IP address (the default is localhost). Sadly, this cannot currently be
# defined in the configuration file, since the only place to define etcd
# extraArgs is in the ClusterConfiguration, which applies to the entire
# cluster, not a single etcd instances in a cluster.
# https://github.com/kubernetes/kubeadm/issues/2036
sed -i -re '/listen-metrics-urls/ s|$|,http://${INTERNAL_IP}:2381|' \
    /etc/kubernetes/manifests/etcd.yaml
# The above modifications to manifests will cause the api-server and etcd
# to be restarted by the kubelet. Stop and wait here for a little bit to
# give them time to restart before we continue.
sleep 60
EOF
  # Configure root's account to be able to easily access kubectl as well as
  # etcdctl. As we productionize this process, this code
  # should be deleted.
  gcloud compute ssh "${gce_name}" "${GCE_ARGS[@]}" <<\EOF
set -x
sudo --login
mkdir -p /root/.kube
cp -i /etc/kubernetes/admin.conf /root/.kube/config
chown root:root /root/.kube/config
bash -c "(cat <<-EOF2
export ETCDCTL_API=3
export ETCDCTL_DIAL_TIMEOUT=3s
export ETCDCTL_CACERT=/etc/kubernetes/pki/etcd/ca.crt
export ETCDCTL_CERT=/etc/kubernetes/pki/etcd/peer.crt
export ETCDCTL_KEY=/etc/kubernetes/pki/etcd/peer.key
export ETCDCTL_ENDPOINTS=https://127.0.0.1:2379
export CONTAINER_RUNTIME_ENDPOINT=unix:///run/containerd/containerd.sock
EOF2
) | tee -a /root/.profile /root/.bashrc"
EOF
  # Annotate and label the master node.
  gcloud compute ssh "${gce_name}" "${GCE_ARGS[@]}" <<-EOF
	set -euxo pipefail
	sudo --login
	# Bash options are not inherited by subshells. Reset them to exit on any error.
	set -euxo pipefail
	kubectl annotate node ${gce_name} flannel.alpha.coreos.com/public-ip-overwrite=${EXTERNAL_IP}
	kubectl label node ${gce_name} mlab/type=virtual
	# As a final step, unmount the GCS bucket, as it is no longer needed.
	umount ${K8S_PKI_DIR}
	EOF
  if [[ "${ETCD_CLUSTER_STATE}" == "new" ]]; then
    # Update the node setup script with the current CA certificate hash.
    #
    # https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-join/#token-based-discovery-with-ca-pinning
    ca_cert_hash=$(gcloud compute ssh "${gce_name}" "${GCE_ARGS[@]}" \
        --command "openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | \
            openssl rsa -pubin -outform der 2>/dev/null | \
            openssl dgst -sha256 -hex | sed 's/^.* //'")
    sed -e "s/{{CA_CERT_HASH}}/${ca_cert_hash}/" ../node/setup_k8s.sh.template > setup_k8s.sh
    cache_control="Cache-Control:private, max-age=0, no-transform"
    gsutil -h "$cache_control" cp setup_k8s.sh gs://${!GCS_BUCKET_EPOXY}/stage3_ubuntu/setup_k8s.sh
    # Apply all configs and workloads to the cluster. This only needs to happen
    # on the first master that is created.
    ./create_k8s_configs.sh "${PROJECT}"
    ./apply_k8s_configs.sh "${PROJECT}"
  fi
  # Now that the instance should be functional, add it to our load balancer target pool.
  if [[ "${ETCD_CLUSTER_STATE}" == "existing" ]]; then
    gcloud compute backend-services add-backend "${GCE_BASE_NAME}" \
        --instance-group "${gce_name}" \
        --instance-group-zone "${gce_zone}" \
        --region "${GCE_REGION}" \
        "${GCP_ARGS[@]}"
  fi
  # After the first iteration of this loop, the cluster state becomes "existing"
  # for all other iterations, since the first iteration bootstraps the cluster,
  # while subsequent ones expand the existing cluster.
  ETCD_CLUSTER_STATE="existing"
}
# Delete the GCE instance-group associated with a master node.
#   $1 = instance-group name, $2 = GCE zone
# Safe to call when the group does not exist (it checks first).
function delete_instance_group {
  local name=$1
  local zone=$2
  local existing_instance_group
  existing_instance_group=$(gcloud compute instance-groups list \
      --filter "name=${name} AND zone:($zone)" \
      --format "value(name)" \
      "${GCP_ARGS[@]}" || true)
  if [[ -n "${existing_instance_group}" ]]; then
    gcloud compute instance-groups unmanaged delete "${name}" \
        --zone "${zone}" \
        "${GCP_ARGS[@]}"
  fi
}
# Removes a backend from a backend service.
#   $1 = instance-group name, $2 = GCE zone, $3 = backend-service name
# Only issues the remove when the group is currently attached.
function delete_server_backend {
  local name=$1
  local zone=$2
  local backend=$3
  local existing_backends
  existing_backends=$(gcloud compute backend-services describe "${backend}" \
      --format "value(backends)" \
      --region "${GCE_REGION}" \
      "${GCP_ARGS[@]}" || true)
  if echo "${existing_backends}" | grep "${name}"; then
    gcloud compute backend-services remove-backend "${backend}" \
        --instance-group "${name}" \
        --instance-group-zone "${zone}" \
        --region "${GCE_REGION}" \
        "${GCP_ARGS[@]}"
  fi
}
# Find the lowest network number available for a new subnet.
# Prints the lowest second-octet value (0-255) not already used by a subnet
# of ${GCE_NETWORK}.
# Stolen from https://github.com/m-lab/epoxy/blob/master/deploy_epoxy_container.sh#L54
function find_lowest_network_number() {
  local current_sequence=$( mktemp )
  local natural_sequence=$( mktemp )
  local available=$( mktemp )
  # List current network subnets, and extract the second octet from each.
  gcloud compute networks subnets list \
      --network "${GCE_NETWORK}" --format "value(ipCidrRange)" "${GCP_ARGS[@]}" \
      | cut -d. -f2 | sort -n > "${current_sequence}"
  # Generate a natural sequence from 0 to 255.
  seq 0 255 > "${natural_sequence}"
  # Find values present in $natural_sequence but missing from $current_sequence.
  # -1 = suppress lines unique to file 1
  # -3 = suppress lines that appear in both files
  # As a result, only report lines that are unique to "${natural_sequence}".
  comm -1 -3 --nocheck-order \
      "${current_sequence}" "${natural_sequence}" > "${available}"
  # "Return" the first $available value: the lowest available network number.
  head -n 1 "${available}"
  # Clean up temporary files.
  rm -f "${current_sequence}" "${natural_sequence}" "${available}"
}
|
#!/bin/bash
# Install and enable the "ros" systemd unit shipped next to this script.
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
sudo cp $DIR/ros.service /etc/systemd/system/
# Reload unit files so systemd picks up the one just copied.
sudo systemctl daemon-reload
# Run enable with sudo as well: the original invoked systemctl without root,
# which fails for system-level units, even though the cp above needed sudo.
sudo systemctl enable ros.service
|
<reponame>weltam/idylfin
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.maths.lowlevelapi.functions.iss;
import org.apache.commons.lang.Validate;
/**
 * Tries to detect NaNs, where they are, and provides some potentially useful
 * tools to do things like mask off NaNs.
 *
 * <p>Uses {@link Double#isNaN(double)} / {@link Float#isNaN(float)} instead of
 * the hand-rolled {@code v[i] != v[i]} trick, and a local null guard instead
 * of commons-lang {@code Validate} so the class depends only on the JDK.
 */
public class IsNaN {

  /**
   * Walks through a vector looking for NaNs; returns as soon as one is found.
   *
   * @param v the vector (with possible NaN entries), not null
   * @return TRUE if a NaN is found in v, FALSE otherwise
   * @throws IllegalArgumentException if v is null
   */
  public static boolean any(double[] v) {
    notNull(v);
    for (double d : v) {
      if (Double.isNaN(d)) {
        return true;
      }
    }
    return false;
  }

  /**
   * Builds a boolean mask of the NaN positions in a vector.
   *
   * @param v the vector (with possible NaN entries), not null
   * @return a boolean vector; {@code true} at index i iff {@code v[i]} is NaN
   * @throws IllegalArgumentException if v is null
   */
  public static boolean[] getBooleans(double[] v) {
    notNull(v);
    final int len = v.length;
    boolean[] logical = new boolean[len];
    for (int i = 0; i < len; i++) {
      logical[i] = Double.isNaN(v[i]);
    }
    return logical;
  }

  /**
   * Walks through a vector looking for NaNs; returns as soon as one is found.
   *
   * @param v the vector (with possible NaN entries), not null
   * @return TRUE if a NaN is found in v, FALSE otherwise
   * @throws IllegalArgumentException if v is null
   */
  public static boolean any(float[] v) {
    notNull(v);
    for (float f : v) {
      if (Float.isNaN(f)) {
        return true;
      }
    }
    return false;
  }

  /**
   * Builds a boolean mask of the NaN positions in a vector.
   *
   * @param v the vector (with possible NaN entries), not null
   * @return a boolean vector; {@code true} at index i iff {@code v[i]} is NaN
   * @throws IllegalArgumentException if v is null
   */
  public static boolean[] getBooleans(float[] v) {
    notNull(v);
    final int len = v.length;
    boolean[] logical = new boolean[len];
    for (int i = 0; i < len; i++) {
      logical[i] = Float.isNaN(v[i]);
    }
    return logical;
  }

  // Mirrors org.apache.commons.lang.Validate.notNull (same exception type and
  // message) so callers catching IllegalArgumentException are unaffected.
  private static void notNull(Object o) {
    if (o == null) {
      throw new IllegalArgumentException("The validated object is null");
    }
  }
}
|
<reponame>Starainrt/talebook
// Nuxt/Vue plugin: registers vue-cookies globally so components can use
// this.$cookies.
import Vue from 'vue'
import VueCookies from 'vue-cookies'
Vue.use(VueCookies)
// Disabled talebook plugin registration, kept for reference.
//import talebook from "~/plugins/talebook.js"
//Vue.use(talebook)
|
package opener
import (
"errors"
"reflect"
"testing"
"github.com/tomguerney/marks/mocks"
)
// newTestOpener builds an opener wired to a mock config and a mock commander,
// so tests can observe and stub command execution.
func newTestOpener() *opener {
	return &opener{
		config:    mocks.NewConfig(),
		commander: newMockCommander(),
	}
}
// mockCommmander is a test double for the commander interface. commandFn lets
// a test intercept the invocation; commandFnCalled records whether Command ran.
type mockCommmander struct {
	commandFn         func(name string, arg ...string) combinedOutputter // per-test hook; nil result falls back to combinedOutputter
	commandFnCalled   bool                                               // set by Command
	combinedOutputter combinedOutputter                                  // default outputter returned when commandFn yields nil
}
// newMockCommander returns a mockCommmander with the pass-through default
// hook and a canned combined outputter.
func newMockCommander() *mockCommmander {
	return &mockCommmander{
		commandFn:         defaultCommandFn,
		combinedOutputter: newMockCombinedOutputter(),
	}
}
// Command records the invocation and returns the hook's result, falling back
// to the canned combinedOutputter when the hook returns nil.
func (c *mockCommmander) Command(name string, arg ...string) combinedOutputter {
	c.commandFnCalled = true
	result := c.commandFn(name, arg...)
	if result == nil {
		result = c.combinedOutputter
	}
	return result
}
// defaultCommandFn is the no-op hook: returning nil makes Command fall back
// to the mock's canned combinedOutputter.
var defaultCommandFn = func(name string, arg ...string) combinedOutputter {
	return nil
}
// mockCombinedOutputter is a test double for combinedOutputter, with a
// swappable CombinedOutput implementation and a called flag.
type mockCombinedOutputter struct {
	combinedOutputFn       func() ([]byte, error) // per-test stub for CombinedOutput
	combinedOutputFnCalled bool                   // set by CombinedOutput
}
// newMockCombinedOutputter returns a mockCombinedOutputter using the default
// successful stub.
func newMockCombinedOutputter() *mockCombinedOutputter {
	return &mockCombinedOutputter{
		combinedOutputFn: defaultCombinedOutputFn,
	}
}
// CombinedOutput records the call and delegates to the configured stub.
func (c *mockCombinedOutputter) CombinedOutput() ([]byte, error) {
	c.combinedOutputFnCalled = true
	return c.combinedOutputFn()
}
// defaultCombinedOutputFn is the default successful CombinedOutput stub.
var defaultCombinedOutputFn = func() ([]byte, error) {
	return []byte("combined output"), nil
}
// TestOpenSuccess checks that Open expands the configured chrome args
// template, splits them shell-style, and invokes the commander with the
// expected program name and arguments.
func TestOpenSuccess(t *testing.T) {
	o := newTestOpener()
	url := "https://www.url.com"
	browser := "chrome"
	o.config.ChromeOpenArgs = "-a \"Google Chrome\" {{.Url}}"
	// Hook asserts on the actual invocation instead of canned behavior.
	commandFn := func(actualName string, actualArgs ...string) combinedOutputter {
		expectedName := "open"
		expectedArgs := []string{"-a", "Google Chrome", url}
		if actualName != expectedName {
			t.Fatalf("expected %v, received %v", expectedName, actualName)
		}
		if !reflect.DeepEqual(actualArgs, expectedArgs) {
			t.Fatalf("expected %v, received %v", expectedArgs, actualArgs)
		}
		return nil
	}
	o.commander.(*mockCommmander).commandFn = commandFn
	err := o.Open(url, browser)
	if err != nil {
		t.Fatal(err.Error())
	}
	if !o.commander.(*mockCommmander).commandFnCalled {
		t.Fatal("Command should have been called")
	}
}
// TestOpenTemplateFail verifies Open fails for an unknown browser and that no
// command is executed.
func TestOpenTemplateFail(t *testing.T) {
	o := newTestOpener()
	url := "https://www.url.com"
	browser := "not a browser"
	err := o.Open(url, browser)
	// Check err before logging it: the original called err.Error() first,
	// which would panic on a nil error and mask the real test failure.
	if err == nil {
		t.Fatal("should return error")
	}
	t.Log("Expected error: ", err.Error())
	if o.commander.(*mockCommmander).commandFnCalled {
		t.Fatal("Command should not have been called")
	}
}
// TestOpenInterplateFail (sic — name kept for history/CI continuity) verifies
// Open fails when the args template references an unknown field, without
// executing any command.
func TestOpenInterplateFail(t *testing.T) {
	o := newTestOpener()
	o.config.ChromeOpenArgs = "{{.Notafield}}"
	url := "https://www.url.com"
	browser := "chrome"
	err := o.Open(url, browser)
	// Check err before logging it: err.Error() on a nil error would panic
	// and mask the real test failure.
	if err == nil {
		t.Fatal("should return error")
	}
	t.Log("Expected error: ", err.Error())
	if o.commander.(*mockCommmander).commandFnCalled {
		t.Fatal("Command should not have been called")
	}
}
// TestOpenCombinedOutputFail verifies that a failure from the command's
// CombinedOutput propagates out of Open.
func TestOpenCombinedOutputFail(t *testing.T) {
	o := newTestOpener()
	url := "https://www.url.com"
	browser := "chrome"
	combinedOutputFn := func() ([]byte, error) {
		return nil, errors.New("combined output error")
	}
	o.commander.(*mockCommmander).
		combinedOutputter.(*mockCombinedOutputter).combinedOutputFn = combinedOutputFn
	err := o.Open(url, browser)
	// Check err before logging it: err.Error() on a nil error would panic
	// and mask the real test failure.
	if err == nil {
		t.Fatal("should return error")
	}
	t.Log("Expected error: ", err.Error())
	if !o.commander.(*mockCommmander).
		combinedOutputter.(*mockCombinedOutputter).combinedOutputFnCalled {
		t.Fatal("combinedOutput should have been called")
	}
}
// TestTemplateSuccess checks that template() returns the configured args
// string for a known browser.
func TestTemplateSuccess(t *testing.T) {
	o := newTestOpener()
	expected := "firefox args"
	o.config.FirefoxOpenArgs = expected
	actual, err := o.template("firefox")
	if err != nil {
		t.Fatal(err.Error())
	}
	if actual != expected {
		t.Fatalf("expected %v, received %v", expected, actual)
	}
}
// TestTemplateFail checks that template() errors for an unknown browser and
// returns an empty string.
func TestTemplateFail(t *testing.T) {
	o := newTestOpener()
	expected := ""
	actual, err := o.template("not a browser")
	if err == nil {
		t.Fatal("expected error")
	}
	t.Log("Expected error: ", err.Error())
	if actual != "" {
		t.Fatalf("expected %v, received %v", expected, actual)
	}
}
// TestInterpolateTemplateSuccess checks that interpolateTemplate substitutes
// the url into the {{.Url}} placeholder.
func TestInterpolateTemplateSuccess(t *testing.T) {
	o := newTestOpener()
	url := "testUrl"
	expected := "***testUrl***"
	template := "***{{.Url}}***"
	actual, err := o.interpolateTemplate(template, url)
	if err != nil {
		t.Fatal(err.Error())
	}
	if actual != expected {
		t.Fatalf("expected %v, received %v", expected, actual)
	}
}
// TestInterpolateTemplateFail checks that interpolateTemplate errors on an
// unknown template field and yields an empty string.
func TestInterpolateTemplateFail(t *testing.T) {
	o := newTestOpener()
	url := "testUrl"
	template := "***{{.Other}}***"
	actual, err := o.interpolateTemplate(template, url)
	if err == nil {
		t.Fatal("expected error")
	}
	// Log once; the original logged the same expected error twice.
	t.Log("Expected error: ", err.Error())
	if actual != "" {
		t.Fatal("string should be empty")
	}
}
// TestSliceArgsSuccess checks that sliceArgs splits a shell-style argument
// string, honoring double-quoted groups as single arguments.
func TestSliceArgsSuccess(t *testing.T) {
	o := newTestOpener()
	args := "./foo --bar=baz \"blah blah\" -p p -b"
	expected := []string{"./foo", "--bar=baz", "blah blah", "-p", "p", "-b"}
	actual, err := o.sliceArgs(args)
	if err != nil {
		t.Fatal(err.Error())
	}
	if !reflect.DeepEqual(actual, expected) {
		t.Fatalf("expected %v, received %v", expected, actual)
	}
}
|
import java.math.BigDecimal;
public class EmployeeSalaryManager {
private BigDecimal salary;
private String admissionDate;
private String salaryScale;
public EmployeeSalaryManager(String admissionDate, BigDecimal salary) {
this.admissionDate = admissionDate;
this.salary = salary;
this.setSalaryScale();
}
public void readjustSalary(BigDecimal readjust) {
this.salary = this.salary.add(readjust);
this.setSalaryScale();
}
private void setSalaryScale() {
if (salary.compareTo(new BigDecimal("50000")) < 0) {
salaryScale = "Low";
} else if (salary.compareTo(new BigDecimal("100000")) <= 0) {
salaryScale = "Medium";
} else {
salaryScale = "High";
}
}
// Getters and setters for admissionDate, salary, and salaryScale
public String getAdmissionDate() {
return admissionDate;
}
public void setAdmissionDate(String admissionDate) {
this.admissionDate = admissionDate;
}
public BigDecimal getSalary() {
return salary;
}
public void setSalary(BigDecimal salary) {
this.salary = salary;
this.setSalaryScale();
}
public String getSalaryScale() {
return salaryScale;
}
public void setSalaryScale(String salaryScale) {
this.salaryScale = salaryScale;
}
} |
<gh_stars>1-10
package com.zto.testcase.validator;
import com.zto.testcase.validator.anno.InEnum;
import java.lang.reflect.Field;
import javax.validation.ConstraintValidator;
import javax.validation.ConstraintValidatorContext;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
@Slf4j
public class InEnumValidator implements ConstraintValidator<InEnum, String> {

    /** Custom violation message; falls back to the default when blank. */
    private String message;
    /** Enum type whose constants define the allowed values. */
    private Class<?> enumClass;
    /** Name of the enum field compared against the validated string. */
    private String attribute;

    @Override
    public void initialize(InEnum constraintAnnotation) {
        this.message = constraintAnnotation.message();
        this.enumClass = constraintAnnotation.enumClass();
        this.attribute = constraintAnnotation.attribute();
    }

    /**
     * Returns true when {@code value} equals the configured attribute of any
     * constant of {@code enumClass}. Blank values are considered valid
     * (combine with @NotBlank if emptiness must be rejected).
     */
    @Override
    public boolean isValid(String value, ConstraintValidatorContext context) {
        if (StringUtils.isBlank(value)) {
            return true;
        }
        Field field = null;
        try {
            field = enumClass.getDeclaredField(attribute);
            field.setAccessible(true);
        } catch (NoSuchFieldException e) {
            log.error(enumClass.getName() + "不存在此字段:" + attribute);
        }
        // A missing field previously fell through and field.get(...) threw an
        // unhandled NullPointerException; treat it as a failed validation.
        if (field == null) {
            return reject(context);
        }
        for (Object enumObj : enumClass.getEnumConstants()) {
            try {
                if (value.equals(field.get(enumObj))) {
                    return true;
                }
            } catch (IllegalAccessException e) {
                break;
            }
        }
        return reject(context);
    }

    // Installs the custom message (if any) and signals an invalid value.
    private boolean reject(ConstraintValidatorContext context) {
        if (StringUtils.isNotBlank(message)) {
            context.disableDefaultConstraintViolation();
            context.buildConstraintViolationWithTemplate(message).addConstraintViolation();
        }
        return false;
    }
}
|
#include <stdbool.h>
#include <kore/kore.h>
#include <kore/http.h>
#include <kore/pgsql.h>
#include "shared/shared_error.h"
#include "shared/shared_http.h"
#include "model/flight.h"
#include "assets.h"
#define FLIGHT_BOOK_RESULT_OK 0
#define FLIGHT_BOOK_RESULT_NO_SEATS_AVAILABLE 1
#define FLIGHT_BOOK_RESULT_INSUFFICIENT_FUNDS 2
int flight_book(struct http_request *);
int flight_book_parseparams(struct http_request *, int *);
void flight_book_error_handler(struct http_request *, int);
/*
 * GET /flight/book?id=<flightid>
 *
 * Books the flight identified by the "id" query parameter for the user owning
 * the current session, then renders the success page. All failures are routed
 * through flight_book_error_handler(), which writes an error response; the
 * handler itself always reports KORE_RESULT_OK once a response was produced.
 */
int
flight_book(struct http_request *req)
{
    uint32_t err;
    /* Declared (and NULL-initialized) up front: the early "goto exit" paths
     * below must not jump over this initialization, otherwise
     * session_destroy() at exit would read an uninitialized pointer
     * (undefined behavior in the original code). */
    Session *session = NULL;

    if(req->method != HTTP_METHOD_GET)
    {
        return(KORE_RESULT_ERROR); //No methods besides GET exist on the home page
    }

    int flightid = 0;
    if((err = flight_book_parseparams(req, &flightid)) != (SHARED_OK))
    {
        flight_book_error_handler(req, err);
        goto exit;
    }

    if((err = shared_http_get_session_from_request(req, &session)) != (SHARED_OK))
    {
        flight_book_error_handler(req, err);
        goto exit;
    }

    uint32_t book_result = flight_book_for_user(flightid, session->user_identifier, &err);
    if(err != (SHARED_OK))
    {
        flight_book_error_handler(req, err);
        goto exit;
    }

    /* Map the domain-level booking result onto an error page or fall
     * through to the success response. */
    switch(book_result)
    {
        case (FLIGHT_BOOK_RESULT_NO_SEATS_AVAILABLE):
            flight_book_error_handler(req, (FLIGHT_BOOK_ERROR_NO_SEATS_AVAILABLE));
            goto exit;
        case (FLIGHT_BOOK_RESULT_INSUFFICIENT_FUNDS):
            flight_book_error_handler(req, (FLIGHT_BOOK_ERROR_INSUFFICIENT_FUNDS));
            goto exit;
        case (FLIGHT_BOOK_RESULT_OK):
            break;
        default:
            flight_book_error_handler(req, (FLIGHT_BOOK_ERROR_UNKNOWN_RESULT));
            goto exit;
    }

    http_response_header(req, "content-type", "text/html");
    http_response(req, HTTP_STATUS_OK,
        asset_flight_book_success_html,
        asset_len_flight_book_success_html);

exit:
    session_destroy(&session);
    return (KORE_RESULT_OK);
}
/* Extracts and validates the "id" query parameter into *flightid.
 * Returns SHARED_OK on success, FLIGHT_BOOK_ERROR_ID_VALIDATOR_INVALID when
 * the parameter is missing or not a valid int32. */
int
flight_book_parseparams(struct http_request *req, int *flightid)
{
    http_populate_get(req);
    if(!http_argument_get_int32(req, "id", flightid))
    {
        return (FLIGHT_BOOK_ERROR_ID_VALIDATOR_INVALID);
    }
    return (SHARED_OK);
}
/*
 * Maps a flight-booking error code to an HTML error response that redirects
 * back to /flight/search after 10 seconds. Codes not handled here are
 * delegated to the generic shared_error_handler().
 */
void
flight_book_error_handler(struct http_request *req, int errcode)
{
    bool handled = true;
    switch(errcode)
    {
        case (FLIGHT_BOOK_ERROR_ID_VALIDATOR_INVALID):
            shared_error_response(req, HTTP_STATUS_INTERNAL_ERROR,
                "Unknown Flight Identifier. Please try again.", "/flight/search", 10);
            break;
        case (FLIGHT_BOOK_ERROR_INSUFFICIENT_FUNDS):
            shared_error_response(req, HTTP_STATUS_INTERNAL_ERROR,
                "Insufficient DogeCoins.", "/flight/search", 10);
            break;
        case (FLIGHT_BOOK_ERROR_NO_SEATS_AVAILABLE):
            shared_error_response(req, HTTP_STATUS_INTERNAL_ERROR,
                "No more seats are available for the selected flight.", "/flight/search", 10);
            break;
        case (FLIGHT_BOOK_ERROR_UNKNOWN_RESULT):
            shared_error_response(req, HTTP_STATUS_INTERNAL_ERROR,
                "Internal Server error. Unknown Flight Booking result", "/flight/search", 10);
            break;
        default:
            /* Unrecognized code: fall through to the shared handler below. */
            handled = false;
    }
    if(!handled)
    {
        shared_error_handler(req, errcode, "/flight/search");
    }
}
/*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package ed.biodare2.backend.features.subscriptions;
import ed.biodare2.backend.security.BioDare2User;
import ed.biodare2.backend.repo.system_dom.FeaturesAvailability;
import ed.biodare2.backend.repo.system_dom.ServiceLevel;
import static ed.biodare2.backend.repo.system_dom.ServiceLevel.*;
import org.springframework.stereotype.Service;
/**
 * Resolves the {@link ServiceLevel} granted by a user's subscription and
 * applies the open-data upgrade policy.
 *
 * @author tzielins
 */
@Service
public class ServiceLevelResolver {

    /**
     * Builds the feature-availability descriptor for a new experiment, with
     * the service level derived from the owning user's subscription.
     */
    public FeaturesAvailability buildForExperiment(BioDare2User user) {
        FeaturesAvailability features = new FeaturesAvailability();
        features.serviceLevel = subscriptionToServiceLevel(user.getSubscription());
        return features;
    }

    /**
     * Upgrades an open (public) experiment to FULL_FOR_OPEN, unless the
     * current level was already paid for (subscribed or purchased).
     */
    public void setServiceForOpen(FeaturesAvailability features) {
        if (FULL_SUBSCRIBED.equals(features.serviceLevel)) return;
        if (FULL_PURCHASED.equals(features.serviceLevel)) return;
        features.serviceLevel = FULL_FOR_OPEN;
    }

    /**
     * Maps a subscription kind to the service level it grants.
     *
     * @throws IllegalArgumentException for subscription kinds with no mapping
     */
    protected ServiceLevel subscriptionToServiceLevel(AccountSubscription subscription) {
        switch (subscription.kind) {
            case FREE: return FULL_GRATIS;
            case FULL_WELCOME: return FULL_GRATIS;
            case FULL_INDIVIDUAL:
            case FULL_INHERITED:
            case FULL_GROUP: return FULL_SUBSCRIBED;
            // Fixed message typo: "Unsuported" -> "Unsupported".
            default: throw new IllegalArgumentException("Unsupported subscription: "+subscription.kind);
        }
    }
}
|
const wordFrequency = (list) => {
let frequencies = {};
list.forEach(word => {
frequencies[word] = (frequencies[word] || 0) + 1
});
return frequencies;
}
let words = ["foo", "bar", "baz", "foo", "qux", "foo"];
let frequencies = wordFrequency(words);
console.log(frequencies);
Output:
{foo: 3, bar: 1, baz: 1, qux: 1} |
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2010 <NAME>. All rights reserved.
#
"""Convert hostname to IP address.
"""
#end_pymotw_header
import socket
# Resolve each sample hostname with gethostbyname_ex(), printing the canonical
# hostname, any aliases, and all IPv4 addresses. (Python 2 print statements —
# this script targets Python 2 and will not run under Python 3.)
for host in [ 'homer', 'www', 'www.python.org', 'nosuchname' ]:
    print host
    try:
        hostname, aliases, addresses = socket.gethostbyname_ex(host)
        print ' Hostname:', hostname
        print ' Aliases :', aliases
        print ' Addresses:', addresses
    # gethostbyname_ex raises socket.error for unresolvable names
    # (e.g. 'nosuchname'), which we report instead of crashing.
    except socket.error as msg:
        print 'ERROR:', msg
    print
|
<filename>Calligraphy/src/hallelujah/cal/ctrl/ParserController.java
package hallelujah.cal.ctrl;
import hallelujah.cal.parser.CalligraphyParser;
import hallelujah.cal.producer.CalligraphyProducer;
import java.io.IOException;
import android.util.Log;
/**
 * Thin JNI facade creating {@code CalligraphyProducer} and
 * {@code CalligraphyParser} instances via native code.
 *
 * NOTE(review): the native library is NOT loaded here (the loadLibrary block
 * below is commented out) — presumably a caller loads "pdc_prs" before these
 * methods run; verify, otherwise the native calls throw UnsatisfiedLinkError.
 */
class ParserController {
	private static final String TAG = "ParserController";

	// static
	// {
	// System.loadLibrary("pdc_prs");
	// }
	//
	/** Creates a producer for the given file, starting at position 0. */
	public static CalligraphyProducer newProducer(String szFilePath) throws IOException
	{
		return nativeNewProducer(szFilePath, 0);
	}

	/** Creates a producer for the given file, starting at position nPos. */
	public static CalligraphyProducer newProducer(String szFilePath, int nPos) throws IOException
	{
		return nativeNewProducer(szFilePath, nPos);
	}

	/** Creates a parser for the given file, starting at position 0. */
	public static CalligraphyParser newParser(String szFilePath) throws IOException
	{
		CalligraphyParser p = nativeNewParser(szFilePath, 0);
		Log.i(TAG, "create prs");
		return p;
	}

	/** Creates a parser for the given file, starting at position nPos. */
	public static CalligraphyParser newParser(String szFilePath, int nPos) throws IOException
	{
		CalligraphyParser p = nativeNewParser(szFilePath, nPos);
		Log.i(TAG, "create prs");
		return p;
	}

	// Implemented in the native "pdc_prs" library.
	private static native CalligraphyProducer nativeNewProducer(String szFilePath, int nPos) throws IOException;
	private static native CalligraphyParser nativeNewParser(String szFilePath, int nPos) throws IOException;
}
|
package Atom.Net;
import java.io.IOException;
import java.io.PrintWriter;
import java.net.Socket;
import java.util.ArrayList;
import java.util.Scanner;
import java.util.function.Consumer;
/**
 * Line-oriented TCP client. Lines received from the socket are dispatched, on
 * a background daemon thread, to every registered listener.
 */
public class Client {
    /** Auto-flushing writer for sending lines to the remote peer. */
    public final PrintWriter output;
    private final Scanner input;
    // NOTE(review): the list is read by the daemon thread while
    // addInputListener may run on another thread; registration is expected to
    // happen before traffic flows — confirm, or switch to a concurrent list.
    private final ArrayList<Consumer<String>> inputListener = new ArrayList<>();
    private final Thread inputHandler = new Thread(this::inputHandler);

    /**
     * Wraps an already-connected socket and starts the reader thread.
     *
     * @throws IOException if the socket streams cannot be opened
     */
    public Client(Socket s) throws IOException {
        this.output = new PrintWriter(s.getOutputStream(), true);
        this.input = new Scanner(s.getInputStream());
        inputHandler.setName("Input Handler");
        inputHandler.setDaemon(true);
        inputHandler.start();
    }

    /** Connects to ip:port. */
    public Client(String ip, int port) throws IOException {
        this(new Socket(ip, port));
    }

    /** Connects to 127.0.0.1:port. */
    public Client(int port) throws IOException {
        this("127.0.0.1", port);
    }

    /**
     * Requests the reader thread to stop. A single interrupt replaces the
     * original busy loop ({@code while (isAlive()) interrupt();}), which
     * pinned a CPU core without improving the odds of the thread observing
     * the interrupt — a Scanner blocked on a socket read is not unblocked by
     * interruption; closing the underlying socket is what ends the thread.
     */
    public void stop() {
        inputHandler.interrupt();
    }

    /** Reader loop: dispatches each received line to all listeners. */
    private void inputHandler() {
        // hasNextLine() blocks until data arrives and returns false once the
        // stream ends, so the loop exits cleanly instead of spinning hot the
        // way the original nested loop did after a disconnect.
        while (!Thread.currentThread().isInterrupted() && input.hasNextLine()) {
            String line = input.nextLine();
            for (Consumer<String> s : inputListener) {
                s.accept(line);
            }
        }
    }

    /** Registers a callback invoked with each line received from the peer. */
    public void addInputListener(Consumer<String> s) {
        inputListener.add(s);
    }
}
|
#!/usr/bin/env bash
# Copyright 2020 Antrea Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -exu
# Print all arguments to stderr, keeping stdout clean for machine-readable
# output.
function echoerr {
    >&2 echo "$@"
}
# Default configuration; most values can be overridden by the command-line
# flags parsed below.
REGION="westus"
RESOURCE_GROUP="antrea-ci-rg"
SSH_KEY_PATH="$HOME/.ssh/id_rsa.pub"
SSH_PRIVATE_KEY_PATH="$HOME/.ssh/id_rsa"
# Run-mode switches: all phases by default, narrowed by --setup-only /
# --cleanup-only.
RUN_ALL=true
RUN_SETUP_ONLY=false
RUN_CLEANUP_ONLY=false
KUBECONFIG_PATH="$HOME/jenkins/out/aks"
# Exit status of the conformance test run; checked at the end of the script.
TEST_SCRIPT_RC=0
MODE="report"
KUBE_CONFORMANCE_IMAGE_VERSION=v1.19.4
_usage="Usage: $0 [--cluster-name <AKSClusterNameToUse>] [--kubeconfig <KubeconfigSavePath>] [--k8s-version <ClusterVersion>]\
[--azure-app-id <AppID>] [--azure-tenant-id <TenantID>] [--azure-password <Password>] \
[--aks-region <Region>] [--log-mode <SonobuoyResultLogLevel>] [--setup-only] [--cleanup-only]
Setup a AKS cluster to run K8s e2e community tests (Conformance & Network Policy).
        --cluster-name        The cluster name to be used for the generated AKS cluster. Must be specified if not run in Jenkins environment.
        --kubeconfig          Path to save kubeconfig of generated AKS cluster.
        --k8s-version         AKS K8s cluster version. Defaults to the default K8s version for AKS in the Azure region.
        --azure-app-id        Azure Service Principal Application ID.
        --azure-tenant-id     Azure Service Principal Tenant ID.
        --azure-password      Azure Service Principal Password.
        --aks-region          The Azure region where the cluster will be initiated. Defaults to westus.
        --log-mode            Use the flag to set either 'report', 'detail', or 'dump' level data for sonobouy results.
        --setup-only          Only perform setting up the cluster and run test.
        --cleanup-only        Only perform cleaning up the cluster."
# Print the full usage text to stderr.
function print_usage {
    echoerr "$_usage"
}

# Print a short pointer to --help to stderr.
function print_help {
    echoerr "Try '$0 --help' for more information."
}
# Parse command-line flags, overriding the defaults declared above. Unknown
# options abort with an error.
while [[ $# -gt 0 ]]
do
key="$1"
case $key in
    --cluster-name)
    CLUSTER="$2"
    shift 2
    ;;
    --azure-app-id)
    AZURE_APP_ID="$2"
    shift 2
    ;;
    --azure-tenant-id)
    AZURE_TENANT_ID="$2"
    shift 2
    ;;
    --azure-password)
    AZURE_PASSWORD="$2"
    shift 2
    ;;
    --aks-region)
    REGION="$2"
    shift 2
    ;;
    --kubeconfig)
    KUBECONFIG_PATH="$2"
    shift 2
    ;;
    --k8s-version)
    K8S_VERSION="$2"
    shift 2
    ;;
    --log-mode)
    MODE="$2"
    shift 2
    ;;
    --setup-only)
    RUN_SETUP_ONLY=true
    RUN_ALL=false
    shift
    ;;
    --cleanup-only)
    RUN_CLEANUP_ONLY=true
    RUN_ALL=false
    shift
    ;;
    -h|--help)
    print_usage
    exit 0
    ;;
    *)    # unknown option
    echoerr "Unknown option $1"
    exit 1
    ;;
esac
done
# Provision the AKS cluster, record its identity for later cleanup, and wire
# up kubectl via the saved kubeconfig.
function setup_aks() {
    echo "=== This cluster to be created is named: ${CLUSTER} in resource group ${RESOURCE_GROUP} ==="
    # Save the cluster information for cleanup on Jenkins environment
    echo "CLUSTERNAME=${CLUSTER}" > ${GIT_CHECKOUT_DIR}/ci_properties.txt
    if [[ -n ${ANTREA_GIT_REVISION+x} ]]; then
      # Append (>>): the original used ">" here, which first overwrote the
      # CLUSTERNAME line written above and then overwrote ANTREA_REPO itself,
      # leaving only the last line in ci_properties.txt.
      echo "ANTREA_REPO=${ANTREA_REPO}" >> ${GIT_CHECKOUT_DIR}/ci_properties.txt
      echo "ANTREA_GIT_REVISION=${ANTREA_GIT_REVISION}" >> ${GIT_CHECKOUT_DIR}/ci_properties.txt
    fi
    echo "=== Using the following az cli version ==="
    az --version
    echo "=== Logging into Azure Cloud ==="
    az login --service-principal --username ${AZURE_APP_ID} --password ${AZURE_PASSWORD} --tenant ${AZURE_TENANT_ID}
    # enable the 'Node Public IP' preview feature
    az feature register --name NodePublicIPPreview --namespace Microsoft.ContainerService
    az provider register -n Microsoft.ContainerService
    printf "\n"
    echo "=== Using the following kubectl ==="
    which kubectl
    echo '=== Creating a resource group ==='
    az group create --name ${RESOURCE_GROUP} --location $REGION
    # Default to the region's default K8s version unless --k8s-version was given.
    if [[ -z ${K8S_VERSION+x} ]]; then
      K8S_VERSION=$(az aks get-versions -l ${REGION} -o json | jq -r '.orchestrators[] | select(.default==true).orchestratorVersion')
    fi
    echo '=== Creating a cluster in AKS ==='
    # enable-node-public-ip is a preview feature and may be changed/removed in a future release. For more details, see
    # https://docs.microsoft.com/en-us/azure/aks/use-multiple-node-pools#assign-a-public-ip-per-node-for-your-node-pools-preview
    # without public node ip, ssh into worker nodes in the AKS cluster can only be done through a `jump` pod deployed
    # in the cluster, which is very tedious and makes loading Antrea tarball into Nodes challenging.
    # https://docs.microsoft.com/en-us/azure/aks/ssh
    az aks create \
        --resource-group ${RESOURCE_GROUP} \
        --name ${CLUSTER} \
        --node-count 2 \
        --enable-node-public-ip \
        --ssh-key-value ${SSH_KEY_PATH} \
        --network-plugin azure \
        --kubernetes-version ${K8S_VERSION} \
        --service-principal ${AZURE_APP_ID} \
        --client-secret ${AZURE_PASSWORD}
    # NOTE(review): under "set -e" a failing az command exits the script
    # before this check runs; kept for parity with the rest of the script.
    if [[ $? -ne 0 ]]; then
        echo "=== Failed to deploy AKS cluster! ==="
        exit 1
    fi
    mkdir -p ${KUBECONFIG_PATH}
    az aks get-credentials --resource-group ${RESOURCE_GROUP} \
        --name ${CLUSTER} --file ${KUBECONFIG_PATH}/kubeconfig
    export KUBECONFIG="${KUBECONFIG_PATH}/kubeconfig"
    sleep 5
    # Smoke-test the credentials before proceeding.
    if [[ $(kubectl get svc) ]]; then
        echo "=== AKS cluster setup succeeded ==="
    else
        echo "=== AKS kubectl is not configured correctly! ==="
        exit 1
    fi
}
# Build the Antrea image from the checked-out revision, copy it onto every AKS
# worker node over SSH (via a temporary public-IP firewall rule), then deploy
# the Antrea manifests and restart kube-system Pods so Antrea manages them.
function deliver_antrea_to_aks() {
    echo "====== Building Antrea for the Following Commit ======"
    git show --numstat
    export GO111MODULE=on
    export GOROOT=/usr/local/go
    export PATH=${GOROOT}/bin:$PATH
    if [[ -z ${GIT_CHECKOUT_DIR+x} ]]; then
      GIT_CHECKOUT_DIR=..
    fi
    make clean -C ${GIT_CHECKOUT_DIR}
    # On Jenkins, drop images from previous runs of this job.
    # NOTE(review): "|| true > /dev/null" redirects true's (empty) output, not
    # the pipeline's — harmless, but probably not what was intended.
    if [[ -n ${JOB_NAME+x} ]]; then
      docker images | grep "${JOB_NAME}" | awk '{print $3}' | xargs -r docker rmi -f || true > /dev/null
    fi
    # Clean up dangling images generated in previous builds. Recent ones must be excluded
    # because they might be being used in other builds running simultaneously.
    docker image prune -f --filter "until=2h" || true > /dev/null
    cd ${GIT_CHECKOUT_DIR}
    VERSION="$CLUSTER" make
    # NOTE(review): with "set -e" a failed make exits before this check runs.
    if [[ "$?" -ne "0" ]]; then
        echo "=== Antrea Image build failed ==="
        exit 1
    fi
    echo "=== Loading the Antrea image to each Node ==="
    antrea_image="antrea-ubuntu"
    DOCKER_IMG_VERSION=${CLUSTER}
    DOCKER_IMG_NAME="projects.registry.vmware.com/antrea/antrea-ubuntu"
    docker save -o ${antrea_image}.tar ${DOCKER_IMG_NAME}:${DOCKER_IMG_VERSION}
    # Discover the node VMs' public IPs through the cluster's managed resource
    # group and VM scale set.
    CLUSTER_RESOURCE_GROUP=$(az aks show --resource-group ${RESOURCE_GROUP} --name ${CLUSTER} --query nodeResourceGroup -o tsv)
    SCALE_SET_NAME=$(az vmss list --resource-group ${CLUSTER_RESOURCE_GROUP} --query [0].name -o tsv)
    NODE_IPS=$(az vmss list-instance-public-ips --resource-group ${CLUSTER_RESOURCE_GROUP} --name ${SCALE_SET_NAME} | grep ipAddress | awk -F'"' '{print $4}')
    NETWORK_NSG=$(az network nsg list -g ${CLUSTER_RESOURCE_GROUP} -o table | grep ${CLUSTER_RESOURCE_GROUP} | awk -F' ' '{print $2}')
    # Create a firewall rule to allow ssh access into the worker node through public IPs
    az network nsg rule create -g ${CLUSTER_RESOURCE_GROUP} --nsg-name ${NETWORK_NSG} -n SshRule --priority 100 \
       --source-address-prefixes Internet --destination-port-ranges 22 --access Allow --protocol Tcp --direction Inbound
    # Wait for the rule to take effect
    sleep 30
    # Import the image into each node's containerd and tag it :latest, which
    # is what the manifests reference.
    for IP in ${NODE_IPS}; do
        scp -o StrictHostKeyChecking=no -i ${SSH_PRIVATE_KEY_PATH} ${antrea_image}.tar azureuser@${IP}:~
        ssh -o StrictHostKeyChecking=no -i ${SSH_PRIVATE_KEY_PATH} -n azureuser@${IP} "sudo ctr -n=k8s.io images import ~/${antrea_image}.tar ; sudo ctr -n=k8s.io images tag ${DOCKER_IMG_NAME}:${DOCKER_IMG_VERSION} ${DOCKER_IMG_NAME}:latest"
    done
    rm ${antrea_image}.tar
    echo "=== Configuring Antrea for AKS cluster ==="
    if [[ -z ${GIT_CHECKOUT_DIR+x} ]]; then
      GIT_CHECKOUT_DIR=..
    fi
    kubectl apply -f ${GIT_CHECKOUT_DIR}/build/yamls/antrea-aks-node-init.yml
    sleep 5s
    kubectl apply -f ${GIT_CHECKOUT_DIR}/build/yamls/antrea-aks.yml
    kubectl rollout status --timeout=2m deployment.apps/antrea-controller -n kube-system
    kubectl rollout status --timeout=2m daemonset/antrea-agent -n kube-system
    # Restart all Pods in all Namespaces (kube-system, etc) so they can be managed by Antrea.
    kubectl delete pods -n kube-system $(kubectl get pods -n kube-system -o custom-columns=NAME:.metadata.name,HOSTNETWORK:.spec.hostNetwork \
       --no-headers=true | grep '<none>' | awk '{ print $1 }')
    kubectl rollout status --timeout=2m deployment.apps/coredns -n kube-system
    # wait for other pods in the kube-system namespace to become ready
    sleep 5
    echo "=== Antrea has been deployed for AKS cluster ${CLUSTER} ==="
}
# Run the K8s e2e conformance and network-policy suites against the cluster,
# record the result in TEST_SCRIPT_RC, then remove the Antrea manifests.
function run_conformance() {
    echo "=== Running Antrea Conformance and Network Policy Tests ==="
    # "|| TEST_SCRIPT_RC=$?" captures the failure without tripping "set -e".
    ${GIT_CHECKOUT_DIR}/ci/run-k8s-e2e-tests.sh --e2e-conformance --e2e-network-policy \
      --kube-conformance-image-version ${KUBE_CONFORMANCE_IMAGE_VERSION} \
      --log-mode ${MODE} > ${GIT_CHECKOUT_DIR}/aks-test.log || TEST_SCRIPT_RC=$?

    if [[ $TEST_SCRIPT_RC -eq 0 ]]; then
        echo "All tests passed."
        echo "=== SUCCESS !!! ==="
    elif [[ $TEST_SCRIPT_RC -eq 1 ]]; then
        echo "Failed test cases exist."
        echo "=== FAILURE !!! ==="
    else
        echo "Unexpected error when running tests."
        echo "=== FAILURE !!! ==="
    fi

    echo "=== Cleanup Antrea Installation ==="
    for antrea_yml in ${GIT_CHECKOUT_DIR}/build/yamls/*.yml
    do
        kubectl delete -f ${antrea_yml} --ignore-not-found=true || true
    done
}
# Tear down the AKS cluster and its resource group, and remove the saved
# kubeconfig.
function cleanup_cluster() {
    # Double quotes so ${CLUSTER} actually expands; the original used single
    # quotes and printed the literal string.
    echo "=== Cleaning up AKS cluster ${CLUSTER} ==="
    az aks delete --name ${CLUSTER} --resource-group ${RESOURCE_GROUP} --yes
    if [[ $? -ne 0 ]]; then
        echo "== Failed to delete AKS cluster"
        # Exit non-zero: a bare "exit" here returned the status of the
        # preceding [[ ]] test, i.e. success, hiding the failure from CI.
        exit 1
    fi
    az group delete --name ${RESOURCE_GROUP} --yes --no-wait
    if [[ $? -ne 0 ]]; then
        echo "== Failed to delete AKS resource group"
        exit 1
    fi
    rm -f ${KUBECONFIG_PATH}/kubeconfig
    echo "=== Cleanup cluster ${CLUSTER} succeeded ==="
}
# ensures that the script can be run from anywhere
THIS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
GIT_CHECKOUT_DIR=${THIS_DIR}/..
pushd "$THIS_DIR" > /dev/null

# Phase selection: setup+test, cleanup, or both, driven by the
# --setup-only/--cleanup-only flags parsed earlier.
if [[ "$RUN_ALL" == true || "$RUN_SETUP_ONLY" == true ]]; then
    setup_aks
    deliver_antrea_to_aks
    run_conformance
fi

if [[ "$RUN_ALL" == true || "$RUN_CLEANUP_ONLY" == true ]]; then
    cleanup_cluster
fi

# Propagate the test result as the script's exit status (unless we only
# performed cleanup, in which case no tests ran).
if [[ "$RUN_CLEANUP_ONLY" == false && $TEST_SCRIPT_RC -ne 0 ]]; then
    exit 1
fi
|
#!/bin/bash
# Build, tag and push the problemtools Docker images. With no argument the
# images are tagged "develop"; with an argument they are tagged with it and
# "latest" is updated too.
set -e

KOTLIN_VERSION=1.3.50

TAG=develop
UPDATE_LATEST=false
if [ "$1" != "" ]; then
    TAG=$1
    UPDATE_LATEST=true
fi

cd $(dirname $(readlink -f $0))/docker

set -x

# Make the build image and extract build artifacts
# ===============================================
sudo docker build \
     -f Dockerfile.build \
     -t problemtools/build:${TAG} \
     --no-cache \
     --build-arg PROBLEMTOOLS_VERSION="${TAG}" \
     .

mkdir -p artifacts
rm -rf artifacts/deb/*
# Copy the .deb packages produced inside the build image out to ./artifacts.
sudo docker run --rm -v "$(pwd)/artifacts/:/artifacts" problemtools/build:${TAG} cp -r /usr/local/problemtools_build/deb /artifacts
sudo chown -R $USER:$USER artifacts/

# Get Kotlin since it is not available through apt
# ===============================================
mkdir -p artifacts/kotlin
curl -L -o artifacts/kotlin/kotlinc.zip https://github.com/JetBrains/kotlin/releases/download/v${KOTLIN_VERSION}/kotlin-compiler-${KOTLIN_VERSION}.zip
# FIXME(?): The "-linux-x64" version sounds correct but seems broken
#curl -L -o artifacts/kotlin/kotlinc.zip https://github.com/JetBrains/kotlin/releases/download/v${KOTLIN_VERSION}/kotlin-compiler-${KOTLIN_VERSION}-linux-x64.zip
# ===============================================

# Build the actual problemtools images
# ===============================================
for IMAGE in minimal icpc full; do
    sudo docker build\
         -f Dockerfile.${IMAGE}\
         -t problemtools/${IMAGE}:${TAG}\
         --build-arg PROBLEMTOOLS_VERSION=${TAG}\
         .
    if [ "$UPDATE_LATEST" = "true" ]; then
        sudo docker tag problemtools/${IMAGE}:${TAG} problemtools/${IMAGE}:latest
    fi
done

# Push to Docker Hub
# ===============================================
sudo docker login
for IMAGE in minimal icpc full; do
    sudo docker push problemtools/${IMAGE}:${TAG}
    if [ "$UPDATE_LATEST" = "true" ]; then
        sudo docker push problemtools/${IMAGE}:latest
    fi
done
|
import json
from typing import Dict
def character_analysis(json_filename: str) -> Dict[str, int]:
    """Count character occurrences across all contribution strings.

    Args:
        json_filename: Path to a JSON file whose top-level object may contain
            a ``"contributions"`` key holding a list of strings. A missing
            key is treated as an empty list.

    Returns:
        Mapping from each character to the number of times it appears across
        all contributions.
    """
    with open(json_filename, 'r') as file:
        data = json.load(file)
    contributions = data.get('contributions', [])
    char_count: Dict[str, int] = {}
    for contribution in contributions:
        for char in contribution:
            # dict.get with a default replaces the manual if/else branch.
            char_count[char] = char_count.get(char, 0) + 1
    return char_count
package server
import (
jsoniter "github.com/json-iterator/go"
"rwcoding/gphp/internal/common"
"rwcoding/gphp/internal/worker"
)
// NewHttpPkg wraps a worker package and eagerly parses its JSON body into
// HTTP response fields (status, headers, cookies, body).
func NewHttpPkg(pkg worker.Pkg) *httpPkg {
	hp := &httpPkg{
		pkg: pkg,
	}
	hp.parse()
	return hp
}
// httpPkg is an HTTP response view over a worker.Pkg whose body is a JSON
// document with optional "status", "headers", "cookies" and "body" keys.
type httpPkg struct {
	pkg     worker.Pkg
	status  int               // HTTP status; defaults to 200, 500 on parse error
	headers map[string]string // response headers; nil when absent
	cookies []string          // cookie strings; nil when absent
	body    []byte            // response body bytes; nil when absent
}
// Status returns the HTTP status code parsed from the worker package.
func (hp *httpPkg) Status() int {
	return hp.status
}

// Headers returns the response headers parsed from the worker package.
func (hp *httpPkg) Headers() map[string]string {
	return hp.headers
}

// Cookies returns the cookie strings parsed from the worker package.
func (hp *httpPkg) Cookies() []string {
	return hp.cookies
}

// Body returns the raw response body parsed from the worker package.
func (hp *httpPkg) Body() []byte {
	return hp.body
}
// parse decodes the worker package's JSON body into the response fields.
// Missing keys fall back to: status 200, nil headers/cookies/body; a body
// that fails to decode yields status 500.
func (hp *httpPkg) parse() {
	buf := hp.pkg.Body()
	bd := map[string]interface{}{}
	if err := jsoniter.Unmarshal(buf, &bd); err != nil {
		common.Err("parse error for http pkg")
		hp.status = 500
		return
	}
	// Unmarshaling into interface{} yields float64 for numbers,
	// map[string]interface{} for objects and []interface{} for arrays, so
	// the original direct assertions (bd["status"].(int),
	// .(map[string]string), .([]string)) panicked on any real payload.
	// Convert element by element instead.
	if v, ok := bd["status"].(float64); ok {
		hp.status = int(v)
	} else {
		hp.status = 200
	}
	if hs, ok := bd["headers"].(map[string]interface{}); ok {
		hp.headers = make(map[string]string, len(hs))
		for k, v := range hs {
			if s, ok := v.(string); ok {
				hp.headers[k] = s
			}
		}
	}
	if cs, ok := bd["cookies"].([]interface{}); ok {
		hp.cookies = make([]string, 0, len(cs))
		for _, v := range cs {
			if s, ok := v.(string); ok {
				hp.cookies = append(hp.cookies, s)
			}
		}
	}
	if s, ok := bd["body"].(string); ok {
		hp.body = []byte(s)
	}
}
|
/**
 * Immutable value object describing a student enrolment.
 */
public class Student {
    // Final fields: an instance never changes after construction.
    private final String name;
    private final int age;
    private final String course;

    /**
     * @param name   the student's full name
     * @param age    the student's age in years
     * @param course the course the student is enrolled in
     */
    public Student(String name, int age, String course) {
        this.name = name;
        this.age = age;
        this.course = course;
    }

    /** @return the student's full name */
    public String getName() {
        return name;
    }

    /** @return the student's age in years */
    public int getAge() {
        return age;
    }

    /** @return the enrolled course */
    public String getCourse() {
        return course;
    }
}
#!/bin/bash
# TODO explain --remote-host/--remote-path

# Benchmarks the oblivious-transfer (OT) runtime across 1..8 threads, two OT
# types, 5 runs each, coordinating a local and a remote peer over ssh.

# Resolve this script's directory and the project root four levels up.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
PROJECT_PATH="$(cd "$DIR/../../../.." && pwd)"

# Defaults; overridable via the --remote-*/--local-* options parsed below.
REMOTE_PATH="$PROJECT_PATH"
REMOTE_HOST="localhost"
LOCAL_HOST="localhost"
LOCAL_PORT=1234

# OT source file (its thread count is patched in-place by sed below),
# benchmark locations, compiler and build command.
OTSRC="src/ext/oblivc/ot.c"
BENCHDIR="test/oblivc/ottest/"
BENCHSRC="$BENCHDIR/ottime.c"
BENCHBIN="ottime"
OBLIVCC="$PROJECT_PATH/bin/oblivcc"
BUILDCMD="make cilly oblivruntime RELEASELIB=1 NATIVECAML=1"

# Parse --key=value style options.
while [ $# -ge 1 ]; do
  if [[ $1 = "--remote-host="* ]]; then
    REMOTE_HOST=${1#--remote-host=}
  elif [[ $1 = "--remote-path="* ]]; then
    REMOTE_PATH=${1#--remote-path=}
  elif [[ $1 = "--local-host="* ]]; then
    LOCAL_HOST=${1#--local-host=}
  elif [[ $1 = "--local-port-init="* ]]; then
    LOCAL_PORT=${1#--local-port-init=}
  fi
  shift
done

echo $REMOTE_HOST:$REMOTE_PATH
cd "$PROJECT_PATH/$BENCHDIR"
port=$LOCAL_PORT

for ((tcount=1;tcount<=8;tcount++)); do
  # Change thread count
  sed -i "s/#define OT_THREAD_COUNT .*$/#define OT_THREAD_COUNT $tcount/" $PROJECT_PATH/$OTSRC
  # Build project
  ( cd "$PROJECT_PATH" && $BUILDCMD )
  # Build remote project
  if [ $REMOTE_HOST != "localhost" ]; then
    scp $PROJECT_PATH/$OTSRC $REMOTE_HOST:$REMOTE_PATH/$OTSRC
    ssh $REMOTE_HOST "cd $REMOTE_PATH && $BUILDCMD"
  fi
  # Compile benchmark program
  $OBLIVCC -O3 $PROJECT_PATH/$BENCHSRC -o $BENCHBIN
  ssh $REMOTE_HOST "cd $REMOTE_PATH/$BENCHDIR && $REMOTE_PATH/bin/oblivcc -O3 $REMOTE_PATH/$BENCHSRC -o $BENCHBIN"
  for ottype in M Q; do
    for ((run=0; run<5; run++)); do
      # Local side listens in the background; remote side connects back to
      # LOCAL_HOST. Results are appended to <scriptname>.log; a fresh port
      # per run avoids TIME_WAIT collisions.
      ./$BENCHBIN $port -- $ottype 5000000 &
      sleep 0.3
      echo -n "$port $ottype $tcount" >> $0.log
      ssh $REMOTE_HOST time $REMOTE_PATH/$BENCHDIR/$BENCHBIN $port $LOCAL_HOST $ottype 5000000 &>> $0.log
      port=$((port+1))
    done
  done
done

# Restore source file
git checkout HEAD $PROJECT_PATH/$OTSRC
scp $PROJECT_PATH/$OTSRC $REMOTE_HOST:$REMOTE_PATH/$OTSRC
|
<gh_stars>0
package de.eimantas.eimantasbackend.entities;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import lombok.NonNull;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.Id;
import java.math.BigDecimal;
import java.time.LocalDate;
@Data
@AllArgsConstructor
@NoArgsConstructor
@Entity
public class Project {

    // Surrogate primary key, generated by the persistence provider.
    @Id
    @GeneratedValue
    private Long id;

    // Record lifecycle dates.
    private LocalDate createDate;
    private LocalDate expireDate;
    private LocalDate updateDate;

    // Whether the project is currently active.
    private boolean active;

    // Reference to the related bank account.
    // NOTE(review): a plain long id rather than a JPA relation — confirm intentional.
    private long refBankAccountId;

    // Project display name; Lombok's @NonNull adds a null check to the
    // generated constructor and setter.
    private @NonNull
    String name;

    // Billing rate; BigDecimal for exact monetary arithmetic.
    private BigDecimal rate;

    // Identifier of the owning user.
    private String userId;
}
|
<gh_stars>1000+
// Fixture exercising overload/override confusion between generic supertypes
// and their subtypes.
public class TestConfusingOverloading {
  class Super<T> {
    void test2(T t) {}

    void test(Super<T> other) {}
  }

  // Sub.test(Sub) overloads — does not override — Super.test(Super<Runnable>).
  class Sub extends Super<Runnable> {
    void test(Sub other) {}
  }

  // Sub2 properly overrides both methods at their instantiated signatures.
  class Sub2 extends Super<Runnable> {
    @Override void test2(Runnable r) {}

    @Override void test(Super<Runnable> other) {}
  }
}
// Fixture: an overload set where each narrower overload delegates to a
// broader one (or is deprecated), which should not be flagged as confusing.
class TestDelegates {
  class A {}

  class B1 extends A {}

  class B2 extends A {}

  class C extends B1 {}

  void test(A a) {} // OK (all overloaded methods are either delegates or deprecated)

  void test(B1 b1) { test((A)b1); } // OK (delegate)

  void test(B2 b2) { test((A)b2); } // OK (delegate)

  void test(C c) { test((B1)c); } // OK (delegate)

  @Deprecated void test(Object obj) {} // OK (deprecated)
}
|
/*
Copyright (c) 2013, Groupon, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
Neither the name of GROUPON nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package com.groupon.nakala.analysis;
import com.groupon.nakala.db.DataStore;
import com.groupon.nakala.db.FlatFileStore;
import com.groupon.nakala.exceptions.StoreException;
import java.util.Map;
/**
* @author <EMAIL>
*/
public class KeyValuePairAnalysis<T extends Comparable, U extends Comparable>
        implements Analysis, Map.Entry<T, U>, Comparable<KeyValuePairAnalysis<T, U>> {

    // NOTE(review): raw Comparable bounds force unchecked comparisons in
    // compareTo(); tightening to Comparable<T>/Comparable<U> would change the
    // public signature, so they are left as-is.
    T key;
    U value;

    /** Copies key and value out of an existing map entry. */
    public KeyValuePairAnalysis(Map.Entry<T, U> entry) {
        this.key = entry.getKey();
        this.value = entry.getValue();
    }

    /** Creates a pair from an explicit key and value. */
    public KeyValuePairAnalysis(T key, U value) {
        this.key = key;
        this.value = value;
    }

    public T getKey() {
        return key;
    }

    public U getValue() {
        return value;
    }

    @Override
    public U setValue(U o) {
        // NOTE(review): Map.Entry.setValue is specified to return the OLD
        // value; this returns the new one — confirm callers don't rely on it.
        return value = o;
    }

    @Override
    public boolean equals(Object o) {
        // Null-safe key/value comparison mirroring the Map.Entry contract.
        if (!(o instanceof KeyValuePairAnalysis))
            return false;
        KeyValuePairAnalysis e = (KeyValuePairAnalysis) o;
        return (getKey() == null ?
                e.getKey() == null :
                getKey().equals(e.getKey()))
                &&
                (getValue() == null ?
                        e.getValue() == null :
                        getValue().equals(e.getValue()));
    }

    @Override
    public int hashCode() {
        // Matches Map.Entry.hashCode(): key hash XOR value hash.
        return (getKey() == null ? 0 : getKey().hashCode()) ^
                (getValue() == null ? 0 : getValue().hashCode());
    }

    /** Tab-separated "key&lt;TAB&gt;value" line for flat-file output. */
    public String getTsv() {
        return key + "\t" + value;
    }

    @Override
    public int compareTo(KeyValuePairAnalysis<T, U> other) {
        // Order by key first, then by value.
        int cmp = getKey().compareTo(other.getKey());
        return cmp != 0 ? cmp : getValue().compareTo(other.getValue());
    }

    @Override
    public void store(DataStore ds) throws StoreException {
        // Only flat-file stores are supported for this analysis type.
        if (ds instanceof FlatFileStore) {
            ((FlatFileStore) ds).getPrintStream().println(getTsv());
        } else {
            throw new StoreException("Unsupported data store " + ds.getClass().getName());
        }
    }
}
# Train a Transformer translation model on IWSLT'14 De->En with fairseq,
# resetting optimizer/scheduler/meter state (e.g. when warm-starting).
mkdir -p checkpoints/transformer
CUDA_VISIBLE_DEVICES=0 fairseq-train data-bin/iwslt14.tokenized.de-en \
    --optimizer adam --lr 0.0005 --clip-norm 0.1 --dropout 0.2 --max-tokens 4000 \
    --arch transformer_iwslt_de_en --save-dir checkpoints/transformer \
    --reset-lr-scheduler --reset-optimizer --reset-meters

# Alternative invocation using R-Drop regularization, kept for reference.
# src=de
# tgt=en
# CUDA_VISIBLE_DEVICES=0 fairseq-train data-bin/iwslt14.tokenized.de-en \
#     --task rdrop_translation \
#     --arch transformer_iwslt_de_en \
#     --optimizer adam --lr 0.0005 \
#     --label-smoothing 0.1 --dropout 0.3 --max-tokens 4096 \
#     --lr-scheduler inverse_sqrt --weight-decay 0.0001 \
#     --criterion rdrop_label_smoothed_cross_entropy \
#     --alpha 5 \
#     --k 2 \
#     --seed 64 \
#     --fp16 \
#     --eval-bleu \
#     --eval-bleu-args '{"beam": 5, "max_len_a": 1.2, "max_len_b": 10}' \
#     --eval-bleu-detok moses \
#     --eval-bleu-remove-bpe \
#     --best-checkpoint-metric bleu --maximize-best-checkpoint-metric \
#     --max-update 300000 --warmup-updates 4000 --warmup-init-lr 1e-07 --adam-betas '(0.9,0.98)' \
#     --save-dir checkpoints/transformer
#! /usr/bin/env bash

source "test-helper.sh"

#
# __stub_index() tests.
#
# STUB_INDEX entries are "name=index" pairs; __stub_index echoes the index
# recorded for the given stub name.

# Echoes index of given stub.
STUB_INDEX=("uname=1" "top=3")
assert '__stub_index "uname"' "1"
assert '__stub_index "top"' "3"
unset STUB_INDEX

# Echoes nothing if stub is not in the index.
STUB_INDEX=("uname=1")
assert '__stub_index "top"' ""
unset STUB_INDEX

# End of tests.
assert_end "__stub_index()"
|
<!-- Review submission form: posts rating, title, description and reviewer
     name to the /reviews endpoint. -->
<form action="/reviews" method="POST">
    <!-- Star rating, 1 (worst) to 5 (best) -->
    <label for="rating">Rating:</label><br>
    <select id="rating" name="rating">
        <option value="1">1</option>
        <option value="2">2</option>
        <option value="3">3</option>
        <option value="4">4</option>
        <option value="5">5</option>
    </select><br>
    <label for="title">Title:</label><br>
    <input type="text" id="title" name="title" placeholder="Enter your review title"><br>
    <label for="description">Description:</label><br>
    <textarea id="description" name="description" placeholder="Enter your review description"></textarea><br>
    <label for="name">Name:</label><br>
    <input type="text" id="name" name="name" placeholder="Enter your name"><br><br>
    <input type="submit" value="Submit">
</form>
<filename>src/js/common/dom.js
'use strict';
// Adds `className` to a single element, preferring the classList API and
// falling back to string concatenation on older browsers.
function addClassHelper(el, className) {
    if (!el.classList) {
        el.className += ' ' + className;
    } else {
        el.classList.add(className);
    }
}
// Adds `className` to an element or to every element of a NodeList.
function addClass(el, className) {
    var isNodeList = Object.prototype.toString.call(el) === '[object NodeList]';
    if (!isNodeList) {
        addClassHelper(el, className);
        return;
    }
    for (var i = 0; i < el.length; i++) {
        addClassHelper(el[i], className);
    }
}
// Removes `className` from one element. Uses classList when available;
// otherwise rewrites the className string via a word-boundary regex.
function removeClassHelper(el, className){
    if (!el.classList) {
        var pattern = new RegExp('(^|\\b)' + className.split(' ').join('|') + '(\\b|$)', 'gi');
        el.className = el.className.replace(pattern, ' ');
    } else {
        el.classList.remove(className);
    }
}
// Removes `className` from an element or from every element of a NodeList.
function removeClass(el, className) {
    var isNodeList = Object.prototype.toString.call(el) === '[object NodeList]';
    if (!isNodeList) {
        removeClassHelper(el, className);
        return;
    }
    for (var i = 0; i < el.length; i++) {
        removeClassHelper(el[i], className);
    }
}
// Toggles `className` on an element. The fallback path removes the first
// occurrence when present (same element the original's downward scan
// selected), otherwise appends the class.
function toggleClass(el, className) {
    if (!el.classList) {
        var classes = el.className.split(' ');
        var idx = classes.indexOf(className);
        if (idx >= 0) {
            classes.splice(idx, 1);
        } else {
            classes.push(className);
        }
        el.className = classes.join(' ');
    } else {
        el.classList.toggle(className);
    }
}
// Returns whether `el` currently has `className`.
// Bug fix: the original computed the result but never returned it, so the
// function always yielded `undefined` (falsy even when the class was present).
function hasClass(el, className) {
    if (el.classList)
        return el.classList.contains(className);
    else
        return new RegExp('(^| )' + className + '( |$)', 'gi').test(el.className);
}
// Public API: class-manipulation helpers that work with or without the
// native classList interface.
module.exports = {
    addClass: addClass,
    removeClass: removeClass,
    toggleClass: toggleClass,
    hasClass: hasClass
};
|
import re

text = "He is an excellent programmer"


def find_and_replace(text, pattern, replacement):
    """Return ``text`` with every match of regex ``pattern`` replaced by ``replacement``."""
    return re.compile(pattern).sub(replacement, text)


result = find_and_replace(text, r"excellent", "brilliant")
print(result)
#!/bin/bash

# Returns success (0) when tag $2 already exists for Docker Hub repo $1.
function docker_tag_exists() {
    EXISTS=$(curl -s https://hub.docker.com/v2/repositories/$1/tags/?page_size=10000 | jq -r "[.results | .[] | .name == \"$2\"] | any")
    # Bug fix: quote the variable — when curl/jq fail, $EXISTS is empty and
    # the unquoted `test $EXISTS = true` was a syntax error ("unary operator
    # expected") instead of simply evaluating to false.
    test "$EXISTS" = "true"
}

# Build and push the image only when the tag is not on Docker Hub yet.
if docker_tag_exists svenruppert/maven-3.2.5-zulu 1.8.181; then
    echo skip building, image already existing - svenruppert/maven-3.2.5-zulu 1.8.181
else
    echo start building the images
    docker build -t svenruppert/maven-3.2.5-zulu .
    docker tag svenruppert/maven-3.2.5-zulu:latest svenruppert/maven-3.2.5-zulu:1.8.181
    docker push svenruppert/maven-3.2.5-zulu:1.8.181
fi

# Clean up local images either way.
docker image rm svenruppert/maven-3.2.5-zulu:latest
docker image rm svenruppert/maven-3.2.5-zulu:1.8.181
#!/bin/bash

# Prints a tmux status-line segment: an animal emoji chosen by short hostname.
python3 <<'EOF'
# Hostname -> emoji lookup table.
animal_emotes = {
    'viper': '🐍',
    'hornet': '🐝',
    'cricket': '🪳',
    'newt': '🦎',
    'termite': '🐜',
    'python': '🐍',
    'cicada': '🪰',
    'bumblebee': '🐝',
    'cobra': '🐍',
    'frog': '🐸',
    'tick': '🪳',
    'turtle': '🐢',
    'aphid': '🪲',
    'ladybug': '🐞',
    'chameleon': '🦎',
    'rattlesnake': '🐍',
    'scorpion': '🦂',
    'perch': '🐟',
    'dolphin': '🐬',
    'kangaroo': '🦘',
    'hippo': '🦛',
    'swan': '🦢',
    'gator': '🐊',
    'jaguar': '🐆',
    'rhino': '🦏',
    'grizzly': '🐻',
    'macaw': '🐦',
    'monkey': '🐵',
    'cardinal': '🦩',
    'peacock': '🦚',
    'lion': '🦁',
    'giraffee': '🦒',
    'hare': '🐇',
    'raven': '🐧',
    'tiger': '🐅',
    'hawk': '🦅',
    'woodpecker': '🦤',
    'zebra': '🦓',
}
import socket
# Short hostname (strip any domain suffix); unknown hosts get a moon glyph.
host = socket.gethostname().split('.')[0]
emoji = animal_emotes.get(host, '☾')
print('#[fg=yellow]' + emoji + ' ' + host)
EOF

# Earlier status-line segments, kept commented out for reference.
#status=""
#div=" #[fg=colour240]// "
#spotify="$(spotify.py)"
#if ! [[ -z $spotify ]]; then
# status+="#[fg=colour2]${spotify}${div}"
#fi
#
#status+="#[fg=blue]$(internet.sh)${div}"
#status+="$(temp.py)${div}"
## TODO: Use real analog clock
#status+="#[fg=red]◶"
#echo "$status"
#!/bin/bash -xe
# Bug fix: the original first line was "#/bin/bash -xe" (missing the "!"),
# making it an ordinary comment — the script ran under whatever shell invoked
# it, without the intended -x (trace) and -e (exit-on-error) options.

# Bootstraps a GitLab runner: installs ansible/git if missing, then applies
# the egeneralov.gitlab-runner role via ansible-pull.

install_ansible () {
  apt-get update -q
  apt-get install -yq python-pip
  pip install ansible
}

install_git () {
  apt-get update -q
  apt-get install -yq git
}

# Install dependencies only when they are not already on PATH.
[ -z "$(which ansible)" ] && install_ansible
[ -z "$(which git)" ] && install_git

ansible-pull \
  -e gitlab_runner_ci_server_url="${GITLAB_URL:-https://gitlab.com}" \
  -e gitlab_runner_registration_token="${GITLAB_TOKEN}" \
  -e gitlab_runner_name="${GITLAB_NAME-$(hostname)}" \
  --accept-host-key --clean \
  --directory=/etc/ansible/roles/egeneralov.gitlab-runner/ \
  --url=https://github.com/egeneralov/gitlab-runner.git \
  /etc/ansible/roles/egeneralov.gitlab-runner/local.yml
/*
* Copyright (c) 2020 The Go Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style
* license that can be found in the LICENSE file.
*/
// Original Go source here:
// http://code.google.com/p/go/source/browse/src/pkg/regexp/syntax/prog.go
package com.steveniemitz.binaryre2j;
import java.io.ByteArrayOutputStream;
import java.util.Arrays;
/**
* A Prog is a compiled regular expression program.
*/
final class Prog {

  // Backing store for instructions, grown by doubling in addInst(); only the
  // first |instSize| slots are meaningful.
  Inst[] inst = new Inst[10];
  int instSize = 0;
  int start; // index of start instruction
  int numCap = 2; // number of CAPTURE insts in re
  // 2 => implicit ( and ) for whole match $0

  // Constructs an empty program.
  Prog() {}

  // Returns the instruction at the specified pc.
  // Precondition: pc > 0 && pc < numInst().
  Inst getInst(int pc) {
    return inst[pc];
  }

  // Returns the number of instructions in this program.
  int numInst() {
    return instSize;
  }

  // Adds a new instruction to this program, with operator |op| and |pc| equal
  // to |numInst()|.
  void addInst(int op) {
    if (instSize >= inst.length) {
      // Amortized growth: double the capacity.
      inst = Arrays.copyOf(inst, inst.length * 2);
    }
    inst[instSize] = new Inst(op);
    instSize++;
  }

  // skipNop() follows any no-op or capturing instructions and returns the
  // resulting instruction.
  Inst skipNop(int pc) {
    Inst i = inst[pc];
    while (i.op == Inst.NOP || i.op == Inst.CAPTURE) {
      // Re-reads the current pc and then advances; on exit |i| holds the
      // first instruction that is neither NOP nor CAPTURE.
      i = inst[pc];
      pc = i.out;
    }
    return i;
  }

  // prefix() returns a pair of a literal string that all matches for the
  // regexp must start with, and a boolean which is true if the prefix is the
  // entire match. The string is returned by appending to |prefix|.
  boolean prefix(ByteArrayOutputStream prefix) {
    Inst i = skipNop(start);

    // Avoid allocation of buffer if prefix is empty.
    if (!Inst.isRuneOp(i.op) || i.runes.length != 1) {
      return i.op == Inst.MATCH; // (append "" to prefix)
    }

    // Have prefix; gather characters.
    // Only single-byte (<= 0xFF), case-sensitive, single-rune instructions
    // contribute to the literal prefix.
    while (Inst.isRuneOp(i.op) && i.runes.length == 1 && i.runes[0] <= 0xFF && (i.arg & RE2.FOLD_CASE) == 0) {
      prefix.write(i.runes[0]); // an int, not a byte.
      i = skipNop(i.out);
    }
    return i.op == Inst.MATCH;
  }

  // startCond() returns the leading empty-width conditions that must be true
  // in any match. It returns -1 (all bits set) if no matches are possible.
  int startCond() {
    int flag = 0; // bitmask of EMPTY_* flags
    int pc = start;
    loop:
    for (; ; ) {
      Inst i = inst[pc];
      switch (i.op) {
        case Inst.EMPTY_WIDTH:
          // Accumulate required empty-width conditions along the start path.
          flag |= i.arg;
          break;
        case Inst.FAIL:
          return -1;
        case Inst.CAPTURE:
        case Inst.NOP:
          break; // skip
        default:
          break loop;
      }
      pc = i.out;
    }
    return flag;
  }

  // --- Patch list ---

  // A patchlist is a list of instruction pointers that need to be filled in
  // (patched). Because the pointers haven't been filled in yet, we can reuse
  // their storage to hold the list. It's kind of sleazy, but works well in
  // practice. See http://swtch.com/~rsc/regexp/regexp1.html for inspiration.

  // These aren't really pointers: they're integers, so we can reinterpret them
  // this way without using package unsafe. A value l denotes p.inst[l>>1].out
  // (l&1==0) or .arg (l&1==1). l == 0 denotes the empty list, okay because we
  // start every program with a fail instruction, so we'll never want to point
  // at its output link.

  // Returns the successor of list element |l| in the patch list.
  int next(int l) {
    Inst i = inst[l >> 1];
    if ((l & 1) == 0) {
      return i.out;
    }
    return i.arg;
  }

  // Fills every pending pointer in patch list |l| with |val|.
  void patch(int l, int val) {
    while (l != 0) {
      Inst i = inst[l >> 1];
      if ((l & 1) == 0) {
        l = i.out;
        i.out = val;
      } else {
        l = i.arg;
        i.arg = val;
      }
    }
  }

  // Concatenates patch lists |l1| and |l2|, returning the combined list.
  int append(int l1, int l2) {
    if (l1 == 0) {
      return l2;
    }
    if (l2 == 0) {
      return l1;
    }
    // Walk to the end of l1 and splice l2 onto its last element.
    int last = l1;
    for (; ; ) {
      int next = next(last);
      if (next == 0) {
        break;
      }
      last = next;
    }
    Inst i = inst[last >> 1];
    if ((last & 1) == 0) {
      i.out = l2;
    } else {
      i.arg = l2;
    }
    return l1;
  }

  // ---

  @Override
  public String toString() {
    StringBuilder out = new StringBuilder();
    for (int pc = 0; pc < instSize; ++pc) {
      int len = out.length();
      out.append(pc);
      if (pc == start) {
        out.append('*');
      }
      // Use spaces not tabs since they're not always preserved in
      // Google Java source, such as our tests.
      // NOTE(review): upstream RE2/J pads with a longer run of spaces here;
      // with a single-space literal the substring is usually empty, so the
      // pc column may not be separated from the instruction — confirm
      // against the upstream source.
      out.append(" ".substring(out.length() - len)).append(inst[pc]).append('\n');
    }
    return out.toString();
  }
}
|
/*
* Copyright (c) 2014, 2016 Oracle and/or its affiliates. All rights reserved. This
* code is released under a tri EPL/GPL/LGPL license. You can use it,
* redistribute it and/or modify it under the terms of the:
*
* Eclipse Public License version 1.0
* GNU General Public License version 2
* GNU Lesser General Public License version 2.1
*/
package org.jruby.truffle.language.arguments;
import com.oracle.truffle.api.CompilerDirectives;
import com.oracle.truffle.api.frame.VirtualFrame;
import com.oracle.truffle.api.object.DynamicObject;
import com.oracle.truffle.api.profiles.ConditionProfile;
import com.oracle.truffle.api.source.SourceSection;
import org.jruby.truffle.RubyContext;
import org.jruby.truffle.core.Layouts;
import org.jruby.truffle.core.hash.BucketsStrategy;
import org.jruby.truffle.core.hash.HashOperations;
import org.jruby.truffle.language.RubyGuards;
import org.jruby.truffle.language.RubyNode;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
public class ReadKeywordRestArgumentNode extends RubyNode {

    // Keywords consumed by explicitly declared keyword parameters; they must
    // not appear again in the **rest hash.
    private final String[] excludedKeywords;

    @Child private ReadUserKeywordsHashNode readUserKeywordsHashNode;

    // Profiles whether a keyword hash was passed at all.
    private final ConditionProfile noHash = ConditionProfile.createBinaryProfile();

    public ReadKeywordRestArgumentNode(RubyContext context, SourceSection sourceSection,
            int minimum, String[] excludedKeywords) {
        super(context, sourceSection);
        this.excludedKeywords = excludedKeywords;
        readUserKeywordsHashNode = new ReadUserKeywordsHashNode(context, sourceSection, minimum);
    }

    @Override
    public Object execute(VirtualFrame frame) {
        return lookupRestKeywordArgumentHash(frame);
    }

    // Builds the hash bound to a **rest parameter: the caller's keyword hash
    // minus all explicitly declared (excluded) keywords.
    private Object lookupRestKeywordArgumentHash(VirtualFrame frame) {
        final Object hash = readUserKeywordsHashNode.execute(frame);

        if (noHash.profile(hash == null)) {
            // No keyword hash was supplied: return a fresh empty Hash.
            return Layouts.HASH.createHash(coreLibrary().getHashFactory(), null, 0, null, null, null, null, false);
        }

        // The copy below is interpreter-only; force deoptimization here.
        CompilerDirectives.bailout("Ruby keyword arguments aren't optimized");

        final DynamicObject hashObject = (DynamicObject) hash;

        final List<Map.Entry<Object, Object>> entries = new ArrayList<>();

        outer: for (Map.Entry<Object, Object> keyValue : HashOperations.iterableKeyValues(hashObject)) {
            if (!RubyGuards.isRubySymbol(keyValue.getKey())) {
                // Only symbol keys are keyword arguments; skip everything else.
                continue;
            }
            for (String excludedKeyword : excludedKeywords) {
                if (excludedKeyword.equals(keyValue.getKey().toString())) {
                    // Already consumed by a declared keyword parameter.
                    continue outer;
                }
            }
            entries.add(keyValue);
        }

        return BucketsStrategy.create(getContext(), entries, Layouts.HASH.getCompareByIdentity(hashObject));
    }
}
|
/**
 * Resolves the web-defaults descriptor text for a given Jetty major version.
 */
public class JettyWebDefaultsProcessor {

    // Fully qualified java.util types: this file declares no import
    // statements, so unqualified Map/HashMap would not compile.
    private java.util.Map<Integer, String> webDefaultsMap;

    public JettyWebDefaultsProcessor() {
        // Initialize the web defaults map with version-specific web defaults
        webDefaultsMap = new java.util.HashMap<>();
        webDefaultsMap.put(7, "Jetty 7 web defaults");
        webDefaultsMap.put(9, "Jetty 9 web defaults");
    }

    /**
     * Returns the web defaults for the requested Jetty version.
     *
     * @param version Jetty major version number
     * @return the descriptor text, or "Jetty &lt;version&gt; is not supported"
     *         for unknown versions
     */
    public String extractWebDefaults(int version) {
        // getOrDefault replaces the separate containsKey + get double lookup.
        return webDefaultsMap.getOrDefault(version, "Jetty " + version + " is not supported");
    }

    public static void main(String[] args) {
        JettyWebDefaultsProcessor processor = new JettyWebDefaultsProcessor();
        System.out.println(processor.extractWebDefaults(7)); // Output: Jetty 7 web defaults
        System.out.println(processor.extractWebDefaults(9)); // Output: Jetty 9 web defaults
        System.out.println(processor.extractWebDefaults(8)); // Output: Jetty 8 is not supported
    }
}
import {http} from './config';

// CRUD service for CNPJ (Brazilian company registry) records.
export default{
    // Looks up company data on the public ReceitaWS API.
    // NOTE(review): routes through the cors-anywhere proxy to bypass browser
    // CORS limits, and logs the queried CNPJ to the console — confirm both
    // are acceptable for production.
    buscar:(cnpj)=>{
        console.log(cnpj.cnpj_cadastral);
        //return http.get('https://www.receitaws.com.br/v1/cnpj/'+ '05018904000168');
        return http.get('https://cors-anywhere.herokuapp.com/http://www.receitaws.com.br/v1/cnpj/' + cnpj.cnpj_cadastral);
    },
    // Lists all CNPJ records stored in the local backend.
    listDb:() => {
        return http.get('cnpj');
    },
    // Creates a new CNPJ record.
    cadastrar:(cnpj) =>{
        return http.post('cnpj',cnpj);
    },
    // Updates an existing record by id.
    edit:(cnpj) =>{
        return http.put('cnpj/' + cnpj.id,cnpj );
    },
    // Deletes a record by id.
    destroy:(cnpj) =>{
        return http.delete('cnpj/' + cnpj.id);
    },
}
package com.rawsanj.adminlte;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.builder.SpringApplicationBuilder;
import org.springframework.boot.web.support.SpringBootServletInitializer;
@SpringBootApplication
public class SpringBootAdminLteTemplateWithSpringSecurityApplication extends SpringBootServletInitializer {

    /** Launches the application with an embedded servlet container. */
    public static void main(String[] args) {
        SpringApplication.run(SpringBootAdminLteTemplateWithSpringSecurityApplication.class, args);
    }

    /** Registers this class as the configuration source when deployed as a WAR. */
    @Override
    protected SpringApplicationBuilder configure(SpringApplicationBuilder builder) {
        return builder.sources(SpringBootAdminLteTemplateWithSpringSecurityApplication.class);
    }
}
/*
* Copyright 2018, The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.example.android.navigation;
import android.os.Bundle;
import androidx.annotation.NonNull;
import androidx.annotation.Nullable;
import androidx.fragment.app.Fragment;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import androidx.navigation.NavController;
import androidx.navigation.fragment.NavHostFragment;
import com.example.android.navigation.databinding.FragmentRulesBinding;
public class RulesFragment extends Fragment {

    // Navigation controller for this fragment's NavHost.
    private NavController mNavController;

    @Nullable
    @Override
    public View onCreateView(@NonNull LayoutInflater inflater, @Nullable ViewGroup container, @Nullable Bundle savedInstanceState) {
        // Inflate via generated view binding rather than findViewById.
        FragmentRulesBinding binding = FragmentRulesBinding.inflate(inflater);
        mNavController = NavHostFragment.findNavController(this);
        // Navigate to the game screen when the play button is pressed.
        binding.playButton2.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View view) {
                mNavController.navigate(R.id.action_rulesFragment_to_gameFragment);
            }
        });
        return binding.getRoot();
    }
}
|
<gh_stars>1000+
// Standard Lock idiom: lock() is taken before the try block, and unlock()
// sits in finally so the lock is released even when the body throws.
public void m() {
    lock.lock();
    // A
    try {
        // ... method body
    } finally {
        // B
        lock.unlock();
    }
}
<reponame>quintel/etengine
# Adds an index on scenarios.created_at so time-based scenario queries avoid
# a full table scan.
class AddCreatedAtIndexToScenarios < ActiveRecord::Migration[5.2]
  def change
    add_index :scenarios, :created_at
  end
end
|
from typing import Any
from sqlite3 import Cursor


class SQLQuery:
    """A prepared SQL statement with pre-bound named parameters.

    Parameters assigned via ``query[name] = value`` are merged into every
    execution; keyword arguments supplied at call time provide the remaining
    bindings. Pre-bound parameters take precedence on name collisions.
    """

    def __init__(self, cursor: Cursor, sql: str):
        self.__cursor = cursor
        self.__sql = sql
        self.__params = {}

    def __setitem__(self, name: str, value) -> None:
        # Pre-bind a named parameter for all subsequent executions.
        self.__params[name] = value

    def __contains__(self, key: str) -> bool:
        return key in self.__params

    def __call__(self, **kwargs) -> Any:
        # Call-time keywords go in first so pre-bound params win on collision.
        bindings = dict(kwargs)
        bindings.update(self.__params)
        return self.__cursor.execute(self.__sql, bindings)
class DockerfileGenerator:
    """Renders Dockerfile text for a source checkout pinned to a git SHA."""

    DOCKERFILE_TEMPLATE = """
# Dockerfile generated by DockerfileGenerator
FROM base_image
COPY {source_archivedir} /app
ENV SOURCE_SHA={source_sha}
"""

    def __init__(self, sourcecode_path, sha):
        self.sourcecode_path = sourcecode_path
        self.sha = sha
        self.data = {}

    def populate_data(self):
        """Refresh the template substitution mapping from instance state."""
        self.data["source_archivedir"] = self.sourcecode_path
        self.data["source_sha"] = self.sha

    def _write_dockerfile(self):
        """Render the template with the current data and return the text."""
        self.populate_data()
        return self.DOCKERFILE_TEMPLATE.format(**self.data)


# Example usage
source_path = "/path/to/source/code"
sha_hash = "abc123"
dockerfile_gen = DockerfileGenerator(source_path, sha_hash)
dockerfile = dockerfile_gen._write_dockerfile()
print(dockerfile)
"""
# Node class
class Node:
# Function to initialise the node object
def __init__(self, data):
self.data = data # Assign data
self.next = None # Initialize next as null
# Linked List class
class LinkedList:
# Function to initialize head
def __init__(self):
self.head = None
# This function prints contents of linked list
# starting from head
def printList(self, si):
curr_node = self.head
ctr = 0
while(curr_node):
ctr += 1
if ctr == si:
return curr_node.data
curr_node = curr_node.next
# Code execution starts here
if __name__=='__main__':
# Start with the empty list
llist = LinkedList()
llist.head = Node(1)
second = Node(2)
third = Node(3)
llist.head.next = second; # Link first node with second
second.next = third; # Link second node with the third node
# Print the 60th element
llist.printList(60)
"""
# Call the function
print(llist.printList(60)) |
// Scrolls an element leftwards by `speed` pixels per tick (16 ms interval,
// roughly 60fps), wrapping back to 0 once the offset exceeds the element's
// visible width.
let textAnimation = function(element, speed) {
    let pos = 0;
    setInterval(() => {
        if (pos > element.clientWidth) {
            pos = 0;
        }
        element.style.transform = `translateX(-${pos}px)`;
        pos += speed;
    }, 16);
};

// Kick off the animation for the .my-text element.
let myText = document.querySelector('.my-text');
textAnimation(myText, 10);
<filename>backend/src/main/java/oidc/management/service/impl/DefaultUserAccountService.java
package oidc.management.service.impl;
import lombok.extern.log4j.Log4j2;
import oidc.management.model.UserAccount;
import oidc.management.repository.UserAccountRepository;
import oidc.management.service.UserAccountEncryptionService;
import oidc.management.service.UserAccountService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.dao.DataAccessResourceFailureException;
import org.springframework.data.domain.Page;
import org.springframework.data.domain.Pageable;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;
/**
* Default implementation of {@link UserAccountService}.
*
* @author <NAME>
* @since 27-03-2022
*/
@Log4j2
public class DefaultUserAccountService implements UserAccountService {

    @Autowired
    private UserAccountRepository userAccountRepository;

    @Autowired
    private UserAccountEncryptionService userAccountEncryptionService;

    @Override
    public UserAccount.UserAccountBuilder entityBuilder() {
        return userAccountRepository.entityBuilder();
    }

    /** Returns every stored user account, decrypted. */
    @Override
    public List<UserAccount> findAll() {
        // Find all user accounts and decrypt each one before returning.
        return (List<UserAccount>) this.userAccountRepository.findAll()
                .stream()
                .map(
                        // Decrypt user accounts
                        userAccount -> this.userAccountEncryptionService.decrypt((UserAccount) userAccount)
                )
                .collect(Collectors.toList());
    }

    /**
     * Returns a page of user accounts, optionally filtered by a tag search
     * term. All returned accounts are decrypted.
     *
     * @param pageable paging and sorting information
     * @param search   tag search term; null or empty returns all accounts
     */
    @Override
    public Page<UserAccount> findAll(Pageable pageable, String search) {
        // If there is no search term or the search term is empty
        if (search == null || search.isEmpty()) {
            // Bug fix: this branch previously returned the page WITHOUT
            // decrypting, unlike the search branch below — callers would
            // have received accounts with ciphertext fields.
            return this.userAccountRepository.findAll(pageable)
                    .map(
                            // Decrypt user accounts
                            userAccount -> this.userAccountEncryptionService.decrypt((UserAccount) userAccount)
                    );
        }

        // Return all user accounts that match the search term
        return this.userAccountRepository.findByTagsContainingIgnoreCase(search, pageable)
                .map(
                        // Decrypt user accounts
                        userAccount -> this.userAccountEncryptionService.decrypt((UserAccount) userAccount)
                );
    }

    /** Finds a user account by id; the result is decrypted. */
    @Override
    public Optional<UserAccount> findById(String id) {
        return this.userAccountRepository.findById(id)
                .map(
                        // Decrypt user account
                        userAccount -> this.userAccountEncryptionService.decrypt((UserAccount) userAccount)
                );
    }

    /**
     * Finds a user account by username. The lookup hashes the username so
     * the plaintext never reaches the database.
     */
    @Override
    public Optional<UserAccount> findByUsername(String username) {
        return this.userAccountRepository.findByHashedUsername(this.userAccountEncryptionService.hashUsername(username))
                .map(
                        // Decrypt user account
                        userAccount -> this.userAccountEncryptionService.decrypt((UserAccount) userAccount)
                );
    }

    /**
     * Encrypts and persists the given account (best-effort on database
     * hiccups, matching the original behavior). The caller's unencrypted
     * instance is returned with its generated id populated.
     */
    @Override
    public UserAccount save(UserAccount userAccount) {
        // Encrypt user account
        UserAccount encryptedUserAccount = this.userAccountEncryptionService.encrypt(userAccount);
        try {
            // Save user account
            this.userAccountRepository.save(encryptedUserAccount);
        } catch (DataAccessResourceFailureException e) {
            // Failed to read response from database
            log.warn("Problems saving user account", e);
        }
        // Propagate the generated id back to the caller's instance.
        userAccount.setId(encryptedUserAccount.getId());
        return userAccount;
    }

    /** Deletes a user account by id (best-effort on database hiccups). */
    @Override
    public void deleteById(String id) {
        try {
            // Delete user account by id
            this.userAccountRepository.deleteById(id);
        } catch (DataAccessResourceFailureException e) {
            // Failed to read response from database
            log.warn("Problems deleting user account", e);
        }
    }
}
|
<gh_stars>0
package main
import (
"flag"
"fmt"
"os"
"github.com/spacemeshos/smrepl/client"
"github.com/spacemeshos/smrepl/log"
"github.com/spacemeshos/smrepl/repl"
)
// main wires up command-line flags, opens a connection (optionally backed by
// a wallet file), verifies mesh-service connectivity, and starts the REPL.
func main() {
	var (
		dataDir    string
		walletName string
		be         *client.WalletBackend
	)
	grpcServer := client.DefaultGRPCServer
	secureConnection := client.DefaultSecureConnection

	flag.StringVar(&grpcServer, "server", grpcServer, fmt.Sprintf("The Spacemesh api grpc server host and port. Defaults to %s", client.DefaultGRPCServer))
	flag.BoolVar(&secureConnection, "secure", secureConnection, "Connect securely to the server. Default is false")
	flag.StringVar(&dataDir, "wallet_directory", getwd(), "set default wallet files directory")
	flag.StringVar(&walletName, "wallet", "", "set the name of wallet file to open")
	flag.Parse()

	// Open a walletless connection first; replaced below when a wallet file
	// name was supplied.
	be, err := client.OpenConnection(grpcServer, secureConnection, dataDir)
	if err != nil {
		flag.Usage()
		os.Exit(1)
	}

	if walletName != "" {
		walletPath := dataDir + "/" + walletName
		fmt.Println("opening ", walletPath)
		be, err = client.OpenWalletBackend(walletPath, grpcServer, secureConnection)
		if err != nil {
			fmt.Println("failed to open wallet file : ", err)
			os.Exit(1)
		}
	}

	// Sanity-check connectivity before handing control to the REPL.
	_, err = be.GetMeshInfo()
	if err != nil {
		log.Error("Failed to connect to mesh service at %v: %v", be.ServerInfo(), err)
		fmt.Println()
		flag.Usage()
		os.Exit(1)
	}

	repl.Start(be)
}
// getwd returns the current working directory, panicking when it cannot be
// determined (e.g. the directory was removed).
func getwd() string {
	wd, err := os.Getwd()
	if err != nil {
		panic(err)
	}
	return wd
}
|
#!/usr/bin/bash

# Resolve the directory containing this script.
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null && pwd)"

# Force the vehicle fingerprint so openpilot skips auto-detection.
export FINGERPRINT="TOYOTA COROLLA TSS2 2019"

$DIR/../launch_openpilot.sh
/*
* Copyright 2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.glowroot.instrumentation.test.harness.agent;
import java.util.Deque;
import java.util.List;
import com.google.common.collect.Lists;
import org.checkerframework.checker.nullness.qual.Nullable;
import org.glowroot.instrumentation.engine.impl.TimerNameImpl;
import org.glowroot.instrumentation.test.harness.ImmutableTimer;
public class TimerImpl implements org.glowroot.instrumentation.api.Timer {

    private final TimerNameImpl timerName;
    private final long startNanoTime;

    // Stack of currently-open timers; null for async timers, which are not
    // tracked on a stack (see createAsync).
    private final @Nullable Deque<TimerImpl> currTimerStack;

    private final List<TimerImpl> childTimers = Lists.newArrayList();

    // -1 until stop() records the elapsed time.
    private volatile long totalNanos = -1;
    private volatile boolean extended;

    // Creates a synchronous timer, links it as a child of the timer currently
    // on top of the stack (if any), and pushes it onto the stack.
    public static TimerImpl create(TimerNameImpl timerName, long startNanoTime,
            Deque<TimerImpl> currTimerStack) {
        TimerImpl timer = new TimerImpl(timerName, startNanoTime, currTimerStack);
        TimerImpl currTimer = currTimerStack.peek();
        if (currTimer != null) {
            currTimer.addChildTimer(timer);
        }
        currTimerStack.push(timer);
        return timer;
    }

    // Creates an async timer; async timers are not linked into any stack.
    public static TimerImpl createAsync(TimerNameImpl timerName, long startNanoTime) {
        return new TimerImpl(timerName, startNanoTime, null);
    }

    private TimerImpl(TimerNameImpl timerName, long startNanoTime,
            @Nullable Deque<TimerImpl> currTimerStack) {
        this.timerName = timerName;
        this.startNanoTime = startNanoTime;
        this.currTimerStack = currTimerStack;
    }

    public TimerNameImpl getTimerName() {
        return timerName;
    }

    public void addChildTimer(TimerImpl childTimer) {
        childTimers.add(childTimer);
    }

    @Override
    public void stop() {
        stop(System.nanoTime());
    }

    // Records the elapsed time and, for synchronous timers, pops this timer
    // off the stack, verifying balanced start/stop nesting.
    public void stop(long endNanoTime) {
        totalNanos = endNanoTime - startNanoTime;
        if (currTimerStack != null && currTimerStack.pop() != this) {
            throw new IllegalStateException(
                    "Unexpected value at the top of current parent span stack");
        }
    }

    long getTotalNanos() {
        return totalNanos;
    }

    public void setExtended() {
        extended = true;
    }

    // Converts this timer — and, recursively, its children — into the
    // immutable representation used by the test harness.
    public ImmutableTimer toImmutable() {
        ImmutableTimer.Builder builder = ImmutableTimer.builder()
                .name(timerName.name())
                .extended(extended)
                .totalNanos(totalNanos)
                .count(1);
        for (TimerImpl timer : childTimers) {
            builder.addChildTimers(timer.toImmutable());
        }
        return builder.build();
    }
}
|
#!/usr/local/bin/bash
#
# Pass the destination host as the 1st argument, and optionally
# the destination pool name as the 2nd (defaults to sndb).
#
# Switches:
#
# -c Compress (requires lz4)
# -n Dry run.
# -p <pools> List space-delimited pools to send
# -v Verbose output
# Datasets replicated by default (override with -p).
pools="db/solar93 db/solar93/index db/solar93/log db2/data93 solar/wal96"
# Datasets excluded from snapshotting (passed through to do_snapshots).
skip=""
#skip="db/solar93/log"
dry=""
comp=""
decomp=""
# Number of hourly snapshots to retain locally.
keep=168
# PostgreSQL WAL archive directory, cleaned up after a fully successful run.
wal_dir=/solar93/9.6/archives
ver=
send_opt=
while getopts ":cnp:v" opt; do
case $opt in
c)
comp="/usr/local/bin/lz4 -c -9"
decomp="/usr/local/bin/lz4 -d"
;;
n)
dry="1"
;;
p)
pools="$OPTARG"
;;
v)
ver="1"
send_opt="-v"
;;
esac
done
# Drop the parsed switches so $1/$2 are the positional arguments.
shift $(($OPTIND - 1))
dest=$1
destpool=${2:-sndb}
if [ -z "$pools" ]; then
echo 'Must specify the pools to export (-p "pool1 pool2...").'
exit 1
fi
if [ -z "$dest" ]; then
echo "Must specify the destination host as the first argument."
exit 1
fi
[[ -n "$dry" ]] && echo "DRY RUN"
[[ -n "$ver" ]] && echo "Sending ($pools) to $dest..."
# pull in support for do_snapshots function
. /usr/local/bin/zfs-snapshot
# pull in support for ssh-find-agent function
. ~/bin/ssh-find-agent.sh
make_pg_snapshot()
{
# Pool list, snapshot keep count, and skip list for do_snapshots.
p=$1
k=$2
s=$3
if [ -z "$dry" ]; then
# Put PostgreSQL into online-backup mode so the snapshots are consistent.
su - pgsql -c "/usr/local/bin/psql -d postgres -p 5496 -c "'"'"select pg_start_backup('hourly');"'"' \
>/dev/null
fi
now=`date +"hourly-%Y-%m-%d-%H"`
# Stdout is this function's return channel (the snapshot name), so all
# diagnostic output from do_snapshots goes to stderr or is discarded.
if [ -z "$dry" -a -n "$ver" ]; then
do_snapshots "$p" $k 'hourly' "$s" >&2
su - pgsql -c "/usr/local/bin/psql -d postgres -p 5496 -c "'"'"select pg_stop_backup();"'"' \
>/dev/null
elif [ -z "$dry" ]; then
do_snapshots "$p" $k 'hourly' "$s" >/dev/null
su - pgsql -c "/usr/local/bin/psql -d postgres -p 5496 -c "'"'"select pg_stop_backup();"'"' \
>/dev/null 2>&1
fi
echo $now;
}
# find_inc_start()
#
# Function to look for the most recent snapshot matching a specific name on the destination host.
# Prints out the found snapshot name, if found.
#
# Pass the source snapshot name to look for as the only argument.
find_inc_start()
{
src_snap=$1
# Map the source dataset path onto the destination pool name.
dest_snap="$destpool/${src_snap#*/}"
prev_snap=$(ssh $dest zfs list -t snapshot -H -o name -S creation -r $dest_snap 2>/dev/null |grep "^${dest_snap}@" |head -1)
# Print only the snapshot suffix (the part after '@').
echo ${prev_snap##*@}
}
# find_prev_inc()
#
# Like find_inc_start(), but prints the second most recent matching snapshot
# on the destination host (the one immediately before the newest).
#
# Pass the source snapshot name to look for as the only argument.
find_prev_inc()
{
src_snap=$1
dest_snap="$destpool/${src_snap#*/}"
prev_snap=$(ssh $dest zfs list -t snapshot -H -o name -S creation -r $dest_snap 2>/dev/null |grep "^${dest_snap}@" |head -2 |tail -1)
echo ${prev_snap##*@}
}
# destroy_snapshot_if_exists SNAP
#
# Destroy the given local snapshot if it still exists. Honours $dry
# (print only) and $ver (verbose) like the rest of this script.
destroy_snapshot_if_exists()
{
snap=$1
if ! zfs list $snap >/dev/null 2>&1; then
[[ -n "$ver" ]] && echo "Incremental source snapshot $snap already destroyed."
return
fi
if [ -n "$dry" ]; then
echo "Would destroy incremental source snapshot $snap..."
else
[[ -n "$ver" ]] && echo "Destroying incremental source snapshot $snap..."
zfs destroy $snap
fi
}
ssh-find-agent -a
[[ -n "$ver" ]] && echo "Creating snapshots..."
ts=$(make_pg_snapshot "$pools" $keep "$skip")
[[ -n "$ver" ]] && echo "Got snapshot $ts"
# Count pools attempted vs. successfully completed; the WAL cleanup below runs
# only when every pool replicated. NOTE: use $((...)) arithmetic -- the
# previous 'var+=1' form performs string concatenation on variables not
# declared integer, and only compared equal by accident.
count=0
complete=0
# Newest archived WAL file at snapshot time; older files become removable
# once all pools have been replicated.
newest_wal="$(ls -1t $wal_dir/ |head -1)"
for pool in $pools; do
count=$((count + 1))
snap="$pool@$ts"
inc_snap=$(find_inc_start $pool)
[[ -n "$ver" ]] && echo "Initial snapshot for $pool is [$inc_snap]."
if [ -z "$inc_snap" ]; then
# Nothing on the destination yet: full (non-incremental) send.
[[ -n "$ver" ]] && echo "Sending initial snapshot $snap to $dest $destpool..."
if [ -z "$dry" -a -n "$comp" -a -n "$decomp" ]; then
zfs send -R $send_opt $snap |$comp |ssh -C $dest "$decomp |zfs recv -F -d $destpool"
elif [ -z "$dry" ]; then
zfs send -R $send_opt $snap |ssh -C $dest "zfs recv -F -d $destpool"
else
zfs send -R -n -v $snap
fi
if [ $? -eq 0 ]; then
complete=$((complete + 1))
fi
elif [ "$ts" = "$inc_snap" ]; then
[[ -n "$ver" ]] && echo "Destination already contains $snap, not sending again."
complete=$((complete + 1))
prev_snap=$(find_prev_inc $pool)
if [ "$inc_snap" != "$prev_snap" ]; then
destroy_snapshot_if_exists $pool@$prev_snap
fi
else
[[ -n "$ver" ]] && echo "Sending incremental snapshot $pool $inc_snap - $ts to $dest $destpool..."
if [ -z "$dry" -a -n "$comp" -a -n "$decomp" ]; then
zfs send -R $send_opt -i $pool@$inc_snap $snap |$comp |ssh -C $dest "$decomp |zfs recv -F -d $destpool"
elif [ -z "$dry" ]; then
zfs send -R $send_opt -i $pool@$inc_snap $snap |ssh -C $dest "zfs recv -F -d $destpool"
else
zfs send -R -n -v -i $pool@$inc_snap $snap
fi
if [ $? -eq 0 ]; then
complete=$((complete + 1))
destroy_snapshot_if_exists $pool@$inc_snap
fi
fi
done
if [ $count -eq $complete ]; then
[[ -n "$ver" ]] && echo "Cleaning archived WAL files from $wal_dir older than $newest_wal..."
if [ -z "$dry" ]; then
find $wal_dir -type f ! -name $newest_wal -a ! -newer $wal_dir/$newest_wal -exec rm -f {} \;
else
find $wal_dir -type f ! -name $newest_wal -a ! -newer $wal_dir/$newest_wal -print
fi
fi
[[ -n "$ver" ]] && echo "Done."
|
<gh_stars>1-10
import React from 'react';
// import classes from './opportunity-filter.module.css';
type PropType = {
onChange?(value: string): void;
onFocus?(): void;
};
export function OpportunityFilter(props: PropType) {
return (
<>
{/*
<div className={classes.container}>
<div className={classes.row}>
<input type="checkbox" />
<span>Female Only</span>
</div>
</div>
*/}
<input
type="search"
name=""
placeholder="search..."
onFocus={() => {
if (typeof props.onFocus === 'function') {
props.onFocus();
}
}}
onChange={(ev) => {
if (typeof props.onChange === 'function') {
props.onChange(ev.target.value);
}
}}
/>
</>
);
}
OpportunityFilter.defaultProps = {
onChange: () => {},
onFocus: () => {},
};
|
package web
import (
"net/http"
"fmt"
"github.com/chain-service/web/controllers"
)
func Serve(app *controllers.Application) {
fs := http.FileServer(http.Dir("web/assets"))
http.Handle("/assets/", http.StripPrefix("/assets/", fs))
http.HandleFunc("/home.html", app.HomeHandler)
http.HandleFunc("/request.html", app.RequestHandler)
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, "/home.html", http.StatusTemporaryRedirect)
})
fmt.Println("Listening (http://localhost:3000/) ...")
http.ListenAndServe(":3000", nil)
} |
#!/usr/bin/env bash
# Triggers automatic deploy for certain branches.
#
# Uses Shippable env variables:
# - BRANCH
# - COMMIT
# - API_TOKEN (Shippable API authentication)
# - REPO_FULL_NAME, COMMITTER, COMPARE_URL (forwarded to the deploy job)
set -e

# elementIn NEEDLE ITEM... -> returns 0 when NEEDLE equals one of ITEM...
elementIn () {
  local e
  for e in "${@:2}"; do [[ "$e" == "$1" ]] && return 0; done
  return 1
}

# Automatic deploy allowed for these branches only.
DEPLOY_BRANCHES=("staging" "master")

if ! elementIn "$BRANCH" "${DEPLOY_BRANCHES[@]}" ;
then
  # Typo fix: was "Skiping"
  echo "Skipping deploy as branch is not allowed for automatic deploy"
  exit 0
fi

# Fail early with a clear message instead of an unauthorized API call.
if [ -z "$API_TOKEN" ]; then
  echo "API_TOKEN is not set; cannot trigger deploy."
  exit 1
fi

# Id of the Shippable project containing the deploy script.
DEPLOY_PROJECT_ID=5804f143e8fe021000f9aed1
BUILT_PROJECT_NAME=rgi

# Trigger Shippable to run the deploy project and pass the current project name, branch, and latest commit hash, committer, commit message
STATUS=$(curl -s\
 -H "Authorization: apiToken $API_TOKEN"\
 -H "Content-Type: application/json"\
 -d "{\"branchName\":\"master\",\"globalEnv\": {\"PROJECT\":\"$BUILT_PROJECT_NAME\", \"PROJECT_BRANCH\":\"$BRANCH\", \"PROJECT_COMMIT\":\"$COMMIT\", \"PROJECT_REPO_FULL_NAME\":\"$REPO_FULL_NAME\", \"PROJECT_COMMITTER\":\"$COMMITTER\", \"PROJECT_COMPARE_URL\":\"$COMPARE_URL\" }}"\
 "https://api.shippable.com/projects/$DEPLOY_PROJECT_ID/newBuild")

echo "$STATUS"
# A successful trigger response contains the new run's id.
if [[ "$STATUS" == *"runId"* ]]
then
  echo "Deploy triggered successfully";
  exit 0
else
  echo "Failed to trigger deploy.";
  exit 1
fi
|
/*
TITLE Binding arguments with Function objects Chapter24Exercise3.cpp
COMMENT
Objective: Write an apply(f,a) that can take a void f(T&), a T f(const T&), and
their function object equivalents. Hint: Boost::bind.
I'm clearly not getting something right.
Input: -
Output: -
Author: <NAME>
Date: 05.05.2017
*/
#include <iostream>
#include <functional> // std::bind
#include "Matrix.h"
#include "MatrixIO.h"
using namespace Numeric_lib;
/*
"Freestanding" function object:
Does not modify the input parameter.
The, copy of, the result is meant to be
used as an initialization value.
*/
template <class T>
struct DoubleF
{
	// Returns twice the argument. Take by const reference (matching the
	// documented "does not modify the input" intent and avoiding a copy for
	// non-trivial T) and mark the call operator const so the functor is
	// usable through const references.
	T operator()(const T& src) const { return src * 2; }
};
//-------------------------------------------------------------------------------
/*
"Broadcast" function object:
Modifies the input parameter.
The result is the modified input parameter,
i.e. an already existing value.
*/
template <class T>
struct DoubleRefF
{
	// Doubles the argument in place. The call operator is const (it mutates
	// the argument, not the functor), so the object works through const refs.
	void operator()(T& ref_src) const { ref_src *= 2; }
};
//-------------------------------------------------------------------------------
/*
"Freestanding" function:
Does not modify the input parameter.
The, copy of, the result is meant to be
used as an initialization value.
*/
template <class T>
// Returns twice the argument; const reference avoids copying non-trivial T.
inline T Double(const T& src) { return src * 2; }
//-------------------------------------------------------------------------------
/*
"Broadcast" function:
Modifies the input parameter.
The result is the modified input parameter,
i.e. an already existing value.
*/
// In-place doubling via operator*=; suitable for Matrix::apply-style
// "broadcast" application where the argument itself is modified.
template <class T>
inline void DoubleRef(T& ref_src) { ref_src *= 2; }
//-------------------------------------------------------------------------------
// Exercise driver: demonstrates Matrix<int,1>::apply and the free apply()
// with both plain functions and function objects, in "freestanding"
// (value-returning) and "broadcast" (in-place) forms.
int main()
{
	try
	{
		Matrix<int, 1> a(10);
		int arr[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
		a = arr;
		std::cout <<"Initial Matrix:\n"<< a <<'\n';
		// "freestanding" apply
		a = apply(Double<int>, a);
		std::cout <<"Matrix.apply(Double):\n"<< a <<'\n';
		// broadcast function
		a.apply(DoubleRef<int>);
		std::cout <<"Matrix.apply(DoubleRef):\n"<< a <<'\n';
		// "freestanding" apply object
		a = apply(DoubleF<int>(), a);
		std::cout <<"Matrix.apply(DoubleF):\n"<< a <<'\n';
		// broadcast function object
		a.apply(DoubleRefF<int>());
		std::cout <<"Matrix.apply(DoubleRefF):\n"<< a <<'\n';
	}
	catch(std::exception& e)
	{
		std::cerr << e.what();
	}
	// Keep the console window open until a key is pressed (Windows-style pause).
	getchar();
}
import {assert} from "chai";
import * as Yargs from "yargs";
import * as Options from "./options";
// Exercises the yargs-backed options module: each test seeds the parsed
// command line via Yargs([...]) and asserts on getOptions()'s result.
describe("options module has a", () => {
    describe("getOptions function that", () => {
        it("should load the configuration options", () => {
            // Simulate a fully specified command line.
            Yargs([
                "--delete",
                "--directory",
                "./test",
                "--exclude",
                "zeta.ts$",
                "--include",
                "a.ts$",
                "--location",
                "top",
                "--name",
                "barrel",
                "--structure",
                "filesystem",
                "--verbose",
            ]);
            const options = Options.getOptions();
            // Derived options computed from the raw flags.
            // tslint:disable-next-line:no-console
            assert.equal(options.logger, console.log);
            assert.match(options.rootPath, /test$/);
            assert.equal(options.barrelName, "barrel.ts");
            // From yargs
            assert.isUndefined(options.config);
            assert.equal(options.delete, true);
            assert.equal(options.directory, "./test");
            assert.sameMembers(options.exclude as string[], ["zeta.ts$"]);
            assert.sameMembers(options.include as string[], ["a.ts$"]);
            assert.equal(options.location, "top");
            assert.equal(options.name, "barrel");
            assert.equal(options.structure, "filesystem");
            assert.equal(options.verbose, true);
        });
        it("should not use the console if logging is disabled", () => {
            // No --verbose flag: the logger must not be console.log.
            Yargs([]);
            const options = Options.getOptions();
            // tslint:disable-next-line:no-console
            assert.notEqual(options.logger, console.log);
        });
        it("should not append .ts to the name option if already present", () => {
            Yargs(["--name", "barrel.ts"]);
            const options = Options.getOptions();
            assert.equal(options.barrelName, "barrel.ts");
        });
        it("should resolve the baseUrl if specified", () => {
            Yargs(["--baseUrl", "/base/url"]);
            const options = Options.getOptions();
            // Path separator may be / or \ depending on platform.
            assert.match(options.combinedBaseUrl as string, /base[\\/]url$/);
        });
    });
});
|
// NOTE(review): generated UMD bundle for the Angular bar-chart module;
// prefer regenerating from the TypeScript source over hand-editing.
(function (global, factory) {
    typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('@angular/core'), require('@angular/common')) :
    typeof define === 'function' && define.amd ? define(['exports', '@angular/core', '@angular/common'], factory) :
    // NOTE(review): the global fallback reads core from global.ng.core but
    // common from global.common -- looks inconsistent; confirm against the
    // project's other generated bundles.
    (factory((global.ng = global.ng || {}, global.ng['bar-chart'] = {}),global.ng.core,global.common));
}(this, (function (exports,core,common) { 'use strict';
var BarChartComponent = (function () {
    function BarChartComponent() {
    }
    BarChartComponent.prototype.ngOnInit = function () { };
    BarChartComponent.decorators = [
        { type: core.Component, args: [{
                    selector: 'bar-chart',
                    templateUrl: 'bar-chart.component.html'
                },] },
    ];
    /** @nocollapse */
    BarChartComponent.ctorParameters = function () { return []; };
    return BarChartComponent;
}());
var BarChartModule = (function () {
    function BarChartModule() {
    }
    BarChartModule.decorators = [
        { type: core.NgModule, args: [{
                    imports: [
                        common.CommonModule
                    ],
                    declarations: [
                        BarChartComponent
                    ],
                    exports: [
                        BarChartComponent
                    ]
                },] },
    ];
    /** @nocollapse */
    BarChartModule.ctorParameters = function () { return []; };
    return BarChartModule;
}());
exports.BarChartModule = BarChartModule;
exports.BarChartComponent = BarChartComponent;
Object.defineProperty(exports, '__esModule', { value: true });
})));
|
/**
 * A minimal LIFO stack backed by an Object array.
 *
 * <p>The backing array grows automatically when full, so {@code push} never
 * fails (the original implementation threw ArrayIndexOutOfBoundsException
 * once the initial capacity was exceeded). {@code pop()} and {@code peek()}
 * return {@code null} on an empty stack. Not thread-safe.
 */
public class Stack {
    private Object[] arr;
    // index of the current top element; -1 when the stack is empty
    private int top;

    /**
     * Creates a stack with the given initial capacity.
     *
     * @param size initial capacity of the backing array
     */
    public Stack(int size) {
        this.arr = new Object[size];
        this.top = -1;
    }

    /**
     * Removes and returns the top element, or {@code null} if the stack is empty.
     */
    public Object pop() {
        if (this.top < 0) {
            // Stack is empty
            return null;
        }
        Object value = this.arr[this.top];
        // Clear the slot so the stack does not keep a stale reference to the
        // popped object and prevent it from being garbage collected.
        this.arr[this.top] = null;
        this.top--;
        return value;
    }

    /**
     * Pushes an item onto the stack, growing the backing array when full.
     *
     * @param item the value to push (may be {@code null})
     */
    public void push(Object item) {
        if (this.top + 1 == this.arr.length) {
            // Double the capacity; guard against a zero-sized initial array.
            int newCapacity = Math.max(1, this.arr.length * 2);
            this.arr = java.util.Arrays.copyOf(this.arr, newCapacity);
        }
        this.top++;
        this.arr[this.top] = item;
    }

    /**
     * Returns the top element without removing it, or {@code null} if empty.
     */
    public Object peek() {
        if (this.top < 0) {
            // Stack is empty
            return null;
        }
        return this.arr[this.top];
    }
}
def is_prime(n):
    """Return True if ``n`` is a prime number.

    Fixes two defects in the original: 0 and 1 (and negatives) were
    reported as prime, and trial division ran all the way to ``n``
    instead of stopping at sqrt(n).
    """
    if n < 2:
        return False
    i = 2
    while i * i <= n:
        if n % i == 0:
            return False
        i += 1
    return True


def find_primes(start, end):
    """Return the list of primes in the inclusive range [start, end]."""
    return [num for num in range(start, end + 1) if is_prime(num)]


find_primes(2, 100)  # [2, 3, 5, 7, 11, ..., 97]
<gh_stars>1-10
/* unparser.h
* This header file contains the class declaration for the newest unparser. Six
* C files include this header file: unparser.C, modified_sage.C, unparse_stmt.C,
* unparse_expr.C, unparse_type.C, and unparse_sym.C.
*/
#ifndef UNPARSER_FORMAT_H
#define UNPARSER_FORMAT_H
//#include "sage3.h"
//#include "roseInternal.h"
#include "unparser.h"
class SgScopeStatement;
class SgLocatedNode;
class Unparser;
#define KAI_NONSTD_IOSTREAM 1
// #include IOSTREAM_HEADER_FILE
#include <iostream>
// DQ (1/26/2009): a value of 1000 is too small for Fortran code (see test2009_09.f; from <NAME>)
// This value is now increased to 1,000,000. If this is too small then likely we want to
// know about it anyway!
// #define MAXCHARSONLINE 1000
#define MAXCHARSONLINE 1000000
#define MAXINDENT 60
// DQ: Try out a larger setting
#define TABINDENT 2
// #define TABINDENT 5
// Used in unparser.C in functions Unparser::count_digits(<type>)
// default was 10, but that is too small for float, double, and long double
// (use of 10 also generated a purify error for case of double)
// Size of buffer used to generation of strings from number values,
// it is larger than what we need to display because the values can
// be arbitrarily large (up to the size of MAX_DOUBLE)
// #define MAX_DIGITS 128
#define MAX_DIGITS 512
// Size of what we want to have be displayed in the unparsed (generated) code.
// No string representing a number should be larger then this value
// (if it were to be then it is regenerated in exponential notation).
#define MAX_DIGITS_DISPLAY 32
// DQ (3/16/2006): Added comments.
// These control how indentation and newlines are added in the
// pre and post processing of each statement.
typedef enum Format_Opt
{
FORMAT_BEFORE_DIRECTIVE,
FORMAT_AFTER_DIRECTIVE,
FORMAT_BEFORE_STMT,
FORMAT_AFTER_STMT,
FORMAT_BEFORE_BASIC_BLOCK1,
FORMAT_AFTER_BASIC_BLOCK1,
FORMAT_BEFORE_BASIC_BLOCK2,
FORMAT_AFTER_BASIC_BLOCK2,
FORMAT_BEFORE_NESTED_STATEMENT,
FORMAT_AFTER_NESTED_STATEMENT
} FormatOpt;
#include "unparseFormatHelp.h"
// UnparseFormat encapsulates the output-formatting state of the unparser:
// it tracks the current line/column/indent, performs indentation and line
// wrapping, and funnels generated text through its operator<< overloads to
// the output stream.
class UnparseFormat
{
    int currentLine; //! stores current line number being unparsed
    int currentIndent; //! indent of the current line
    int chars_on_line; //! the number of characters printed on the line
    int stmtIndent; //! the current indent for statement
    int linewrap; //! the characters allowed perline before wraping the line
    int indentstop; //! the number of spaces allowed for indenting
    SgLocatedNode* prevnode; //! The previous SgLocatedNode unparsed
    std::ostream* os; //! the directed output for the current file
    UnparseFormatHelp *formatHelpInfo;
    // void insert_newline(int i = 1, int indent = -1);
    void insert_space(int);
    //! make the output nicer
    void removeTrailingZeros ( char* inputString );
    //! delegate a formatting decision to formatHelpInfo, when one is installed
    bool formatHelp(SgLocatedNode*, SgUnparse_Info& info, FormatOpt opt = FORMAT_BEFORE_STMT);
  public:
    //! streaming operators for every value type the unparser emits
    UnparseFormat& operator << (std::string out);
    UnparseFormat& operator << (int num);
    UnparseFormat& operator << (short num);
    UnparseFormat& operator << (unsigned short num);
    UnparseFormat& operator << (unsigned int num);
    UnparseFormat& operator << (long num);
    UnparseFormat& operator << (unsigned long num);
    UnparseFormat& operator << (long long num);
    UnparseFormat& operator << (unsigned long long num);
    UnparseFormat& operator << (float num);
    UnparseFormat& operator << (double num);
    UnparseFormat& operator << (long double num);
    // DQ (10/13/2006): Added to support debugging!
    // UnparseFormat& operator << (void* pointerValue);
    int current_line() const { return currentLine; }
    int current_col() const { return chars_on_line; }
    //! true when nothing beyond the indent has been printed on this line
    bool line_is_empty() const { return currentIndent == chars_on_line; }
    // DQ (2/16/2004): Make this part of the public interface (to control old-style K&R C function definitions)
    void insert_newline(int i = 1, int indent = -1);
    // DQ (12/10/2014): Reset the chars_on_line to zero, used in token based unparsing to reset the
    // formatting for AST subtrees unparsed using the AST in conjunction with the token based unparsing.
    void reset_chars_on_line();
    // private:
    //Unparser* unp;
  public:
    // UnparserFormat(Unparser* unp,std::ostream* _os, UnparseFormatHelp *help = NULL):unp(unp){};
    UnparseFormat( std::ostream* _os = NULL, UnparseFormatHelp *help = NULL );
    ~UnparseFormat();
    // DQ (9/11/2011): Added copy constructor.
    UnparseFormat(const UnparseFormat & X);
    // DQ (9/11/2011): Added operator==() to fix issue detected in static analysis.
    UnparseFormat & operator=(const UnparseFormat & X);
    //! the ultimate formatting functions
    void format(SgLocatedNode*, SgUnparse_Info& info, FormatOpt opt = FORMAT_BEFORE_STMT);
    void flush() { os->flush(); }
    void set_linewrap( int w);// { linewrap = w; } // no wrapping if linewrap <= 0
    int get_linewrap() const;// { return linewrap; }
    void set_indentstop( int s) { indentstop = s; }
    int get_indentstop() const { return indentstop; }
    // DQ (3/18/2006): Added to support presentation and debugging of formatting
    std::string formatOptionToString(FormatOpt opt);
    // DQ (6/6/2007): Debugging support for hidden list data held in scopes
    void outputHiddenListData ( Unparser* unp,SgScopeStatement* inputScope );
    // DQ (9/30/2013): We need access to the std::ostream* os so that we can support token output without interpretation of line endings.
    std::ostream* output_stream () { return os; }
};
#endif
|
# Debug helper: print the environment variables the platform passes in.
# NOTE(review): looks like a CI/test hook stub -- confirm the intended caller.
echo "SCRIPT_NAME: $SCRIPT_NAME"
echo "SHARED_HTPASSWD_PATH: $SHARED_HTPASSWD_PATH"
echo "APP_SCRIPT_PATH: $APP_SCRIPT_PATH"
echo "APP_START_SCRIPT_PATH: $APP_START_SCRIPT_PATH"
echo "No tests"
<filename>tests/test_db_hybrid/test_where_not_equal.py
import pytest
import uvicore
import sqlalchemy as sa
from uvicore.support.dumper import dump
# DB Hybrid
# Table under test; imported lazily so the app1 fixture can configure uvicore
# before the model module is loaded.
@pytest.fixture(scope="module")
def Posts():
    from app1.database.tables.posts import Posts
    yield Posts
# Shorthand fixture for the SQLAlchemy column collection of the posts table.
@pytest.fixture(scope="module")
def post(Posts):
    yield Posts.table.c
@pytest.mark.asyncio
async def test_single(app1, Posts, post):
    # Single NOT where
    query = uvicore.db.query().table(Posts.table).where(post.creator_id, '!=', 2)
    posts = await query.get()
    #print(query.sql());dump(posts); dump(posts[0].keys())
    # Expected ids assume the seeded app1 test dataset.
    assert [1, 2, 6, 7] == [x.id for x in posts]
@pytest.mark.asyncio
async def test_single_bexp(app1, Posts, post):
    # Single NOT where - binary expression
    posts = await uvicore.db.query().table(Posts.table).where(post.creator_id != 2).get()
    assert [1, 2, 6, 7] == [x.id for x in posts]
@pytest.mark.asyncio
async def test_and(app1, Posts, post):
    # Multiple where NOT AND
    posts = await uvicore.db.query().table(Posts.table).where(post.creator_id, '!=', 2).where(post.owner_id, '!=', 2).get()
    assert [6, 7] == [x.id for x in posts]
@pytest.mark.asyncio
async def test_and_bexp(app1, Posts, post):
    # Multiple where NOT AND - binary expression
    posts = await uvicore.db.query().table(Posts.table).where(post.creator_id != 2).where(post.owner_id != 2).get()
    assert [6, 7] == [x.id for x in posts]
@pytest.mark.asyncio
async def test_and_list(app1, Posts, post):
    # Multiple where NOT AND using a LIST
    posts = await uvicore.db.query().table(Posts.table).where([
        (post.creator_id, '!=', 2),
        (post.owner_id, '!=', 2),
    ]).get()
    assert [6, 7] == [x.id for x in posts]
@pytest.mark.asyncio
async def test_and_list_bexp(app1, Posts, post):
    # Multiple where NOT AND using a LIST - binary expression
    posts = await uvicore.db.query().table(Posts.table).where([
        post.creator_id != 2,
        post.owner_id != 2,
    ]).get()
    assert [6, 7] == [x.id for x in posts]
@pytest.mark.asyncio
async def test_or(app1, Posts, post):
    # Where NOT OR
    posts = await uvicore.db.query().table(Posts.table).or_where([
        (post.creator_id, '!=', 1),
        (post.owner_id, '!=', 2)
    ]).get()
    assert [3, 4, 5, 6, 7] == [x.id for x in posts]
@pytest.mark.asyncio
async def test_or_bexp(app1, Posts, post):
    # Where NOT OR - binary expression
    posts = await uvicore.db.query().table(Posts.table).or_where([
        post.creator_id != 1,
        post.owner_id != 2
    ]).get()
    assert [3, 4, 5, 6, 7] == [x.id for x in posts]
@pytest.mark.asyncio
async def test_and_or(app1, Posts, post):
    # Where NOT AND with where OR
    posts = await uvicore.db.query().table(Posts.table).where(post.unique_slug, '!=', 'test-post5').or_where([
        (post.creator_id, '!=', 1),
        (post.owner_id, '!=', 2)
    ]).get()
    assert [3, 4, 6, 7] == [x.id for x in posts]
@pytest.mark.asyncio
async def test_and_or_bexp(app1, Posts, post):
    # Where NOT AND with where OR - binary expression
    posts = await uvicore.db.query().table(Posts.table).where(post.unique_slug != 'test-post5').or_where([
        post.creator_id != 1,
        post.owner_id != 2
    ]).get()
    assert [3, 4, 6, 7] == [x.id for x in posts]
|
<filename>core/migrations/0003_auto_20190821_1125.py
# Generated by Django 2.2.4 on 2019-08-21 09:25
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations (2.2.4); avoid hand-editing
    # beyond comments. Alters Address.province to a TextField defaulting to
    # the empty string.
    dependencies = [
        ('core', '0002_remove_product_exploitation'),
    ]
    operations = [
        migrations.AlterField(
            model_name='address',
            name='province',
            field=models.TextField(default=''),
        ),
    ]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Utility to retrieve x-ray data from external libraries/databases
===================================================================
Available libraries/dbs and order of usage
------------------------------------------
1. `xraylib <https://github.com/tschoonj/xraylib>`_
2. ``Elements`` in `PyMca <https://github.com/vasole/pymca>`_
3. ``xraydb_plugin.py`` in `Larch <https://github.com/xraypy/xraylarch>`_
.. note:: `XrayDB <https://github.com/xraypy/XrayDB>`_ has its own package now
"""
import math
import numpy as np
try:
from PyMca5.PyMcaPhysics.xrf.Elements import Element
HAS_PYMCA = True
except ImportError:
HAS_PYMCA = False
ELEMENTS_DICT = None
pass
try:
import xraylib as xl
#xl.SetErrorMessages(0) #: disable showing error messages // DEPRECATED since xraylib 4.1.1
HAS_XRAYLIB = True
except ImportError:
HAS_XRAYLIB = False
pass
try:
import xraydb
HAS_XRAYDB = True
except ImportError:
HAS_XRAYDB = False
pass
from sloth.utils.bragg import findhkl
from sloth.math.lineshapes import lorentzian, fwhm2sigma
#: MODULE LOGGER
from .logging import getLogger
_LOGGER = getLogger("sloth.utils.xdata", level="INFO")
#: ERROR MESSAGES
def _missing_lib_error(libname, ret=None):
    """Log a "<libname> not found" error and return ``ret``.

    Shared implementation for the per-library helpers below, which all
    differed only in the library name (copy-paste duplication).
    """
    _LOGGER.error("%s not found", libname)
    return ret
def _larch_error(ret=None):
    """Log a missing larch error message and return given 'ret'"""
    return _missing_lib_error("Larch", ret)
def _xraylib_error(ret=None):
    """Log a missing xraylib error message and return given 'ret'"""
    return _missing_lib_error("Xraylib", ret)
def _pymca_error(ret=None):
    """Log a missing PyMca5 error message and return given 'ret'"""
    return _missing_lib_error("PyMca5", ret)
def _xraydb_error(ret=None):
    """Log a missing XrayDB error message and return given 'ret'"""
    return _missing_lib_error("XrayDB", ret)
#######################
#: ELEMENTS AND LINES #
#######################
#: Taken from PyMca5/PyMcaPhysics/xrf/Elements.py
#
# Symbol Atomic Number x y ( positions on table )
# name, mass, density
#
ELEMENTS_INFO = (
("H", 1, 1, 1, "hydrogen", 1.00800, 0.08988),
("He", 2, 18, 1, "helium", 4.00300, 0.17860),
("Li", 3, 1, 2, "lithium", 6.94000, 534.000),
("Be", 4, 2, 2, "beryllium", 9.01200, 1848.00),
("B", 5, 13, 2, "boron", 10.8110, 2340.00),
("C", 6, 14, 2, "carbon", 12.0100, 1580.00),
("N", 7, 15, 2, "nitrogen", 14.0080, 1.25000),
("O", 8, 16, 2, "oxygen", 16.0000, 1.42900),
("F", 9, 17, 2, "fluorine", 19.0000, 1108.00),
("Ne", 10, 18, 2, "neon", 20.1830, 0.90020),
("Na", 11, 1, 3, "sodium", 22.9970, 970.000),
("Mg", 12, 2, 3, "magnesium", 24.3200, 1740.00),
("Al", 13, 13, 3, "aluminium", 26.9700, 2720.00),
("Si", 14, 14, 3, "silicon", 28.0860, 2330.00),
("P", 15, 15, 3, "phosphorus", 30.9750, 1820.00),
("S", 16, 16, 3, "sulphur", 32.0660, 2000.00),
("Cl", 17, 17, 3, "chlorine", 35.4570, 1560.00),
("Ar", 18, 18, 3, "argon", 39.9440, 1.78400),
("K", 19, 1, 4, "potassium", 39.1020, 862.000),
("Ca", 20, 2, 4, "calcium", 40.0800, 1550.00),
("Sc", 21, 3, 4, "scandium", 44.9600, 2992.00),
("Ti", 22, 4, 4, "titanium", 47.9000, 4540.00),
("V", 23, 5, 4, "vanadium", 50.9420, 6110.00),
("Cr", 24, 6, 4, "chromium", 51.9960, 7190.00),
("Mn", 25, 7, 4, "manganese", 54.9400, 7420.00),
("Fe", 26, 8, 4, "iron", 55.8500, 7860.00),
("Co", 27, 9, 4, "cobalt", 58.9330, 8900.00),
("Ni", 28, 10, 4, "nickel", 58.6900, 8900.00),
("Cu", 29, 11, 4, "copper", 63.5400, 8940.00),
("Zn", 30, 12, 4, "zinc", 65.3800, 7140.00),
("Ga", 31, 13, 4, "gallium", 69.7200, 5903.00),
("Ge", 32, 14, 4, "germanium", 72.5900, 5323.00),
("As", 33, 15, 4, "arsenic", 74.9200, 5730.00),
("Se", 34, 16, 4, "selenium", 78.9600, 4790.00),
("Br", 35, 17, 4, "bromine", 79.9200, 3120.00),
("Kr", 36, 18, 4, "krypton", 83.8000, 3.74000),
("Rb", 37, 1, 5, "rubidium", 85.4800, 1532.00),
("Sr", 38, 2, 5, "strontium", 87.6200, 2540.00),
("Y", 39, 3, 5, "yttrium", 88.9050, 4405.00),
("Zr", 40, 4, 5, "zirconium", 91.2200, 6530.00),
("Nb", 41, 5, 5, "niobium", 92.9060, 8570.00),
("Mo", 42, 6, 5, "molybdenum", 95.9500, 10220.0),
("Tc", 43, 7, 5, "technetium", 99.0000, 11500.0),
("Ru", 44, 8, 5, "ruthenium", 101.0700, 12410.0),
("Rh", 45, 9, 5, "rhodium", 102.9100, 12440.0),
("Pd", 46, 10, 5, "palladium", 106.400, 12160.0),
("Ag", 47, 11, 5, "silver", 107.880, 10500.0),
("Cd", 48, 12, 5, "cadmium", 112.410, 8650.00),
("In", 49, 13, 5, "indium", 114.820, 7280.00),
("Sn", 50, 14, 5, "tin", 118.690, 5310.00),
("Sb", 51, 15, 5, "antimony", 121.760, 6691.00),
("Te", 52, 16, 5, "tellurium", 127.600, 6240.00),
("I", 53, 17, 5, "iodine", 126.910, 4940.00),
("Xe", 54, 18, 5, "xenon", 131.300, 5.90000),
("Cs", 55, 1, 6, "caesium", 132.910, 1873.00),
("Ba", 56, 2, 6, "barium", 137.360, 3500.00),
("La", 57, 3, 6, "lanthanum", 138.920, 6150.00),
("Ce", 58, 4, 9, "cerium", 140.130, 6670.00),
("Pr", 59, 5, 9, "praseodymium", 140.920, 6769.00),
("Nd", 60, 6, 9, "neodymium", 144.270, 6960.00),
("Pm", 61, 7, 9, "promethium", 147.000, 6782.00),
("Sm", 62, 8, 9, "samarium", 150.350, 7536.00),
("Eu", 63, 9, 9, "europium", 152.000, 5259.00),
("Gd", 64, 10, 9, "gadolinium", 157.260, 7950.00),
("Tb", 65, 11, 9, "terbium", 158.930, 8272.00),
("Dy", 66, 12, 9, "dysprosium", 162.510, 8536.00),
("Ho", 67, 13, 9, "holmium", 164.940, 8803.00),
("Er", 68, 14, 9, "erbium", 167.270, 9051.00),
("Tm", 69, 15, 9, "thulium", 168.940, 9332.00),
("Yb", 70, 16, 9, "ytterbium", 173.040, 6977.00),
("Lu", 71, 17, 9, "lutetium", 174.990, 9842.00),
("Hf", 72, 4, 6, "hafnium", 178.500, 13300.0),
("Ta", 73, 5, 6, "tantalum", 180.950, 16600.0),
("W", 74, 6, 6, "tungsten", 183.920, 19300.0),
("Re", 75, 7, 6, "rhenium", 186.200, 21020.0),
("Os", 76, 8, 6, "osmium", 190.200, 22500.0),
("Ir", 77, 9, 6, "iridium", 192.200, 22420.0),
("Pt", 78, 10, 6, "platinum", 195.090, 21370.0),
("Au", 79, 11, 6, "gold", 197.200, 19370.0),
("Hg", 80, 12, 6, "mercury", 200.610, 13546.0),
("Tl", 81, 13, 6, "thallium", 204.390, 11860.0),
("Pb", 82, 14, 6, "lead", 207.210, 11340.0),
("Bi", 83, 15, 6, "bismuth", 209.000, 9800.00),
("Po", 84, 16, 6, "polonium", 209.000, 0),
("At", 85, 17, 6, "astatine", 210.000, 0),
("Rn", 86, 18, 6, "radon", 222.000, 9.73000),
("Fr", 87, 1, 7, "francium", 223.000, 0),
("Ra", 88, 2, 7, "radium", 226.000, 0),
("Ac", 89, 3, 7, "actinium", 227.000, 0),
("Th", 90, 4, 10, "thorium", 232.000, 11700.0),
("Pa", 91, 5, 10, "proactinium", 231.03588, 0),
("U", 92, 6, 10, "uranium", 238.070, 19050.0),
("Np", 93, 7, 10, "neptunium", 237.000, 0),
("Pu", 94, 8, 10, "plutonium", 239.100, 19700.0),
("Am", 95, 9, 10, "americium", 243, 0),
("Cm", 96, 10, 10, "curium", 247, 0),
("Bk", 97, 11, 10, "berkelium", 247, 0),
("Cf", 98, 12, 10, "californium", 251, 0),
("Es", 99, 13, 10, "einsteinium", 252, 0),
("Fm", 100, 14, 10, "fermium", 257, 0),
("Md", 101, 15, 10, "mendelevium", 258, 0),
("No", 102, 16, 10, "nobelium", 259, 0),
("Lr", 103, 17, 10, "lawrencium", 262, 0),
("Rf", 104, 4, 7, "rutherfordium", 261, 0),
("Db", 105, 5, 7, "dubnium", 262, 0),
("Sg", 106, 6, 7, "seaborgium", 266, 0),
("Bh", 107, 7, 7, "bohrium", 264, 0),
("Hs", 108, 8, 7, "hassium", 269, 0),
("Mt", 109, 9, 7, "meitnerium", 268, 0),
)
ELEMENTS = [elt[0] for elt in ELEMENTS_INFO]
ELEMENTS_N = [elt[1] for elt in ELEMENTS_INFO]
#: SHELLS / EDGES
#: K = 1 (s)
#: L = 2 (s, p)
#: M = 3 (s, p, d)
#: N = 4 (s, p, d, f)
#: O = 5 (s, p, d, f)
#: P = 6 (s, p)
SHELLS = (
"K", # 0
"L1",
"L2",
"L3", # 1, 2, 3
"M1",
"M2",
"M3",
"M4",
"M5", # 4, 5, 6, 7, 8
"N1",
"N2",
"N3",
"N4",
"N5",
"N6",
"N7", # 9, 10, 11, 12, 13, 14, 15
"O1",
"O2",
"O3",
"O4",
"O5", # 16, 17, 18, 19, 20
"P1",
"P2",
"P3",
) # 21, 22, 23
#: TRANSITIONS
#: 1 = s
#: 2, 3 = p (1/2, 3/2)
#: 4, 5 = d (3/2, 5/2)
#: 6, 7 = f (5/2, 7/2)
LEVELS_DICT = {
"K": "1s",
"L1": "2s",
"L2": "2p1/2",
"L3": "2p3/2",
"M1": "3s",
"M2": "3p1/2",
"M3": "3p3/2",
"M4": "3d3/2",
"M5": "3d5/2",
"N1": "4s",
"N2": "4p1/2",
"N3": "4p3/2",
"N4": "4d3/2",
"N5": "4d5/2",
"N6": "4f5/2",
"N7": "4f7/2",
"O1": "5s",
"O2": "5p1/2",
"O3": "5p3/2",
"P1": "6s",
"P2": "6p1/2",
"P3": "6p3/2",
}
#: dictionary of lines
#: Ln in Hepheastus is LE in Xraylib
#: Mz in Hepheastus not in Xraylib (the single transitions yes!)
LINES_DICT = {
"K": (
"KA1",
"KA2",
"KA3", # LINES[0, 1, 2]
"KB1",
"KB2",
"KB3",
"KB4",
"KB5",
), # LINES[3, 4, 5, 6, 7]
"L1": ("LB3", "LB4", "LG2", "LG3"), # LINES[8, 9, 10, 11]
"L2": ("LB1", "LG1", "LG6", "LE"), # LINES[12, 13, 14, 15]
#: LINES[16, 17, 18, 19, 20, 21]
"L3": ("LA1", "LA2", "LB2", "LB5", "LB6", "LL"),
"M3": ("MG",), # LINES[22]
"M4": ("MB",), # LINES[23]
"M5": ("MA1", "MA2"),
} # LINES[24, 25]
LINES_K = LINES_DICT["K"]
LINES_L = LINES_DICT["L1"] + LINES_DICT["L2"] + LINES_DICT["L3"]
LINES_M = LINES_DICT["M3"] + LINES_DICT["M4"] + LINES_DICT["M5"]
LINES = LINES_K + LINES_L + LINES_M
#
TRANS_DICT = {
"K": ("KL3", "KL2", "KL1", "KM3", "KN3", "KM2", "KN5", "KM5"),
"L1": ("L1M3", "L1M2", "L1N2", "L1N3"),
"L2": ("L2M4", "L2N4", "L2O4", "L2M1"),
"L3": ("L3M5", "L3M4", "L3N5", "L304", "L3N1", "L3M1"),
"M3": ("M3N5",),
"M4": ("M4N6",),
"M5": ("M5N7", "M5N6"),
}
TRANS_K = TRANS_DICT["K"]
TRANS_L = TRANS_DICT["L1"] + TRANS_DICT["L2"] + TRANS_DICT["L3"]
TRANS_M = TRANS_DICT["M3"] + TRANS_DICT["M4"] + TRANS_DICT["M5"]
TRANSITIONS = TRANS_K + TRANS_L + TRANS_M
#: INDEX DICTIONARY: KEYS=LINES : VALUES=(LINES[IDX],\
# SHELLS[IDX_XAS], SHELLS[IDX_XES])
LINES2TRANS = {
"KA1": (0, 0, 3),
"KA2": (1, 0, 2),
"KA3": (2, 0, 1),
"KB1": (3, 0, 6),
"KB2": (4, 0, 11),
"KB3": (5, 0, 5),
"KB4": (6, 0, 13),
"KB5": (7, 0, 8),
"LB3": (8, 1, 6),
"LB4": (9, 1, 5),
"LG2": (10, 1, 10),
"LG3": (11, 1, 11),
"LB1": (12, 2, 7),
"LG1": (13, 2, 12),
"LG6": (14, 2, 19),
"LE": (15, 2, 4),
"LA1": (16, 3, 8),
"LA2": (17, 3, 7),
"LB2": (18, 3, 13),
"LB5": (19, 3, 19), # WARNING: here is only O4
"LB6": (20, 3, 9),
"LL": (21, 3, 4),
"MG": (22, 6, 13),
"MB": (23, 7, 14),
"MA1": (24, 8, 15),
"MA2": (25, 8, 14),
}
def mapLine2Trans(line):
    """Map a Siegbahn line name to its transition description.

    Parameters
    ----------
    line : str
        Siegbahn line name, e.g. 'KA1' (a key of LINES2TRANS)

    Returns
    -------
    tuple of str
        (Siegbahn name, IUPAC transition, XAS core shell, XES final shell)
        built from LINES, TRANSITIONS and SHELLS via the LINES2TRANS indices
    int
        0 if `line` is not a known line name (the error is logged)
    """
    try:
        idx = LINES2TRANS[line]
        return (LINES[idx[0]], TRANSITIONS[idx[0]], SHELLS[idx[1]], SHELLS[idx[2]])
    except KeyError:
        _LOGGER.error("Line {0} not known; returning 0".format(line))
        return 0
############################
#: XRAYLIB-BASED FUNCTIONS #
############################
def get_element(elem):
    """Get the information tuple for a given element.

    Parameters
    ----------
    elem : str or int
        element symbol (e.g. 'Fe') or atomic number (e.g. 26)

    Returns
    -------
    tuple
        the matching ELEMENTS_INFO entry (symbol at [0], atomic number at [1])

    Raises
    ------
    NameError
        if `elem` is neither a known symbol nor a known atomic number
    """
    _errstr = f"Element {elem} not known!"
    if isinstance(elem, str) and (elem in ELEMENTS):
        return [elt for elt in ELEMENTS_INFO if elt[0] == elem][0]
    if isinstance(elem, int) and (elem in ELEMENTS_N):
        #: fixed: return the element tuple itself (the original returned the
        #: 1-element *list*, so callers doing get_element(n)[1] got an IndexError)
        return [elt for elt in ELEMENTS_INFO if elt[1] == elem][0]
    _LOGGER.error(_errstr)
    raise NameError(_errstr)
def get_line(line):
    """Validate `line` against the known Siegbahn line names.

    Parameters
    ----------
    line : str
        candidate line name

    Returns
    -------
    str
        `line` unchanged, when valid

    Raises
    ------
    NameError
        if `line` is not a member of LINES
    """
    if line in LINES:
        return line
    _errstr = f"Line {line} is not a valid name in Siegbahn notation"
    _LOGGER.error(_errstr)
    raise NameError(_errstr)
def find_edge(emin, emax, shells=None):
    """Log the absorption edges found in a given energy range [emin, emax] (eV).

    Parameters
    ----------
    emin, emax : float
        energy range to search for an absorption edge (eV)
    shells : list of str, optional
        list of shells to search for [None -> use SHELLS (=all)]

    Returns
    -------
    None
        matches are reported via _LOGGER.info
    """
    if HAS_XRAYLIB is False:
        _xraylib_error(0)
    if shells is None:
        shells = SHELLS
    #: brute-force scan: every element x every requested shell
    for el in ELEMENTS:
        eln = get_element(el)
        for sh in shells:
            #: xraylib returns keV -> convert to eV
            edge = (xl.EdgeEnergy(eln[1], getattr(xl, sh + "_SHELL")) * 1000)
            if (edge >= emin) and (edge <= emax):
                _LOGGER.info("{0} \t {1} \t {2:>.2f} eV".format(el, sh, edge))
def find_line(emin, emax, elements=None, lines=None, outDict=False, backend="xraylib", skip_zero_width=True, thetamin=65):
    """Get the emission line energies in a given energy range [emin, emax] (eV).

    Parameters
    ----------
    emin, emax : float
        [minimum, maximum] energy range (eV)
    elements : list of str, optional
        list of elements, [None -> ELEMENTS (all)]
    lines : list of str, optional
        list of lines, [None -> LINES (all)]
    outDict : boolean, False
        returns a dictionary instead of printing to screen
    backend : str, 'xraylib'
        'xraylib' or 'pymca'; falls back to 'xraylib' if PyMca is missing
    skip_zero_width : boolean, True
        True: if fluo_width == 0, not include in the results
    thetamin : float, 65
        minimum Bragg angle (deg) forwarded to findhkl for the analyzer search

    Returns
    -------
    None or dict
        if outDict, a dict with keys:
            'el'  : element symbol, list of str
            'eln' : atomic number, list of int
            'ln'  : line name, list of str
            'en'  : line energy (eV), list of float
            'w'   : line width (eV), list of float
            'crys_lab', 'crys0_lab', 'crys_deg' : analyzer crystal info from findhkl
        else, the results are logged via _LOGGER.info
    """
    if backend == "pymca" and (not HAS_PYMCA):
        _pymca_error()
        backend = "xraylib"
        _LOGGER.warning("Changed backend to %s", backend)
    if backend == "xraylib" and (not HAS_XRAYLIB):
        _xraylib_error()
        _LOGGER.error("No backend available!")
        return None
    if lines is None:
        lines = LINES
    if elements is None:
        elements = ELEMENTS
    #: accumulators for the result dictionary
    _out = {}
    _out["el"] = []
    _out["eln"] = []
    _out["ln"] = []
    _out["en"] = []
    _out["w"] = []
    _out["crys_lab"] = []
    _out["crys0_lab"] = []
    _out["crys_deg"] = []
    for el in elements:
        eln = get_element(el)
        for ln in lines:
            try:
                if backend == "pymca":
                    #: PyMca stores energies in keV -> convert to eV
                    line = Element[eln[0]][mapLine2Trans(ln)[1]]["energy"] * 1000
                else:
                    #: xraylib returns keV -> convert to eV
                    line = xl.LineEnergy(eln[1], getattr(xl, ln + "_LINE")) * 1000
            except Exception:
                #: line unknown for this element in the chosen backend
                _LOGGER.debug("{0}.{1} none".format(el, ln))
                continue
            if (line >= emin) and (line <= emax):
                w = fluo_width(elem=el, line=ln, showInfos=False)
                if w == 0:
                    _LOGGER.warning(f"{el}.{ln} zero width")
                    if skip_zero_width:
                        _LOGGER.info(f"{el}.{ln} skipped")
                        continue
                _out["el"].append(eln[0])
                _out["eln"].append(eln[1])
                _out["ln"].append(ln)
                _out["en"].append(line)
                _out["w"].append(w)
                try:
                    hkl_out = findhkl(line, thetamin=thetamin, verbose=False, retBest=True)
                except Exception:
                    _LOGGER.warning(f"No Si/Ge crystal analyzer found for {eln[0]} {ln}")
                    #: placeholder matching findhkl's 7-element result layout
                    hkl_out = [None, None, None, None, None, None, None]
                _out["crys_lab"].append(hkl_out[5])
                _out["crys0_lab"].append(hkl_out[6])
                _out["crys_deg"].append(hkl_out[4])
    #: returns
    if outDict:
        return _out
    else:
        for eln, el, ln, line, w in zip(
            _out["eln"], _out["el"], _out["ln"], _out["en"], _out["w"]
        ):
            _LOGGER.info(f"{eln} {el} {ln} {line:>.3f} {w:>.2f}")
def ene_res(emin, emax, shells=None):
    """Edge energies, core-hole widths and intrinsic resolution per element.

    Used in spectro.py.

    Parameters
    ----------
    emin, emax : float
        energy range (eV) in which edges are collected
    shells : list of str, optional
        shells to scan [None -> ["K"]]

    Returns
    -------
    dict
        keys: 'el' (symbol), 'en' (atomic number), 'edge' (edge energy, eV),
        'ch' (core-hole width, eV), 'dee' (ch/edge)
    """
    if HAS_XRAYLIB is False:
        _xraylib_error(0)
    if shells is None:
        #: fixed: replaces the mutable default argument `shells=["K"]`
        shells = ["K"]
    s = {}
    s["el"] = []
    s["en"] = []
    s["edge"] = []
    s["ch"] = []
    s["dee"] = []
    for el in ELEMENTS:
        eln = get_element(el)
        for sh in shells:
            #: xraylib returns keV -> convert to eV
            edge = (xl.EdgeEnergy(eln[1], getattr(xl, sh + "_SHELL")) * 1000)
            ch = (xl.AtomicLevelWidth(eln[1], getattr(xl, sh + "_SHELL")) * 1000)
            if (edge >= emin) and (edge <= emax):
                s["el"].append(el)
                s["en"].append(xl.SymbolToAtomicNumber(el))
                s["edge"].append(edge)
                s["ch"].append(ch)
                s["dee"].append(ch / edge)
    return s
def fluo_width(elem=None, line=None, herfd=False, showInfos=True):
    """Get the fluorescence line width in eV.

    Parameters
    ----------
    elem : str or int
        absorbing element
    line : str
        Siegbahn notation for emission line
    herfd : boolean, False
        if True, return the HERFD-combined width instead of the plain sum
    showInfos : boolean, True
        log the line energy and the two atomic level widths

    Returns
    -------
    float
        herfd=False (default): lw_xas + lw_xes
        herfd=True: 1/(math.sqrt(lw_xas**2 + lw_xes**2))
        0 if element/line are missing or any xraylib lookup fails
    """
    if HAS_XRAYLIB is False:
        _xraylib_error(0)
    if (elem is None) or (line is None):
        _LOGGER.error("element or edge not given, returning 0")
        return 0
    elm = get_element(elem)
    ln = mapLine2Trans(line)
    try:
        #: atomic level widths of the XAS (core) and XES (final) shells, keV -> eV
        lw_xas = xl.AtomicLevelWidth(elm[1], getattr(xl, ln[2] + "_SHELL")) * 1000
        lw_xes = xl.AtomicLevelWidth(elm[1], getattr(xl, ln[3] + "_SHELL")) * 1000
        #: NOTE(review): the usual HERFD estimate is 1/sqrt(1/G_xas**2 + 1/G_xes**2);
        #: this computes 1/sqrt(G_xas**2 + G_xes**2) -- confirm the intended formula
        lw_herfd = 1.0 / (math.sqrt(lw_xas ** 2 + lw_xes ** 2))
        if showInfos:
            ln_ev = xl.LineEnergy(elm[1], getattr(xl, line + "_LINE")) * 1000
            _LOGGER.info(f"{elm[0]} {line} (={ln[1]}): {ln_ev:.2f} eV")
            _LOGGER.info(
                f"Atomic levels widths: XAS={lw_xas:.2f} eV, XES={lw_xes:.2f} eV"
            )
            _LOGGER.info(f"... -> STD={lw_xas+lw_xes:.2f} eV, HERFD={lw_herfd:.2f} eV]")
        if herfd is True:
            return lw_herfd
        else:
            return lw_xas + lw_xes
    except Exception:
        #: unknown line (mapLine2Trans returned 0) or missing xraylib constant
        return 0
def fluo_amplitude(elem, line, excitation=None, barn_unit=False):
    """Get the fluorescence cross section for a given element/line.

    Parameters
    ----------
    elem : str or int
        element
    line : str
        emission line Siegbahn (e.g. 'LA1') or IUPAC (e.g. 'L3M5')
    excitation : float, optional
        excitation energy, eV or keV (values >= 200 are assumed to be eV
        and divided by 1000) [None -> 10 keV]
    barn_unit : boolean, False
        use units of barn/atom [False -> cm2/g]

    Returns
    -------
    float
        fluorescence cross section (in 'cm2/g', or 'barn/atom' if barn_unit);
        0 if the line is unknown to xraylib
    """
    if excitation is None:
        _LOGGER.warning("Excitation energy not given, using 10 keV")
        excitation = 10.0
    #: guess if eV or keV
    elif excitation >= 200.0:
        excitation /= 1000
        _LOGGER.info(f"Excitation energy is {excitation} keV")
    el_n = get_element(elem)[1]
    #: pick the cross-section flavour (barn/atom vs cm2/g)
    if barn_unit:
        CSfluo = xl.CSb_FluorLine_Kissel_Cascade
    else:
        CSfluo = xl.CS_FluorLine_Kissel_Cascade
    try:
        fluo_amp = CSfluo(el_n, getattr(xl, line + "_LINE"), excitation)
    except Exception:
        _LOGGER.warning("Line not known")
        fluo_amp = 0
    return fluo_amp
def xray_line(element, line=None, initial_level=None):
    """Get the emission energy in eV for a given element/line or level.

    Parameters
    ----------
    element : str or int
        absorbing element
    line : str, optional
        Siegbahn notation, e.g. 'KA1' [None -> all lines of `initial_level`]
    initial_level : str, optional
        initial core level, e.g. 'K' [None]

    Returns
    -------
    dict or float
        {'line': [...], 'ene': [...]} when selecting by `initial_level`;
        a single energy (eV) when a specific `line` is given
    """
    if HAS_XRAYLIB is False:
        _xraylib_error(0)
    el_n = get_element(element)[1]
    outdict = {"line": [], "ene": []}
    _retNum = False
    #: fixed: pre-initialize so a failing initial_level filter cannot leave
    #: `lines` undefined (NameError at the loop below in the original)
    lines = []
    if (line is None) and (initial_level is not None):
        try:
            #: renamed the comprehension variable to stop shadowing the
            #: `line` parameter
            lines = [_ln for _ln in LINES if initial_level in _ln]
        except Exception:
            _LOGGER.error("initial_level is wrong")
    else:
        lines = [line]
        _retNum = True
    for _line in lines:
        try:
            #: xraylib returns keV -> convert to eV
            line_ene = xl.LineEnergy(el_n, getattr(xl, _line + "_LINE")) * 1000
            outdict["line"].append(_line)
            outdict["ene"].append(line_ene)
        except Exception:
            _LOGGER.error("line is wrong")
            continue
    if _retNum:
        #: NOTE: raises IndexError (as before) if the requested line is unknown
        return outdict["ene"][0]
    else:
        return outdict
def xray_edge(element, initial_level=None):
    """Get the edge energy in eV for a given element.

    Parameters
    ----------
    element : str or int
        element symbol or atomic number
    initial_level : str or list of str, optional
        initial core level(s), e.g. 'K' [None -> SHELLS (all)]

    Returns
    -------
    dict or float
        {'edge': [...], 'ene': [...]}; a single energy (eV) when
        `initial_level` is a single string
    """
    if HAS_XRAYLIB is False:
        _xraylib_error(0)
    el_n = get_element(element)[1]
    outdict = {"edge": [], "ene": []}
    _retNum = False
    if initial_level is None:
        initial_level = SHELLS
    if isinstance(initial_level, str):
        initial_level = [initial_level]
        _retNum = True
    elif not isinstance(initial_level, (list, tuple)):
        #: fixed: the original logged "initial_level is wrong" for *every*
        #: non-string input, including the valid list case and the SHELLS
        #: default, and then crashed iterating non-iterable inputs
        _LOGGER.error("initial_level is wrong")
        initial_level = []
    for _level in initial_level:
        try:
            #: xraylib returns keV -> convert to eV
            edge_ene = xl.EdgeEnergy(el_n, getattr(xl, _level + "_SHELL")) * 1000
            outdict["edge"].append(_level)
            outdict["ene"].append(edge_ene)
        except Exception:
            _LOGGER.warning(
                "{0} {1} edge unknown".format(get_element(element)[0], _level)
            )
            continue
    if _retNum:
        return outdict["ene"][0]
    else:
        return outdict
def fluo_spectrum(elem, line, xwidth=3, xstep=0.05, plot=False, showInfos=True, **kws):
    """Generate a fluorescence spectrum for a given element/line.

    .. note:: it generates a Lorentzian function with the following parameters:

        - center: emission energy (eV)
        - sigma: from FWHM of the sum of atomic level widths (XAS+XES)
        - amplitude: CS_FluorLine_Kissel_Cascade
        - xmin, xmax: center -+ xwidth*fwhm

    Parameters
    ----------
    elem : str or int
        absorbing element
    line : str
        emission line Siegbahn (e.g. 'LA1') or IUPAC (e.g. 'L3M5')
    xwidth : int or float, optional
        FWHM multiplication factor to establish the xmin, xmax range
        (= center -+ xwidth*fwhm) [3]
    xstep : float, optional
        energy step in eV [0.05]
    showInfos : boolean, optional
        print the `info` dict [True]
    plot : boolean, optional
        plot the line before returning it (blocks on input()) [False]
    **kws : keyword arguments for :func:`fluo_width`, :func:`fluo_amplitude`

    Returns
    -------
    xfluo, yfluo, info : XY arrays of floats, dictionary

    Raises
    ------
    NameError
        if the line has zero width, amplitude or energy (i.e. not found)
    """
    el = get_element(elem)
    exc = kws.get("excitation", 10000.0)
    bu = kws.get("barn_unit", False)
    if bu is True:
        yunit = "barn/atom"
    else:
        yunit = "cm2/g"
    #: Lorentzian parameters: width, amplitude and center (keV -> eV)
    fwhm = fluo_width(elem, line, showInfos=showInfos)
    amp = fluo_amplitude(el[1], line, excitation=exc, barn_unit=bu)
    cen = xl.LineEnergy(el[1], getattr(xl, line + "_LINE")) * 1000
    if (fwhm == 0) or (amp == 0) or (cen == 0):
        raise NameError("no line found")
    sig = fwhm2sigma(fwhm)
    xmin = cen - xwidth * fwhm
    xmax = cen + xwidth * fwhm
    xfluo = np.arange(xmin, xmax, xstep)
    yfluo = lorentzian(xfluo, amplitude=amp, center=cen, sigma=sig)
    info = {
        "el": el[0],
        "eln": el[1],
        "ln": line,
        "exc": exc,
        "cen": cen,
        "fwhm": fwhm,
        "amp": amp,
        "yunit": yunit,
    }
    legend = "{eln} {ln}".format(**info)
    if showInfos:
        _LOGGER.info(
            "Lorentzian => cen: {cen:.3f} eV, amp: {amp:.3f} {yunit}, fwhm: {fwhm:.3f} eV".format(
                **info
            )
        )
    if plot:
        #: lazy import: keeps the GUI stack optional
        from sloth.gui.plot.plot1D import Plot1D
        p1 = Plot1D()
        p1.addCurve(
            xfluo,
            yfluo,
            legend=legend,
            replace=True,
            xlabel="energy (eV)",
            ylabel="intensity ({0})".format(yunit),
        )
        p1.show()
        input("PRESS ENTER to close the plot window and return")
    return xfluo, yfluo, info
def fluo_lines(elem, lines, retAll=False, **fluokws):
    """Generate a cumulative emission spectrum of a given element and
    list of lines.

    Parameters
    ----------
    elem : str or int
        absorbing element
    lines : list of str
        emission lines as Siegbahn (e.g. 'LA1') or IUPAC (e.g. 'L3M5')
    retAll : boolean, False
        also return the individual per-line spectra
    **fluokws : keyword arguments for :func:`fluo_spectrum`

    Returns
    -------
    xcom, ycom : arrays of floats
        energy/intensity of the whole spectrum
    if retAll:
        xcom, ycom, [xi, yi, ii]

    Raises
    ------
    NameError
        if none of the requested lines produced a spectrum
    """
    plot = fluokws.get("plot", False)
    xstep = fluokws.get("xstep", 0.05)
    #: plot (if requested) only the combined spectrum, not every single line
    fluokws.update({"plot": False})
    xi, yi, ii = [], [], []
    for ln in lines:
        try:
            x, y, i = fluo_spectrum(elem, ln, **fluokws)
            xi.append(x)
            yi.append(y)
            ii.append(i)
        except Exception:
            _LOGGER.info("no line found for {0}-{1}".format(elem, ln))
    #: fixed: guard the empty case (the original crashed with an
    #: uninformative ValueError from min([]) when every line failed)
    if not xi:
        _errstr = "no lines found for {0} in {1}".format(elem, lines)
        _LOGGER.error(_errstr)
        raise NameError(_errstr)
    xmin = min([x.min() for x in xi])
    xmax = max([x.max() for x in xi])
    #: common energy grid; each line is interpolated onto it and summed
    xcom = np.arange(xmin, xmax, xstep)
    ycom = np.zeros_like(xcom)
    for x, y in zip(xi, yi):
        yinterp = np.interp(xcom, x, y)
        ycom += yinterp
    if plot:
        #: lazy import: keeps the GUI stack optional
        from sloth.gui.plot.plot1D import Plot1D
        p = Plot1D()
        p.addCurve(
            xcom,
            ycom,
            legend="sum",
            color="black",
            replace=True,
            xlabel="energy (eV)",
            ylabel="intensity",
        )
        for x, y, i in zip(xi, yi, ii):
            p.addCurve(x, y, legend=i["ln"], replace=False)
        p.show()
    if retAll:
        return xcom, ycom, [xi, yi, ii]
    else:
        return xcom, ycom
##########################
#: LARCH-BASED FUNCTIONS #
##########################
#: xdb.function()
# -----------------------------------------------------------
#: function : description
# --------------------:--------------------------------------
#: atomic_number() : atomic number from symbol
#: atomic_symbol() : atomic symbol from number
#: atomic_mass() : atomic mass
#: atomic_density() : atomic density (for pure element)
#: xray_edge() : xray edge data for a particular element and edge
#: xray_line() : xray emission line data for an element and line
#: xray_edges() : dictionary of all X-ray edges data for an element
#: xray_lines() : dictionary of all X-ray emission line data for an element
#: fluo_yield() : fluorescence yield and weighted line energy
#: core_width() : core level width for an element and edge
#: (Keski-Rahkonen and Krause)
#: mu_elam() : absorption cross-section
#: coherent_xsec() : coherent cross-section
#: incoherent_xsec() : incoherent cross-section
#: f0() : elastic scattering factor (Waasmaier and Kirfel)
#: f0_ions() : list of valid “ions” for f0() (Waasmaier and Kirfel)
#: chantler_energies() : energies of tabulation for Chantler data (Chantler)
#: f1_chantler() : f’ anomalous factor (Chantler)
#: f2_chantler() : f” anomalous factor (Chantler)
#: mu_chantler() : absorption cross-section (Chantler)
#: xray_delta_beta() : anomalous components of the index of refraction for a material
#: f1f2_cl() : f’ and f” anomalous factors (Cromer and Liberman)
#: Table of X-ray Edge / Core electronic levels
# +-----+-----------------+-----+-----------------+-----+-----------------+
# |Name |electronic level |Name |electronic level |Name |electronic level |
# +=====+=================+=====+=================+=====+=================+
# | K | 1s | N7 | 4f7/2 | O3 | 5p3/2 |
# +-----+-----------------+-----+-----------------+-----+-----------------+
# | L3 | 2p3/2 | N6 | 4f5/2 | O2 | 5p1/2 |
# +-----+-----------------+-----+-----------------+-----+-----------------+
# | L2 | 2p1/2 | N5 | 4d5/2 | O1 | 5s |
# +-----+-----------------+-----+-----------------+-----+-----------------+
# | L1 | 2s | N4 | 4d3/2 | P3 | 6p3/2 |
# +-----+-----------------+-----+-----------------+-----+-----------------+
# | M5 | 3d5/2 | N3 | 4p3/2 | P2 | 6p1/2 |
# +-----+-----------------+-----+-----------------+-----+-----------------+
# | M4 | 3d3/2 | N2 | 4p1/2 | P1 | 6s |
# +-----+-----------------+-----+-----------------+-----+-----------------+
# | M3 | 3p3/2 | N1 | 4s | | |
# +-----+-----------------+-----+-----------------+-----+-----------------+
# | M2 | 3p1/2 | | | | |
# +-----+-----------------+-----+-----------------+-----+-----------------+
# | M1 | 3s | | | | |
# +-----+-----------------+-----+-----------------+-----+-----------------+
#: Table of X-ray emission line names and the corresponding Siegbahn and IUPAC notations
# +--------+---------------------+--------+-----+----------+----------+
# | Name | Siegbahn | IUPAC | Name| Siegbahn | IUPAC |
# +========+=====================+========+=====+==========+==========+
# | Ka1 | K\alpha_1 | K-L3 | Lb4 | L\beta_4 | L1-M2 |
# +--------+---------------------+--------+-----+----------+----------+
# | Ka2 | K\alpha_2 | K-L2 | Lb5 | L\beta_5 | L3-O4,5 |
# +--------+---------------------+--------+-----+----------+----------+
# | Ka3 | K\alpha_3 | K-L1 | Lb6 | L\beta_6 | L3-N1 |
# +--------+---------------------+--------+-----+----------+----------+
# | Kb1 | K\beta_1 | K-M3 | Lg1 | L\gamma_1| L2-N4 |
# +--------+---------------------+--------+-----+----------+----------+
# | Kb2 | K\beta_2 | K-N2,3 | Lg2 | L\gamma_2| L1-N2 |
# +--------+---------------------+--------+-----+----------+----------+
# | Kb3 | K\beta_3 | K-M2 | Lg3 | L\gamma_3| L1-N3 |
# +--------+---------------------+--------+-----+----------+----------+
# | Kb4 | K\beta_2 | K-N4,5 | Lg6 | L\gamma_6| L2-O4 |
# +--------+---------------------+--------+-----+----------+----------+
# | Kb5 | K\beta_3 | K-M4,5 | Ll | Ll | L3-M1 |
# +--------+---------------------+--------+-----+----------+----------+
# | La1 | L\alpha_1 | L3-M5 | Ln | L\nu | L2-M1 |
# +--------+---------------------+--------+-----+----------+----------+
# | La2    | L\alpha_2           | L3-M4  | Ma  | M\alpha  | M5-N6,7  |
# +--------+---------------------+--------+-----+----------+----------+
# | Lb1 | L\beta_1 | L2-M4 | Mb | M\beta | M4-N6 |
# +--------+---------------------+--------+-----+----------+----------+
# | Lb2,15 | L\beta_2,L\beta_{15}| L3-N4,5| Mg | M\gamma | M3-N5 |
# +--------+---------------------+--------+-----+----------+----------+
# | Lb3 | L\beta_3 | L1-M3 | Mz | M\zeta | M4,5-N6,7|
# +--------+---------------------+--------+-----+----------+----------+
if __name__ == "__main__":
    #: no CLI entry point here; see tests/examples in xdata_tests.py
    pass
|
<gh_stars>1-10
import os
import copy
import numpy
import torch
import torchaudio
import torch.nn.functional as F
import pytorch_lightning as pl
import wandb
from torch import nn
from .espnet_encoder import ESPnetEncoder
from .networks import Encoder, Generator, Discriminator
from .augment import augment, AdaptiveAugment
from .util import *
# Hyperparameters
ENC_LR = 1e-5  # learning rate for the audio-encoder Adam optimizer
ADAM_BETA = (0.0, 0.99)  # Adam (beta1, beta2)
CHANNEL_MULTIPLIER = 2  # channel-width multiplier passed to Generator/Discriminator
class Speech2ImageSC(pl.LightningModule):
    """Speech-to-image model trained with a semantic-consistency objective.

    Only the audio encoder (`self.enc`) is optimized; the generator is used
    through its frozen EMA copy (`G_EMA`) and the two DaveNet-style scoring
    models are kept in eval mode for the whole run.
    """

    def __init__(self, img_size=256, latent=512, n_mlp=8, pretrained=None, audio_davenet=None, image_davenet=None):
        """
        Parameters
        ----------
        img_size : int
            output image resolution
        latent : int
            latent vector size
        n_mlp : int
            number of MLP layers in the generator mapping network
        pretrained : optional
            forwarded to _init_networks (currently unused there)
        audio_davenet, image_davenet : modules
            pretrained audio/image scoring models; must not be None since
            _init_networks calls .eval() on both
        """
        super().__init__()
        self.latent_size = latent
        self.audio_model = audio_davenet
        self.image_model = image_davenet
        #: mel-spectrogram front-end feeding the audio scoring model
        self.mel = torchaudio.transforms.MelSpectrogram(n_mels=40)
        self._init_networks(img_size, self.latent_size, n_mlp, pretrained)

    def _init_networks(self, img_size, latent, n_mlp, pretrained=None):
        """Build the encoder, generator, discriminator and the EMA generator copy."""
        self.enc = ESPnetEncoder.from_pretrained()
        self.enc.train()
        #: scoring models stay frozen (eval mode)
        self.audio_model.eval()
        self.image_model.eval()
        self.G = Generator(img_size, latent, n_mlp, channel_multiplier=CHANNEL_MULTIPLIER)
        self.D = Discriminator(img_size, channel_multiplier=CHANNEL_MULTIPLIER)
        self.G_EMA = copy.deepcopy(self.G).eval()

    def del_networks(self):
        """Free the trainable generator/discriminator (G_EMA is kept for inference)."""
        del self.G
        del self.D

    def forward(self, x=None, nframes=None):
        """Encode audio `x` and synthesize images with the EMA generator.

        The style input is a pair of latents: the time-averaged audio
        embedding plus a random latent of the same shape (style mixing).
        """
        x = self.enc(x)
        z = x.mean(dim=1).view(1, x.shape[0], -1)
        z = torch.cat([z, torch.randn(1, x.shape[0], z.shape[-1], device=self.device)], dim=0).unbind(0)
        return self.G_EMA(z, randomize_noise=False)[0]

    def training_step(self, batch, batch_idx):
        """Semantic-consistency loss over a batch.

        For each sample: |score(truncated spec, generated image) -
        score(full spec, real image)|, summed over the batch.
        """
        images, audio, nframes, apath = batch
        spec = self.mel(audio)
        # Generate image
        x = self.enc(audio)
        z = x.mean(dim=1).view(1, audio.shape[0], -1)
        z = torch.cat([z, torch.randn(1, audio.shape[0], z.shape[-1], device=self.device)], dim=0).unbind(0)
        fake_imgs, _ = self.G_EMA(z, randomize_noise=False)
        #: NOTE(review): the 0.01 factor presumably maps raw-frame counts to
        #: mel frames -- confirm against the dataset's nframes convention
        l = sum([torch.abs(self._score(spec[i, :int(nframes[i] * 0.01), :], img) - self._score(spec[i, :nframes[i], :], images[i])) for i, img in enumerate(fake_imgs)])
        self.log("L", l, on_step=True, on_epoch=True, prog_bar=True)
        return l

    def _get_imagefeatures(self, img):
        """Return the flattened image feature map and its (C, H, W) shape."""
        image_feature_map = self.image_model(img.unsqueeze(0)).squeeze(0)
        emb_dim = image_feature_map.size(0)
        output_H = image_feature_map.size(1)
        output_W = image_feature_map.size(2)
        return image_feature_map.view(emb_dim, output_H * output_W), (emb_dim, output_H, output_W)

    def _get_audiofeatures(self, melspec):
        """Run the audio scoring model on a single mel spectrogram."""
        audio_output = self.audio_model(melspec.unsqueeze(0).unsqueeze(0)).squeeze(0)
        return audio_output

    def _score(self, melspec, img):
        """Mean audio-image similarity over all time steps and spatial cells."""
        image_output, image_dim = self._get_imagefeatures(img)
        audio_output = self._get_audiofeatures(melspec)
        _, img_outputH, img_outputW = image_dim
        heatmap = torch.mm(audio_output.t(), image_output).squeeze()
        heatmap = heatmap.view(audio_output.size(1), img_outputH, img_outputW)
        N_t = audio_output.size(1)
        N_r = img_outputH
        N_c = img_outputW
        return torch.sum(heatmap) / (N_t * N_r * N_c)

    def configure_optimizers(self):
        """Optimize only the audio encoder."""
        #: fixed: `optim` was never imported in this module (only `torch` and
        #: `torch.nn`), so this line raised NameError; use torch.optim explicitly
        self.enc_optim = torch.optim.Adam(self.enc.parameters(), lr=ENC_LR, betas=ADAM_BETA)
        return self.enc_optim

    def validation_step(self, batch, batch_idx):
        """Generate images for the validation batch and pass them to epoch end."""
        images, audio, nframes, apath = batch
        fake_imgs = self.forward(audio, nframes).cpu()
        return {"G_IMGs": fake_imgs, "I_AUDs": apath, "R_IMGs": images.cpu()}

    def validation_epoch_end(self, outputs):
        """Log generated/real images and input audio paths to wandb."""
        if not len(outputs):
            return
        f_imgs = []
        r_imgs = []
        i_auds = []
        for output in outputs:
            f_imgs += [wandb.Image(x, caption="G_IMG %d" % i) for i, x in enumerate(output["G_IMGs"])]
            r_imgs += [wandb.Image(x, caption="R_IMG %d" % i) for i, x in enumerate(output["R_IMGs"])]
            i_auds += [wandb.Audio(x, caption="I_AUD %d" % i) for i, x in enumerate(output["I_AUDs"])]
        self.logger.experiment.log({"G_IMG Val": f_imgs}, commit=False)
        self.logger.experiment.log({"R_IMG Val": r_imgs}, commit=False)
        self.logger.experiment.log({"I_AUD Val": i_auds}, commit=False)

    def test_step(self, batch, batch_idx):
        """Same as validation_step, but audio arrives as an (audio, _) pair."""
        images, (audio, _), nframes, apath = batch
        fake_imgs = self.forward(audio, nframes).cpu()
        return {"G_IMGs": fake_imgs, "I_AUDs": apath, "R_IMGs": images.cpu()}

    def test_epoch_end(self, outputs):
        """Pass the collected test outputs through unchanged."""
        return outputs
|
<gh_stars>0
import React from 'react';
import './index.css';
// Static contact section listing LinkedIn/GitHub/email/phone entries.
// NOTE(review): the <a> elements carry no `href` attribute, so they render
// as plain non-clickable text -- confirm whether mailto:/tel:/https links
// were intended.
function ContactLinks() {
  return (
    <div id="ContactLinks__container">
      <h3>Let's Get In Touch</h3>
      <hr />
      <div>
        <h5>LinkedIn</h5>
        <a>linkedin.com/in/ashish-shevale</a>
      </div>
      <div>
        <h5>GitHub</h5>
        <a>github.com/AshishS-1123</a>
      </div>
      <div>
        <h5>Email</h5>
        <a><EMAIL></a>
      </div>
      <div>
        <h5>Phone</h5>
        <a>+91-9022160734</a>
      </div>
    </div>
  )
}

// No props are used; the empty declaration is kept for template consistency.
ContactLinks.propTypes = {};

export default ContactLinks;
|
import torch
from torch.utils.data import Dataset
import pandas as pd
from PIL import Image
class FollowSuit(Dataset):
    """Card-image dataset backed by a CSV of (image index, class label) rows."""

    def __init__(self, csv_file, root_dir="images"):
        """
        Args:
            csv_file (string): Path to the csv file with image indexes and
                class label annotations.
            root_dir (string): Directory containing the image files
                (generalized from the previously hard-coded "images").
        """
        self.data = pd.read_csv(csv_file)  # annotations as a pandas DataFrame
        self.root_dir = root_dir

    def __len__(self):
        # Total number of samples in the dataset.
        return len(self.data)

    def __getitem__(self, index):
        img_index = self.data.iloc[index, 0]  # image index (first CSV column)
        # Build the image path under the configured root directory.
        img_path = f"{self.root_dir}/{img_index}.jpg"
        image = Image.open(img_path)  # lazily load the image with PIL
        label = self.data.iloc[index, 1]  # class label (second CSV column)
        return image, label
import UIKit
// Displays a fixed list of books and navigates to a detail screen on selection.
class ViewController: UIViewController {

    // Static in-memory data source shown by the table view.
    let books = [
        Book(title: "The Catcher in the Rye", author: "JD Salinger"),
        Book(title: "To Kill a Mockingbird", author: "Harper Lee"),
        Book(title: "The Great Gatsby", author: "F. Scott Fitzgerald")
    ]

    // Connected in the storyboard.
    @IBOutlet weak var tableView: UITableView!

    override func viewDidLoad() {
        super.viewDidLoad()
        // Delegate/dataSource conformances are implemented in the extensions below.
        tableView.delegate = self
        tableView.dataSource = self
    }
}
// MARK: - Table view delegate
extension ViewController: UITableViewDelegate {

    /// Pushes the detail screen for the tapped book.
    func tableView(_ tableView: UITableView, didSelectRowAt indexPath: IndexPath) {
        let selectedBook = books[indexPath.row]
        // "BookVC" must exist in Main.storyboard; the force cast mirrors that contract.
        let detailVC = UIStoryboard(name: "Main", bundle: nil)
            .instantiateViewController(withIdentifier: "BookVC") as! BookViewController
        detailVC.book = selectedBook
        navigationController?.pushViewController(detailVC, animated: true)
    }
}
// MARK: - Table view data source
extension ViewController: UITableViewDataSource {

    /// One row per book.
    func tableView(_ tableView: UITableView, numberOfRowsInSection section: Int) -> Int {
        return books.count
    }

    /// Configures a reused "BookCell" with the book's title and author.
    func tableView(_ tableView: UITableView, cellForRowAt indexPath: IndexPath) -> UITableViewCell {
        let cell = tableView.dequeueReusableCell(withIdentifier: "BookCell", for: indexPath)
        let rowBook = books[indexPath.row]
        cell.textLabel?.text = rowBook.title
        cell.detailTextLabel?.text = rowBook.author
        return cell
    }
}
<gh_stars>0
// Checks whether every bracket in `s` is properly matched and nested.
// Non-bracket characters are ignored, matching the original behavior.
const isValid = (s: string): boolean => {
  // Maps each closing bracket to the opener it must match.
  const closeToOpen: Record<string, string> = { ')': '(', ']': '[', '}': '{' }
  const openers = new Set(['(', '[', '{'])
  const stack: string[] = []
  for (const ch of s) {
    if (openers.has(ch)) {
      stack.push(ch)
    } else if (ch in closeToOpen) {
      // pop() on an empty stack yields undefined, which never equals an opener.
      if (stack.pop() !== closeToOpen[ch]) {
        return false
      }
    }
  }
  // Valid only if every opener was consumed.
  return stack.length === 0
}
// Quick manual checks; expected output: true, false, true, true.
console.log(isValid('()'))
console.log(isValid('(]){}'))
console.log(isValid('(){}[]'))
console.log(isValid('{()}'))
|
package adapter
import (
"math"
"time"
"github.com/ajityagaty/go-kairosdb/builder"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/prompb"
"github.com/sirupsen/logrus"
)
// BuildKairosDBMetrics takes in prometheus samples and returns a KairosDB
// MetricBuilder ready for submission. NaN/Inf values cannot be represented in
// KairosDB, so they are skipped and counted in the ignoredSamples counter
// (declared elsewhere in this package, as is the `log` logger).
func BuildKairosDBMetrics(samples model.Samples) builder.MetricBuilder {
	logger := log.WithField("function", "BuildKairosDBMetric")
	logger.Debug("Building Metrics!")
	mb := builder.NewMetricBuilder()
	for _, s := range samples {
		v := float64(s.Value)
		// KairosDB rejects NaN/Inf data points; drop them here.
		if math.IsNaN(v) || math.IsInf(v, 0) {
			logger.WithFields(logrus.Fields{"value": v, "sample": s}).Debug("cannot send to KairosDB, skipping sample")
			ignoredSamples.Inc()
			continue
		}
		metric := mb.AddMetric(string(s.Metric[model.MetricNameLabel]))
		// KairosDB timestamps are in milliseconds
		metric.AddDataPoint(s.Timestamp.UnixNano()/int64(time.Millisecond), v)
		tags := tagsFromMetric(s.Metric)
		for name, value := range tags {
			// KairosDB does not like tags with empty values
			if len(value) != 0 {
				metric.AddTag(name, value)
			}
		}
	}
	return mb
}
// protoToSamples converts the time series in a Prometheus remote-write
// request into a flat slice of model.Sample values, one per data point.
func protoToSamples(req *prompb.WriteRequest) model.Samples {
	var out model.Samples
	for _, series := range req.Timeseries {
		// Rebuild the label set once per series; it is shared by all points.
		labels := make(model.Metric, len(series.Labels))
		for _, label := range series.Labels {
			labels[model.LabelName(label.Name)] = model.LabelValue(label.Value)
		}
		for _, point := range series.Samples {
			out = append(out, &model.Sample{
				Metric:    labels,
				Value:     model.SampleValue(point.Value),
				Timestamp: model.Time(point.Timestamp),
			})
		}
	}
	return out
}
// tagsFromMetric extracts KairosDB tags from a Prometheus metric,
// dropping the reserved __name__ label.
func tagsFromMetric(m model.Metric) map[string]string {
	tags := make(map[string]string, len(m)-1)
	for name, value := range m {
		if name == model.MetricNameLabel {
			continue
		}
		tags[string(name)] = string(value)
	}
	return tags
}
|
var Q = require('q');
var _ = require('lodash');
var log = require('npmlog');
var chronoCustom = require('./chronoCustomPL');
/**
 * Parse a free-text command into all-day event details.
 *
 * @param {Object} context - only context.command.text is read
 * @param {Object} [options] - options.referenceDate (Date) anchors relative
 *                             date expressions like "tomorrow"
 * @returns {Promise} resolves to {date: {allDay, start, end}, summary, location};
 *                    rejects with an Error when chrono finds no date in the text
 */
function parse(context, options) {
    var deferred = Q.defer();
    options = _.isEmpty(options) ? {} : options;
    var txt = context.command.text;
    // chrono extracts natural-language date expressions from the text
    var parseResults = _.isDate(options.referenceDate) ? chronoCustom.parse(txt, options.referenceDate) : chronoCustom.parse(txt);
    if (parseResults.length === 0) {
        deferred.reject(new Error("Cannot parse input: '" + txt + "'"));
        return deferred.promise;
    }
    // Strip the matched date text; the remainder holds the summary
    // (and optionally an "@location" suffix).
    var remains = txt.substr(0, parseResults[0].index) + txt.substr(parseResults[0].index + parseResults[0].text.length);
    var location = null;
    var locationIndex = remains.indexOf('@');
    // NOTE(review): '> 0' ignores an '@' at position 0 -- confirm intentional
    if (locationIndex > 0) {
        location = remains.substr(locationIndex + 1).trim();
    }
    if (remains) {
        remains = remains.trim();
    }
    // NOTE(review): `remains` (used as the summary below) still contains the
    // "@location" substring -- confirm whether it should be stripped
    var start = parseResults[0].start.date();
    var end;
    var parsedEnd = parseResults[0].end;
    // All-day events use an exclusive end, so add one day past the parsed
    // end (or past the start when no explicit end was given).
    if (_.isEmpty(parsedEnd)) {
        end = new Date(start);
        end.setDate(start.getDate() + 1);
    } else {
        var parsedEndDate = parsedEnd.date();
        end = new Date(parsedEndDate);
        end.setDate(parsedEndDate.getDate() + 1);
    }
    var result = {
        date: {
            allDay: true,
            start: start,
            end: end
        },
        summary: remains,
        location: location
    };
    log.info("Parsed event details %j", result);
    deferred.resolve(result);
    return deferred.promise;
}

module.exports = {
    parse: parse
};
# Setup Python environment for the DR17 binaries VAC pipeline.
# NOTE: this file is meant to be *sourced*, not executed — it mutates the
# current shell (conda activate + an exported variable).
source ~/.bash_profile
init_conda
conda activate dr17-binaries
# Location of the hq pipeline configuration used by downstream jobs
export HQ_RUN_PATH=/mnt/ceph/users/apricewhelan/projects/apogee-dr17-binaries/vac-pipeline/hq-config
|
import React, {useState, useEffect} from 'react'
import {animated, useTransition} from 'react-spring'
import {Link, useStaticQuery, graphql} from 'gatsby'
import PropertyFilter from '../PropertyFilter'
import Section from '../Section'
import TextImageBox from '../TextImageBox'
import BottomBorderedContainer from '../BottomBorderedContainer'
import SpacedItemsContainer from '../SpacedItemsContainer'
import property from '../../img/property.png'
import ButtonBordered from '../ButtonBordered'
import styles from './filter-property-section.module.scss'
import TiltableContainer from '../TiltableContainer'
import PropertySorting from '../PropertySorting'
const FilterPropertySection = ({kaufenProperties, mietenProperties}) => {
const data = useStaticQuery(graphql`
query FilterPropertyQuery{
allPrismicProperty(sort: {order: DESC, fields: last_publication_date}){
edges{
node{
data {
besichtigung_information {
html
}
category
description {
html
}
images {
image {
alt
localFile {
childImageSharp {
fluid(maxWidth:1000){
...GatsbyImageSharpFluid
}
}
}
}
}
important_information {
information_name
information_value
}
ort
other_information {
information_name
information_value
}
preis
property_document {
url
}
property_document_card_text
property_geocode {
latitude
longitude
}
property_heading
type_of_property
wohnflache
zimmer
}
uid
}
}
}
}
`)
const [numOfLoadedItems, setNumOfLoadedItems] = useState(5)
const [scrollFromTop, setScrollFromTop] = useState(0)
const [filters, setFilters] = useState({})
const [sorting, setSorting] = useState({})
const [filteredData, setFilteredData] = useState([])
const [properties, setProperties] = useState([])
const transitions = useTransition(filteredData, {
from: { maxHeight: '0vh', overflow:'hidden', opacity:0 },
enter: { maxHeight: '250vh', overflow:'hidden', opacity:1 },
leave: { maxHeight: '0vh', overflow:'hidden', opacity:0 },
})
const setScrollPosition = () => {
setScrollFromTop(window.pageYOffset)
}
useEffect(() => {
let filteredArray = data.allPrismicProperty.edges.filter(({node:property}) => {
if(kaufenProperties){
if(property.data.type_of_property == false){
return true
}
}else if(mietenProperties){
if(property.data.type_of_property == true){
return true
}
}
})
setProperties([...filteredArray])
}, [])
useEffect(() => {
window.scrollTo(0, scrollFromTop)
}, [numOfLoadedItems])
useEffect(() => {
//filtering function
//filterung filter
//filter for Prismic lost data that cannot be removed
let filteredArray = properties.filter(({node:property}) => {
if(property.uid != "familienhaus"){
return true
}
})
filteredArray = filteredArray.filter(({node:property}) => {
if((filters.filterung && (filters.filterung.indexOf(property.data.category) != -1)) || (!filters.filterung || filters.filterung.length <= 0)){
return true
}
})
//zimmer filter
filteredArray = filteredArray.filter(({node:property}) => {
let numberOfRooms = Number(property.data.zimmer)
let zimmerMapping;
if(numberOfRooms < 2){
zimmerMapping = 'bis zu Zimmer'
}else if(numberOfRooms >= 2 && numberOfRooms < 3){
zimmerMapping = '2-3 Zimmer'
}else if(numberOfRooms >= 3 && numberOfRooms < 4){
zimmerMapping = '3-4 Zimmer'
}else if(numberOfRooms >= 4 && numberOfRooms < 5){
zimmerMapping = '4-5 Zimmer'
}else if(numberOfRooms >= 5){
zimmerMapping = 'über 5 Zimmer'
}
if((property.data.zimmer && (filters.zimmer == zimmerMapping)) || !filters.zimmer){
return true
}
})
//ort filter
filteredArray = filteredArray.filter(({node:property}) => {
if((property.data.ort && (filters.ort == property.data.ort)) || !filters.ort){
return true
}
})
//price from filter
filteredArray = filteredArray.filter(({node:property}) => {
if((property.data.preis && (filters.priceFrom <= property.data.preis)) || !filters.priceFrom){
return true
}
})
//price to filter
filteredArray = filteredArray.filter(({node:property}) => {
if((property.data.preis && (filters.priceTo > property.data.preis)) || !filters.priceTo){
return true
}
})
//sorting functionality
if(sorting.preis == 'ASC'){
filteredArray = filteredArray.sort(({node:propertyA}, {node:propertyB}) => {
return propertyA.data.preis - propertyB.data.preis
})
}
if(sorting.preis == 'DESC'){
filteredArray = filteredArray.sort(({node:propertyA}, {node:propertyB}) => {
return propertyB.data.preis - propertyA.data.preis
})
}
if(sorting.zimmer == 'ASC'){
filteredArray = filteredArray.sort(({node:propertyA}, {node:propertyB}) => {
return propertyA.data.zimmer - propertyB.data.zimmer
})
}
if(sorting.zimmer == 'DESC'){
filteredArray = filteredArray.sort(({node:propertyA}, {node:propertyB}) => {
return propertyB.data.zimmer - propertyA.data.zimmer
})
}
if(sorting.wohnflache == 'ASC'){
filteredArray = filteredArray.sort(({node:propertyA}, {node:propertyB}) => {
return propertyA.data.wohnflache - propertyB.data.wohnflache
})
}
if(sorting.wohnflache == 'DESC'){
filteredArray = filteredArray.sort(({node:propertyA}, {node:propertyB}) => {
return propertyB.data.wohnflache - propertyA.data.wohnflache
})
}
setFilteredData(filteredArray || data.allPrismicProperty.edges)
}, [filters, sorting, properties])
return(
<Section>
<div className={styles.row}>
<div className={styles.stickyFilterContainer}>
<div className={styles.filterContainer}>
<PropertyFilter data={properties} filters={filters} setFilters={setFilters}/>
</div>
</div>
<div className={styles.properties}>
<div className={`${styles.infoAndSorting} ${styles.rowSpaced}`}>
<h2>{filteredData.length} Immobilien gefunden</h2>
{/* <PropertySorting sorting={sorting} setSorting={setSorting}/> */}
</div>
{transitions((style, {node:item}, t, index) => {
if(index < numOfLoadedItems){
return <animated.div style={style} className={styles.property}>
<Link to={`/${item.data.type_of_property ? 'mieten' : 'kaufen'}/${item.uid}`}>
<TextImageBox image={item.data.images && item.data.images.length > 0 && item.data.images[0].image && item.data.images[0].image.localFile && item.data.images[0].image.localFile.childImageSharp.fluid} alt={item.data.images[0].image.alt}>
<h3>{item.data.property_heading}</h3>
<BottomBorderedContainer>
<SpacedItemsContainer>
<p>Filterung</p>
<p>{item.data.category}</p>
</SpacedItemsContainer>
</BottomBorderedContainer>
<BottomBorderedContainer>
<SpacedItemsContainer>
<p>Zimmer</p>
<p>{item.data.zimmer}</p>
</SpacedItemsContainer>
</BottomBorderedContainer>
<BottomBorderedContainer>
<SpacedItemsContainer>
<p>Ort</p>
<p>{item.data.ort}</p>
</SpacedItemsContainer>
</BottomBorderedContainer>
<BottomBorderedContainer>
<SpacedItemsContainer>
<p>Preis</p>
<p>CHF {item.data.preis}</p>
</SpacedItemsContainer>
</BottomBorderedContainer>
<BottomBorderedContainer>
<SpacedItemsContainer>
<p>Wohnfläche</p>
<p>{item.data.wohnflache} m<sup>2</sup></p>
</SpacedItemsContainer>
</BottomBorderedContainer>
</TextImageBox>
</Link>
</animated.div>
}
})}
{
filteredData.length > numOfLoadedItems &&
<div className={styles.seeMoreButton}>
<ButtonBordered onClick={() => {setScrollPosition();setNumOfLoadedItems(prevState => prevState + 5)}}>
Mehr Anzeigen
</ButtonBordered>
</div>
}
</div>
</div>
</Section>
)
}
export default FilterPropertySection |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.