text
stringlengths 1
1.05M
|
|---|
#include <fstream>
#include <iostream>
#include <stdexcept>
#include <string>

#include "kaitai/kaitaistream.h"
class rsm_t {
public:
// Parser for RSM model files, backed by a Kaitai stream.
// Does not take ownership of the stream; the caller keeps it alive.
rsm_t(kaitai::kstream* ks) : m__io(ks), m_scale_key_frames(0), m_volume_boxes(0) {}
// Reads and validates the file; on any failure, releases partial state
// via _clean_up() and rethrows to the caller.
void read() {
try {
_read();
} catch (...) {
_clean_up();
throw;
}
}
private:
kaitai::kstream* m__io;      // borrowed stream (owned by the caller)
int m_scale_key_frames;      // to be populated when the format section is implemented
int m_volume_boxes;          // to be populated when the format section is implemented
std::string m_magic;         // 4-byte file signature
// Parses the header. Throws kaitai::validation_not_equal_error when the
// magic bytes are not "GRSM".
void _read() {
m_magic = m__io->read_bytes(4);
if (m_magic != "\x47\x52\x53\x4D") {
// BUG FIX: m__io is a pointer data member, not a callable; the original
// `m__io()` would not compile. Pass the stream pointer itself, as the
// kaitai runtime's validation error constructor expects.
throw kaitai::validation_not_equal_error<std::string>("GRSM", m_magic, m__io, "/seq/0");
}
// Read and update m_scale_key_frames and m_volume_boxes based on the file format
// Example:
// m_scale_key_frames = m__io->read_something();
// m_volume_boxes = m__io->read_something_else();
}
// Hook for releasing partially-constructed state; nothing to free yet.
void _clean_up() {
}
};
int main() {
try {
// BUG FIX: kaitai::kstream's std::string constructor treats its argument
// as the raw in-memory byte buffer, not a filename, so the original code
// parsed the literal text "example.rsm". Open the file as a binary
// stream and hand the stream pointer to the kstream instead.
std::ifstream ifs("example.rsm", std::ifstream::binary);
if (!ifs) {
throw std::runtime_error("could not open example.rsm");
}
kaitai::kstream ks(&ifs);
// Create an instance of the rsm_t class and read the file
rsm_t rsm(&ks);
rsm.read();
// Process the RSM file data
// Example:
// std::cout << "Scale Key Frames: " << rsm.m_scale_key_frames << std::endl;
// std::cout << "Volume Boxes: " << rsm.m_volume_boxes << std::endl;
} catch (const std::exception& e) {
std::cerr << "Error: " << e.what() << std::endl;
return 1;  // non-zero exit so callers/scripts can detect the failure
}
return 0;
}
|
#!/bin/bash
set -e
echo '===> DSE Configuration'
# Default addresses to use for DSE cluster if starting in Docker
dse_ip='dse'
dse_external_ip=$KILLRVIDEO_DOCKER_IP
dse_enable_ssl='false'
# Create cql_options variable to consolidate multiple options into one
# variable for easier reading
cql_options=''
# Use space variable to concatenate options
space=' '
# If an external cluster address is provided, use that
# (overrides both the internal Docker address and the external IP)
if [ ! -z "$KILLRVIDEO_DSE_CONTACT_POINTS" ]; then
dse_ip=$KILLRVIDEO_DSE_CONTACT_POINTS
dse_external_ip=$KILLRVIDEO_DSE_CONTACT_POINTS
fi
echo "=> Setting up KillrVideo via DSE node at: $dse_ip"
# If a request timeout is available use that. This is useful
# in cases where a longer timeout is needed for cqlsh operations
if [ ! -z "$KILLRVIDEO_DSE_REQUEST_TIMEOUT" ]; then
dse_request_timeout="--request-timeout=$KILLRVIDEO_DSE_REQUEST_TIMEOUT --connect-timeout=$KILLRVIDEO_DSE_REQUEST_TIMEOUT"
cql_options="$dse_request_timeout"
echo "=> Request timeout set at: $dse_request_timeout"
fi
# If SSL is enabled, then provide SSL info
if [ "$KILLRVIDEO_ENABLE_SSL" = 'true' ]; then
dse_enable_ssl='true'
# The reference to this file is provided via a volume enabled
# on the dse-config container within docker-compose.yaml
# in the killrvideo-docker-common repo
dse_ssl_certfile='/opt/killrvideo-data/cassandra.cert'
dse_ssl='--ssl'
cql_options="$cql_options$space$dse_ssl"
# These 2 environment variables are needed for cqlsh to
# properly handle SSL
export SSL_CERTFILE=$dse_ssl_certfile
export SSL_VALIDATE=true
echo "=> SSL encryption is ENABLED with CERT FILE: $dse_ssl_certfile"
fi
# Wait for port 9042 (CQL) to be ready for up to 300 seconds
echo '=> Waiting for DSE to become available'
/wait-for-it.sh -t 300 $dse_ip:9042
echo '=> DSE is available'
echo "=> If any exist, cql_options are: $cql_options"
# Default privileges
# (DSE ships with the well-known cassandra/cassandra superuser account)
admin_user='cassandra'
admin_password='cassandra'
dse_user='cassandra'
dse_password='cassandra'
# If requested, create a new superuser to replace the default superuser
if [ "$KILLRVIDEO_CREATE_ADMIN_USER" = 'true' ]; then
# Check if initialisation is done already.
# BUG FIX: the original wrapped this pipeline in `[ ... ]`, which is not
# valid — `[` is the test builtin and cannot run a command pipeline, so the
# check always misbehaved. Run the pipeline directly, exactly as the
# identical bootstrap check further below already does.
if cqlsh $dse_ip 9042 -u $admin_user -p $admin_password $cql_options -e "DESCRIBE KEYSPACE kv_init_done;" 2>&1 | grep -q 'CREATE KEYSPACE kv_init_done'; then
echo "The database is already initialised, exiting..."
exit 0
fi
echo "=> Creating new superuser $KILLRVIDEO_ADMIN_USERNAME"
cqlsh $dse_ip 9042 -u $admin_user -p $admin_password $cql_options -e "CREATE ROLE $KILLRVIDEO_ADMIN_USERNAME with SUPERUSER = true and LOGIN = true and PASSWORD = '$KILLRVIDEO_ADMIN_PASSWORD'"
# Login as new superuser to delete default superuser (cassandra)
cqlsh $dse_ip 9042 -u $KILLRVIDEO_ADMIN_USERNAME -p $KILLRVIDEO_ADMIN_PASSWORD $cql_options -e "DROP ROLE $admin_user"
fi
# Use new admin credentials for future actions
if [ ! -z "$KILLRVIDEO_ADMIN_USERNAME" ]; then
admin_user=$KILLRVIDEO_ADMIN_USERNAME
admin_password=$KILLRVIDEO_ADMIN_PASSWORD
fi
# Already initialised? Only continue when a forced bootstrap is requested.
if cqlsh $dse_ip 9042 -u $admin_user -p $admin_password $cql_options -e "DESCRIBE KEYSPACE kv_init_done;" 2>&1 | grep -q 'CREATE KEYSPACE kv_init_done'; then
if [ ! -z "$KILLRVIDEO_FORCE_BOOTSTRAP" ]; then
echo '=> Forced bootstrap!'
else
echo "The database is already initialised, exiting..."
exit 0
fi
fi
# If requested, create a new standard user
if [ "$KILLRVIDEO_CREATE_DSE_USER" = 'true' ]; then
# Create user and grant permission to create keyspaces (generator and web will need)
echo "=> Creating user $KILLRVIDEO_DSE_USERNAME and granting keyspace creation permissions"
cqlsh $dse_ip 9042 -u $admin_user -p $admin_password $cql_options -e "CREATE ROLE $KILLRVIDEO_DSE_USERNAME with LOGIN = true and PASSWORD = '$KILLRVIDEO_DSE_PASSWORD'"
echo '=> Granting keyspace creation permissions'
cqlsh $dse_ip 9042 -u $admin_user -p $admin_password $cql_options -e "GRANT CREATE on ALL KEYSPACES to $KILLRVIDEO_DSE_USERNAME"
cqlsh $dse_ip 9042 -u $admin_user -p $admin_password $cql_options -e "GRANT ALL PERMISSIONS on ALL SEARCH INDICES to $KILLRVIDEO_DSE_USERNAME"
fi
# Use the provided username/password for subsequent non-admin operations
if [ ! -z "$KILLRVIDEO_DSE_USERNAME" ]; then
dse_user=$KILLRVIDEO_DSE_USERNAME
dse_password=$KILLRVIDEO_DSE_PASSWORD
fi
# Create the keyspace if necessary
echo '=> Ensuring keyspace is created'
keyspace_file='/opt/killrvideo-data/keyspace.cql'
if [ ! -z "$KILLRVIDEO_CASSANDRA_REPLICATION" ]; then
# TODO: check for valid replication format? https://stackoverflow.com/questions/21112707/check-if-a-string-matches-a-regex-in-bash-script
# Rewrites the replication map literal inside the keyspace CQL in place.
sed -i "s/{.*}/$KILLRVIDEO_CASSANDRA_REPLICATION/;" $keyspace_file
fi
cqlsh $dse_ip 9042 -f $keyspace_file -u $dse_user -p $dse_password $cql_options
# TODO: Complete nodesync section once documentation is available
# Once we create the keyspace enable nodesync
# Commenting this out for now until we can get the correct
# documentation needed for using nodesync over SSL
#echo '=> Enabling NodeSync for KillrVideo keyspace'
#/opt/dse/resources/cassandra/bin/nodesync -cu $dse_user -cp $dse_password -h $dse_ip --cql-ssl enable -v -k killrvideo "*"
# Create the schema if necessary
echo '=> Ensuring schema is created'
cqlsh $dse_ip 9042 -f /opt/killrvideo-data/schema.cql -k killrvideo -u $dse_user -p $dse_password $cql_options
# Create DSE Search core if necessary
echo '=> Ensuring DSE Search is configured'
# TODO: temp workaround - if search index already exists, ALTER statements will cause non-zero exit
set +e
cqlsh $dse_ip 9042 -f /opt/killrvideo-data/videos_search.cql -k killrvideo -u $dse_user -p $dse_password $cql_options
# TODO: remove workaround
set -e
# Wait for port 8182 (Gremlin) to be ready for up to 120 seconds
echo '=> Waiting for DSE Graph to become available'
/wait-for-it.sh -t 120 $dse_ip:8182
echo '=> DSE Graph is available'
# Update the gremlin-console remote.yaml file to set the remote hosts, username, and password
# This is required because the "dse gremlin-console" command does not accept username/password via command line
echo '=> Setting up remote.yaml for gremlin-console'
# NOTE(review): $dse_ssl_certfile is only assigned when KILLRVIDEO_ENABLE_SSL
# is 'true'; when SSL is off the substituted trustCertChainFile value below is
# empty — confirm remote.yaml tolerates that.
sed -i "s/.*hosts:.*/hosts: [$dse_ip]/;s/.*username:.*/username: $dse_user/;s/.*password:.*/password: $dse_password/;s|enableSsl:.*|enableSsl: $dse_enable_ssl, trustCertChainFile: $dse_ssl_certfile,|;" /opt/dse/resources/graph/gremlin-console/conf/remote.yaml
# Create the graph if necessary
echo '=> Ensuring graph is created'
graph_file='/opt/killrvideo-data/killrvideo_video_recommendations_schema.groovy'
if [ ! -z "$KILLRVIDEO_GRAPH_REPLICATION" ]; then
sed -i "s/{.*}/$KILLRVIDEO_GRAPH_REPLICATION/;" $graph_file
fi
dse gremlin-console -e $graph_file
echo '=> Configuration of DSE users and schema complete'
# Don't bootstrap next time we start
cqlsh $dse_ip 9042 -u $dse_user -p $dse_password $cql_options -e "CREATE KEYSPACE IF NOT EXISTS kv_init_done WITH REPLICATION = {'class': 'SimpleStrategy', 'replication_factor': 1};"
|
"""
Generate a function for calculating the Euclidean distance between two points
"""
import numpy as np
def euclidean_distance(point1, point2):
    """Return the Euclidean (L2) distance between two n-dimensional points.

    Both arguments may be any equal-length array-likes; each is converted to
    a NumPy array, and the norm of their element-wise difference is returned.
    """
    difference = np.array(point1) - np.array(point2)
    return np.linalg.norm(difference)
|
#!/bin/bash
set -x
# NOTE(review): database credentials are hard-coded here and in the sqlcmd /
# mysql invocations below — consider moving them to a protected env file.
export MYSQL_PWD=nuserv-demo
# Pulls the last 100 days of GBI task remarks from SQL Server and mirrors
# them into the MySQL `gbi.Remark` table.
get_data(){
dt=$(date '+%d/%m/%Y %H:%M:%S');
file=$(date '+%d-%m-%Y');
onehundreddays="$(date +%m-%d-%Y --date='100 day ago')"
current="$(date '+%m-%d-%Y')"
# BUG FIX: `[ -f "remarks.*" ]` tested for a file literally named
# "remarks.*" (globs never expand inside quotes or within [ ]), so old
# artifacts were never removed. Delete them unconditionally instead.
rm -f remarks.*
echo "Select Remark.Id, Author, Message, Timestamp, TaskId From Task
inner join Remark on Remark.TaskId = Task.Id
where Task.TaskNumber Like 'GBI%'
and Format(Task.DateCreated, 'MM-dd-yyyy', 'en-US') >= '${onehundreddays}'
AND Format(Task.DateCreated, 'MM-dd-yyyy', 'en-US') <= '${current}';" > remarks.sql
# Start from a fresh, empty CSV (any previous copy was removed above, which
# also makes the original if/else touch dance unnecessary).
touch remarks.csv
sqlcmd -S 122.54.171.141 -U dashboard -Ppowerformdb1 -d powerform-db1 -i "remarks.sql" -o "remarks.csv" -s"," -h-1
# Drop sqlcmd's trailing "(N rows affected)" line and prepend a header row.
n=`cat remarks.csv |wc -l`
x=$((n-1))
{ echo "Id,Author,Message,Timestamp,TaskId"; sed "$x,\$d" remarks.csv; } > remarksnew.csv
# NOTE(review): field values are interpolated straight into the INSERT
# statement; a remark containing quotes or commas will break the statement
# or inject SQL — verify the upstream data or switch to LOAD DATA.
while IFS="," read -r Id Author Message Timestamp TaskId
do
mysql -unuserv-demo -h 122.248.200.34 -D gbi -e "INSERT IGNORE INTO Remark
(Id, TaskId, Timestamp, Message, Author)
VALUES
('$(echo ${Id})', \"$(echo ${TaskId})\", \"$(echo ${Timestamp})\", \"$(echo ${Message})\", \"$(echo ${Author})\");"
done < <(tail -n +2 remarksnew.csv)
}
get_data
|
#!/bin/bash
set -euo pipefail
# Make preinstall a no-op, we ship node as a tarball.
echo '#!/bin/bash' > scripts/preinstall.bash
yarn install
yarn build
cp ./lib/node/node $PREFIX/bin/node-nbin
|
import { User, UserProfile } from '../../../models';
// GraphQL SDL fragment declaring the UserProfile input type consumed by the
// databaseCreateUser mutation defined below in this module.
export const schema = [
  `
  # User profile data for creating a new local database user account
  input UserProfile {
    # A display name for the logged-in user
    displayName: String!
    # A profile picture URL
    picture: String
    # The user's gender
    gender: String
    # The user's location
    location: String
    # A website URL
    website: String
  }
`,
];
// GraphQL SDL fragment for the mutation field; its resolver is implemented in
// `resolvers.Mutation.databaseCreateUser` below.
export const mutation = [
  `
  # Creates a new user and profile in the local database
  databaseCreateUser(
    # The email of the new user, this email must be unique in the database
    email: String!
    # User profile information for creating a new local database user account
    profile: UserProfile!
  ): DatabaseUser
`,
];
export const resolvers = {
  Mutation: {
    /**
     * Creates a new user row plus its nested profile row.
     *
     * @param parent - unused GraphQL parent value.
     * @param args - `{ email, profile }` as declared by the mutation SDL.
     * @returns the newly created user record.
     * @throws Error when a user with the given email already exists.
     */
    async databaseCreateUser(parent: any, args: any) {
      // Reject duplicate emails up front.
      const lookupUser = await User.findOne({ where: { email: args.email } });
      if (lookupUser) {
        // FIX: throw a proper Error object rather than a string literal so
        // callers get a stack trace and `instanceof Error` checks work.
        throw new Error('User already exists!');
      }
      // Create the user and its profile in one call; `include` enables
      // Sequelize's nested creation of the associated profile row.
      const user = await User.create(
        {
          email: args.email,
          profile: {
            ...args.profile,
          },
        },
        {
          include: [{ model: UserProfile, as: 'profile' }],
        },
      );
      return user;
    },
  },
};
|
def findMax(arr):
    """Return the largest element of ``arr``.

    Raises IndexError when ``arr`` is empty (same as the original behavior).
    """
    largest = arr[0]
    for candidate in arr[1:]:
        if candidate > largest:
            largest = candidate
    return largest
# Demo: prints the maximum of the sample list (expected output: 25).
arr = [5, 10, 15, 20, 25]
result = findMax(arr)
print(result)
|
// Grab the articles as a json
$.getJSON("/articles", function (data) {
    // Build one accordion card per article
    for (var i = 0; i < data.length; i++) {
        // Display the apropos information on the page
        var accordion = $("<div class = 'accordion' id = 'accordion'>").appendTo($("#articles"));
        var card = $("<div class = 'card'>").appendTo(accordion);
        var cardHeader = $("<div class = 'card-header' id = 'title'>").appendTo(card);
        var button = $("<button class='btn btn-link artButton' data-id='" + data[i]._id + "' type='button' data-toggle='collapse' data-target='#collapse-" + i + "' aria-expanded='false' aria-controls='collapse-" + i + "'>" + data[i].title + "</button>").appendTo(cardHeader);
        var collapse = $("<div id='collapse-" + i + "' class='collapse' aria-labelledby='headingOne' data-parent='#accordion'>").appendTo(card);
        var body = $("<div class = 'card-body'>" + data[i].title + "<br />" + data[i].link + "</div>").appendTo(collapse);
    }
    // NOTE(review): the loop appends a new <div id='accordion'> and an
    // id='title' header per article — duplicate DOM ids; consider classes.
    $(".artButton").on("click", function () {
        $("#comments").empty();
        $("#commentsHistory").empty();
        var thisId = $(this).attr("data-id");
        // Show the saved comment (if any) for this article
        $.ajax({
            method: "GET",
            url: "/articles/" + thisId
        }).then(function (data) {
            console.log(data.comment)
            if (data.comment) {
                var commentCard = $("<div class = 'card'>").appendTo($("#commentsHistory"));
                var commentBody = $("<div class = 'card-body'> <h2>" + data.comment.title + "</h2></div>").appendTo(commentCard);
                var commentText = $("<p class = 'card-text'>" + data.comment.body + "</p>").appendTo(commentBody);
                var deleteButton = $("<a href = '#' class = 'btn btn-primary' id = 'delete'>Delete This Comment" + "</a>").appendTo(commentBody);
                $("#delete").click(function() {
                    $.ajax({
                        method: "DELETE",
                        url: "/articles/" + thisId
                    }).then(function (data) {
                        $("#commentsHistory").empty();
                    });
                });
            }
            $.ajax({
                method: "GET",
                url: "/articles/" + thisId
            })
            // With that done, add the note information to the page
            .then(function (data) {
                // The title of the article
                $("#comments").append("<h2>" + data.title + "</h2>");
                // An input to enter a new title
                $("#comments").append("<input id='titleinput' name='title' >");
                // A textarea to add a new note body
                $("#comments").append("<textarea id='bodyinput' name='body'></textarea>");
                // A button to submit a new note, with the id of the article saved to it
                $("#comments").append("<button data-id='" + data._id + "' id='savecomment'>Save Comment</button>");
                // BUG FIX: the response stores the note under `data.comment`
                // (lowercase), as every other access in this handler shows;
                // the original checked `data.Comment`, which is always
                // undefined, so saved notes never pre-filled the inputs.
                if (data.comment) {
                    // Place the title of the note in the title input
                    $("#titleinput").val(data.comment.title);
                    // Place the body of the note in the body textarea
                    $("#bodyinput").val(data.comment.body);
                }
            });
        });
    });
    // Save a new comment for the clicked article (delegated so it also works
    // for the button appended above after the AJAX round-trip)
    $(document).on("click", "#savecomment", function() {
        // Grab the id associated with the article from the submit button
        var thisId = $(this).attr("data-id");
        // Run a POST request to change the note, using what's entered in the inputs
        $.ajax({
            method: "POST",
            url: "/articles/" + thisId,
            data: {
                // Value taken from title input
                title: $("#titleinput").val(),
                // Value taken from note textarea
                body: $("#bodyinput").val()
            }
        })
        // With that done
        .then(function(data) {
            // Log the response
            console.log(data);
            // Empty the notes section
            $("#comments").empty();
        });
        // Also, remove the values entered in the input and textarea for note entry
        $("#titleinput").val("");
        $("#bodyinput").val("");
    });
});
|
// The solution provides a complete implementation of the BinFileOutputStream class with the required getter methods and error handling for file I/O operations.
|
#!/usr/bin/env bash
# 装载其它库
ROOT=`dirname ${BASH_SOURCE[0]}`
source ${ROOT}/file.sh
# ------------------------------------------------------------------------------ nodejs 操作函数
# install Node Version Manager(nvm)
# Install Node Version Manager (nvm) into $HOME/.nvm.
# $1 - optional nvm version to install (defaults to 0.35.2)
installNvm() {
local nvmVersion=${1:-0.35.2}
# BUG FIX: a tilde inside double quotes is NOT expanded by the shell, so
# the original recreateDir "~/.nvm" created a literal './~' directory in
# the current working dir; use $HOME explicitly.
recreateDir "${HOME}/.nvm"
curl -o- https://raw.githubusercontent.com/creationix/nvm/v${nvmVersion}/install.sh | bash
source ~/.nvm/nvm.sh
# NOTE(review): these checks compare $? against ${YES} from file.sh — this
# assumes file.sh defines YES as 0; confirm.
if [[ "$?" != "${YES}" ]]; then
return ${FAILED}
fi
# Check that the nvm command is actually usable
nvm version
if [[ "$?" != "${YES}" ]]; then
return ${FAILED}
fi
return ${SUCCEED}
}
# Check nodejs version
checkNodejsVersion() {
if [[ ! $1 ]]; then
printf "${C_B_RED}<<<< please specified expect nodejs version.${C_RESET}\n"
return ${FAILED}
fi
local expectVersion=$1
source /root/.bashrc
local nodeVersion=$(nvm version)
if [[ "$?" != "${YES}" ]]; then
printf "${C_B_YELLOW}>>>> nvm not installed.${C_RESET}\n"
local nvmVersion=v0.35.2
installNvm "${nvmVersion}"
if [[ "$?" != "${SUCCEED}" ]]; then
return ${FAILED}
fi
nodeVersion=$(nvm version)
fi
if [[ "${nodeVersion}" != "v${expectVersion}" ]]; then
printf "${C_B_YELLOW}>>>> current nodejs version is ${nodeVersion}, not ${expectVersion}.${C_RESET}\n"
nvm install ${expectVersion}
nvm use ${expectVersion}
fi
return ${SUCCEED}
}
# build nodejs project
buildNodejsProject() {
if [[ ! $1 ]]; then
printf "${C_B_RED}<<<< please input nodejs project path.${C_RESET}\n"
return ${FAILED}
fi
if [[ ! $2 ]]; then
printf "${C_B_RED}<<<< please input nodejs version.${C_RESET}\n"
return ${FAILED}
fi
isDirectory $1
if [[ "$?" != "${YES}" ]]; then
printf "${C_B_RED}<<<< $1 is not valid path.${C_RESET}\n"
return ${FAILED}
fi
local project=$1
local nodeVersion=$2
printf "${C_B_BLUE}>>>> build nodejs project $1 begin.${C_RESET}\n"
cd ${project} || (printf "${C_B_RED}<<<< ${project} is not exists.${C_RESET}\n" && exit 1)
checkNodejsVersion ${nodeVersion}
npm install
if [[ "$?" != "${YES}" ]]; then
printf "${C_B_RED}<<<< update dependencies failed.${C_RESET}\n"
return ${FAILED}
else
printf "${C_B_GREEN}>>>> update dependencies succeed.${C_RESET}\n"
fi
npm run build
if [[ "$?" != "${YES}" ]]; then
printf "${C_B_RED}<<<< build failed.${C_RESET}\n"
return ${FAILED}
else
printf "${C_B_GREEN}<<<< build succeed.${C_RESET}\n"
fi
return ${SUCCEED}
}
# package nodejs artifact dir (default is dist)
packageDist() {
zip -o -r -q ${branch}.zip *
}
|
#!/bin/bash
#
# build_android.sh
# Copyright (c) 2012 Jacek Marchwicki
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
if [ "$NDK" = "" ]; then
echo NDK variable not set, exiting
echo "Use: export NDK=/your/path/to/android-ndk"
exit 1
fi
if [ "$NDK_ARCH" = "" ]; then
echo NDK_ARCH variable not set, exiting
echo "Use: export NDK_ARCH= for example: darwin-x86_64"
echo "Check your $NDK/toolchains/*/prebuilt/ directory"
exit 1
fi
OS=`uname -s | tr '[A-Z]' '[a-z]'`
# Cross-compiles a static x264 for the currently-selected Android ABI.
# Expects NDK, PLATFORM_VERSION, ARCH, EABIARCH, PREBUILT, PREFIX,
# OPTIMIZE_CFLAGS and ADDITIONAL_CONFIGURE_FLAG to be set by the per-ABI
# sections at the end of this script.
function build_x264
{
PLATFORM=$NDK/platforms/$PLATFORM_VERSION/arch-$ARCH/
export PATH=${PATH}:$PREBUILT/bin/
CROSS_COMPILE=$PREBUILT/bin/$EABIARCH-
CFLAGS=$OPTIMIZE_CFLAGS
#CFLAGS=" -I$ARM_INC -fpic -DANDROID -fpic -mthumb-interwork -ffunction-sections -funwind-tables -fstack-protector -fno-short-enums -D__ARM_ARCH_5__ -D__ARM_ARCH_5T__ -D__ARM_ARCH_5E__ -D__ARM_ARCH_5TE__ -Wno-psabi -march=armv5te -mtune=xscale -msoft-float -mthumb -Os -fomit-frame-pointer -fno-strict-aliasing -finline-limit=64 -DANDROID -Wa,--noexecstack -MMD -MP "
export CPPFLAGS="$CFLAGS"
export CFLAGS="$CFLAGS"
export CXXFLAGS="$CFLAGS"
# Point every toolchain binary at the NDK cross-compilers
export CXX="${CROSS_COMPILE}g++ --sysroot=$PLATFORM"
export AS="${CROSS_COMPILE}gcc --sysroot=$PLATFORM"
export CC="${CROSS_COMPILE}gcc --sysroot=$PLATFORM"
export NM="${CROSS_COMPILE}nm"
export STRIP="${CROSS_COMPILE}strip"
export RANLIB="${CROSS_COMPILE}ranlib"
export AR="${CROSS_COMPILE}ar"
export LDFLAGS="-Wl,-rpath-link=$PLATFORM/usr/lib -L$PLATFORM/usr/lib -lc -lm -ldl -llog"
cd x264
./configure --prefix=$(pwd)/$PREFIX --host=$ARCH-linux --enable-static $ADDITIONAL_CONFIGURE_FLAG || exit 1
make clean || exit 1
make -j4 install || exit 1
cd ..
}
function build_ogg
{
PLATFORM=$NDK/platforms/$PLATFORM_VERSION/arch-$ARCH/
export PATH=${PATH}:$PREBUILT/bin/
CROSS_COMPILE=$PREBUILT/bin/$EABIARCH-
CFLAGS=$OPTIMIZE_CFLAGS
#CFLAGS=" -I$ARM_INC -fpic -DANDROID -fpic -mthumb-interwork -ffunction-sections -funwind-tables -fstack-protector -fno-short-enums -D__ARM_ARCH_5__ -D__ARM_ARCH_5T__ -D__ARM_ARCH_5E__ -D__ARM_ARCH_5TE__ -Wno-psabi -march=armv5te -mtune=xscale -msoft-float -mthumb -Os -fomit-frame-pointer -fno-strict-aliasing -finline-limit=64 -DANDROID -Wa,--noexecstack -MMD -MP "
export CPPFLAGS="$CFLAGS"
export CFLAGS="$CFLAGS -lgcc"
export CXXFLAGS="$CFLAGS"
export CXX="${CROSS_COMPILE}g++ --sysroot=$PLATFORM"
export CC="${CROSS_COMPILE}gcc --sysroot=$PLATFORM"
export NM="${CROSS_COMPILE}nm"
export STRIP="${CROSS_COMPILE}strip"
export RANLIB="${CROSS_COMPILE}ranlib"
export AR="${CROSS_COMPILE}ar"
export LDFLAGS="-Wl,-rpath-link=$PLATFORM/usr/lib -L$PLATFORM/usr/lib -lc -lm -ldl -llog"
cd libogg
./autogen.sh \
--prefix=$(pwd)/$PREFIX \
--host=$ARCH-linux \
--disable-dependency-tracking \
--disable-shared \
--enable-static \
--with-pic \
$ADDITIONAL_CONFIGURE_FLAG \
|| exit 1
make clean || exit 1
make -j4 install || exit 1
cd ..
}
function build_vorbis
{
PLATFORM=$NDK/platforms/$PLATFORM_VERSION/arch-$ARCH/
export PATH=${PATH}:$PREBUILT/bin/
CROSS_COMPILE=$PREBUILT/bin/$EABIARCH-
CFLAGS=$OPTIMIZE_CFLAGS
#CFLAGS=" -I$ARM_INC -fpic -DANDROID -fpic -mthumb-interwork -ffunction-sections -funwind-tables -fstack-protector -fno-short-enums -D__ARM_ARCH_5__ -D__ARM_ARCH_5T__ -D__ARM_ARCH_5E__ -D__ARM_ARCH_5TE__ -Wno-psabi -march=armv5te -mtune=xscale -msoft-float -mthumb -Os -fomit-frame-pointer -fno-strict-aliasing -finline-limit=64 -DANDROID -Wa,--noexecstack -MMD -MP "
export CPPFLAGS="$CFLAGS"
export CFLAGS="$CFLAGS -lgcc"
export CXXFLAGS="$CFLAGS"
export CXX="${CROSS_COMPILE}g++ --sysroot=$PLATFORM"
export CC="${CROSS_COMPILE}gcc --sysroot=$PLATFORM"
export NM="${CROSS_COMPILE}nm"
export STRIP="${CROSS_COMPILE}strip"
export RANLIB="${CROSS_COMPILE}ranlib"
export AR="${CROSS_COMPILE}ar"
export LDFLAGS="-Wl,-rpath-link=$PLATFORM/usr/lib -L$PLATFORM/usr/lib -lc -lm -ldl -llog"
cd libvorbis
./configure \
--with-ogg=$(pwd)/ffmpeg-build \
--prefix=$(pwd)/$PREFIX \
--host=$ARCH-linux \
--disable-dependency-tracking \
--disable-shared \
--enable-static \
--with-pic \
$ADDITIONAL_CONFIGURE_FLAG \
|| exit 1
make clean || exit 1
make -j4 install || exit 1
cd ..
}
function build_amr
{
PLATFORM=$NDK/platforms/$PLATFORM_VERSION/arch-$ARCH/
export PATH=${PATH}:$PREBUILT/bin/
CROSS_COMPILE=$PREBUILT/bin/$EABIARCH-
CFLAGS=$OPTIMIZE_CFLAGS
#CFLAGS=" -I$ARM_INC -fpic -DANDROID -fpic -mthumb-interwork -ffunction-sections -funwind-tables -fstack-protector -fno-short-enums -D__ARM_ARCH_5__ -D__ARM_ARCH_5T__ -D__ARM_ARCH_5E__ -D__ARM_ARCH_5TE__ -Wno-psabi -march=armv5te -mtune=xscale -msoft-float -mthumb -Os -fomit-frame-pointer -fno-strict-aliasing -finline-limit=64 -DANDROID -Wa,--noexecstack -MMD -MP "
export CPPFLAGS="$CFLAGS"
export CFLAGS="$CFLAGS"
export CXXFLAGS="$CFLAGS"
export CXX="${CROSS_COMPILE}g++ --sysroot=$PLATFORM"
export CC="${CROSS_COMPILE}gcc --sysroot=$PLATFORM"
export NM="${CROSS_COMPILE}nm"
export STRIP="${CROSS_COMPILE}strip"
export RANLIB="${CROSS_COMPILE}ranlib"
export AR="${CROSS_COMPILE}ar"
export LDFLAGS="-Wl,-rpath-link=$PLATFORM/usr/lib -L$PLATFORM/usr/lib -nostdlib -lc -lm -ldl -llog"
cd vo-amrwbenc
./configure \
--prefix=$(pwd)/$PREFIX \
--host=$ARCH-linux \
--disable-dependency-tracking \
--disable-shared \
--enable-static \
--with-pic \
$ADDITIONAL_CONFIGURE_FLAG \
|| exit 1
make clean || exit 1
make -j4 install || exit 1
cd ..
}
function build_aac
{
PLATFORM=$NDK/platforms/$PLATFORM_VERSION/arch-$ARCH/
export PATH=${PATH}:$PREBUILT/bin/
CROSS_COMPILE=$PREBUILT/bin/$EABIARCH-
CFLAGS=$OPTIMIZE_CFLAGS
#CFLAGS=" -I$ARM_INC -fpic -DANDROID -fpic -mthumb-interwork -ffunction-sections -funwind-tables -fstack-protector -fno-short-enums -D__ARM_ARCH_5__ -D__ARM_ARCH_5T__ -D__ARM_ARCH_5E__ -D__ARM_ARCH_5TE__ -Wno-psabi -march=armv5te -mtune=xscale -msoft-float -mthumb -Os -fomit-frame-pointer -fno-strict-aliasing -finline-limit=64 -DANDROID -Wa,--noexecstack -MMD -MP "
export CPPFLAGS="$CFLAGS"
export CFLAGS="$CFLAGS"
export CXXFLAGS="$CFLAGS"
export CXX="${CROSS_COMPILE}g++ --sysroot=$PLATFORM"
export CC="${CROSS_COMPILE}gcc --sysroot=$PLATFORM"
export NM="${CROSS_COMPILE}nm"
export STRIP="${CROSS_COMPILE}strip"
export RANLIB="${CROSS_COMPILE}ranlib"
export AR="${CROSS_COMPILE}ar"
export LDFLAGS="-Wl,-rpath-link=$PLATFORM/usr/lib -L$PLATFORM/usr/lib -nostdlib -lc -lm -ldl -llog"
cd vo-aacenc
export PKG_CONFIG_LIBDIR=$(pwd)/$PREFIX/lib/pkgconfig/
export PKG_CONFIG_PATH=$(pwd)/$PREFIX/lib/pkgconfig/
./configure \
--prefix=$(pwd)/$PREFIX \
--host=$ARCH-linux \
--disable-dependency-tracking \
--disable-shared \
--enable-static \
--with-pic \
$ADDITIONAL_CONFIGURE_FLAG \
|| exit 1
make clean || exit 1
make -j4 install || exit 1
cd ..
}
function build_freetype
{
PLATFORM=$NDK/platforms/$PLATFORM_VERSION/arch-$ARCH/
export PATH=${PATH}:$PREBUILT/bin/
CROSS_COMPILE=$PREBUILT/bin/$EABIARCH-
CFLAGS=$OPTIMIZE_CFLAGS
#CFLAGS=" -I$ARM_INC -fpic -DANDROID -fpic -mthumb-interwork -ffunction-sections -funwind-tables -fstack-protector -fno-short-enums -D__ARM_ARCH_5__ -D__ARM_ARCH_5T__ -D__ARM_ARCH_5E__ -D__ARM_ARCH_5TE__ -Wno-psabi -march=armv5te -mtune=xscale -msoft-float -mthumb -Os -fomit-frame-pointer -fno-strict-aliasing -finline-limit=64 -DANDROID -Wa,--noexecstack -MMD -MP "
export CPPFLAGS="$CFLAGS"
export CFLAGS="$CFLAGS"
export CXXFLAGS="$CFLAGS"
export CXX="${CROSS_COMPILE}g++ --sysroot=$PLATFORM"
export CC="${CROSS_COMPILE}gcc --sysroot=$PLATFORM"
export NM="${CROSS_COMPILE}nm"
export STRIP="${CROSS_COMPILE}strip"
export RANLIB="${CROSS_COMPILE}ranlib"
export AR="${CROSS_COMPILE}ar"
export LDFLAGS="-Wl,-rpath-link=$PLATFORM/usr/lib -L$PLATFORM/usr/lib -lc -lm -ldl -llog"
cd freetype2
export PKG_CONFIG_LIBDIR=$(pwd)/$PREFIX/lib/pkgconfig/
export PKG_CONFIG_PATH=$(pwd)/$PREFIX/lib/pkgconfig/
./configure \
--prefix=$(pwd)/$PREFIX \
--host=$ARCH-linux \
--disable-dependency-tracking \
--disable-shared \
--enable-static \
--with-pic \
--without-png \
$ADDITIONAL_CONFIGURE_FLAG \
|| exit 1
make clean || exit 1
make -j4 install || exit 1
cd ..
}
function build_ass
{
PLATFORM=$NDK/platforms/$PLATFORM_VERSION/arch-$ARCH/
export PATH=${PATH}:$PREBUILT/bin/
CROSS_COMPILE=$PREBUILT/bin/$EABIARCH-
CFLAGS="$OPTIMIZE_CFLAGS -I$(pwd)/ffmpeg-build/$TARGET/include/freetype2"
#CFLAGS=" -I$ARM_INC -fpic -DANDROID -fpic -mthumb-interwork -ffunction-sections -funwind-tables -fstack-protector -fno-short-enums -D__ARM_ARCH_5__ -D__ARM_ARCH_5T__ -D__ARM_ARCH_5E__ -D__ARM_ARCH_5TE__ -Wno-psabi -march=armv5te -mtune=xscale -msoft-float -mthumb -Os -fomit-frame-pointer -fno-strict-aliasing -finline-limit=64 -DANDROID -Wa,--noexecstack -MMD -MP "
export CPPFLAGS="$CFLAGS"
export CFLAGS="$CFLAGS"
export CXXFLAGS="$CFLAGS"
export CXX="${CROSS_COMPILE}g++ --sysroot=$PLATFORM"
export CC="${CROSS_COMPILE}gcc --sysroot=$PLATFORM"
export NM="${CROSS_COMPILE}nm"
export STRIP="${CROSS_COMPILE}strip"
export RANLIB="${CROSS_COMPILE}ranlib"
export AR="${CROSS_COMPILE}ar"
export LDFLAGS=" -Wl,-rpath-link=$PLATFORM/usr/lib -L$PLATFORM/usr/lib -lc -lm -ldl -llog"
cd libass
export PKG_CONFIG_LIBDIR=$(pwd)/$PREFIX/lib/pkgconfig/
export PKG_CONFIG_PATH=$(pwd)/$PREFIX/lib/pkgconfig/
./autogen.sh \
--prefix=$(pwd)/$PREFIX \
--host=$ARCH-linux \
--disable-fontconfig \
--disable-dependency-tracking \
--disable-shared \
--enable-static \
--with-pic \
$ADDITIONAL_CONFIGURE_FLAG \
|| exit 1
./configure \
--prefix=$(pwd)/$PREFIX \
--host=$ARCH-linux \
--disable-fontconfig \
--disable-dependency-tracking \
--disable-shared \
--enable-static \
--with-pic \
$ADDITIONAL_CONFIGURE_FLAG \
|| exit 1
make clean || exit 1
make -j4 V=1 install || exit 1
cd ..
}
function build_fribidi
{
PLATFORM=$NDK/platforms/$PLATFORM_VERSION/arch-$ARCH/
export PATH=${PATH}:$PREBUILT/bin/
CROSS_COMPILE=$PREBUILT/bin/$EABIARCH-
CFLAGS="$OPTIMIZE_CFLAGS -std=gnu99"
#CFLAGS=" -I$ARM_INC -fpic -DANDROID -fpic -mthumb-interwork -ffunction-sections -funwind-tables -fstack-protector -fno-short-enums -D__ARM_ARCH_5__ -D__ARM_ARCH_5T__ -D__ARM_ARCH_5E__ -D__ARM_ARCH_5TE__ -Wno-psabi -march=armv5te -mtune=xscale -msoft-float -mthumb -Os -fomit-frame-pointer -fno-strict-aliasing -finline-limit=64 -DANDROID -Wa,--noexecstack -MMD -MP "
export CPPFLAGS="$CFLAGS"
export CFLAGS="$CFLAGS"
export CXXFLAGS="$CFLAGS"
export CXX="${CROSS_COMPILE}g++ --sysroot=$PLATFORM"
export CC="${CROSS_COMPILE}gcc --sysroot=$PLATFORM"
export NM="${CROSS_COMPILE}nm"
export STRIP="${CROSS_COMPILE}strip"
export RANLIB="${CROSS_COMPILE}ranlib"
export AR="${CROSS_COMPILE}ar"
export LDFLAGS="-Wl,-rpath-link=$PLATFORM/usr/lib -L$PLATFORM/usr/lib -nostdlib -lc -lm -ldl -llog"
cd fribidi
./configure \
--prefix=$(pwd)/$PREFIX \
--host=$ARCH-linux \
--disable-bin \
--disable-dependency-tracking \
--disable-shared \
--enable-static \
--with-pic \
$ADDITIONAL_CONFIGURE_FLAG \
|| exit 1
make clean || exit 1
make -j4 install || exit 1
cd ..
}
# Configures and cross-compiles FFmpeg against the libraries built above.
# Expects the per-ABI variables (ARCH, EABIARCH, PREBUILT, PREFIX,
# PLATFORM_VERSION, OPTIMIZE_CFLAGS, ADDITIONAL_*) set at the end of the file.
# FIX: removed the duplicated --enable-hwaccel=h264_vaapi and
# --disable-ffplay flags present in the original configure invocation.
function build_ffmpeg
{
PLATFORM=$NDK/platforms/$PLATFORM_VERSION/arch-$ARCH/
CC=$PREBUILT/bin/$EABIARCH-gcc
CROSS_PREFIX=$PREBUILT/bin/$EABIARCH-
PKG_CONFIG=${CROSS_PREFIX}pkg-config
# FFmpeg's configure looks for a <triplet>-pkg-config binary; shim one that
# forwards to the host pkg-config when it is missing.
if [ ! -f $PKG_CONFIG ];
then
cat > $PKG_CONFIG << EOF
#!/bin/bash
pkg-config \$*
EOF
chmod u+x $PKG_CONFIG
fi
NM=$PREBUILT/bin/$EABIARCH-nm
cd ffmpeg
export PKG_CONFIG_LIBDIR=$(pwd)/$PREFIX/lib/pkgconfig/
export PKG_CONFIG_PATH=$(pwd)/$PREFIX/lib/pkgconfig/
./configure --target-os=linux \
--prefix=$PREFIX \
--enable-cross-compile \
--extra-libs="-lgcc" \
--arch=$ARCH \
--cc=$CC \
--cross-prefix=$CROSS_PREFIX \
--nm=$NM \
--sysroot=$PLATFORM \
--extra-cflags=" -O3 -DANDROID -DHAVE_SYS_UIO_H=1 -Dipv6mr_interface=ipv6mr_ifindex -fasm -Wno-psabi -fno-short-enums -finline-limit=300 $OPTIMIZE_CFLAGS " \
--disable-shared \
--enable-static \
--enable-runtime-cpudetect \
--extra-ldflags="-Wl,-rpath-link=$PLATFORM/usr/lib -L$PLATFORM/usr/lib -nostdlib -lc -lm -ldl -llog -L$PREFIX/lib" \
--extra-cflags="-I$PREFIX/include" \
--disable-everything \
--enable-libass \
--enable-libvo-aacenc \
--enable-libvo-amrwbenc \
--enable-hwaccel=h264_vaapi \
--enable-hwaccel=h264_dxva2 \
--enable-hwaccel=mpeg4_vaapi \
--enable-demuxer=ac3 \
--enable-demuxer=flv \
--enable-demuxer=mov \
--enable-demuxer=h264 \
--enable-demuxer=mpegvideo \
--enable-demuxer=h263 \
--enable-demuxer=mpegps \
--enable-demuxer=mjpeg \
--enable-demuxer=rtsp \
--enable-demuxer=rtp \
--enable-demuxer=hls \
--enable-demuxer=matroska \
--enable-demuxer=mp3 \
--enable-demuxer=pcm_u16le \
--enable-demuxer=pcm_s16le \
--enable-demuxer=pcm_u16be \
--enable-demuxer=pcm_s16be \
--enable-demuxer=ogg \
--enable-muxer=null \
--enable-muxer=mp2 \
--enable-muxer=flv \
--enable-muxer=rtsp \
--enable-muxer=mp4 \
--enable-muxer=mov \
--enable-muxer=mjpeg \
--enable-muxer=matroska \
--enable-muxer=wav \
--enable-muxer=pcm_u16le \
--enable-muxer=pcm_s16le \
--enable-muxer=pcm_u16be \
--enable-muxer=pcm_s16be \
--enable-muxer=ogg \
--enable-protocol=crypto \
--enable-protocol=file \
--enable-protocol=rtp \
--enable-protocol=tcp \
--enable-protocol=udp \
--enable-protocol=hls \
--enable-protocol=http \
--enable-decoder=mp3 \
--enable-decoder=flv \
--enable-decoder=xsub \
--enable-decoder=jacosub \
--enable-decoder=dvdsub \
--enable-decoder=dvbsub \
--enable-decoder=subviewer \
--enable-decoder=rawvideo \
--enable-decoder=mjpeg \
--enable-decoder=h263 \
--enable-decoder=mpeg4 \
--enable-decoder=h264 \
--enable-decoder=aac \
--enable-decoder=mp2 \
--enable-decoder=amrwb \
--enable-decoder=amrnb \
--enable-decoder=ac3 \
--enable-decoder=flac \
--enable-decoder=pcm_u16le \
--enable-decoder=pcm_s16le \
--enable-decoder=pcm_u16be \
--enable-decoder=pcm_s16be \
--enable-encoder=flv \
--enable-encoder=rawvideo \
--enable-encoder=mjpeg \
--enable-encoder=mpeg4 \
--enable-encoder=aac \
--enable-encoder=mp2 \
--enable-encoder=pcm_u16le \
--enable-encoder=pcm_s16le \
--enable-encoder=pcm_u16be \
--enable-encoder=pcm_s16be \
--enable-encoder=libvo_amrwbenc \
--enable-parsers \
--enable-parser=h264 \
--enable-parser=h263 \
--enable-parser=flac \
--enable-parser=aac \
--enable-parser=ac3 \
--enable-parser=mpegaudio \
--enable-parser=mpegvideo \
--enable-parser=mpeg4video \
--enable-bsfs \
--enable-decoders \
--enable-encoders \
--enable-hwaccels \
--enable-muxers \
--enable-avformat \
--enable-avcodec \
--enable-avresample \
--enable-zlib \
--disable-decoder=vorbis \
--disable-encoder=vorbis \
--disable-doc \
--disable-ffplay \
--disable-ffmpeg \
--disable-ffprobe \
--disable-ffserver \
--disable-avfilter \
--disable-avdevice \
--enable-libvorbis \
--enable-nonfree \
--enable-version3 \
--enable-memalign-hack \
--enable-asm \
$ADDITIONAL_FFMPEG_FLAG \
$ADDITIONAL_CONFIGURE_FLAG \
|| exit 1
make clean || exit 1
make -j4 install || exit 1
cd ..
}
# Links the per-target static libraries produced by build_ffmpeg and the
# dependency builds into one shared library ($OUT_LIBRARY, e.g. libffmpeg.so).
# Relies on the globals EABIARCH/ARCH/PREFIX/SONAME/OUT_LIBRARY/PREBUILT/
# PLATFORM_VERSION set by the per-ABI sections below.
function build_one {
cd ffmpeg
# Sysroot of the target Android platform inside the NDK.
PLATFORM=$NDK/platforms/$PLATFORM_VERSION/arch-$ARCH/
# Direct ld invocation: pulls every archive in whole (--whole-archive),
# forbids unresolved symbols (--no-undefined) and links against the
# Android dynamic linker. libgcc.a supplies compiler runtime helpers.
$PREBUILT/bin/$EABIARCH-ld -rpath-link=$PLATFORM/usr/lib -L$PLATFORM/usr/lib -L$PREFIX/lib -soname $SONAME -shared -nostdlib -z noexecstack -Bsymbolic --whole-archive --no-undefined -o $OUT_LIBRARY -lavcodec -lavformat -lavresample -lavutil -lswresample -lass -lfreetype -lfribidi -lswscale -logg -lvorbis -lvorbisenc -lvo-aacenc -lvo-amrwbenc -lc -lm -lz -ldl -llog --dynamic-linker=/system/bin/linker -zmuldefs $PREBUILT/lib/gcc/$EABIARCH/$NDK_TOOLCHAIN/libgcc.a || exit 1
cd ..
}
# Builds every dependency in order (codecs first, then libraries that
# depend on them), then ffmpeg itself, and finally links the combined
# shared library for the currently configured target ABI.
# NOTE: the order matters — later builds link against earlier outputs.
function build_all {
build_x264
build_ogg
build_vorbis
build_amr
build_aac
build_fribidi
build_freetype
build_ass
build_ffmpeg
build_one
}
##GLOBAL
NDK_TOOLCHAIN=4.6

# Each section below configures the globals consumed by build_all/build_one
# for one target ABI and then runs the full build.

#x86
EABIARCH=i686-linux-android
ARCH=x86
# Fix: CPU was previously unset at this point, so ADDITIONAL_FFMPEG_FLAG
# expanded to a bare "--cpu=" being passed to ffmpeg's configure.
CPU=i686
TARGET=x86
OPTIMIZE_CFLAGS="-m32"
PREFIX="../ffmpeg-build/$ARCH"
OUT_LIBRARY=$PREFIX/libffmpeg.so
ADDITIONAL_CONFIGURE_FLAG="--disable-asm"
ADDITIONAL_FFMPEG_FLAG="--cpu=$CPU"
SONAME=libffmpeg.so
PREBUILT=$NDK/toolchains/x86-$NDK_TOOLCHAIN/prebuilt/$NDK_ARCH
PLATFORM_VERSION=android-9
build_all

#mips
EABIARCH=mipsel-linux-android
ARCH=mips
TARGET=mips
OPTIMIZE_CFLAGS="-EL -march=mips32 -mips32 -mhard-float"
PREFIX="../ffmpeg-build/$ARCH"
OUT_LIBRARY=$PREFIX/libffmpeg.so
ADDITIONAL_CONFIGURE_FLAG="--disable-mips32r2"
ADDITIONAL_FFMPEG_FLAG=
SONAME=libffmpeg.so
PREBUILT=$NDK/toolchains/mipsel-linux-android-$NDK_TOOLCHAIN/prebuilt/$NDK_ARCH
PLATFORM_VERSION=android-9
build_all

#arm v7vfpv3
EABIARCH=arm-linux-androideabi
ARCH=arm
CPU=armv7-a
TARGET=armeabi-v7a
OPTIMIZE_CFLAGS="-mfloat-abi=softfp -mfpu=vfpv3-d16 -marm -march=$CPU "
PREFIX=../ffmpeg-build/armeabi-v7a
OUT_LIBRARY=$PREFIX/libffmpeg.so
ADDITIONAL_CONFIGURE_FLAG=
ADDITIONAL_FFMPEG_FLAG="--cpu=$CPU"
SONAME=libffmpeg.so
PREBUILT=$NDK/toolchains/arm-linux-androideabi-$NDK_TOOLCHAIN/prebuilt/$NDK_ARCH
PLATFORM_VERSION=android-9
build_all

#arm v7 + neon (neon also include vfpv3-32)
EABIARCH=arm-linux-androideabi
ARCH=arm
CPU=armv7-a
TARGET=armeabi-v7a
OPTIMIZE_CFLAGS="-mfloat-abi=softfp -mfpu=neon -marm -march=$CPU -mtune=cortex-a8 -mthumb -D__thumb__ "
# Fix: directory name was misspelled "armeavi-v7a-neon".
PREFIX="../ffmpeg-build/armeabi-v7a-neon"
# The NEON variant is placed alongside the plain v7a library under a
# distinct file name so both can ship in one APK.
OUT_LIBRARY=../ffmpeg-build/armeabi-v7a/libffmpeg-neon.so
ADDITIONAL_CONFIGURE_FLAG="--enable-neon"
ADDITIONAL_FFMPEG_FLAG="--cpu=$CPU"
SONAME=libffmpeg-neon.so
PREBUILT=$NDK/toolchains/arm-linux-androideabi-$NDK_TOOLCHAIN/prebuilt/$NDK_ARCH
PLATFORM_VERSION=android-9
build_all
|
<table border="1" width="100%">
<tr>
<th></th>
</tr>
<tr>
<td></td>
</tr>
</table>
|
<filename>JavaScript Algorithms and Data Structures Certification (300 hours)/Debugging/10. Catch Off By One Errors When Using Indexing.js
/* Fix the two indexing errors in the following function so all the
numbers 1 through 5 are printed to the console.
(1) Your code should set the initial condition of the loop so it starts
at the first index.
(2) Your code should fix the initial condition of the loop so that the
index starts at 0.
(3) Your code should set the terminal condition of the loop so it stops
at the last index.
(4) Your code should fix the terminal condition of the loop so that it
stops at 1 before the length. */
// Prints each character of "12345" to the console, one per line.
function countToFive() {
  const firstFive = "12345";
  let position = 0;
  // Walk the string from the first index up to (but not including) its length.
  while (position < firstFive.length) {
    // Do not alter code below this line
    console.log(firstFive[position]);
    position += 1;
  }
}
countToFive();
|
package net.silentchaos512.iconify.data.icon;

import com.google.gson.JsonObject;
import net.minecraft.util.ResourceLocation;
import net.silentchaos512.iconify.icon.IconSerializers;

/**
 * Data-generation builder for "mod ID" icons: icons whose definition is tied
 * to a specific mod, serialized with the {@code MOD_ID} serializer.
 */
public class ModIdIconBuilder extends IconBuilder {
    // The mod ID this icon references; written to JSON as "mod_id".
    private final String modId;

    /**
     * @param id    resource location of the icon being built
     * @param modId ID of the mod this icon is associated with
     */
    public ModIdIconBuilder(ResourceLocation id, String modId) {
        super(id, IconSerializers.MOD_ID);
        this.modId = modId;
    }

    /** Serializes the base icon JSON and appends the "mod_id" property. */
    @Override
    public JsonObject serialize() {
        JsonObject json = super.serialize();
        json.addProperty("mod_id", this.modId);
        return json;
    }
}
|
// Returns the largest value in the n x n top-left submatrix of `arr`.
// Assumes n >= 1: arr[0][0] seeds the running maximum.
int findMax(int arr[][N], int n)
{
    int best = arr[0][0];
    for (int row = 0; row < n; ++row) {
        for (int col = 0; col < n; ++col) {
            best = std::max(best, arr[row][col]);
        }
    }
    return best;
}
|
<reponame>googleapis/googleapis-gen<gh_stars>1-10
# Generated by the protocol buffer compiler. DO NOT EDIT!
# Source: google/cloud/dialogflow/v2beta1/context.proto for package 'google.cloud.dialogflow.v2beta1'
# Original file comments:
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'grpc'
require 'google/cloud/dialogflow/v2beta1/context_pb'
module Google
  module Cloud
    module Dialogflow
      module V2beta1
        module Contexts
          # Service for managing [Contexts][google.cloud.dialogflow.v2beta1.Context].
          class Service
            include ::GRPC::GenericService

            self.marshal_class_method = :encode
            self.unmarshal_class_method = :decode
            self.service_name = 'google.cloud.dialogflow.v2beta1.Contexts'

            # Returns the list of all contexts in the specified session.
            rpc :ListContexts, ::Google::Cloud::Dialogflow::V2beta1::ListContextsRequest, ::Google::Cloud::Dialogflow::V2beta1::ListContextsResponse
            # Retrieves the specified context.
            rpc :GetContext, ::Google::Cloud::Dialogflow::V2beta1::GetContextRequest, ::Google::Cloud::Dialogflow::V2beta1::Context
            # Creates a context.
            #
            # If the specified context already exists, overrides the context.
            rpc :CreateContext, ::Google::Cloud::Dialogflow::V2beta1::CreateContextRequest, ::Google::Cloud::Dialogflow::V2beta1::Context
            # Updates the specified context.
            rpc :UpdateContext, ::Google::Cloud::Dialogflow::V2beta1::UpdateContextRequest, ::Google::Cloud::Dialogflow::V2beta1::Context
            # Deletes the specified context.
            rpc :DeleteContext, ::Google::Cloud::Dialogflow::V2beta1::DeleteContextRequest, ::Google::Protobuf::Empty
            # Deletes all active contexts in the specified session.
            rpc :DeleteAllContexts, ::Google::Cloud::Dialogflow::V2beta1::DeleteAllContextsRequest, ::Google::Protobuf::Empty
          end

          # Client stub class generated from the service definition above.
          Stub = Service.rpc_stub_class
        end
      end
    end
  end
end
|
<reponame>stalynbados/argon_lp2
/** Shape of a course record as delivered by the backend. */
export interface Curso {
  /** Unique identifier of the course. */
  id: number;
  /** Course name. */
  curso_nombre: string;
  /** Course description. */
  curso_descripcion: string;
  /** Numeric status flag — semantics defined by the backend; TODO confirm values. */
  curso_estado: number;
}
|
<gh_stars>0
import tempfile
import uuid
from dagster import build_sensor_context, validate_run_config
from dagster.core.instance import DagsterInstance
from dagster.core.storage.pipeline_run import PipelineRun, PipelineRunStatus
from hacker_news.pipelines.dbt_pipeline import dbt_pipeline
from hacker_news.pipelines.download_pipeline import DEFAULT_PARTITION_RESOURCE_CONFIG
from hacker_news.sensors.download_pipeline_finished_sensor import dbt_on_hn_download_finished
def test_no_runs():
    """Sensor yields no run requests when the instance has no runs at all."""
    run_requests = list(
        dbt_on_hn_download_finished(build_sensor_context(instance=DagsterInstance.local_temp()))
    )
    assert len(run_requests) == 0
def test_no_runs_for_different_pipeline():
    """A successful run of an unrelated pipeline must not trigger the sensor."""
    with tempfile.TemporaryDirectory() as temp_dir:
        instance = DagsterInstance.local_temp(temp_dir)
        instance.add_run(
            PipelineRun(status=PipelineRunStatus.SUCCESS, mode="prod", pipeline_name="xyz")
        )
        run_requests = list(dbt_on_hn_download_finished(build_sensor_context(instance=instance)))
        assert len(run_requests) == 0
def test_no_runs_for_different_mode():
    """A download_pipeline run in a non-prod mode must not trigger the sensor."""
    with tempfile.TemporaryDirectory() as temp_dir:
        instance = DagsterInstance.local_temp(temp_dir)
        instance.add_run(
            PipelineRun(
                status=PipelineRunStatus.SUCCESS, mode="xyz", pipeline_name="download_pipeline"
            )
        )
        run_requests = list(dbt_on_hn_download_finished(build_sensor_context(instance=instance)))
        assert len(run_requests) == 0
def test_no_runs_for_failed_run():
    """A failed download_pipeline run must not trigger the sensor, even in prod."""
    with tempfile.TemporaryDirectory() as temp_dir:
        instance = DagsterInstance.local_temp(temp_dir)
        instance.add_run(
            PipelineRun(
                status=PipelineRunStatus.FAILURE,
                mode="prod",
                pipeline_name="download_pipeline",
                run_config={"resources": DEFAULT_PARTITION_RESOURCE_CONFIG},
            )
        )
        run_requests = list(dbt_on_hn_download_finished(build_sensor_context(instance=instance)))
        assert len(run_requests) == 0
def test_no_runs_for_invalid_config():
    """A run whose run_config does not match the expected schema must be ignored.

    NOTE(review): the run here also has FAILURE status, so the invalid config
    is not the only reason it is skipped — TODO confirm intent of the fixture.
    """
    with tempfile.TemporaryDirectory() as temp_dir:
        instance = DagsterInstance.local_temp(temp_dir)
        instance.add_run(
            PipelineRun(
                status=PipelineRunStatus.FAILURE,
                mode="prod",
                pipeline_name="download_pipeline",
                run_config={"I'm some config": {"that is not": "valid"}},
            )
        )
        run_requests = list(dbt_on_hn_download_finished(build_sensor_context(instance=instance)))
        assert len(run_requests) == 0
def test_multiple_runs_for_successful_runs():
    """Each qualifying run (prod + success + valid config) yields one valid request.

    Mixes four qualifying runs with non-qualifying ones (wrong status, mode,
    or pipeline name) and expects exactly four run requests, each carrying a
    run_config valid for dbt_pipeline.
    """
    def get_should_launch_run():
        # A run that satisfies every sensor condition; unique run_id per call.
        return PipelineRun(
            run_id=str(uuid.uuid4()),
            status=PipelineRunStatus.SUCCESS,
            mode="prod",
            pipeline_name="download_pipeline",
            run_config={"resources": DEFAULT_PARTITION_RESOURCE_CONFIG},
        )
    with tempfile.TemporaryDirectory() as temp_dir:
        instance = DagsterInstance.local_temp(temp_dir)
        for run in [
            get_should_launch_run(),
            PipelineRun(
                status=PipelineRunStatus.FAILURE, mode="prod", pipeline_name="download_pipeline"
            ),
            PipelineRun(
                status=PipelineRunStatus.SUCCESS, mode="dev", pipeline_name="weird_pipeline"
            ),
            PipelineRun(
                status=PipelineRunStatus.SUCCESS, mode="test", pipeline_name="download_pipeline"
            ),
            PipelineRun(status=PipelineRunStatus.SUCCESS, mode="prod", pipeline_name="other"),
            get_should_launch_run(),
            get_should_launch_run(),
            get_should_launch_run(),
        ]:
            instance.add_run(run)
        run_requests = list(dbt_on_hn_download_finished(build_sensor_context(instance=instance)))
        assert len(run_requests) == 4
        for run_request in run_requests:
            assert validate_run_config(dbt_pipeline, run_request.run_config)
|
<gh_stars>1-10
import { HardhatArguments, HardhatParamDefinitions, TaskArguments, TaskDefinition } from "../../types";
/**
 * Parses Hardhat command-line arguments: global (hardhat) options first,
 * then the task name and its task-specific arguments.
 */
export declare class ArgumentsParser {
    /** Prefix used for long CLI flags, e.g. "--network". */
    static readonly PARAM_PREFIX = "--";
    /** Converts an internal param name to its CLI flag form. */
    static paramNameToCLA(paramName: string): string;
    /** Converts a CLI flag back to its internal param name. */
    static cLAToParamName(cLA: string): string;
    /**
     * Splits raw CLI arguments into resolved hardhat arguments, the task
     * name (if present), and the remaining unparsed arguments.
     * Environment-variable-sourced values act as fallbacks.
     */
    parseHardhatArguments(hardhatParamDefinitions: HardhatParamDefinitions, envVariableArguments: HardhatArguments, rawCLAs: string[]): {
        hardhatArguments: HardhatArguments;
        taskName?: string;
        unparsedCLAs: string[];
    };
    /** Parses the task-specific arguments for a resolved task definition. */
    parseTaskArguments(taskDefinition: TaskDefinition, rawCLAs: string[]): TaskArguments;
    private _parseTaskParamArguments;
    private _addHardhatDefaultArguments;
    private _addTaskDefaultArguments;
    private _isCLAParamName;
    private _hasCLAParamNameFormat;
    private _parseArgumentAt;
    private _parsePositionalParamArgs;
}
//# sourceMappingURL=ArgumentsParser.d.ts.map
|
<reponame>TSchlosser13/UOCTE<filename>src/converter.cpp
#include "converter.hpp"
namespace Converter
{
// Loads the OCT data file at `path` into a freshly allocated oct_subject,
// storing the pointer in *mySubject (ownership passes to the caller).
// Returns 0 on success, -1 on an empty path or a parse failure.
// NOTE(review): an empty scans list is only logged, not treated as failure.
int load(const QString &path, oct_subject** mySubject)
{
    if (path == "")
        return -1;
    else
    {
        try
        {
            // oct_subject's constructor parses the file at the given path.
            *mySubject = new oct_subject(path.toLocal8Bit().data());
            if ((*mySubject)->scans.empty())
                qDebug() << "No scans in this file." << endl;
        }
        catch (std::exception &e)
        {
            qDebug() << "Error: " << e.what() << endl;
            return -1;
        }
    }
    return 0;
}
// Writes *mySubject to `path` in UOCTML format via save_uoctml.
// When `anonymize` is true, personal data is stripped by save_uoctml.
// Returns 0 on success, -1 on an empty path, null subject, or write error.
int save(const QString &path, oct_subject** mySubject, bool anonymize)
{
    try
    {
        //uoctml
        if (path != "" && *mySubject != NULL)
            save_uoctml(path.toLocal8Bit().data(), **mySubject, anonymize);
        else
            return -1;
    }
    catch (std::exception &e)
    {
        qDebug() << "Error: " << e.what() << endl;
        return -1;
    }
    return 0;
}
// Batch-converts every file in `inputPaths` to UOCTML inside an
// "anonymizedData" folder next to the executable, updating `progress`
// and logging to `output` along the way. Also collects per-patient
// sector/contour statistics into patient_list.xml for the timeline view.
// NOTE(review): load() returning -1 is reported as "No OCT scans found"
// even though it also covers open/parse errors — message may mislead.
void toUoctml(QStringList &inputPaths, QProgressDialog &progress, QPlainTextEdit &output, bool anonymized)
{
    progress.setValue(0);
    oct_subject* mySubject = NULL;
    unsigned int numLoadable = 0, numSuccess = 0, numTested = 0;
    //create folder "anonymizedData" if it does not exist yet
    //output.appendPlainText("Creating Folder");
    QDir saveFolder(QCoreApplication::applicationDirPath()
                    .append(QDir::separator())
                    .append("anonymizedData"));
    saveFolder.mkdir(".");
    //create XML file for the timeline Visualization
    //output.appendPlainText("Loading XML file");
    xmlPatientList patientList("patient_list.xml");
    //Convert all given OCT data into uoctml format
    int i = 0;
    foreach (QString inputPath, inputPaths)
    {
        numTested++;
        output.appendPlainText(QString("Loading %1 ... ").arg(QFileInfo(inputPath).fileName()));
        output.moveCursor(QTextCursor::End);
        //Check for existing file: skip conversion when the target already exists
        QString newFileName = QFileInfo(inputPath).completeBaseName().append(".uoctml");
        if (saveFolder.exists(newFileName)) {
            output.insertPlainText("exists already");
            output.moveCursor (QTextCursor::End);
        }
        else
        {
            if ( load(inputPath, &mySubject) == -1) {
                output.insertPlainText("No OCT scans found in this file.");
                output.moveCursor (QTextCursor::End);
            }
            else
            {
                output.insertPlainText("done");
                numLoadable++;
                //extract Info for the patient list entry
                xmlPatientList::patient_scan patientInfo;
                patientInfo.id = mySubject->info.at("ID");
                // Personal fields are only copied when NOT anonymizing.
                if (anonymized == false) {
                    if (mySubject->info.find("name") != mySubject->info.end())
                        patientInfo.name = mySubject->info.at("name");
                    if (mySubject->info.find("birth date") != mySubject->info.end())
                        patientInfo.birth = mySubject->info.at("birth date");
                    if (mySubject->info.find("sex") != mySubject->info.end())
                        patientInfo.sex = mySubject->info.at("sex");
                }
                for (auto scan = mySubject->scans.begin(); scan != mySubject->scans.end(); ++scan)
                {
                    //fixation might not be set, but if it is, it has to be "macula"
                    if (scan->second.info["fixation"] == "" || scan->second.info["fixation"] == "macula")
                    {
                        if (scan->second.info.find("scan date") != scan->second.info.end())
                            patientInfo.scan_date = scan->second.info.at("scan date");
                        if (scan->second.info.find("laterality") != scan->second.info.end())
                            patientInfo.laterality = scan->second.info.at("laterality");
                        calculateSectorValues(scan->second, patientInfo.sectorValues, patientInfo.totalVolume);
                        // Scale from mm to micrometers (factor 1000) — TODO confirm units.
                        for (int i = 0; i < 9; ++i)
                            patientInfo.sectorValues[i] *= 1000;
                        patientInfo.contourValues.clear();
                        calculateContourValues(scan->second, patientInfo.contourValues);
                        for (auto contour = patientInfo.contourValues.begin(); contour != patientInfo.contourValues.end(); ++contour)
                            for (int i = 0; i < 9; ++i)
                                (*contour)[i] *= 1000;
                        patientList.addEntry(patientInfo);
                    }
                }
            }
            output.appendPlainText("Converting to uoctml... ");
            output.moveCursor(QTextCursor::End);
            if ( save(saveFolder.absoluteFilePath(newFileName), &mySubject, anonymized) == -1) {
                output.insertPlainText("failed");
                output.moveCursor (QTextCursor::End);
            }
            else {
                output.insertPlainText("done");
                output.moveCursor (QTextCursor::End);
                numSuccess++;
            }
            delete mySubject;
            mySubject = NULL;
        }
        progress.setValue(++i);
        if (progress.wasCanceled()) {
            output.appendPlainText(QString("\nAborting...\n%1 files aborted").arg(inputPaths.size() - numTested));
            break;
        }
    }
    patientList.save();
    output.appendPlainText(QString("\n%1 of %2 loadable files have been converted").arg(numSuccess).arg(numLoadable));
    output.appendPlainText("This Application can now be closed.");
}
// Computes the mean retinal thickness (difference between two contour
// surfaces) over a 9-sector circular grid centered on the scan, plus the
// total volume within the 3 mm radius. The radii (0.5 / 1.5 / 3.0 mm via
// r2 <= 0.25 / 2.25 / 9.0) and the 4-way diagonal split suggest an
// ETDRS-style grid — TODO confirm against the project's grid definition.
//
// sectorValues receives {center, near-o/r/l/u, far-o/r/l/u} means;
// totalVolume receives thickness integrated over the 3 mm disc.
// contour_1/contour_2 select contours by index; negative values (the
// declared defaults — presumably set in the header) select the first and
// last valid contour.
//
// NOTE(review): if a sector receives no samples its count stays 0 and the
// division below yields NaN/inf — callers should be aware.
// NOTE(review): `depth` is filled but never read here; m_aspect and
// m_width_in_mm are unused — likely leftovers from a rendering path.
void calculateSectorValues(const oct_scan &m_scan, std::vector<double> &sectorValues, double &totalVolume, int contour_1, int contour_2)
{
    //find 2 contours of oct data to calculate their difference
    int test = m_scan.contours.size();
    if ((contour_1 >= test) || (contour_2 >= test)) {
        qDebug() << "Not enough contours in the scan";
        return;
    }
    std::map<std::string, image<float>>::const_iterator m_base;
    std::map<std::string, image<float>>::const_iterator m_other;
    //default: first and last usable contour
    if (contour_1 < 0 && contour_2 < 0) {
        m_base = m_scan.contours.begin();
        m_other = m_scan.contours.end();
        m_other--; //use last contour
        //ignore NaN as last contour (found in E2E files)
        //use center of image as example for validation
        float test = m_other->second(m_other->second.width()/2,m_other->second.height()/2);
        if (test != test)  // NaN check: NaN != NaN
            m_other--;
        if (m_base == m_other) {
            qDebug() << "No Contour available";
            return;
        }
    }
    else if (contour_1 >= contour_2) {
        qDebug() << "Contour 2 must be greater than Contour 1";
        return;
    }
    else {
        // Advance map iterators to the requested contour indices.
        m_base = m_scan.contours.begin();
        for (int i = 0; i < contour_1; ++i)
            m_base++;
        m_other = m_scan.contours.begin();
        for (int i = 0; i < contour_2; ++i)
            m_other++;
    }
    const image<float> &o1 = m_base->second;
    const image<float> &o2 = m_other->second;
    size_t X = o1.width();
    size_t Y = o1.height();
    // Both contour images must share the same raster dimensions.
    if (X != o2.width() || Y != o2.height())
        return;
    //sector values (accumulators; c = center, n* = near ring, f* = far ring)
    double c = 0, no = 0, nr = 0, nl = 0, nu = 0, fo = 0, fr = 0, fl = 0, fu = 0, t = 0, v = 0;
    double m_aspect = m_scan.size[0] / m_scan.size[2];
    double m_width_in_mm = m_scan.size[0];
    std::unique_ptr<uint8_t []> depth(new uint8_t [X*Y]);
    // Per-sector sample counts used to average the accumulated sums.
    size_t nc = 0, nno = 0, nnr = 0, nnl = 0, nnu = 0, nfo = 0, nfr = 0, nfl = 0, nfu = 0, n = 0;
    for (size_t y = 0; y != Y; ++y)
    {
        for (size_t x = 0; x != X; ++x)
        {
            double d = std::abs(m_scan.size[1] / m_scan.tomogram.height() * (o2(x, y) - o1(x, y))); // thickness in mm
            double mx = m_scan.size[0] * (2 * x + 1.0 - X) / 2 / X; // x-distance in mm
            double my = m_scan.size[2] * (2 * y + 1.0 - Y) / 2 / Y; // y-distance in mm
            double r2 = mx*mx + my*my; // square of distance
            if (d != d) // skip NaNs
            {
                depth[y*X+x] = 255;
                continue;
            }
            // total volume (everything within the 3 mm radius)
            if (r2 <= 9.0)
            {
                t += d;
                ++n;
            }
            depth[y*X+x] = std::min(255.0 / 0.5 * d, 255.0);
            // sector thickness: center disc, then near/far rings split
            // into quadrants along the two diagonals.
            if (r2 <= 0.25)
            {
                c += d;
                ++nc;
            }
            else if (r2 <= 2.25)
            {
                if (mx <= my)
                {
                    if (mx + my >= 0)
                    {
                        nu += d;
                        ++nnu;
                    }
                    else
                    {
                        nl += d;
                        ++nnl;
                    }
                }
                else
                {
                    if (mx + my >= 0)
                    {
                        nr += d;
                        ++nnr;
                    }
                    else
                    {
                        no += d;
                        ++nno;
                    }
                }
            }
            else if (r2 <= 9.0)
            {
                if (mx <= my)
                {
                    if (mx + my >= 0)
                    {
                        fu += d;
                        ++nfu;
                    }
                    else
                    {
                        fl += d;
                        ++nfl;
                    }
                }
                else
                {
                    if (mx + my >= 0)
                    {
                        fr += d;
                        ++nfr;
                    }
                    else
                    {
                        fo += d;
                        ++nfo;
                    }
                }
            }
        }
    }
    // average each sector sum by its sample count
    c /= nc;
    no /= nno;
    nr /= nnr;
    nl /= nnl;
    nu /= nnu;
    fo /= nfo;
    fr /= nfr;
    fl /= nfl;
    fu /= nfu;
    // volume: mean thickness times the physical pixel footprint
    v = t * m_scan.size[0] * m_scan.size[2] / X / Y;
    t /= n;
    sectorValues = std::vector<double>({c,no,nr,nl,nu,fo,fr,fl,fu});
    totalVolume = v;
}
// For every valid (non-NaN) contour in the scan, computes the same
// 9-sector mean values as calculateSectorValues, but for the absolute
// height of each single contour rather than the difference of two.
// One 9-element vector per contour is appended to contourValues.
//
// NOTE(review): this duplicates the sector-grid logic of
// calculateSectorValues almost verbatim — candidate for refactoring into
// a shared helper (kept as-is here; doc-only change).
// NOTE(review): same division-by-zero caveat when a sector has no samples;
// `depth`, m_aspect, m_width_in_mm, v/t and the outer `contour` declaration
// (shadowed by the loop variable) are unused leftovers.
void calculateContourValues(const oct_scan &m_scan, std::vector<std::vector<double> > &contourValues)
{
    std::map<std::string, image<float>>::const_iterator contour;
    for (auto contour = m_scan.contours.begin(); contour != m_scan.contours.end(); ++contour)
    {
        //ignore NaN (checked at the image center, as in calculateSectorValues)
        float test = contour->second(contour->second.width()/2,contour->second.height()/2);
        if (test != test)
            continue;
        const image<float> &o1 = contour->second;
        size_t X = o1.width();
        size_t Y = o1.height();
        double c = 0, no = 0, nr = 0, nl = 0, nu = 0, fo = 0, fr = 0, fl = 0, fu = 0, t = 0, v = 0;
        double m_aspect = m_scan.size[0] / m_scan.size[2];
        double m_width_in_mm = m_scan.size[0];
        std::unique_ptr<uint8_t []> depth(new uint8_t [X*Y]);
        size_t nc = 0, nno = 0, nnr = 0, nnl = 0, nnu = 0, nfo = 0, nfr = 0, nfl = 0, nfu = 0, n = 0;
        for (size_t y = 0; y != Y; ++y)
        {
            for (size_t x = 0; x != X; ++x)
            {
                double d = std::abs(m_scan.size[1] / m_scan.tomogram.height() * (o1(x, y))); // thickness in mm
                double mx = m_scan.size[0] * (2 * x + 1.0 - X) / 2 / X; // x-distance in mm
                double my = m_scan.size[2] * (2 * y + 1.0 - Y) / 2 / Y; // y-distance in mm
                double r2 = mx*mx + my*my; // square of distance
                if (d != d) // skip NaNs
                {
                    depth[y*X+x] = 255;
                    continue;
                }
                // total volume
                if (r2 <= 9.0)
                {
                    t += d;
                    ++n;
                }
                depth[y*X+x] = std::min(255.0 / 0.5 * d, 255.0);
                // sector thickness: center disc, near ring, far ring,
                // quadrants split along the diagonals.
                if (r2 <= 0.25)
                {
                    c += d;
                    ++nc;
                }
                else if (r2 <= 2.25)
                {
                    if (mx <= my)
                    {
                        if (mx + my >= 0)
                        {
                            nu += d;
                            ++nnu;
                        }
                        else
                        {
                            nl += d;
                            ++nnl;
                        }
                    }
                    else
                    {
                        if (mx + my >= 0)
                        {
                            nr += d;
                            ++nnr;
                        }
                        else
                        {
                            no += d;
                            ++nno;
                        }
                    }
                }
                else if (r2 <= 9.0)
                {
                    if (mx <= my)
                    {
                        if (mx + my >= 0)
                        {
                            fu += d;
                            ++nfu;
                        }
                        else
                        {
                            fl += d;
                            ++nfl;
                        }
                    }
                    else
                    {
                        if (mx + my >= 0)
                        {
                            fr += d;
                            ++nfr;
                        }
                        else
                        {
                            fo += d;
                            ++nfo;
                        }
                    }
                }
            }
        }
        // average each sector sum by its sample count
        c /= nc;
        no /= nno;
        nr /= nnr;
        nl /= nnl;
        nu /= nnu;
        fo /= nfo;
        fr /= nfr;
        fl /= nfl;
        fu /= nfu;
        v = t * m_scan.size[0] * m_scan.size[2] / X / Y;
        t /= n;
        contourValues.push_back(std::vector<double>({c,no,nr,nl,nu,fo,fr,fl,fu}));
    }
}
}
|
# Runs Weka AdaBoostM1 (with a J48 base learner) over one training set,
# sweeping boosting iterations and J48 confidence factors, and appending
# all results to a per-dataset report file.
#
#   $1 = report section title
#   $2 = training .arff file
#   $3 = output report file
#
# This replaces three copy-pasted, otherwise identical sections.
run_adaboost() {
    echo "-------------------------------$1----------------------------------" >> "$3"
    for iter in 10 20 40
    do
        # Pruned J48 trees at three confidence factors.
        for confidence in 0.125 0.25 0.5
        do
            echo "------------------------------------------------------------------------------------------------" >> "$3"
            echo "confidence:" $confidence ", iter: " $iter >> "$3"
            java -classpath weka.jar weka.classifiers.meta.AdaBoostM1 \
                -t "$2" \
                -P 100 \
                -S 1 \
                -I $iter \
                -W weka.classifiers.trees.J48 \
                -- -C $confidence \
                -M 2 \
                >> "$3"
        done
        # One additional run per iteration count with an unpruned tree (-U).
        echo "------------------------------------------------------------------------------------------------" >> "$3"
        echo "confidence: unpruned, iter: " $iter >> "$3"
        java -classpath weka.jar weka.classifiers.meta.AdaBoostM1 \
            -t "$2" \
            -P 100 \
            -S 1 \
            -I $iter \
            -W weka.classifiers.trees.J48 \
            -- -U \
            -M 2 \
            >> "$3"
    done
}

run_adaboost "Wine Quality Red AdaBoost" ./dataset/winequality-red_training_70.arff AdaBoost_wine_red
run_adaboost "Wine Quality White AdaBoost" ./dataset/winequality-white_training_70.arff AdaBoost_wine_white
run_adaboost "MUSK AdaBoost" ./dataset/clean2_training_70.arff AdaBoost_musk
|
#!/usr/bin/env python3
'''
This script checks the metadata.json against a set of rules for allowed
values. This allows degradation in results to be flagged as an error
in the build.

The rules file maps metadata field names to a rule object:

{
    "<field>": {
        "value": <reference_value>,
        "compare": "<operator>"
    },
    ...
}

field   is the name of a field in the metadata file
value   is the reference value to compare to
compare can be one of "<", ">", "<=", ">=", "==", "!=".

Values are converted to floats for comparison if possible.
'''
from os.path import isfile
import argparse
import json
import operator
import sys

parser = argparse.ArgumentParser(
    description='Checks metadata from OpenROAD flow against a set of rules')
parser.add_argument('--metadata', '-m', required=True,
                    help='The metadata file')
parser.add_argument('--rules', '-r', required=True, nargs='+',
                    help='The rules file')
args = parser.parse_args()

with open(args.metadata) as metadataFile:
    metadata = json.load(metadataFile)

# Merge all rule files into one field -> rule mapping; later files win.
rules = dict()
for filePath in args.rules:
    if isfile(filePath):
        with open(filePath) as rulesFile:
            rules.update(json.load(rulesFile))
    else:
        print(f"[WARN] File {filePath} not found")

if len(rules) == 0:
    print('No rules')
    sys.exit(1)


def try_number(string):
    '''
    Convert to a float if possible; otherwise return the input unchanged.
    '''
    try:
        return float(string)
    except ValueError:
        return string


# Supported comparison operators, mapped to their functional form.
ops = {
    '<': operator.lt,
    '>': operator.gt,
    '<=': operator.le,
    '>=': operator.ge,
    '==': operator.eq,
    '!=': operator.ne,
}

ERRORS = 0
for field, rule in rules.items():
    compare = rule['compare']
    op = ops[compare]
    # A rule naming a field absent from the metadata is itself an error
    # (previously this raised an uncaught KeyError and aborted the run).
    if field not in metadata:
        print(f"Error: field {field} not found in metadata file")
        ERRORS += 1
        continue
    rule_value = try_number(rule['value'])
    build_value = try_number(metadata[field])
    # Both sides must be numeric for the comparison to be meaningful.
    formatError = list()
    if not isinstance(rule_value, float):
        formatError.append('rule_value')
    if not isinstance(build_value, float):
        formatError.append('build_value')
    if len(formatError) != 0:
        print(f"Error: field {field}, has invalid float format for "
              f"{', '.join(formatError)}")
        ERRORS += 1
        continue
    if op(build_value, rule_value):
        PRE = '[INFO]'
        CHECK = 'pass'
    else:
        PRE = '[ERROR]'
        CHECK = 'fail'
        ERRORS += 1
    print(PRE, field, CHECK, 'test:', build_value, compare, rule_value)

if ERRORS == 0:
    print(f"All metadata rules passed ({len(rules)} rules)")
else:
    print(f"Failed metadata checks: {ERRORS} out of {len(rules)}")
sys.exit(1 if ERRORS else 0)
|
#!/usr/bin/env bash
# MIT License
# Copyright (c) 2021 Martín Montes
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

# Runs the Kubernetes code-generator to produce clientset/informers/listers
# for the rds v1alpha1 API group.
set -o errexit
set -o nounset
set -o pipefail

SCRIPT_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
# Locate code-generator: prefer the vendored copy, fall back to a sibling
# checkout (../code-generator). Overridable via the HACK_PKG env var.
HACK_PKG=${HACK_PKG:-$(
cd "${SCRIPT_ROOT}"
ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator
)}
GO_PKG="github.com/eumel8/echoperator/pkg"
# Arguments: generators to run, output package, apis package, group:version.
bash "${HACK_PKG}"/generate-groups.sh "all" \
${GO_PKG}/rds/v1alpha1/apis \
${GO_PKG} \
rds:v1alpha1 \
--go-header-file "${SCRIPT_ROOT}"/hack/boilerplate.go.txt
|
def list_primes(start, end):
    """Return all primes in the inclusive range [start, end], ascending.

    The original tried every divisor in range(2, num) for each candidate
    (O(n) per candidate); this version stops at sqrt(num), producing the
    same list far faster. Values below 2 are never prime and are skipped.
    """
    primes = []
    for num in range(max(start, 2), end + 1):
        is_prime = True
        divisor = 2
        # A composite number must have a factor no larger than its square root.
        while divisor * divisor <= num:
            if num % divisor == 0:
                is_prime = False
                break
            divisor += 1
        if is_prime:
            primes.append(num)
    return primes
|
import random


def randomize(arr, n):
    """Shuffle the first n elements of arr in place (Fisher-Yates); return arr.

    Bug fix: the original drew j with random.randint(0, i + 1). randint's
    upper bound is INCLUSIVE, so j could equal i + 1 — indexing past the
    portion being shuffled (IndexError when i == n - 1) and biasing the
    shuffle. The correct draw for Fisher-Yates is uniform over [0, i].
    """
    # Start from the last element and swap one by one. We don't need to
    # run for the first element, that's why i > 0.
    for i in range(n - 1, 0, -1):
        # Pick a random index in [0, i] — inclusive on both ends.
        j = random.randint(0, i)
        # Swap arr[i] with the element at the random index.
        arr[i], arr[j] = arr[j], arr[i]
    return arr
|
// Wraps a Redux-style dispatch so each action logs the previous state,
// the action itself, and the next state inside a console group.
// Returns the untouched dispatch when console.group is unavailable.
const addLoggingToDispatch = (dispatch, getState) => {
  if (!console.group) {
    return dispatch;
  }

  const dispatchWithLogging = (action) => {
    console.group(action.type);
    console.log('%c prev state', 'color:gray', getState());
    console.log('%c action', 'color:blue', action);
    const result = dispatch(action);
    console.log('%c next state', 'color:gray', getState());
    console.groupEnd(action.type);
    return result;
  };

  return dispatchWithLogging;
};

export default addLoggingToDispatch;
|
<filename>src/tree/expressions/TPostfixExpressionA.java
package tree.expressions;

import tree.symbols.TSBracketLeft;
import tree.symbols.TSBracketRight;

/**
 * Postfix expression variant A: an array-subscript expression of the form
 * {@code pexpr [ expr ]}. Children are added in source order:
 * postfix expression, '[', index expression, ']'.
 */
public class TPostfixExpressionA extends TPostfixExpression {
    /** Copy constructor. */
    public TPostfixExpressionA(TPostfixExpressionA node) {
        super(node);
    }

    /** Returns the postfix expression being subscripted (child 0). */
    public TPostfixExpression getPExpression() {
        return (TPostfixExpression)getChild(0);
    }

    /**
     * Returns the index expression.
     * NOTE(review): given the addChild order below, child 1 is the left
     * bracket, not the index expression — this looks like it should be
     * getChild(2). Verify addChild's semantics (does it skip symbol
     * nodes?) before changing.
     */
    public TExpression getIndex() {
        return (TExpression)getChild(1);
    }

    /** Builds the node from its four syntactic parts, in source order. */
    public TPostfixExpressionA(TPostfixExpression pexpr, TSBracketLeft bra_left, TExpression expr, TSBracketRight bra_right) {
        addChild(pexpr);
        addChild(bra_left);
        addChild(expr);
        addChild(bra_right);
    }
}
|
<filename>src/main.js<gh_stars>0
import Vue from 'vue'
import App from './App.vue'
import router from './router'
import store from './store'
import BootstrapVue from 'bootstrap-vue'
import 'bootstrap/dist/css/bootstrap.css'
import 'bootstrap-vue/dist/bootstrap-vue.css'
import { library } from '@fortawesome/fontawesome-svg-core'
import {
  faUser,
  faExclamationTriangle,
  faEdit,
  faPlayCircle,
  faAngleDoubleRight,
  faUndo,
  faThumbsUp,
  faSave,
  faBars
} from '@fortawesome/free-solid-svg-icons'
import { faFontAwesome } from '@fortawesome/free-brands-svg-icons'
import { FontAwesomeIcon } from '@fortawesome/vue-fontawesome'

Vue.use(BootstrapVue)

// Register every icon used across the app in a single call
// (previously split across two library.add calls).
library.add(
  faUser,
  faExclamationTriangle,
  faEdit,
  faPlayCircle,
  faAngleDoubleRight,
  faUndo,
  faThumbsUp,
  faSave,
  faBars,
  faFontAwesome
)
Vue.component('font-awesome-icon', FontAwesomeIcon)

// Suppress the "running in development mode" console hint.
// (Fix: this assignment was duplicated.)
Vue.config.productionTip = false

new Vue({
  router,
  store,
  render: h => h(App)
}).$mount('#app')
|
<gh_stars>10-100
/*------------------------------------------------------------------
 * ossl_srv.h - Entry point definitions into the OpenSSL
 * interface for EST server operations.
 *
 * November, 2012
 *
 * Copyright (c) 2012 by cisco Systems, Inc.
 * All rights reserved.
 *------------------------------------------------------------------
 */
#ifndef HEADER_OSSL_SRV_H
#define HEADER_OSSL_SRV_H

/*
 * Performs a simple certificate enrollment against OpenSSL.
 * p10buf/p10len: request buffer and its length — presumably a PKCS#10
 * CSR given the "p10" naming; confirm against the implementation.
 * configfile: OpenSSL configuration file path.
 * Returns a BIO (ownership/contents defined by the implementation).
 */
BIO * ossl_simple_enroll(unsigned char *p10buf, int p10len, char *configfile);

#endif
|
#! /bin/bash
# conda-build script for graphite2: configures with CMake, builds, and
# installs into the conda prefix.

set -ex

if [[ "$target_platform" == "osx-arm64" ]]; then
    # Remove x86 specific flags. Upstream assumes Darwin is x86
    sed -i.bak 's/-mfpmath=sse -msse2//g' src/CMakeLists.txt
fi

# Out-of-source build.
mkdir build
cd build

cmake ${CMAKE_ARGS} \
    -DCMAKE_BUILD_TYPE=Release \
    -DCMAKE_COLOR_MAKEFILE=OFF \
    -DCMAKE_INSTALL_PREFIX="$PREFIX" \
    ..

make -j$CPU_COUNT VERBOSE=1
# make test -- these do not pass
make install

# Strip libtool metadata and the test binary from the package payload.
cd $PREFIX
rm -f lib/libgraphite2.la bin/gr2fonttest
|
<filename>app/views/settings/settings-page.ts<gh_stars>0
import { EventData, Page, isIOS, PercentLength } from "tns-core-modules/ui/page/page";
import ViewModel from "./settings-page-vm";

/**
 * Page "loaded" handler: binds the settings view model and, on iOS only,
 * applies a bottom margin to the main view.
 */
export function onLoaded(args: EventData) {
    let page = <Page>args.object;
    if (isIOS) {
        // iOS-only bottom margin of "39" (percent-length string).
        // NOTE(review): magic value — presumably compensates for iOS chrome;
        // confirm why 39 specifically.
        const view = page.getViewById("main-view");
        view.style.marginBottom = PercentLength.parse("39");
    }
    page.bindingContext = ViewModel;
}
|
#!/usr/bin/env bash
# Runs the full local check suite: mypy type checking, then unit tests,
# then integration tests. Any failure aborts immediately.
set -e # Exit when any command fails.

export REPO_PATH="$(pwd)/.."
export BLUEPRINT_PATH="$(pwd)"
# Make the project's Python packages importable by tests and mypy alike.
export PYTHONPATH="$BLUEPRINT_PATH/py"
export MYPYPATH="$PYTHONPATH"

echo "Checking mypy"
./mypy.sh
echo "Ok mypy"

echo "Checking unit tests"
python3 -m unittest unit_tests/*.py
echo "Ok unit tests"

echo "Checking integration tests"
python3 -m unittest integration_tests/*.py
echo "Ok integration tests"

echo "ALL TESTS PASSED"
|
#!/usr/bin/env sh

# Abort the script when any command fails
set -e

# Build
npm run build

# cd into the build output directory
cd dist

# Deploy to a custom domain
# echo 'www.example.com' > CNAME

# Publish the build output as a fresh single-commit git history
# and force-push it to the gh-pages branch.
git init
git add -A
git commit -m 'deploy'

# Deploy to https://<USERNAME>.github.io
# git push -f git@github.com:<USERNAME>/<USERNAME>.github.io.git master

# Deploy to https://<USERNAME>.github.io/<REPO>
git push -f https://github.com/wwwwwwwow/VueTube-practice.git master:gh-pages

cd -
|
#!/bin/sh
# Polls the mouse position and spawns/kills dzen popup panels (calendar,
# music) depending on which bar region the pointer hovers over. Exits as
# soon as the pointer leaves the bar.
. ~/.dzen-popup-config.sh

# Kill any previously running spawner instance so only one polls at a time.
for pid in `pgrep -f -x "/bin/sh /home/jln/.bin/dzen-kill-spawner.sh"`
do
kill "$pid"
done

SCREEN_WIDTH=$(sres -W)
# Calendar hot zone: left half of the bar.
CAL_START=0
CAL_END=$((SCREEN_WIDTH/2))
#music_text=$(mpc current)
#music_width=$(txtw -f $PANEL_FONT -s $PANEL_FONT_SIZE "$music_text")
# Fix: removed the dead "MUSIC_START=$((CAL_START-140-music_width))" line —
# it referenced music_width, whose computation is commented out above, and
# was immediately overwritten by the assignment below anyway.
# Music hot zone: right half of the bar.
MUSIC_START=$((SCREEN_WIDTH/2))
MUSIC_END=$SCREEN_WIDTH

while true; do
mousepos=$(xdotool getmouselocation 2> /dev/null | tail -1 2> /dev/null)
YPOS=$(echo $mousepos | awk '{print $2}' | cut -d ":" -f 2 2> /dev/null)
# Stop polling once the pointer drops below the bar.
if [ "$YPOS" -gt "$BAR_HEIGHT" ]; then
break
fi
XPOS=$(echo $mousepos | awk '{print $1}' | cut -d ":" -f 2)
if [ "$XPOS" -gt $CAL_START -a "$XPOS" -lt $CAL_END ]; then
# Spawn the calendar popup unless it is already up.
pid=$(pgrep -f "dzen-popup-cal")
if [ -z "$pid" ]; then
/home/jln/.bin/dzen-kill-popup.sh
/home/jln/.bin/dzen-cal.sh 2> /dev/null &
fi
elif [ "$XPOS" -gt $MUSIC_START -a "$XPOS" -lt $MUSIC_END ]; then
# Spawn the music popup unless it is already up.
pid=$(pgrep -f "dzen-popup-music")
if [ -z "$pid" ]; then
/home/jln/.bin/dzen-kill-popup.sh
/home/jln/.bin/dzen-music.sh 2> /dev/null &
fi
else
# Pointer is on the bar but outside any hot zone: close popups.
/home/jln/.bin/dzen-kill-popup.sh
fi
sleep 0.4
done
|
import { Component, Input, OnInit } from '@angular/core';
import { DbServiceService } from '../db-service.service';
import { ElectronService } from 'ngx-electron';
/**
 * Recursive tree node for the class-hierarchy view. Renders one class plus
 * its subclasses, and supports inline creation/removal of subclasses via
 * the DB service. Confirmation dialogs go through Electron.
 */
@Component({
  selector: 'tree-branch',
  templateUrl: './tree-branch.component.html',
  styleUrls: ['./tree-branch.component.css']
})
export class TreeBranchComponent implements OnInit {
  constructor(
    private conn: DbServiceService,
    private _electronService: ElectronService
  ) { }

  // Subtree rendered by this branch (shape comes from the DB service).
  @Input() tree: any = null;
  // Nesting depth; used by the template for indentation.
  @Input() level: number = 0;
  // Selection callbacks supplied by the parent component.
  @Input() selectElement = (className) => {};
  @Input() getSelectedClass = () => {};

  // Superclass under which a new subclass is currently being created.
  newSubclassSuper = '';
  // When true, the template shows a temporary editable child row.
  fakeEditableChild = false;

  /** Begin inline creation of a subclass under `superClass`. */
  newSubclass(superClass) {
    //console.log(`TreeBranch newSubclass('${superClass}')`);
    this.newSubclassSuper = superClass;
    this.fakeEditableChild = true;
  }

  /**
   * Confirm via a native dialog, then delete the class through the DB
   * service. If the currently selected class is among the removed
   * descendants, selection first moves to the removed class's superclass.
   */
  removeClass(name) {
    //console.log(`TreeBranch removeClass('${name}')`)
    let superClass = this.conn.classesMap[name].superClass;
    const choice = this._electronService.remote.dialog.showMessageBoxSync(this._electronService.remote.getCurrentWindow(), {
      type: 'question',
      buttons: ['No', 'Yes'],
      title: 'Удалить класс',
      message: `Вы уверены, что хотите безвозвратно удалить класс ${name}?
Все подклассы ${name} будут также удалены, а все экземпляры ${name} и его подклассов
будут переведены в класс ${superClass}.`
    });
    // Button index 1 is 'Yes'.
    if (choice === 1) {
      let willBeRemoved = this.conn.getClassWithDescendants(name).map(c => c.name)
      let selectedClass = this.getSelectedClass()
      if (willBeRemoved.includes(selectedClass)) this.selectElement(this.conn.classesMap[name].superClass)
      this.conn.removeClass(name);
    }
  }

  /** Cancel the inline subclass editor. */
  hideFakeField() {
    this.fakeEditableChild = false;
    this.newSubclassSuper = '';
  }

  /** A name is invalid when empty (after trimming) or already taken. */
  isClassNameInvalid(newClass) {
    newClass = this.trim(newClass)
    return (newClass == '' || this.conn.classesMap[newClass])
  }

  /** Trim and collapse runs of whitespace into single spaces. */
  trim(string) {
    return string.trim().replace(/\s\s+/g, ' ')
  }

  /** Replace control characters (newline, CR, tab, NUL) with spaces. */
  cutForbidden(string) {
    string = string.replace(/[\n\r\t\0]/g, ' ')
    return string
  }

  /**
   * Input handler for the class-name field: sanitizes the value as the
   * user types, commits on Enter (a newline in the value triggers blur),
   * and toggles the `invalid_input` CSS class on the element.
   */
  checkClassName(event) {
    var element = event.target || event.srcElement || event.currentTarget;
    if (element.value.includes('\n') || element.value.includes('\r')) {
      element.value = this.trim(this.cutForbidden(element.value))
      //element.dispatchEvent(new Event('change', { 'bubbles': true }))
      element.blur()
      //console.log('dispatched event')
      return;
    }
    element.value = this.cutForbidden(element.value)
    //console.log(oldValue, element.value, oldValue != element.value)
    let value = this.trim(element.value)
    if (this.isClassNameInvalid(value) && element.className.indexOf('invalid_input') < 0) {
      element.className += ' invalid_input';
    } else {
      element.className = element.className.replace(/\s*invalid_input/g, '');
    }
    //console.log('parent css class =', parent.className, typeof parent.className, parent)
  }

  /** Commit the new subclass if the entered name is valid, and select it. */
  setNewClassName(event) {
    //console.log(`setNewClassName `, event)
    if (!this.isClassNameInvalid(event.target.value)) {
      this.conn.createClass(event.target.value, this.newSubclassSuper, {})
      this.fakeEditableChild = false;
      this.newSubclassSuper = '';
      this.selectElement(event.target.value)
      var element = event.target || event.srcElement || event.currentTarget;
      // Clear the invalid marker from the enclosing field wrapper.
      let parent = element.parentElement.parentElement;
      parent.className = parent.className.replace(/\s*invalid_input/g, '');
    }
  }

  ngOnInit(): void {
    // Coerce level to a number -- template inputs may arrive as strings.
    this.level = this.level*1
    //console.log('tree margin:' + ((this.level + 1) * 20) + 'px')
  }
}
|
<filename>main.go
//Copyright 2019 <NAME>
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
package main
import (
"fmt"
"os"
"github.com/godbus/dbus"
)
// main connects to the session D-Bus, subscribes to MPRIS
// PropertiesChanged signals for the media player named on the command
// line, and rewrites the status file whenever playback state or track
// metadata changes.
func main() {
	// Get player name from command line args.
	if len(os.Args) < 2 {
		fmt.Fprintln(os.Stderr, "Need media player name. Usage: mpris-current [player name]")
		os.Exit(1)
	}
	player := os.Args[1]
	// Setup player status template.
	template := SetupTemplate()
	// Connect to dbus.
	conn, err := dbus.SessionBus()
	if err != nil {
		panic(err)
	}
	// Make new player status based on dbus object.
	obj := conn.Object("org.mpris.MediaPlayer2."+player, "/org/mpris/MediaPlayer2")
	status := NewPlayerStatus(obj)
	if status == nil {
		fmt.Fprintln(os.Stderr, "Can't connect to dbus object", obj.Destination())
		fmt.Fprintln(os.Stderr, "Make sure", player, "is running")
		os.Exit(1)
	}
	// Subscribe to the PropertiesChanged signal.
	conn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
		"type='signal',path='/org/mpris/MediaPlayer2',interface='org.freedesktop.DBus.Properties',member=PropertiesChanged")
	// Receive matched signals on a buffered channel.
	c := make(chan *dbus.Signal, 10)
	conn.Signal(c)
	fmt.Println("Start monitoring signal for org.mpris.MediaPlayer2." + player)
	fmt.Println("Press Ctrl+C to quit...")
	// Loop over the channel.
	for v := range c {
		// Get signal's message body.
		// NOTE(review): assumes Body[1] is the changed-properties map of
		// a PropertiesChanged signal -- other matched signals would panic
		// on this type assertion. Confirm the match rule is narrow enough.
		reply := v.Body[1].(map[string]dbus.Variant)
		// If playback status changed, update the player status.
		if ps, ok := reply["PlaybackStatus"]; ok {
			status.Status = ps.Value().(string)
		}
		// If the song changed, update the artist and title.
		if metadata, ok := reply["Metadata"]; ok {
			metadata := metadata.Value().(map[string]dbus.Variant)
			status.Artist, status.Title = GetArtistAndTitle(metadata)
		}
		// Merge player status with template, and write to file.
		// BUGFIX: the original used `defer f.Close()` here, which only
		// runs at function exit and therefore leaked one file descriptor
		// per received signal in this endless loop. Close explicitly at
		// the end of each iteration instead.
		f := CreateFile()
		if err := template.Execute(f, status); err != nil {
			fmt.Fprintln(os.Stderr, "Failed to write player status:", err)
		}
		f.Close()
	}
}
|
import math
def compute_area_of_the_circle(radius):
    """Return the area of a circle with the given radius (pi * r**2)."""
    squared_radius = radius ** 2
    return math.pi * squared_radius
|
def flip_box(box):
    """Return a copy of *box* with every row reversed (horizontal mirror)."""
    flipped = []
    for row in box:
        flipped.append(row[::-1])
    return flipped
|
#!/bin/bash
# Uninstall every pip-installed package in the current environment,
# skipping editable ("-e") installs so local development checkouts survive.
# NOTE(review): with GNU xargs an empty freeze list still invokes
# `pip uninstall -y` with no arguments, which exits non-zero -- confirm
# whether callers care about the exit status in that case.
pip freeze | grep -v "^-e" | xargs pip uninstall -y
|
<gh_stars>0
"""
Functions to read and write CSV file to / from numpy arrays.
"""
import csv
import numpy as np
comment = "#"  # Lines whose first field starts with this are skipped.

def readCSV(file, cols=None, separ=",", headerskip=0):
    """Read a simple csv file of floats with specified columns if supplied.

    :param file: the csv file to be read (a path string or an open file)
    :type file: str or file
    :param cols: list of booleans specifying which columns to read;
        None (the default) reads all columns
    :type cols: list[bool] or None
    :param separ: field separator (default: comma)
    :param headerskip: number of leading lines to skip (default 0)
    :return: two-dimensional numpy.array of values, one row per file column.
    """
    # Open the file ourselves only when a path string was given; in that
    # case we are also responsible for closing it (the original closed
    # caller-owned file objects too, and leaked the handle on error).
    opened_here = isinstance(file, str)
    if opened_here:
        file = open(file, "r", newline='')
    try:
        # Open the reader with the specified field separator.
        reader = csv.reader(file, delimiter=separ)
        data = []
        i = 0  # number of lines read so far (including skipped ones)
        for line in reader:
            # Skip empty lines, the header region, and comment lines.
            if len(line) > 0 and i >= headerskip and not line[0].startswith(comment):
                if cols is None:  # no column selection: read everything
                    vals = [float(t) for t in line]
                else:
                    vals = [float(t) for t, c in zip(line, cols) if c]
                data.append(vals)
            i += 1
    finally:
        if opened_here:
            file.close()
    # Transpose so each output row holds one column of the file.
    return np.array([*zip(*data)])
def writeCSV(file, data, cols=None):
    """Write CSV file with data typically supplied as a list of np.arrays.

    :param file: file or name of file
    :type file: file or str
    :param data: list of numpy.array or two-dimensional numpy.array
        (one entry per output column)
    :param cols: boolean list specifying which columns of data are to be
        written; None (the default) writes all columns
    :type cols: list[bool] or None
    :return: the number of lines of data written
    """
    # Open the file ourselves only when a path string was given; only then
    # do we close it (the original also closed caller-owned file objects).
    opened_here = isinstance(file, str)
    if opened_here:
        file = open(file, "w", newline='')
    try:
        writer = csv.writer(file)
        # One output row per element index across the selected columns.
        for j in range(data[0].size):
            row = ["{0:12.5e}".format(data[i][j])
                   for i in range(len(data))
                   if cols is None or cols[i]]
            writer.writerow(row)
    finally:
        if opened_here:
            file.close()
    return data[0].size
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This script has two purposes:
1. Apply the attributes from a cobra model to an existing Escher
map. For instance, update modified reversibilities.
2. Convert maps made with Escher beta versions to valid jsonschema/1-0-0
maps.
"""
from __future__ import print_function, unicode_literals
usage_string = """
Usage:
./convert_map.py {map path} {model path}
OR
python -m escher.convert_map {map path} {model path}
"""
try:
import cobra.io
import jsonschema
except ImportError:
raise Exception(('The Python packages jsonschema and COBRApy (0.3.0b3 or later) '
'are required for converting maps.'))
import sys
import json
import random
import string
import hashlib
from os.path import basename, join
import logging
try:
from urllib.request import urlopen
except ImportError:
from urllib import urlopen
from escher.validate import validate_map, genes_for_gene_reaction_rule
# configure logger
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
def main():
    """Main entrypoint for convert_map. Instructions are at the top of this file."""
    # get the arguments: map path, then model path
    try:
        in_file = sys.argv[1]
        model_path = sys.argv[2]
    except IndexError:
        print(usage_string)
        sys.exit()
    # load the cobra model: try JSON first, then fall back to SBML
    try:
        model = cobra.io.load_json_model(model_path)
    except (IOError, ValueError):
        try:
            model = cobra.io.read_sbml_model(model_path)
        except IOError:
            raise Exception('Could not load the model: %s' % model_path)
    # get the current map
    with open(in_file, 'r') as f:
        out = json.load(f)
    # convert the map
    the_map = convert(out, model)
    # don't replace the file
    # NOTE(review): if in_file does not end in '.json', replace() is a
    # no-op and the input file gets overwritten -- confirm inputs are .json.
    out_file = in_file.replace('.json', '_converted.json')
    logging.info('Saving validated map to %s' % out_file)
    with open(out_file, 'w') as f:
        json.dump(the_map, f, allow_nan=False)
# ------------------------------------------------------------------------------
# Functions for manipulating Escher maps as Python objects
# ------------------------------------------------------------------------------
class MissingDefaultAttribute(Exception):
    """Raised when a required attribute is absent and no default is available."""
    pass
def make_map(header, body):
    """Assemble an Escher map as a two-element list: [header, body]."""
    return [header, body]

def get_header(escher_map):
    """The header is the first element of the map pair."""
    return escher_map[0]

def set_header(escher_map, value):
    """Replace the map's header in place."""
    escher_map[0] = value

def get_body(escher_map):
    """The body is the second element of the map pair."""
    return escher_map[1]

def set_body(escher_map, value):
    """Replace the map's body in place."""
    escher_map[1] = value

def get_nodes(body):
    """The collection (dict) of nodes inside a map body."""
    return body['nodes']

def set_nodes(body, value):
    """Replace the body's node collection."""
    body['nodes'] = value

def get_reactions(body):
    """The collection (dict) of reactions inside a map body."""
    return body['reactions']

def set_reactions(body, value):
    """Replace the body's reaction collection."""
    body['reactions'] = value
def is_valid_header(val):
    """A valid header is a dict carrying all of the required map keys."""
    if not isinstance(val, dict):
        return False
    required = ('schema', 'homepage', 'map_name', 'map_id', 'map_description')
    return all(key in val for key in required)
def is_valid_body(val):
    """A valid map body is any dictionary."""
    return isinstance(val, dict)
def has_header_and_body(val):
    """True when *val* is a [header, body] pair with valid header and body."""
    if not isinstance(val, list) or len(val) != 2:
        return False
    return is_valid_header(get_header(val)) and is_valid_body(get_body(val))
# ------------------------------------------------------------------------------
# Functions for fixing nested dictionaries & lists
# ------------------------------------------------------------------------------
def dict_with_required_elements(the_dict, required_attributes, get_default=None,
                                nullable=[], cast={}):
    """Remove unsupported elements and provide defaults for missing
    elements or elements with zero length (e.g. {}, []).

    Mutates the_dict in place and returns None.

    Arguments
    ---------
    the_dict: A dictionary.
    required_attributes: The attributes required in the dictionary.
    get_default: A function that takes the attribute name and the current
    object, and returns a default value. If not function is provided, then
    MissingDefaultAttribute is raised when an attribute is not present.
    nullable: A list of attributes that can be None.
    cast: A dictionary of attributes for keys and functions for values.
    """
    if type(the_dict) is not dict:
        raise MissingDefaultAttribute('(Bad object)')
    def has_zero_length(o):
        """Returns True if o has a length and it is zero."""
        try:
            return len(o) == 0
        except TypeError:
            # Not a sized object (e.g. a number): treat as non-empty.
            return False
    def current_otherwise_default(name):
        """Take the value in the current dict, or else provide a default."""
        # Missing or empty values are replaced with a default (or rejected).
        if name not in the_dict or has_zero_length(the_dict[name]):
            if get_default is not None:
                default = get_default(name, the_dict)
                the_dict[name] = default
            else:
                raise MissingDefaultAttribute(name)
        elif the_dict[name] is None and name not in nullable:
            raise MissingDefaultAttribute('%s (is None)' % name)
        # casting -- applied to both existing and freshly-defaulted values.
        # NOTE(review): the KeyError handler is meant for names absent from
        # `cast`, but it also swallows a KeyError raised *inside* the cast
        # callable itself -- confirm cast functions cannot raise KeyError.
        try:
            new = cast[name](the_dict[name])
        except KeyError:
            pass
        else:
            the_dict[name] = new
    # map over the dict
    not_required = set(the_dict.keys())
    for name in required_attributes:
        current_otherwise_default(name)
        # remember the keys that are not required
        try:
            not_required.remove(name)
        except KeyError:
            pass
    # remove not required -- everything not in required_attributes is dropped
    for key in not_required:
        del the_dict[key]
def map_over_dict_with_deletions(the_dict, fix_function):
    """Use this to map over a dictionary in place. The fix function updates
    values, and returns None to delete a value.

    Returns the same (mutated) dictionary.

    Arguments
    ---------
    the_dict: A dictionary to fix.
    fix_function: A function that takes the dictionary key and value as
    arguments, and that returns None if the key is deleted, otherwise
    returns the updated value.
    """
    # Iterate over a snapshot so deletion during the loop is safe.
    # (The original also accumulated deleted keys into a set that was
    # never read -- that dead code is removed.)
    for key, val in list(the_dict.items()):
        updated_val = fix_function(key, val)
        if updated_val is None:
            # The fix function rejected this entry: drop it.
            del the_dict[key]
        else:
            the_dict[key] = updated_val
    return the_dict
def list_of_dicts_with_required_elements(a_list, required_attributes,
                                         get_default=None, nullable=[],
                                         cast={}):
    """For a list of dictionaries with required attributes, check
    each one. Returns the new list, dropping entries that cannot be fixed.

    Arguments
    ---------
    a_list: A list of dictionaries.
    required_attributes: A list of required attributes for the internal
    dictionaries.
    get_default: A function that takes the attribute name and the current
    object, and returns a default value. If not function is provided, then
    MissingDefaultAttribute is raised when an attribute is not present.
    nullable: A list of attributes that can be None.
    cast: A dictionary of attributes for keys and functions for values.
    """
    def fix_a_value(val):
        try:
            # BUGFIX: the original never forwarded get_default, so defaults
            # were silently ignored and such entries were always dropped.
            dict_with_required_elements(val, required_attributes,
                                        get_default=get_default,
                                        nullable=nullable, cast=cast)
            return val
        except MissingDefaultAttribute:
            return None
    # fix each element in the list
    return [y for y in (fix_a_value(x) for x in a_list)
            if y is not None]
def collection_of_dicts_with_required_elements(collection, required_attributes,
                                               get_default=None, nullable=[],
                                               cast={}):
    """For a collection (dictionary) of dictionaries with required
    attributes, check each one. Returns the new collection, with
    unfixable entries removed.

    Arguments
    ---------
    collection: A dictionary with values that are dictionaries.
    required_attributes: A list of required attributes for the internal
    dictionaries.
    get_default: A function that takes the attribute name and the current
    object, and returns a default value. If not function is provided, then
    MissingDefaultAttribute is raised when an attribute is not present.
    nullable: A list of attributes that can be None.
    cast: A dictionary of attributes for keys and functions for values.
    """
    def fix_a_value(val):
        try:
            # BUGFIX: the original never forwarded get_default, so defaults
            # were silently ignored and such entries were always dropped.
            dict_with_required_elements(val, required_attributes,
                                        get_default=get_default,
                                        nullable=nullable, cast=cast)
            return val
        except MissingDefaultAttribute:
            return None
    # update the dictionary in place, deleting entries that cannot be fixed
    for k, v in list(collection.items()):
        new_value = fix_a_value(v)
        if new_value is None:
            del collection[k]
        else:
            collection[k] = new_value
    return collection
# ------------------------------------------------------------------------------
# Functions for cleaning up unconnected map elements
# ------------------------------------------------------------------------------
def remove_unconnected_nodes(nodes, reactions, node_ids_deleted=set()):
    """Check for nodes with no connected segments in the reactions object, and
    return a new nodes object with only connected nodes.

    Mutates `nodes` in place.

    Arguments
    ---------
    nodes: A collection (dict) of nodes.
    reactions: A collection (dict) of reactions.
    node_ids_deleted: A set of previously deleted ids to update.
        NOTE(review): this parameter is never read or written in the body,
        and its mutable default set would be shared across calls -- confirm
        whether it can be dropped from the signature.
    """
    # update
    def map_over_connected_node_ids(fn, reactions):
        """Run fn with all node ids that have connected segments."""
        for reaction in reactions.values():
            for segment in reaction['segments'].values():
                fn(segment['from_node_id'])
                fn(segment['to_node_id'])
    # use a set to keep track of connected nodes
    nodes_with_segments = set()
    add_node = nodes_with_segments.add
    has_node = lambda x: x in nodes_with_segments
    # collect every node id referenced by at least one segment
    map_over_connected_node_ids(add_node, reactions)
    # filter the nodes: keep only those referenced by some segment
    def check_fn(key, value):
        if has_node(key):
            return value
        else:
            logging.debug('No segments for node %s. Deleting' % key)
            return None
    map_over_dict_with_deletions(nodes, check_fn)
def remove_unconnected_segments(reactions, nodes):
    """Delete every segment whose endpoints are not both present in *nodes*.

    Mutates the reactions' segment dictionaries in place.

    Arguments
    ---------
    reactions: A collection (dict) of reactions.
    nodes: A collection (dict) of nodes.
    """
    known_ids = set(nodes)
    for reaction in reactions.values():
        segments = reaction['segments']
        # Iterate over a snapshot of the keys so deletion is safe.
        for segment_id in list(segments):
            seg = segments[segment_id]
            if seg['from_node_id'] in known_ids and seg['to_node_id'] in known_ids:
                continue
            logging.debug('Missing node for segment %s. Deleting' % segment_id)
            del segments[segment_id]
def remove_reactions_with_missing_metabolites(reactions, nodes):
    """Check for reactions that do not have all of their metabolites.

    Mutates `reactions` in place, deleting any reaction whose metabolite
    list is not fully covered by the metabolite nodes its segments touch.

    Arguments
    ---------
    reactions: A collection (dict) of reactions.
    nodes: A collection (dict) of nodes.
    """
    # filter the reactions
    def check_fn(reaction_id, reaction):
        # get the metabolites listed on the reaction
        metabolite_ids = {x['bigg_id'] for x in reaction['metabolites']}
        # look for matching segments, crossing off each metabolite found
        for segment_id, segment in reaction['segments'].items():
            # find node
            # NOTE(review): assumes every segment endpoint exists in
            # `nodes` (true if remove_unconnected_segments ran first);
            # otherwise the nodes[...] lookup raises KeyError.
            for n in 'from_node_id', 'to_node_id':
                node = nodes[segment[n]]
                try:
                    # Marker nodes lack 'bigg_id'; their KeyError (and a
                    # metabolite not in the set) is ignored here.
                    metabolite_ids.remove(node['bigg_id'])
                except KeyError:
                    pass
        if len(metabolite_ids) > 0:
            # some metabolites never appeared on a segment: drop reaction
            logging.info('Deleting reaction %s with missing metabolites %s' % (reaction['bigg_id'],
                                                                               str(list(metabolite_ids))))
            return None
        else:
            return reaction
    map_over_dict_with_deletions(reactions, check_fn)
# ------------------------------------------------------------------------------
# Functions for converting maps
# ------------------------------------------------------------------------------
def old_map_to_new_schema(the_map, map_name=None, map_description=None):
    """Convert any old map to match the latest schema. Returns a new map.

    Arguments
    ---------
    the_map: An Escher map loaded as a Python object (e.g. json.load('my_map.json')).
    map_name: A name for the map. If a name is already present, this name
    overrides it.
    map_description: A description for the map. If a name is already present,
    this name overrides it.
    """
    def add_default_header(body, name, description):
        """Return a map with header and body."""
        def check_for(var, name):
            """Print a warning if var is None."""
            if var is None:
                # NOTE(review): logging.warn is a deprecated alias of
                # logging.warning -- consider switching.
                logging.warn('No {} for map'.format(name))
                return ''
            return var
        # Derive a deterministic map_id from the body content.
        new_id = hashlib.md5(json.dumps(body).encode('utf-8')).hexdigest()
        default_header = {
            "schema": "https://escher.github.io/escher/jsonschema/1-0-0#",
            "homepage": "https://escher.github.io",
            "map_name": check_for(name, 'name'),
            "map_id": new_id,
            "map_description": check_for(description, 'description')
        }
        logging.info('Map has the ID {}'.format(new_id))
        return make_map(default_header, body)
    def add_header_if_missing(a_map):
        """Check for new, 2-level maps, and add the header."""
        if has_header_and_body(a_map):
            # Keep existing name/description unless overrides were given.
            use_name = (get_header(a_map)['map_name'] if map_name is None else map_name)
            use_description = (get_header(a_map)['map_description']
                               if map_description is None else map_description)
            return add_default_header(get_body(a_map), use_name, use_description)
        elif is_valid_body(a_map):
            # Old single-level map: the whole object is the body.
            return add_default_header(a_map, map_name, map_description)
        else:
            raise Exception('The map provided cannot be converted. It is not a valid Escher map.')
    def fix_body(body):
        """Fill in necessary attributes for the body."""
        def get_default(key, _):
            """Get default value for key in body."""
            if key == 'canvas':
                # default canvas
                return {'x': -1440, 'y': -775, 'width': 4320, 'height': 2325}
            else:
                # other defaults
                return {}
        def fix_canvas(canvas):
            """Return a canvas with correct attributes."""
            canvas_keys = ['x', 'y', 'width', 'height']
            cast = {'x': float, 'y': float, 'width': float, 'height': float}
            dict_with_required_elements(canvas, canvas_keys, cast=cast)
        def fix_text_labels(text_labels):
            """Return a list of text_labels with correct attributes."""
            text_label_keys = ['x', 'y', 'text']
            cast = {'x': float, 'y': float}
            collection_of_dicts_with_required_elements(text_labels,
                                                       text_label_keys,
                                                       cast=cast)
        # fix canvas before body so that a default canvas can be added if the
        # canvas is invalid
        try:
            fix_canvas(body['canvas'])
        except KeyError:
            # no canvas at all: fix_body's defaults will add one below
            pass
        except MissingDefaultAttribute:
            # invalid canvas: drop it so the default replaces it
            del body['canvas']
        # fix body
        core_keys = ['nodes', 'reactions', 'text_labels', 'canvas']
        dict_with_required_elements(body, core_keys, get_default)
        # fix text labels
        fix_text_labels(body['text_labels'])
    def fix_nodes(nodes):
        """Fill in necessary attributes for the nodes. Returns nodes."""
        def get_default_node_attr(name, obj):
            """Return default values when possible. Otherwise, raise MissingDefaultAttribute."""
            if name == 'node_is_primary':
                return False
            elif name == 'name':
                return ''
            elif name == 'label_x' and 'x' in obj:
                # fall back to placing the label at the node position
                return obj['x']
            elif name == 'label_y' and 'y' in obj:
                return obj['y']
            else:
                raise MissingDefaultAttribute(name)
        def fix_a_node(node_id, node):
            """Return an updated node, or None if the node is invalid."""
            if not 'node_type' in node:
                logging.debug('Deleting node %s with no node_type' % node_id)
                return None
            elif node['node_type'] == 'metabolite':
                met_keys = ['node_type', 'x', 'y', 'bigg_id', 'name', 'label_x',
                            'label_y', 'node_is_primary']
                cast = {'x': float, 'y': float, 'label_x': float, 'label_y': float}
                try:
                    dict_with_required_elements(node, met_keys, get_default_node_attr, cast=cast)
                    return node
                except MissingDefaultAttribute as e:
                    logging.debug('Deleting node %s with missing attribute %s' % (node_id, e))
                    return None
            elif node['node_type'] in ['multimarker', 'midmarker']:
                # marker nodes only carry a position
                marker_keys = ['node_type', 'x', 'y']
                cast = {'x': float, 'y': float}
                try:
                    dict_with_required_elements(node, marker_keys, get_default_node_attr, cast=cast)
                    return node
                except MissingDefaultAttribute as e:
                    logging.debug('Deleting node %s with missing attribute %s' % (node_id, e))
                    return None
            else:
                logging.debug('Deleting node %s with bad node_type %s' % (node_id, node['node_type']))
                return None
        # run fix functions
        map_over_dict_with_deletions(nodes, fix_a_node)
    def fix_reactions(reactions):
        """Fill in necessary attributes for the reactions.

        Returns reactions.
        """
        def get_default_reaction_attr(name, obj):
            """Return default values when possible. Otherwise, raise MissingDefaultAttribute."""
            if name in ['name', 'gene_reaction_rule']:
                return ''
            elif name == 'reversibility':
                return True
            elif name in ['genes', 'metabolites']:
                return []
            else:
                raise MissingDefaultAttribute(name)
        def fix_a_reaction(reaction_id, reaction):
            """Return an updated reaction, or None if the reaction is invalid."""
            def fix_segments(segments):
                """Fix dictionary of segments with correct attributes."""
                def fix_a_segment(segment_id, segment):
                    segment_keys = ['from_node_id', 'to_node_id', 'b1', 'b2']
                    def get_default_segment_attr(key, _):
                        if key in ['b1', 'b2']:
                            # bezier control points are optional
                            return None
                        else:
                            raise MissingDefaultAttribute(key)
                    try:
                        dict_with_required_elements(segment, segment_keys,
                                                    get_default_segment_attr,
                                                    nullable=['b1', 'b2'])
                    except MissingDefaultAttribute as e:
                        logging.debug('Deleting segment %s with missing attribute %s' % (segment_id, e))
                        return None
                    # check the beziers too; invalid ones become None rather
                    # than invalidating the whole segment
                    required_bezier_keys = ['x', 'y']
                    cast = {'x': float, 'y': float}
                    for b in ['b1', 'b2']:
                        try:
                            dict_with_required_elements(segment[b],
                                                        required_bezier_keys,
                                                        cast=cast)
                        except MissingDefaultAttribute as e:
                            logging.debug('Deleting bezier %s with missing attribute %s in segment %s' % (b, e, segment_id))
                            segment[b] = None
                    return segment
                map_over_dict_with_deletions(segments, fix_a_segment)
            def fix_metabolites(metabolites):
                """Return a list of metabolites with correct attributes."""
                metabolite_keys = ['coefficient', 'bigg_id']
                cast = {'coefficient': float}
                return list_of_dicts_with_required_elements(metabolites,
                                                            metabolite_keys,
                                                            cast=cast)
            def fix_genes(genes):
                """Return a list of genes with correct attributes."""
                gene_keys = ['bigg_id', 'name']
                def get_default_gene_attr(name, _):
                    if name == 'name':
                        return ''
                    else:
                        raise MissingDefaultAttribute(name)
                return list_of_dicts_with_required_elements(genes, gene_keys,
                                                            get_default_gene_attr)
            # fix all the attributes
            reaction_keys = ['name', 'bigg_id','reversibility', 'label_x',
                             'label_y', 'gene_reaction_rule', 'genes',
                             'metabolites', 'segments']
            cast = {'label_x': float, 'label_y': float}
            try:
                dict_with_required_elements(reaction, reaction_keys,
                                            get_default_reaction_attr,
                                            cast=cast)
            except MissingDefaultAttribute as e:
                logging.debug('Deleting reaction %s with missing attribute %s' % (reaction_id, e))
                return None
            # fix segments, metabolites, and genes
            fix_segments(reaction['segments'])
            # must have segments
            if len(reaction['segments']) == 0:
                logging.debug('Deleting reaction %s with no segments' % reaction_id)
                return None
            reaction['metabolites'] = fix_metabolites(reaction['metabolites'])
            reaction['genes'] = fix_genes(reaction['genes'])
            return reaction
        # run the fix functions
        map_over_dict_with_deletions(reactions, fix_a_reaction)
    # make sure there is a body and a head
    the_map = add_header_if_missing(the_map)
    body = get_body(the_map)
    # add missing elements to body
    fix_body(body)
    # fix the nodes
    fix_nodes(get_nodes(body))
    # fix the reactions
    fix_reactions(get_reactions(body))
    # delete any nodes with no segment
    remove_unconnected_nodes(get_nodes(body), get_reactions(body))
    # delete segments with no nodes
    remove_unconnected_segments(get_reactions(body), get_nodes(body))
    # delete reactions with missing metabolite segments
    remove_reactions_with_missing_metabolites(get_reactions(body), get_nodes(body))
    return the_map
def apply_id_mappings(the_map, reaction_id_mapping=None,
                      metabolite_id_mapping=None, gene_id_mapping=None):
    """Convert bigg_ids in the map using the mappings dictionaries.

    Mutates the_map in place; returns None. IDs without a mapping entry
    are left unchanged.

    Arguments
    ---------
    the_map: The Escher map Python object.
    reaction_id_mapping: A dictionary with keys for existing bigg_ids and value for new bigg_ids.
    metabolite_id_mapping: A dictionary with keys for existing bigg_ids and value for new bigg_ids.
    gene_id_mapping: A dictionary with keys for existing bigg_ids and value for new bigg_ids.
    """
    id_key = 'bigg_id'
    def check(a_dict, mapping):
        """Try to change the value for id_key to a new value defined in mapping."""
        try:
            new_id = mapping[a_dict[id_key]]
        except KeyError:
            # no bigg_id on this dict, or no mapping for it: leave as-is
            pass
        else:
            a_dict[id_key] = new_id
        return a_dict
    def apply_mapping_list(a_list, mapping):
        """Use the mapping on each dict in the list. Returns a new list."""
        return [check(x, mapping) for x in a_list]
    def apply_mapping_dict(collection, mapping):
        """Use the mapping on each dict in the collection (dict). Returns the dictionary."""
        for val in collection.values():
            check(val, mapping)
        return collection
    body = get_body(the_map)
    # reactions
    if reaction_id_mapping is not None:
        apply_mapping_dict(get_reactions(body), reaction_id_mapping)
    # metabolites (metabolite nodes carry the bigg_id; marker nodes are
    # untouched because check() ignores dicts without the key)
    if metabolite_id_mapping is not None:
        apply_mapping_dict(get_nodes(body), metabolite_id_mapping)
    # genes & metabolites in reactions
    for reaction in get_reactions(body).values():
        if gene_id_mapping is not None:
            reaction['genes'] = apply_mapping_list(reaction['genes'], gene_id_mapping)
        if metabolite_id_mapping is not None:
            reaction['metabolites'] = apply_mapping_list(reaction['metabolites'], metabolite_id_mapping)
def apply_cobra_model_to_map(the_map, model):
    """Apply the COBRA model attributes (descriptive names, gene reaction rules,
    reversibilities) to the map.

    Cleans up unconnected segments and nodes after deleting any nodes and
    reactions not found in the cobra model. Mutates the_map in place.
    """
    def apply_model_attributes(a_dict, dict_list, attribute_fns, collection_on='bigg_id'):
        """For each attribute_fn, apply the values from the dict_list to given
        dictionary, using the given IDs. Returns the dictionary, or None if a
        matching object was not found in the DictList.
        """
        try:
            on_id = a_dict[collection_on]
        except KeyError:
            # no ID to look up (e.g. marker node): leave untouched
            return a_dict
        try:
            dl_object = dict_list.get_by_id(on_id)
        except KeyError:
            logging.info('Could not find %s in model. Deleting.' % on_id)
            return None
        else:
            for attribute_fn in attribute_fns:
                attribute_fn(a_dict, dl_object)
            return a_dict
    def apply_model_attributes_dict(collection, dict_list, attribute_fns,
                                    collection_on='bigg_id'):
        """For each attribute_fn, apply the values from the dict_list to the matching
        results in the collection, using the given IDs. Mutates the collection in
        place, deleting entries with no match in the model."""
        for key, val in list(collection.items()):
            new_val = apply_model_attributes(val, dict_list, attribute_fns,
                                             collection_on)
            if new_val is None:
                del collection[key]
    def apply_model_attributes_list(a_list, dict_list, attribute_fns,
                                    collection_on='bigg_id'):
        """For each attribute_fn, apply the values from the dict_list to the matching
        results in the list of dicts, using the given IDs. Returns a new list."""
        return [y for y in (apply_model_attributes(x, dict_list, attribute_fns, collection_on) for x in a_list)
                if y is not None]
    def get_attr_fn(attribute):
        """Make a default attribute setting function."""
        def new_attr_fn(val, dl_object):
            try:
                val[attribute] = getattr(dl_object, attribute)
            except AttributeError:
                # BUGFIX: the original referenced the undefined name `on_id`
                # here, so a missing model attribute raised NameError instead
                # of being logged.
                logging.debug('No %s found on model object %s' % (attribute, dl_object))
        return new_attr_fn
    def set_reversibility(reaction, cobra_reaction):
        """Derive map reversibility and signed metabolite coefficients from bounds."""
        reaction['reversibility'] = (cobra_reaction.lower_bound < 0 and cobra_reaction.upper_bound > 0)
        # reverse metabolites if reaction runs in reverse
        rev_mult = (-1 if
                    (cobra_reaction.lower_bound < 0 and cobra_reaction.upper_bound <= 0)
                    else 1)
        # use metabolites from reaction
        reaction['metabolites'] = [{'bigg_id': met.id, 'coefficient': coeff * rev_mult}
                                   for met, coeff in
                                   cobra_reaction.metabolites.items()]
    def set_genes(reaction, cobra_reaction):
        """Replace the map reaction's gene list with the model's genes."""
        reaction['genes'] = [{'bigg_id': x.id, 'name': x.name} for x in cobra_reaction.genes]
    # vars
    body = get_body(the_map)
    # compare reactions to model
    reaction_attributes = [get_attr_fn('name'), get_attr_fn('gene_reaction_rule'),
                           set_reversibility, set_genes]
    apply_model_attributes_dict(get_reactions(body), model.reactions,
                                reaction_attributes)
    # compare metabolites to model
    metabolite_attributes = [get_attr_fn('name')]
    apply_model_attributes_dict(get_nodes(body), model.metabolites,
                                metabolite_attributes)
    # delete any nodes with no segment
    remove_unconnected_nodes(get_nodes(body), get_reactions(body))
    # delete segments with no nodes
    remove_unconnected_segments(get_reactions(body), get_nodes(body))
    # delete reactions with missing metabolite segments
    remove_reactions_with_missing_metabolites(get_reactions(body), get_nodes(body))
def convert(the_map, model, map_name=None, map_description=None,
            reaction_id_mapping=None, metabolite_id_mapping=None,
            gene_id_mapping=None, debug=False):
    """Convert an Escher map to the latest format using the COBRA model to update
    content. Returns a new map.

    Pipeline: schema upgrade -> ID remapping -> model application, with the
    final result always validated against the map schema.

    Arguments
    ---------
    the_map: An Escher map loaded as a Python object (e.g. json.load('my_map.json')).
    model: A COBRA model.
    map_name: A name for the map. If a name is already present, this name
    overrides it.
    map_description: A description for the map. If a name is already present,
    this name overrides it.
    reaction_id_mapping: A dictionary with existing reaction IDs as keys and the
    new reaction IDs as values.
    metabolite_id_mapping: A dictionary with existing metabolite IDs as keys and the
    new metabolite IDs as values.
    gene_id_mapping: A dictionary with existing gene IDs as keys and the new
    gene IDs as values.
    debug: Check the map against the schema at some intermediate steps.
    """
    # make sure everything is up-to-date
    new_map = old_map_to_new_schema(the_map, map_name=map_name,
                                    map_description=map_description)
    if debug:
        validate_map(new_map)
    # apply the ids mappings
    apply_id_mappings(new_map, reaction_id_mapping, metabolite_id_mapping,
                      gene_id_mapping)
    if debug:
        validate_map(new_map)
    # apply the new model
    apply_cobra_model_to_map(new_map, model)
    # always validate the final result before returning it
    validate_map(new_map)
    return new_map
# Allow running this module directly as a script.
if __name__ == "__main__":
    main()
|
"use strict";
const chai = require('chai');
const expect = chai.expect;
const CQL = require('../src/CQL');
// Test suite for the CQL query builder's SELECT support.
// NOTE: arrow functions do not receive Mocha's per-test context; `this` below
// is the module-level `this`, shared by every arrow in this file — the helper
// assignments in beforeEach rely on that sharing.
describe('CQL', () => {
    describe('Select', () => {
        beforeEach( () => {
            // Convenience builder: a SELECT with a fixed keyspace/table.
            this.select = (list) => CQL.select(list).from('kn', 'tb');
        });
        describe('.constructor(list)', () => {
            it('should not require a list', () => {
                // FIX: `.to.not.throw` without invocation never executed the
                // assertion; it must be called as a method. Also dropped an
                // unused outer `let q;` that was shadowed inside the closure.
                expect(() => {
                    let q = CQL.select();
                }).to.not.throw();
            });
            it('should understand a selection list', () => {
                let list = 'a b c'.split(' ');
                let q = this.select(list);
                expect(q.toString()).to.include('SELECT ' + list.join(', '));
            });
        });
        describe('#from(keyspaceName,tableName)', () => {
            beforeEach( () => {
                this.q = CQL.select();
                this.ksn = 'ksname';
                this.tbn = 'tbname';
            });
            it('should be an instance method', () => {
                expect(this.q).itself.to.respondTo('from');
            });
            it('should throw if no arguments is provided', () => {
                expect(() => {
                    let q = this.q.from();
                }).to.throw(Error)
                    .that.has.property('type')
                    .that.equals('ArgumentRequired');
            });
            it('should return the query object for chaining', () => {
                let q = this.q.from(this.ksn, this.tbn);
                expect(q).to.equal(this.q);
            });
            it('should set the keyspace name and table name', () => {
                let q = this.q.from(this.ksn, this.tbn);
                expect(q.toString()).to.include(`FROM ${this.ksn}.${this.tbn}`);
            });
            it('should understand a single argument', () => {
                let q = this.q.from(this.tbn);
                expect(q.toString()).to.include(`FROM ${this.tbn}`);
            });
            it('should understand .from(null, tableName) invocation', () => {
                let q = this.q.from(null, this.tbn);
                expect(q.toString()).to.include(`FROM ${this.tbn}`);
            });
        });
        describe('#where(relations)', () => {
            it('should be an instance method', () => {
                let q = this.select();
                expect(q).itself.to.respondTo('where');
            });
            it('should undestand equality relation .where({name:value})', () => {
                let q = this.select().where({
                    identity: 'established',
                    mistake: 0
                });
                // Values are bound as placeholders, not inlined.
                expect(q.toString()).to.include('WHERE identity = ? AND mistake = ?');
            });
        });
        describe('#order(list)', () => {
            it('should be a function', () => {
                expect(CQL.select()).itself.to.respondTo('order');
            });
            it('should understand a list of type {col1:"asc", col2:"desc"}', () => {
                let q = this.select().order({
                    col1:'asc',
                    col2:'desc'
                });
                expect(q.toString()).to.include('ORDER BY col1 ASC, col2 DESC');
            });
            it('should understand integers as ordering', () => {
                // 1 means ascending, -1 means descending.
                let q = this.select().order({
                    col1: 1,
                    col2: -1
                });
                expect(q.toString()).to.include('ORDER BY col1 ASC, col2 DESC');
            });
            it('should throw Error.UnknownOrdering if incorrect ordering is requested', () => {
                expect(() => {
                    this.select().order({col1:'sc'});
                }).to.throw(Error)
                    .that.has.property('type')
                    .that.equals('UnknownOrdering');
            });
        });
        describe('#limit(n)', () => {
            it('should be a function', () => {
                expect(CQL.select()).itself.to.respondTo('limit');
            });
            it('should add limit clause to the query', () => {
                let q = this.select().limit(12);
                expect(q.toString()).to.include('LIMIT 12');
            });
            it('should accept a digits-only string', () => {
                let q = this.select().limit('13');
                expect(q.toString()).to.include('LIMIT 13');
            });
            it('should throw Error:NaNLimit if not a clean number is passed', () => {
                expect(() => {
                    this.select().limit('13k');
                }).to.throw(Error)
                    .that.has.property('type')
                    .that.equals('NaNLimit');
            });
        });
        describe('#getValues()', () => {
            it('should be an instance method', () => {
                expect(this.select()).itself.to.respondTo('getValues');
            });
            it('should return values from relations', () => {
                let q = this.select().where({a:'aaa', b:1});
                expect(q.getValues()).to.deep.equal(['aaa',1]);
            });
        });
        describe('#toString()', () => {
            it('should throw if no from clause was provided', () => {
                let q = CQL.select();
                expect(() => {
                    q.toString();
                }).to.throw(Error)
                    .that.has.property('type')
                    .that.equals('ClauseRequired');
            });
        });
    });
});
|
/*
* Copyright (c) 2013, 2015 Oracle and/or its affiliates. All rights reserved. This
* code is released under a tri EPL/GPL/LGPL license. You can use it,
* redistribute it and/or modify it under the terms of the:
*
* Eclipse Public License version 1.0
* GNU General Public License version 2
* GNU Lesser General Public License version 2.1
*/
package org.jruby.truffle.runtime.core;
import org.jruby.truffle.nodes.RubyNode;
import org.jruby.truffle.nodes.objects.Allocator;
import org.jruby.truffle.runtime.RubyContext;
import org.jruby.truffle.runtime.hash.Entry;
import org.jruby.truffle.runtime.hash.HashOperations;
import org.jruby.truffle.runtime.hash.KeyValue;
import org.jruby.truffle.runtime.subsystems.ObjectSpaceManager;
/**
 * Runtime representation of a Ruby Hash. The backing store is one of:
 * null (empty), an Object[] of packed alternating key/value pairs (small
 * hashes), or an Entry[] bucket array whose entries are additionally linked
 * into an insertion-order sequence via firstInSequence/lastInSequence.
 */
public class RubyHash extends RubyBasicObject {

    private RubyProc defaultBlock;   // block producing defaults, as in Hash.new { ... }
    private Object defaultValue;     // plain default value, as in Hash.new(default)
    private Object store;            // null | Object[] (packed) | Entry[] (buckets)
    private int storeSize;           // number of key/value pairs
    private Entry firstInSequence;   // head of insertion-order list (Entry[] store only)
    private Entry lastInSequence;    // tail of insertion-order list (Entry[] store only)

    public RubyHash(RubyClass rubyClass, RubyProc defaultBlock, Object defaultValue, Object store, int storeSize, Entry firstInSequence) {
        super(rubyClass);
        this.defaultBlock = defaultBlock;
        this.defaultValue = defaultValue;
        this.store = store;
        this.storeSize = storeSize;
        this.firstInSequence = firstInSequence;
        // NOTE: lastInSequence is left null here; callers set it later via
        // setStore or setLastInSequence.
    }

    public RubyProc getDefaultBlock() {
        return defaultBlock;
    }

    public void setDefaultBlock(RubyProc defaultBlock) {
        this.defaultBlock = defaultBlock;
    }

    public Object getDefaultValue() {
        return defaultValue;
    }

    public void setDefaultValue(Object defaultValue) {
        this.defaultValue = defaultValue;
    }

    public Object getStore() {
        return store;
    }

    /** Atomically replaces the store and all of its bookkeeping fields. */
    public void setStore(Object store, int storeSize, Entry firstInSequence, Entry lastInSequence) {
        assert verifyStore(store, storeSize, firstInSequence, lastInSequence);
        this.store = store;
        this.storeSize = storeSize;
        this.firstInSequence = firstInSequence;
        this.lastInSequence = lastInSequence;
    }

    /**
     * Assertion-only consistency check of a candidate store. Always returns
     * true so it can be used as {@code assert verifyStore(...)}; violations
     * trip the inner asserts instead.
     */
    public boolean verifyStore(Object store, int storeSize, Entry firstInSequence, Entry lastInSequence) {
        assert store == null || store instanceof Object[] || store instanceof Entry[];

        if (store == null) {
            assert storeSize == 0;
            assert firstInSequence == null;
            assert lastInSequence == null;
        }

        if (store instanceof Entry[]) {
            final Entry[] entryStore = (Entry[]) store;
            int foundSize = 0;
            boolean foundFirst = false;
            // FIX: was initialized to true, which made the lastInSequence
            // assertion below vacuous; it must start false like foundFirst and
            // only become true when lastInSequence is found in the store.
            boolean foundLast = false;

            // Walk every bucket chain, counting entries and looking for the
            // sequence head and tail.
            for (int n = 0; n < entryStore.length; n++) {
                Entry entry = entryStore[n];
                while (entry != null) {
                    foundSize++;
                    if (entry == firstInSequence) {
                        foundFirst = true;
                    }
                    if (entry == lastInSequence) {
                        foundLast = true;
                    }
                    entry = entry.getNextInLookup();
                }
            }

            //assert foundSize == storeSize; Can't do this because sometimes we set the store and then fill it up
            assert firstInSequence == null || foundFirst;
            assert lastInSequence == null || foundLast;
        } else if (store instanceof Object[]) {
            // Packed stores are fixed-capacity key/value pair arrays and never
            // use the insertion-order sequence.
            assert ((Object[]) store).length == HashOperations.SMALL_HASH_SIZE * 2 : ((Object[]) store).length;
            assert firstInSequence == null;
            assert lastInSequence == null;
        }

        return true;
    }

    public int getSize() {
        return storeSize;
    }

    public void setSize(int storeSize) {
        this.storeSize = storeSize;
    }

    public Entry getFirstInSequence() {
        return firstInSequence;
    }

    public void setFirstInSequence(Entry firstInSequence) {
        this.firstInSequence = firstInSequence;
    }

    public Entry getLastInSequence() {
        return lastInSequence;
    }

    public void setLastInSequence(Entry lastInSequence) {
        this.lastInSequence = lastInSequence;
    }

    /** Visits all keys and values so ObjectSpace can walk the object graph. */
    @Override
    public void visitObjectGraphChildren(ObjectSpaceManager.ObjectGraphVisitor visitor) {
        for (KeyValue keyValue : HashOperations.verySlowToKeyValues(this)) {
            if (keyValue.getKey() instanceof RubyBasicObject) {
                ((RubyBasicObject) keyValue.getKey()).visitObjectGraph(visitor);
            }
            if (keyValue.getValue() instanceof RubyBasicObject) {
                ((RubyBasicObject) keyValue.getValue()).visitObjectGraph(visitor);
            }
        }
    }

    /** Allocates a fresh, empty hash with no defaults. */
    public static class HashAllocator implements Allocator {
        @Override
        public RubyBasicObject allocate(RubyContext context, RubyClass rubyClass, RubyNode currentNode) {
            return new RubyHash(rubyClass, null, null, null, 0, null);
        }
    }
}
|
package org.dalol.magictooltipview;
import android.content.Context;
import android.graphics.drawable.BitmapDrawable;
import android.os.Handler;
import android.os.Looper;
import android.os.Message;
import android.support.annotation.IdRes;
import android.support.annotation.LayoutRes;
import android.view.Gravity;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.view.animation.Animation;
import android.view.animation.AnimationUtils;
import android.widget.LinearLayout;
import android.widget.PopupWindow;
/**
 * Lightweight tooltip rendered in a PopupWindow below an anchor view, with an
 * optional auto-dismiss timeout driven by a main-thread Handler.
 */
public final class TooltipPopup {

    private final static int WHAT_DISMISS_AFTER = 0x4444;

    private final Context mContext;
    private final LayoutInflater mInflater;
    private long mDismissAfter;     // auto-dismiss delay in ms; <= 0 disables auto-dismiss
    private View mTooltipView;      // content view; must be set (setView/setLayout) before show()
    private PopupWindow popupWindow;

    public TooltipPopup(Context context) {
        mContext = context;
        popupWindow = new PopupWindow(mContext);
        popupWindow.setTouchable(true);
        // A (possibly empty) background drawable is required for the popup's
        // touch/dismiss handling to work on older platform versions.
        popupWindow.setBackgroundDrawable(new BitmapDrawable());
        popupWindow.setOutsideTouchable(false);
        // NOTE(review): setAnimationStyle expects a *style* resource, but an
        // anim resource id is passed here — confirm this has any effect.
        popupWindow.setAnimationStyle(android.R.anim.fade_in);
        popupWindow.setOnDismissListener(new PopupWindow.OnDismissListener() {
            @Override
            public void onDismiss() {
                // Cancel a pending auto-dismiss so it cannot fire after an
                // explicit or outside-touch dismissal.
                handler.removeMessages(WHAT_DISMISS_AFTER);
            }
        });
        mInflater = (LayoutInflater) context.getSystemService(Context.LAYOUT_INFLATER_SERVICE);
    }

    /** Inflates the given layout resource and uses it as the tooltip content. */
    public TooltipPopup setLayout(@LayoutRes int layout) {
        setView(mInflater.inflate(layout, new LinearLayout(mContext), false));
        return TooltipPopup.this;
    }

    /** Sets the tooltip content view; the popup wraps its measured size. */
    public TooltipPopup setView(View view) {
        mTooltipView = view;
        popupWindow.setContentView(mTooltipView);
        popupWindow.setWindowLayoutMode(ViewGroup.LayoutParams.WRAP_CONTENT, ViewGroup.LayoutParams.WRAP_CONTENT);
        return TooltipPopup.this;
    }

    /** Looks up a child of the tooltip content view. */
    public <T extends View> T findViewById(@IdRes int viewId) {
        return mTooltipView.findViewById(viewId);
    }

    /** Dismisses the tooltip automatically after {@code duration} ms (0 or less disables). */
    public TooltipPopup setAutoDismissTimeout(long duration) {
        mDismissAfter = duration;
        return TooltipPopup.this;
    }

    public TooltipPopup setFocusable(boolean focusable) {
        popupWindow.setFocusable(focusable);
        return TooltipPopup.this;
    }

    public void show(View anchorView) {
        show(anchorView, 0);
    }

    /**
     * Shows the tooltip horizontally centered beneath the anchor view, with an
     * extra vertical offset given in dp.
     */
    public void show(View anchorView, float offset) {
        mTooltipView.measure(ViewGroup.LayoutParams.WRAP_CONTENT, ViewGroup.LayoutParams.WRAP_CONTENT);
        int measuredWidth = mTooltipView.getMeasuredWidth();
        int[] location = new int[2];
        anchorView.getLocationOnScreen(location);
        int halfTW = measuredWidth / 2;
        int halfAV = anchorView.getMeasuredWidth() / 2;
        int extraOffset = Utils.toPx(mContext.getResources().getDisplayMetrics(), offset);
        // Center the tooltip under the anchor, pushed down by the offset.
        int xLocation = location[0] + (halfAV - halfTW);
        int yLocation = location[1] + anchorView.getMeasuredHeight() + extraOffset;
        popupWindow.showAtLocation(anchorView, Gravity.NO_GRAVITY, xLocation, yLocation);
        mTooltipView.startAnimation(AnimationUtils.loadAnimation(mContext, android.R.anim.fade_in));
        if (mDismissAfter > 0) {
            handler.sendEmptyMessageDelayed(WHAT_DISMISS_AFTER, mDismissAfter);
        }
    }

    public boolean isTooltipShown() {
        return popupWindow != null && popupWindow.isShowing();
    }

    // Drives the auto-dismiss: fades the tooltip out, then dismisses the popup.
    private final Handler handler = new Handler(Looper.getMainLooper()) {
        @Override
        public void handleMessage(Message msg) {
            super.handleMessage(msg);
            // FIX: only react to our own message; previously any message
            // delivered to this handler triggered the dismiss animation.
            if (msg.what != WHAT_DISMISS_AFTER) {
                return;
            }
            // FIX: guard against a missing content view to avoid an NPE;
            // fall back to a plain dismiss.
            if (mTooltipView == null) {
                if (popupWindow != null) {
                    popupWindow.dismiss();
                }
                return;
            }
            Animation animation = AnimationUtils.loadAnimation(mContext, android.R.anim.fade_out);
            animation.setAnimationListener(new Animation.AnimationListener() {
                @Override
                public void onAnimationStart(Animation animation) {
                }

                @Override
                public void onAnimationEnd(Animation animation) {
                    if (popupWindow != null) {
                        popupWindow.dismiss();
                    }
                }

                @Override
                public void onAnimationRepeat(Animation animation) {
                }
            });
            mTooltipView.startAnimation(animation);
        }
    };

    /** Dismisses the tooltip immediately, without the fade-out animation. */
    public void dismiss() {
        if (popupWindow != null) {
            popupWindow.dismiss();
        }
    }
}
|
#! /bin/bash
# Runs the "345M" parameter model
# Template launcher for single-node, 8-GPU GPT-2 pretraining via
# torch.distributed.launch. The <Specify ...> placeholders below must be
# filled in before this script is runnable.

GPUS_PER_NODE=8
# Change for multinode config
MASTER_ADDR=localhost
MASTER_PORT=6000
NNODES=1
NODE_RANK=0
# Total number of processes across all nodes (one per GPU).
WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES))

# Placeholders: dataset prefix (without extension) and checkpoint directory.
DATA_PATH=<Specify path and file prefix>_text_document
CHECKPOINT_PATH=<Specify path>

DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NNODES --node_rank $NODE_RANK --master_addr $MASTER_ADDR --master_port $MASTER_PORT"

# Launch pretraining. Hyperparameters match the 345M GPT-2 configuration
# (24 layers, hidden size 1024, 16 heads, 1024-token context).
python -m torch.distributed.launch $DISTRIBUTED_ARGS \
       pretrain_gpt2.py \
       --model-parallel-size 1 \
       --num-layers 24 \
       --hidden-size 1024 \
       --num-attention-heads 16 \
       --batch-size 8 \
       --seq-length 1024 \
       --max-position-embeddings 1024 \
       --train-iters 500000 \
       --lr-decay-iters 320000 \
       --save $CHECKPOINT_PATH \
       --load $CHECKPOINT_PATH \
       --data-path $DATA_PATH \
       --vocab-file gpt2-vocab.json \
       --merge-file gpt2-merges.txt \
       --data-impl mmap \
       --split 949,50,1 \
       --distributed-backend nccl \
       --lr 0.00015 \
       --lr-decay-style cosine \
       --min-lr 1.0e-5 \
       --weight-decay 1e-2 \
       --clip-grad 1.0 \
       --warmup .01 \
       --checkpoint-activations \
       --log-interval 100 \
       --save-interval 10000 \
       --eval-interval 1000 \
       --eval-iters 10 \
       --fp16

# NOTE(review): no-op unless `set -x` was enabled earlier — confirm intent.
set +x
|
package org.sonatype.nexus.repository.protop.internal;
import org.codehaus.groovy.runtime.DefaultGroovyMethods;
import org.sonatype.nexus.repository.http.HttpStatus;
import org.sonatype.nexus.repository.storage.MissingAssetBlobException;
import org.sonatype.nexus.repository.storage.StorageFacet;
import org.sonatype.nexus.repository.view.Context;
import org.sonatype.nexus.repository.view.Response;
import org.sonatype.nexus.repository.view.Status;
import org.sonatype.nexus.repository.view.matchers.token.TokenMatcher;
import javax.annotation.Nonnull;
import javax.inject.Named;
import javax.inject.Singleton;
import java.io.IOException;
import java.io.InputStream;
import java.util.Map;
import java.util.Objects;
/**
 * Merges package-root metadata results from all member repositories of a
 * protop group repository, preferring the group's own cached content.
 */
@Named
@Singleton
public class ProtopGroupPackageHandler extends ProtopGroupHandler {

    @Override
    protected Response doGet(@Nonnull final Context context,
                             @Nonnull final DispatchedRepositories dispatched) throws Exception {
        log.debug("[getPackage] group repository: {} tokens: {}",
                context.getRepository().getName(),
                context.getAttributes().require(TokenMatcher.State.class).getTokens());
        return buildMergedPackageRoot(context, dispatched);
    }

    /**
     * Serves the merged package root. Member requests are always dispatched
     * first (to trigger update events and group cache invalidation); on a
     * cache miss the member responses are merged, otherwise the cached
     * content is returned with a lazy missing-blob recovery hook attached.
     */
    private Response buildMergedPackageRoot(final Context context,
                                            final DispatchedRepositories dispatched) throws Exception {
        final ProtopGroupFacet groupFacet = getGroupFacet(context);

        // Dispatch requests to members to trigger update events and group cache invalidation when a member has changed
        final Map responses = getResponses(context, dispatched, groupFacet);

        ProtopContent content = groupFacet.getFromCache(context);

        // first check cached content against itself only
        if (Objects.isNull(content)) {
            if (Objects.isNull(responses) || responses.isEmpty()) {
                return ProtopResponses.notFound("Not found");
            }
            return ProtopResponses.ok(groupFacet.buildPackageRoot(responses, context));
        }

        // only add missing blob handler if we actually had content, no need otherwise
        content.missingBlobInputStreamSupplier(e ->
                handleMissingBlob(context, responses, groupFacet, e));

        return new Response.Builder().status(Status.success(HttpStatus.OK))
                .payload(content)
                .attributes(content.getAttributes())
                .build();
    }

    /** Returns the repository's storage facet. */
    private StorageFacet getStorageFacet(final Context context) {
        // FIX: dropped the pointless Groovy DefaultGroovyMethods.asType
        // round-trip — facet(StorageFacet.class) already returns a StorageFacet.
        return context.getRepository().facet(StorageFacet.class);
    }

    /**
     * Rebuilds the merged package root from member responses when the cached
     * content's blob has gone missing mid-stream.
     */
    private InputStream handleMissingBlob(final Context context,
                                          final Map responses,
                                          final ProtopGroupFacet groupFacet,
                                          final MissingAssetBlobException e) throws IOException {
        // Why check the responses? Members may lack their own cache and their
        // remotes may have nothing (404) for the request; for that to happen,
        // a lot must already be wrong elsewhere.
        if (responses.isEmpty()) {
            // We can't change the status of a response while streaming out,
            // therefore be kind and return an error stream instead.
            return ProtopFacetUtils.errorInputStream("Members had no metadata to merge for repository " + context.getRepository().getName());
        }

        return groupFacet.buildMergedPackageRootOnMissingBlob(responses, context, e);
    }
}
|
<filename>planner/admin.py
# coding=utf-8
# Python imports
# Django imports
from django.contrib import admin
# Third party app imports
# Local app imports
from .models import Program, ProgramPhase, Workout, WorkoutDay, WorkoutSession
class ProgramModelAdmin(admin.ModelAdmin):
    # Change-list configuration for Program: only "id" links to the change
    # page so the remaining displayed fields can be edited inline.
    list_display = ["id", "name", "image", "summary", "level", "goal", "start_date"]
    list_display_links = ["id"]
    list_editable = ["name", "image", "summary", "level", "goal", "start_date"]
    list_filter = ["name"]
    search_fields = ["name"]

    class Meta:
        # NOTE(review): ModelAdmin does not read an inner Meta class; this
        # looks like a no-op carried over from a ModelForm pattern — confirm.
        model = Program
class ProgramPhaseModelAdmin(admin.ModelAdmin):
    # Change-list configuration for ProgramPhase; "id" is the change link,
    # the other displayed fields are inline-editable.
    list_display = ["id", "order", "weeks_duration", "workout"]
    list_display_links = ["id"]
    list_editable = ["order", "weeks_duration", "workout"]
    list_filter = ["order"]
    search_fields = ["order"]

    class Meta:
        # NOTE(review): inner Meta is ignored by ModelAdmin — confirm and remove.
        model = ProgramPhase
class WorkoutModelAdmin(admin.ModelAdmin):
    # Change-list configuration for Workout; "id" is the change link.
    list_display = ["id", "name", "image"]
    list_display_links = ["id"]
    list_editable = ["name", "image"]
    list_filter = ["name"]
    search_fields = ["name"]

    class Meta:
        # NOTE(review): inner Meta is ignored by ModelAdmin — confirm and remove.
        model = Workout
class WorkoutDayModelAdmin(admin.ModelAdmin):
    # Change-list configuration for WorkoutDay; "id" is the change link.
    list_display = ["id", "day_of_week", "session"]
    list_display_links = ["id"]
    list_editable = ["day_of_week", "session"]
    list_filter = ["day_of_week"]
    search_fields = ["day_of_week"]

    class Meta:
        # NOTE(review): inner Meta is ignored by ModelAdmin — confirm and remove.
        model = WorkoutDay
class WorkoutSessionModelAdmin(admin.ModelAdmin):
    # Change-list configuration for WorkoutSession; "id" is the change link.
    list_display = ["id", "name", "summary", "recommendations", "motivation_quotes"]
    list_display_links = ["id"]
    list_editable = ["name", "summary", "recommendations", "motivation_quotes"]
    list_filter = ["name"]
    search_fields = ["name"]

    class Meta:
        # NOTE(review): inner Meta is ignored by ModelAdmin — confirm and remove.
        model = WorkoutSession
# Register each planner model with its customized ModelAdmin.
admin.site.register(Program, ProgramModelAdmin)
admin.site.register(ProgramPhase, ProgramPhaseModelAdmin)
admin.site.register(Workout, WorkoutModelAdmin)
admin.site.register(WorkoutDay, WorkoutDayModelAdmin)
admin.site.register(WorkoutSession, WorkoutSessionModelAdmin)
|
#!/usr/bin/env bash
# Full-screen terminal work/repose timer. Toggle modes with SPACE, quit with
# 'q' (writes a timelog entry) or Ctrl-C (aborts without logging).
# Depends on external helpers not defined here: centerpad, findlocalest,
# hms, gdate (GNU date) — presumably shell functions/tools from the same
# project; confirm they are on PATH before running.

# constants
init_text=" WORKSPACE READY "
status_work_text=" STATUS: WORKING "
status_repose_text=" STATUS: REPOSE "
init_padding=$(centerpad "$init_text")
status_work_padding=$(centerpad "$status_work_text XX:XX:XX")
status_repose_padding=$(centerpad "$status_repose_text XX:XX:XX")
init_msg=$(printf '%s%s%s%s%s%s' "$(tput setab 12)" "$(tput setaf 0)" "$(tput bold)" "$(tput blink)" "$init_text" "$(tput sgr0)")
status_work_msg=$(printf '%s%s%s%s%s' "$(tput setab 35)" "$(tput setaf 0)" "$(tput bold)" "$status_work_text" "$(tput sgr0)")
status_repose_msg=$(printf '%s%s%s%s%s' "$(tput setab 3)" "$(tput setaf 0)" "$(tput bold)" "$status_repose_text" "$(tput sgr0)")
# Red variant shown once repose has lasted longer than the preceding work.
status_repose_msg_long=$(printf '%s%s%s%s%s' "$(tput setab 124)" "$(tput setaf 0)" "$(tput bold)" "$status_repose_text" "$(tput sgr0)")
repose_scorn_threshold=5
timelog_file=$(findlocalest "meta/timelog.md")

# Initialize status variables
work_duration=0
work_start=0
repose_duration=0
mode_duration=$work_duration
status_mode_padding=$status_work_padding
status_mode_msg=$status_work_msg
timer_loop=true
mode='work'

# Print mode_duration as HH:MM:SS.
print_hms() {
    printf '%02d:%02d:%02d\n' $(($mode_duration/3600)) $(($mode_duration%3600/60)) $(($mode_duration%60))
}

# Handle one keypress: 'q' quits (logging), SPACE toggles work/repose.
parse_input() {
    if [[ $input_key == 'q' ]]; then
        pre_cleanup
        cleanup
    elif [[ $input_key == ' ' ]]; then
        if [[ $mode == 'work' ]]; then
            # Stash work totals, switch the display to repose mode.
            work_duration=$mode_duration
            work_start=$mode_start
            mode='repose'
            mode_duration=$repose_duration
            status_mode_padding=$status_repose_padding
            status_mode_msg=$status_repose_msg
        else
            repose_duration=$mode_duration
            repose_start=$mode_start
            mode='work'
            mode_duration=$work_duration
            status_mode_padding=$status_work_padding
            status_mode_msg=$status_work_msg
        fi
        # Back-date the mode start so elapsed time resumes where it left off.
        mode_start=$(($(date '+%s') - mode_duration))
    fi
    input_key=''
}

# Prompt for a description and append the work block to the timelog.
# Typing 'discard' skips logging entirely.
write_to_timelog() {
    clear
    echo "DESCRIBE WORKBLOCK:"
    read desc
    if [[ $desc != 'discard' ]]; then
        echo "" >> $timelog_file
        #echo "#"$(aenow) >> $timelog_file
        echo "#"$(gdate --rfc-3339=seconds) >> $timelog_file
        if [[ -z "${desc// }" ]]; then
            echo $(hms $work_duration) >> $timelog_file
        else
            echo $(hms $work_duration)" -- "$desc >> $timelog_file
        fi
        echo "> "$(hms $repose_duration)" -- repose" >> $timelog_file
        echo "> Efficiency: "$(( work_duration * 100 / (work_duration + repose_duration) ))"%" >> $timelog_file
    fi
}

# Print final work/repose totals after a normal quit.
final_output() {
    if [[ $desc != 'discard' ]]; then
        # NOTE(review): no '$' before work_duration/repose_duration below —
        # this only works because bash arithmetic resolves bare names; add '$'
        # for clarity.
        mode_duration=work_duration
        #voice " End workblock. $(print_hms)"
        printf '\nWork: %s\n' $(print_hms)
        mode_duration=repose_duration
        printf 'Repose: %s\n' $(print_hms)
    fi
}

# Same as final_output, but unconditional (used on Ctrl-C abort).
sigint_final_output() {
    mode_duration=work_duration
    #if [[ work_duration -gt 0 ]]; then
    #voice "Aborted work block. $(print_hms)"
    #else
    #voice " Aborted work block."
    #fi
    printf '\nWork: %s\n' $(print_hms)
    mode_duration=repose_duration
    printf 'Repose: %s\n' $(print_hms)
}

# Flush the in-progress mode's elapsed time and stop the main loop.
pre_cleanup() {
    if [[ $mode == 'work' ]]; then
        work_duration=$mode_duration
    else
        repose_duration=$mode_duration
    fi
    timer_loop=false
}

# Normal exit: restore terminal, log, print summary.
cleanup() {
    pre_cleanup; tput sgr0; tput cnorm; write_to_timelog; tput rmcup || clear; final_output; exit 0
}

# Ctrl-C exit: restore terminal and print summary without logging.
sigint_cleanup() {
    pre_cleanup; tput sgr0; tput cnorm; tput rmcup || clear; sigint_final_output; exit 0
}

# Refuse to start without a timelog file to append to.
if [ -z $timelog_file ]; then
    echo ""
    echo "Can't find project timelog file."
    echo "Are you in the right directory?"
    echo "If so, (create meta dir and) touch meta/timelog.md from project directory root."
    exit;
fi

trap sigint_cleanup SIGINT
tput smcup   # switch to the alternate screen
tput civis   # hide the cursor
printf "\n$init_padding$init_msg"
read -n 1 -s -r   # wait for any key before starting the clock
work_start=$(($(date '+%s') - work_duration))
mode_start=$work_start
#voice "Begin workblock"

# Main loop: redraw the status line once per second and poll for input.
while $timer_loop; do
    clear
    mode_duration=$(($(date '+%s') - mode_start))
    hms=$(print_hms)
    printf "\n$status_mode_padding$status_mode_msg $hms"
    # Set IFS to empty string so that read doesn't trim
    IFS= read -rsn1 -t 1 input_key # wait for key entry for 1 sec before continuing
    parse_input
    if [[ $mode == 'repose' && $mode_duration -gt $work_duration ]]; then
        status_mode_msg=$status_repose_msg_long
    fi
done
|
import streamlit as st
import streamlit.components.v1 as components
from pathlib import Path
import pandas as pd
import numpy as np
import re
from scipy.io.arff import loadarff
import matplotlib.pyplot as plt
import pydeck as pdk
import wikipedia
@st.cache()  # NOTE(review): st.cache is deprecated in newer Streamlit — consider st.cache_data
def load_mammals():
    """Load the mammals ARFF dataset and reshape it into three tidy frames:
    per-cell bio/location data, per-cell monthly statistics, and a long
    cell-by-mammal presence table."""
    # NOTE(review): hard-coded local Windows path — this only runs on the
    # author's machine; parameterize before sharing.
    path_with_files = Path(r"C:\Users\zbenm\Mammals")
    file = "mammals.arff"
    #file = "commonmammals.arff"
    data, _ = loadarff(path_with_files / file)
    df = pd.DataFrame(data)
    columns = df.columns
    # Column groups: "bio*" climate variables, capitalized mammal names,
    # lat/lon, and everything else is a monthly statistic.
    r = re.compile('^bio')
    bio_columns = [col for col in columns if r.match(col)]
    r = re.compile('^[A-Z]')
    mammal_columns = [col for col in columns if r.match(col)]
    location_columns = ['latitude', 'longitude']
    monthly_columns = [col for col in columns if
                       col not in set(mammal_columns) | set(bio_columns) | set(location_columns)]
    # Grid cells are identified by row index.
    df['cell_id'] = df.index
    df_grid_cell = df[['cell_id'] + location_columns + bio_columns]
    # Monthly columns are named "<statistic>_<month parts>"; split on the last
    # two underscore-separated tokens to recover (statistic, month).
    df_monthly_v1 = df[['cell_id'] + monthly_columns]
    df_monthly_v2 = df_monthly_v1.melt(id_vars=['cell_id'])
    df_monthly_v2[['statistics', 'month']] = pd.DataFrame.from_records(
        df_monthly_v2['variable'].str.split('_').apply(lambda l: ('_'.join(l[:-2]), '_'.join(l[-2:])))
    )
    df_monthly_v3 = df_monthly_v2.pivot(values='value',
                                        index=['cell_id', 'month'],
                                        columns=['statistics']).reset_index()
    # Long presence table: ARFF encodes presence as byte b'1'.
    df_mammals_v1 = df[['cell_id'] + mammal_columns]
    df_mammals_v2 = df_mammals_v1.melt(id_vars='cell_id', var_name='Mammal')
    df_mammals_v2['value'] = df_mammals_v2['value'] == b'1'
    return df_grid_cell, df_monthly_v3, df_mammals_v2

df_grid_cell, df_monthly, df_mammals = load_mammals()

def heatmap_of_varieties():
    # Heatmap of mammal species richness per grid cell (pydeck HeatmapLayer).
    how_many_mammals = (
        df_mammals
        .groupby('cell_id')
        .agg(count_animals=('value', 'sum'))
    )
    merged_df = how_many_mammals.merge(df_grid_cell, on='cell_id')[["longitude", "latitude", "count_animals"]]
    view = pdk.data_utils.compute_view(df_grid_cell[["longitude", "latitude"]])
    view.zoom = 3
    COLOR_BREWER_BLUE_SCALE = [
        [240, 249, 232],
        [204, 235, 197],
        [168, 221, 181],
        [123, 204, 196],
        [67, 162, 202],
        [8, 104, 172],
    ]
    mammals_kinds = pdk.Layer(
        "HeatmapLayer",
        data=merged_df,
        opacity=0.9,
        get_position=["longitude", "latitude"],
        aggregation="MEAN",
        color_range=COLOR_BREWER_BLUE_SCALE,
        threshold=1,
        get_weight='count_animals',
        pickable=True,
    )
    r = pdk.Deck(
        layers=[mammals_kinds],
        initial_view_state=view,
        map_provider="mapbox",
    )
    st.write('Heatmap of number of mammal kinds')
    # NOTE(review): st.write has no allow_unsafe kwarg in current Streamlit —
    # presumably unsafe_allow_html was intended; confirm.
    st.write(r, allow_unsafe=True)

heatmap_of_varieties()

# Per-mammal view: Wikipedia article plus a map of cells where it occurs.
selected_mammal = st.selectbox(
    'Select a mammal',
    df_mammals['Mammal'].unique()
)
# NOTE(review): wikipedia.page can raise DisambiguationError/PageError for
# ambiguous species names — consider a try/except here.
url = wikipedia.page(selected_mammal).url
components.iframe(url, height=400, scrolling=True)
st.map(df_mammals.loc[lambda d: (d['Mammal'] == selected_mammal) &
                      d['value']].merge(df_grid_cell, on='cell_id'))
|
#pragma once

// std::list is used by Tree::SearchPath; include it directly instead of
// relying on a transitive include.
#include <list>

#include <KAI/Core/Config/Base.h>
#include <KAI/Core/Object/Object.h>
#include <KAI/Core/Pathname.h>
KAI_BEGIN
// Tree keeps the object hierarchy's root, the current scope, and an ordered
// list of scopes (the search path) consulted when resolving a Pathname or
// Label to an Object.
class Tree
{
public:
    typedef std::list<Object> SearchPath;

private:
    SearchPath _path;       // ordered scopes searched during Resolve
    Object _root, _scope;   // tree root and current scope
    Pathname _current;      // NOTE(review): not referenced in this header — presumably the current path; confirm usage in the .cpp

public:
    // Replace the root object; no validation is performed here.
    void SetRoot(const Object &Q) { _root = Q; }

    // Append a scope to the search path, given either its path or the object itself.
    void AddSearchPath(const Pathname &);
    void AddSearchPath(const Object &);

    // Resolve a name to an Object by walking the search path (defined out of line).
    Object Resolve(const Pathname &) const;
    Object Resolve(const Label &) const;

    Object GetRoot() const { return _root; }
    Object GetScope() const { return _scope; }
    const SearchPath &GetSearchPath() const { return _path; }

    // Change the current scope, by object or by path.
    void SetScope(const Object &);
    void SetScope(const Pathname &);

    //void SetSearchPath(const SearchPath &);
    //void GetChildren() const;
};
KAI_END
|
<gh_stars>1-10
#ifndef ROSE_BinaryAnalysis_String_H
#define ROSE_BinaryAnalysis_String_H
#include <MemoryMap.h>
#include <Sawyer/CommandLine.h>
#include <Sawyer/Optional.h>
namespace rose {
namespace BinaryAnalysis {
/** Support for finding strings in memory.
*
* This namespace provides support for various kinds of strings in specimen memory, including an @ref StringFinder "analysis"
* that searches for strings in specimen memory. A string is a sequence of characters encoded in one of a variety of ways in
* memory. For instance, NUL-terminated ASCII is a common encoding from C compilers. The characters within the string must
* all satisfy some valid-character predicate. The terms used in this analysis are based on the Unicode standard, and are
* defined here in terms of string encoding (translation of a string as printed to a sequence of octets). Although this
* analysis can encode strings, its main purpose is decoding strings from an octet stream into a sequence of code points.
*
* Unicode and its parallel standard, the ISO/IEC 10646 Universal Character Set, together constitute a modern, unified
* character encoding. Rather than mapping characters directly to octets (bytes), they separately define what characters are
* available, their numbering, how those numbers are encoded as a series of "code units" (limited-size numbers), and finally
* how those units are encoded as a stream of octets. The idea behind this decomposition is to establish a universal set of
* characters that can be encoded in a variety of ways. To describe this model correctly one needs more precise terms than
* "character set" and "character encoding." The terms used in the modern model follow:
*
* A character repertoire is the full set of abstract characters that a system supports. The repertoire may be closed, i.e. no
* additions are allowed without creating a new standard (as is the case with ASCII and most of the ISO-8859 series), or it
* may be open, allowing additions (as is the case with Unicode and to a limited extent the Windows code pages). The
* characters in a given repertoire reflect decisions that have been made about how to divide writing systems into basic
* information units. The basic variants of the Latin, Greek and Cyrillic alphabets can be broken down into letters, digits,
* punctuation, and a few special characters such as the space, which can all be arranged in simple linear sequences that are
* displayed in the same order they are read. Even with these alphabets, however, diacritics pose a complication: they can be
* regarded either as part of a single character containing a letter and diacritic (known as a precomposed character), or as
* separate characters. The former allows a far simpler text handling system but the latter allows any letter/diacritic
* combination to be used in text. Ligatures pose similar problems. Other writing systems, such as Arabic and Hebrew, are
* represented with more complex character repertoires due to the need to accommodate things like bidirectional text and
* glyphs that are joined together in different ways for different situations.
*
* A coded character set (CCS) specifies how to represent a repertoire of characters using a number of (typically
* non-negative) integer values called code points. For example, in a given repertoire, a character representing the capital
* letter "A" in the Latin alphabet might be assigned to the integer 65, the character for "B" to 66, and so on. A complete
* set of characters and corresponding integers is a coded character set. Multiple coded character sets may share the same
* repertoire; for example ISO/IEC 8859-1 and IBM code pages 037 and 500 all cover the same repertoire but map them to
* different codes. In a coded character set, each code point only represents one character, i.e., a coded character set is a
* function.
*
* A character encoding form (CEF) specifies the conversion of a coded character set's integer codes into a set of
* limited-size integer code values that facilitate storage in a system that represents numbers in binary form using a fixed
* number of bits (i.e. practically any computer system). For example, a system that stores numeric information in 16-bit
* units would only be able to directly represent integers from 0 to 65,535 in each unit, but larger integers could be
* represented if more than one 16-bit unit could be used. This is what a CEF accommodates: it defines a way of mapping a
* single code point from a range of, say, 0 to 1.4 million, to a series of one or more code values from a range of, say, 0 to
* 65,535.
*
* The simplest CEF system is simply to choose large enough units that the values from the coded character set can be encoded
* directly (one code point to one code value). This works well for coded character sets that fit in 8 bits (as most legacy
* non-CJK encodings do) and reasonably well for coded character sets that fit in 16 bits (such as early versions of
* Unicode). However, as the size of the coded character set increases (e.g. modern Unicode requires at least 21
* bits/character), this becomes less and less efficient, and it is difficult to adapt existing systems to use larger code
* values. Therefore, most systems working with later versions of Unicode use either UTF-8, which maps Unicode code points to
* variable-length sequences of octets, or UTF-16, which maps Unicode code points to variable-length sequences of 16-bit
* words.
*
* Next, a character encoding scheme (CES) specifies how the fixed-size integer code values should be mapped into an octet
* sequence suitable for saving on an octet-based file system or transmitting over an octet-based network. With Unicode, a
* simple character encoding scheme is used in most cases, simply specifying whether the bytes for each integer should be in
* big-endian or little-endian order (even this isn't needed with UTF-8). However, there are also compound character encoding
* schemes, which use escape sequences to switch between several simple schemes (such as ISO/IEC 2022), and compressing
* schemes, which try to minimise the number of bytes used per code unit (such as SCSU, BOCU, and Punycode).
*
* Once the code points of a string are encoded as octets, the string as a whole needs some description to demarcate it from
* surrounding data. ROSE currently supports two styles of demarcation: length-encoded strings and terminated strings. A
* length-encoded string's code point octets are preceded by octets that encode the string length, usually in terms of the
* number of code points. Decoding such a string consists of decoding the length and then decoding code points until the
* required number of code points have been obtained. On the other hand, terminated strings are demarcated from surrounding
* data by a special code point such as the NUL character for ASCII strings. Decoding a terminated string consists of decoding
* code points until a terminator is found, then discarding the terminator.
*
* @section ex1 Example 1
*
* This example shows how to find all strings in memory that is readable but not writable using a list of common encodings
* such as C-style NUL-terminated printable ASCII, zero terminated UTF-16 little-endian, 2-byte little-endian length encoded
* ASCII, etc.
*
* @code
* #include <rose/BinaryString.h> // binary analysis string support
* using namespace rose::BinaryAnalysis::Strings;
* MemoryMap map = ...; // initialized elsewhere
*
* StringFinder finder; // holds settings
* finder.settings().minLength = 5; // no strings shorter than 5 characters
* finder.settings().maxLength = 65536; // ignore very long strings
* finder.insertCommonEncoders(); // how to match strings
* finder.find(map.require(MemoryMap::READABLE).prohibit(MemoryMap::WRITABLE));
*
* BOOST_FOREACH (const EncodedString &string, finder.strings()) {
* std::cout <<"string at " <<string.address() <<" for " <<string.size() <<" bytes\n";
* std::cout <<"encoding: " <<string.encoder()->name() <<"\n";
* std::cout <<"narrow value: \"" <<StringUtility::cEscape(string.narrow()) <<"\"\n"; // std::string
* std::cout <<"wide value: " <<string.wide() <<"\n"; // std::wstring
* }
*
* // This works too if you're not picky about the output format
* std::cout <<finder;
* @endcode
*
* @section ex2 Example 2
*
* The @ref StringFinder analysis is tuned for searching for strings at unknown locations while trying to decode multiple
* encodings simultaneously. If all you want to do is read a single string from a known location having a known encoding then
 * you're probably better off reading it directly from the @ref MemoryMap. The @ref StringFinder analysis can be used for
* that, but it's probably overkill. In any case, here's the overkill version to find a 2-byte little endian length-encoded
* UTF-8 string:
*
* @code
* #include <rose/BinaryString.h>
* using namespace rose::BinaryAnalysis::Strings;
* MemoryMap map = ...; // initialized elsewhere
* rose_addr_t stringVa = ...; // starting address of string
*
* StringFinder finder; // holds settings
 * finder.settings().minLength = 0; // no minimum string length
* finder.settings().maxLength = 65536; // ignore very long strings
* finder.encoder(lengthEncodedString(basicLengthEncoder(2, ByteOrder::ORDER_LSB), // 2-byte little-endian length
* utf8CharacterEncodingForm(), // UTF-8 encoding
* basicCharacterEncodingScheme(1), // 1:1 mapping to octets
* anyCodePoint()); // allow any characters
* std::wstring s;
* BOOST_FOREACH (const EncodedString &string, finder.find(map.at(stringVa)).strings()) {
* s = string.wide();
* break;
* }
* @endcode
*
* @section ex3 Example 3
*
 * The encoders can also be used to decode directly from a stream of octets. For instance, let's say you have a vector of
* octets that map 1:1 to code values, and then you want to decode the code values as a UTF-8 stream to get some code
* points. All decoders are implemented as state machines to make it efficient to send the same octets to many decoders
* without having to rescan/reread from a memory map. The UTF-8 decoder decodes one octet at a time and when it enters the
* FINAL_STATE or COMPLETED_STATE then a decoded code value can be consumed.
*
* @code
* #include <rose/BinaryString.h>
* using namespace rose::BinaryAnalysis::Strings;
* std::vector<Octet> octets = ...; // initialized elsewhere
*
* // Instantiate the encoder/decoder. These things are all reference
* // counted so there's no need to explicitly free them.
* Utf8CharacterEncodingForm::Ptr utf8 = utf8CharacterEncodingForm();
*
* CodePoints codePoints;
* BOOST_FOREACH (Octet octet, octets) {
* CodeValue codeValue = octet; // 1:1 translation
* if (isDone(utf8->decode(codeValue))) {
* codePoints.push_back(utf8->consume());
* } else if (utf8->state() == ERROR_STATE) {
* utf8->reset(); // skip this code value
* }
* }
* @endcode */
namespace Strings {
/** Diagnostics specific to string analysis. */
extern Sawyer::Message::Facility mlog;
// Vocabulary types for the decoding pipeline: octets are decoded to code values
// (by a CharacterEncodingScheme), and code values to code points (by a
// CharacterEncodingForm).
typedef uint8_t Octet; /**< One byte in a sequence that encodes a code value. */
typedef std::vector<Octet> Octets; /**< A sequence of octets. */
typedef unsigned CodeValue; /**< One value in a sequence that encodes a code point. */
typedef std::vector<CodeValue> CodeValues; /**< A sequence of code values. */
typedef unsigned CodePoint; /**< One character in a coded character set. */
typedef std::vector<CodePoint> CodePoints; /**< A sequence of code points, i.e., a string. */
/** Errors for string analysis. */
class Exception: public std::runtime_error {
public:
// Construct with a human-readable message; what() is inherited from std::runtime_error.
Exception(const std::string &s): std::runtime_error(s) {}
};
/** Decoder state. Negative values are reserved.
 *
 * A decoder must follow these rules when transitioning from one state to another:
 *
 * @li A decoder is in the INITIAL_STATE when it is constructed and after calling @c reset.
 *
 * @li If the decoder is in an ERROR_STATE then @c decode does not change the state.
 *
 * @li If the decoder is in the FINAL_STATE then @c decode transitions to ERROR_STATE.
 *
 * @li If the decoder is in FINAL_STATE or COMPLETED_STATE then @c consume transitions to INITIAL_STATE.
 *
 * All other transitions are user defined. */
enum State {
FINAL_STATE = -1, /**< Final state where nothing more can be decoded. */
COMPLETED_STATE = -2, /**< Completed state, but not a final state. */
INITIAL_STATE = -3, /**< Initial state just after a reset. */
ERROR_STATE = -4, /**< Decoder is in an error condition. */
USER_DEFINED_0 = 0, /**< First user-defined value. */
USER_DEFINED_1 = 1, /**< Second user-defined value. */
USER_DEFINED_2 = 2, /**< Third user-defined value. */
USER_DEFINED_MAX = 128 /**< Maximum user-defined value. */
};
/** Returns true for COMPLETED_STATE or FINAL_STATE, i.e., when a decoded value is ready to be consumed. */
bool isDone(State st);
/** Initialize the diagnostics facility. This is called by @ref rose::Diagnostics::initialize. */
void initDiagnostics();
/** Defines mapping between code points and code values.
 *
 * A code point represents one character of a coded character set, such as one character of approximately 1.4 million
 * distinct Unicode characters. The CharacterEncodingForm (CEF) is responsible for converting a code point to a sequence
 * of one or more code values, or vice versa. Each code value, which may be multiple bytes, is eventually encoded into a
 * sequence of octets by the @ref CharacterEncodingScheme (CES). */
class ROSE_DLL_API CharacterEncodingForm: public Sawyer::SharedObject {
protected:
State state_; // current state of the decoding state machine
public:
CharacterEncodingForm(): state_(INITIAL_STATE) {}
virtual ~CharacterEncodingForm() {}
/** Shared ownership pointer to a @ref CharacterEncodingForm. See @ref heap_object_shared_ownership. */
typedef Sawyer::SharedPointer<CharacterEncodingForm> Ptr;
/** Create a new encoder from this one. */
virtual Ptr clone() const = 0;
/** Name of encoder. */
virtual std::string name() const = 0;
/** Encode a code point into a sequence of one or more code values.
 *
 * For instance, an encoder for UTF-16 will encode a code point into one or more values in the range 0 through (2^16)-1. */
virtual CodeValues encode(CodePoint) = 0;
/** Decoder state. */
State state() const { return state_; }
/** Decode one code value.
 *
 * Processes a single code value and updates the decoder state machine. Returns the decoder's new state. See documentation
 * for @ref State for restrictions on state transitions. */
virtual State decode(CodeValue) = 0;
/** Consume a decoded code point.
 *
 * The decoder must be in the FINAL_STATE or COMPLETED_STATE, and upon return will be in the INITIAL_STATE. */
virtual CodePoint consume() = 0;
/** Reset the decoder state machine. */
virtual void reset() = 0;
};
/** A no-op character encoding form.
 *
 * Encodes code points to code values and vice versa such that code points are equal to code values. */
class ROSE_DLL_API NoopCharacterEncodingForm: public CharacterEncodingForm {
CodePoint cp_; // presumably the decoded code point pending consume() -- defined in the .C file
protected:
NoopCharacterEncodingForm(): cp_(0) {}
public:
/** Shared-ownership pointer to a @ref NoopCharacterEncodingForm. See @ref heap_object_shared_ownership. */
typedef Sawyer::SharedPointer<NoopCharacterEncodingForm> Ptr;
static Ptr instance() { return Ptr(new NoopCharacterEncodingForm); }
virtual CharacterEncodingForm::Ptr clone() const ROSE_OVERRIDE { return Ptr(new NoopCharacterEncodingForm(*this)); }
virtual std::string name() const ROSE_OVERRIDE { return "no-op"; }
virtual CodeValues encode(CodePoint cp) ROSE_OVERRIDE;
virtual State decode(CodeValue) ROSE_OVERRIDE;
virtual CodePoint consume() ROSE_OVERRIDE;
virtual void reset() ROSE_OVERRIDE;
};
/** Returns a new no-op character encoding form. */
NoopCharacterEncodingForm::Ptr noopCharacterEncodingForm();
/** UTF-8 character encoding form.
 *
 * Encodes each code point as one to six 8-bit code values. */
class ROSE_DLL_API Utf8CharacterEncodingForm: public CharacterEncodingForm {
CodePoint cp_; // presumably the code point being accumulated across decode() calls -- defined in the .C file
protected:
Utf8CharacterEncodingForm(): cp_(0) {}
public:
/** Shared-ownership pointer to a @ref Utf8CharacterEncodingForm. See @ref heap_object_shared_ownership. */
typedef Sawyer::SharedPointer<Utf8CharacterEncodingForm> Ptr;
static Ptr instance() { return Ptr(new Utf8CharacterEncodingForm); }
virtual CharacterEncodingForm::Ptr clone() const ROSE_OVERRIDE { return Ptr(new Utf8CharacterEncodingForm(*this)); }
virtual std::string name() const ROSE_OVERRIDE { return "UTF-8"; }
virtual CodeValues encode(CodePoint cp) ROSE_OVERRIDE;
virtual State decode(CodeValue) ROSE_OVERRIDE;
virtual CodePoint consume() ROSE_OVERRIDE;
virtual void reset() ROSE_OVERRIDE;
};
/** Returns a new UTF-8 character encoding form. */
Utf8CharacterEncodingForm::Ptr utf8CharacterEncodingForm();
/** UTF-16 character encoding form.
 *
 * Encodes each code point as one or two 16-bit code values. */
class ROSE_DLL_API Utf16CharacterEncodingForm: public CharacterEncodingForm {
CodePoint cp_; // presumably the code point being accumulated across decode() calls -- defined in the .C file
protected:
Utf16CharacterEncodingForm(): cp_(0) {}
public:
/** Shared-ownership pointer to a @ref Utf16CharacterEncodingForm. See @ref heap_object_shared_ownership. */
typedef Sawyer::SharedPointer<Utf16CharacterEncodingForm> Ptr;
static Ptr instance() { return Ptr(new Utf16CharacterEncodingForm); }
virtual CharacterEncodingForm::Ptr clone() const ROSE_OVERRIDE { return Ptr(new Utf16CharacterEncodingForm(*this)); }
virtual std::string name() const ROSE_OVERRIDE { return "UTF-16"; }
virtual CodeValues encode(CodePoint cp) ROSE_OVERRIDE;
virtual State decode(CodeValue) ROSE_OVERRIDE;
virtual CodePoint consume() ROSE_OVERRIDE;
virtual void reset() ROSE_OVERRIDE;
};
/** Returns a new UTF-16 character encoding form. */
Utf16CharacterEncodingForm::Ptr utf16CharacterEncodingForm();
/** Defines the mapping between code values and octets.
 *
 * A code value (one or more of which compose a code point, or a single character in a coded character set), is encoded as
 * one or more octets. For instance, a UTF-16 code value will be converted to two octets in big or little endian order
 * depending on the character encoding scheme. */
class ROSE_DLL_API CharacterEncodingScheme: public Sawyer::SharedObject {
protected:
State state_; // current state of the decoding state machine
public:
CharacterEncodingScheme(): state_(INITIAL_STATE) {}
virtual ~CharacterEncodingScheme() {}
/** Shared ownership pointer to a @ref CharacterEncodingScheme. See @ref heap_object_shared_ownership. */
typedef Sawyer::SharedPointer<CharacterEncodingScheme> Ptr;
/** Create a new copy of this encoder. */
virtual Ptr clone() const = 0;
/** Name of encoder. */
virtual std::string name() const = 0;
/** Encode a code value into a sequence of octets. For instance, an encoder for UTF-16 will encode a code value into
 * two octets. */
virtual Octets encode(CodeValue) = 0;
/** Decoder state. */
State state() const { return state_; }
/** Decode one octet.
 *
 * Processes a single octet and updates the decoder state machine. Returns the decoder's new state. See documentation for
 * @ref State for restrictions on state transitions. */
virtual State decode(Octet) = 0;
/** Consume a decoded code value.
 *
 * The decoder must be in the FINAL_STATE or COMPLETED_STATE and upon return will be in the INITIAL_STATE. */
virtual CodeValue consume() = 0;
/** Reset the decoder state machine. */
virtual void reset() = 0;
};
/** Basic character encoding scheme.
 *
 * This character encoding scheme converts code value to a sequence of octets in big- or little-endian order, and vice
 * versa. It needs to know the number of octets per code value, and the byte order if the number of octets per code value
 * is larger than one. */
class ROSE_DLL_API BasicCharacterEncodingScheme: public CharacterEncodingScheme {
size_t octetsPerValue_;
ByteOrder::Endianness sex_; // byte order; may be unspecified only when octetsPerValue_ is 1
CodeValue cv_; // presumably the code value being accumulated across decode() calls -- defined in the .C file
protected:
BasicCharacterEncodingScheme(size_t octetsPerValue, ByteOrder::Endianness sex)
: octetsPerValue_(octetsPerValue), sex_(sex), cv_(0) {
ASSERT_require(1==octetsPerValue || sex!=ByteOrder::ORDER_UNSPECIFIED);
ASSERT_require(octetsPerValue <= sizeof(CodeValue));
}
public:
static Ptr instance(size_t octetsPerValue, ByteOrder::Endianness sex = ByteOrder::ORDER_UNSPECIFIED) {
return Ptr(new BasicCharacterEncodingScheme(octetsPerValue, sex));
}
virtual Ptr clone() const ROSE_OVERRIDE {
return Ptr(new BasicCharacterEncodingScheme(*this));
}
virtual std::string name() const ROSE_OVERRIDE;
virtual Octets encode(CodeValue) ROSE_OVERRIDE;
virtual State decode(Octet) ROSE_OVERRIDE;
virtual CodeValue consume() ROSE_OVERRIDE;
virtual void reset() ROSE_OVERRIDE;
};
/** Returns a new basic character encoding scheme. */
BasicCharacterEncodingScheme::Ptr basicCharacterEncodingScheme(size_t octetsPerValue,
ByteOrder::Endianness sex = ByteOrder::ORDER_UNSPECIFIED);
/** Encoding for the length of a string.
 *
 * Strings that are length-encoded must specify a length encoding scheme that gives the length of the string measured in
 * code points. */
class ROSE_DLL_API LengthEncodingScheme: public Sawyer::SharedObject {
protected:
State state_; // current state of the decoding state machine
public:
LengthEncodingScheme(): state_(INITIAL_STATE) {}
virtual ~LengthEncodingScheme() {}
/** Shared ownership pointer to a @ref LengthEncodingScheme. See @ref heap_object_shared_ownership. */
typedef Sawyer::SharedPointer<LengthEncodingScheme> Ptr;
/** Create a new copy of this encoder. */
virtual Ptr clone() const = 0;
/** Name of encoder. */
virtual std::string name() const = 0;
/** Encode a length into a sequence of octets. */
virtual Octets encode(size_t) = 0;
/** Decoder state. */
State state() const { return state_; }
/** Decode one octet.
 *
 * Processes a single octet and updates the decoder state machine. Returns the decoder's new state. See documentation for
 * @ref State for restrictions on state transitions. */
virtual State decode(Octet) = 0;
/** Consume a decoded length.
 *
 * The decoder must be in the FINAL_STATE or COMPLETED_STATE, and upon return will be in the INITIAL_STATE. */
virtual size_t consume() = 0;
/** Reset the decoder state machine. */
virtual void reset() = 0;
};
/** Basic length encoding scheme.
 *
 * This length encoding scheme converts a length to a sequence of octets in big- or little-endian order, and vice
 * versa. It needs to know the number of octets per length value, and the byte order of the octets if the length is
 * greater than one. */
class ROSE_DLL_API BasicLengthEncodingScheme: public LengthEncodingScheme {
size_t octetsPerValue_;
ByteOrder::Endianness sex_; // byte order; may be unspecified only when octetsPerValue_ is 1
size_t length_; // presumably the length being accumulated across decode() calls -- defined in the .C file
protected:
BasicLengthEncodingScheme(size_t octetsPerValue, ByteOrder::Endianness sex)
: octetsPerValue_(octetsPerValue), sex_(sex), length_(0) {
ASSERT_require(1==octetsPerValue || sex!=ByteOrder::ORDER_UNSPECIFIED);
ASSERT_require(octetsPerValue <= sizeof(size_t));
}
public:
static Ptr instance(size_t octetsPerValue, ByteOrder::Endianness sex = ByteOrder::ORDER_UNSPECIFIED) {
return Ptr(new BasicLengthEncodingScheme(octetsPerValue, sex));
}
virtual Ptr clone() const ROSE_OVERRIDE {
return Ptr(new BasicLengthEncodingScheme(*this));
}
virtual std::string name() const ROSE_OVERRIDE;
virtual Octets encode(size_t) ROSE_OVERRIDE;
virtual State decode(Octet) ROSE_OVERRIDE;
virtual size_t consume() ROSE_OVERRIDE;
virtual void reset() ROSE_OVERRIDE;
};
/** Returns a new basic length encoding scheme. */
BasicLengthEncodingScheme::Ptr basicLengthEncodingScheme(size_t octetsPerValue,
ByteOrder::Endianness sex = ByteOrder::ORDER_UNSPECIFIED);
/** Valid code point predicate.
 *
 * This predicate tests that the specified code point is valid for a string.
 *
 * Predicates are treated as stateless: string encoders share a single instance rather than cloning it (see, for
 * example, @ref LengthEncodedString::clone). */
class ROSE_DLL_API CodePointPredicate: public Sawyer::SharedObject {
public:
virtual ~CodePointPredicate() {}
/** Shared ownership pointer to a @ref CodePointPredicate. See @ref heap_object_shared_ownership. */
typedef Sawyer::SharedPointer<CodePointPredicate> Ptr;
/** Name of predicate. */
virtual std::string name() const = 0;
/** Predicate. */
virtual bool isValid(CodePoint) = 0;
};
/** ASCII valid code points.
 *
 * Returns true if the code point is a printable US-ASCII character. Printable characters are seven-bit code points for
 * which C's @c isprint predicate returns true (anything but control characters). */
class ROSE_DLL_API PrintableAscii: public CodePointPredicate {
protected:
PrintableAscii() {}
public:
// Factory; instances are created only through this method (constructor is protected).
static Ptr instance() {
return Ptr(new PrintableAscii);
}
virtual std::string name() const ROSE_OVERRIDE { return "printable ASCII"; }
virtual bool isValid(CodePoint) ROSE_OVERRIDE;
};
/** Returns a new printable ASCII predicate. */
PrintableAscii::Ptr printableAscii();
/** Matches any code point.
 *
 * Returns true for all code points. */
class ROSE_DLL_API AnyCodePoint: public CodePointPredicate {
protected:
AnyCodePoint() {}
public:
// Factory; instances are created only through this method (constructor is protected).
static Ptr instance() { return Ptr(new AnyCodePoint); }
virtual std::string name() const ROSE_OVERRIDE { return "any code point"; }
virtual bool isValid(CodePoint) ROSE_OVERRIDE { return true; }
};
/** Returns a new predicate that matches all code points. */
AnyCodePoint::Ptr anyCodePoint();
/** String encoding scheme.
 *
 * A string encoding scheme indicates how a string (sequence of code points) is encoded as a sequence of octets and vice
 * versa. */
class ROSE_DLL_API StringEncodingScheme: public Sawyer::SharedObject {
protected:
State state_; // decoding state
CodePoints codePoints_; // unconsumed code points
size_t nCodePoints_; // number of code points decoded since reset
CharacterEncodingForm::Ptr cef_;
CharacterEncodingScheme::Ptr ces_;
CodePointPredicate::Ptr cpp_;
protected:
StringEncodingScheme(): state_(INITIAL_STATE), nCodePoints_(0) {}
// FIX: state_ and nCodePoints_ were previously left uninitialized by this constructor, so state() and
// length() returned indeterminate values until the first reset(). Initialize them the same way as the
// default constructor.
StringEncodingScheme(const CharacterEncodingForm::Ptr &cef, const CharacterEncodingScheme::Ptr &ces,
const CodePointPredicate::Ptr &cpp)
: state_(INITIAL_STATE), nCodePoints_(0), cef_(cef), ces_(ces), cpp_(cpp) {}
public:
virtual ~StringEncodingScheme() {}
/** Shared ownership pointer to a @ref StringEncodingScheme. See @ref heap_object_shared_ownership. */
typedef Sawyer::SharedPointer<StringEncodingScheme> Ptr;
/** Name of encoding */
virtual std::string name() const = 0;
/** Create a new copy of this encoder. */
virtual Ptr clone() const = 0;
/** Encode a string into a sequence of octets. */
virtual Octets encode(const CodePoints&) = 0;
/** Decoder state. */
State state() const { return state_; }
/** Decode one octet.
 *
 * Processes a single octet and updates the decoder state machine. Returns the new state. See documentation for @ref
 * State for restrictions on state transitions. */
virtual State decode(Octet) = 0;
/** Consume pending decoded code points.
 *
 * Returns code points that haven't been consumed yet, and then removes them from the decoder. This can be called from any
 * state because we want the caller to be able to consume code points as they're decoded, which is a little bit different
 * than how @c consume methods operate in the decoders that return scalar values. A @ref reset will discard pending code
 * points. */
CodePoints consume();
/** Return pending decoded code points without consuming them. */
const CodePoints& codePoints() const { return codePoints_; }
/** Number of code points decoded since reset. */
size_t length() const { return nCodePoints_; }
/** Reset the state machine to an initial state. */
virtual void reset();
/** Property: Character encoding format.
 *
 * The character encoding format is responsible for converting each code point to a sequence of code values. For instance,
 * a UTF-16 encoding will convert each code point (a number between zero and about 1.2 million) into a sequence of
 * 16-bit code values. Each code value will eventually be converted to a pair of octets by the character encoding
 * scheme.
 *
 * @{ */
CharacterEncodingForm::Ptr characterEncodingForm() const { return cef_; }
void characterEncodingForm(const CharacterEncodingForm::Ptr &cef) { cef_ = cef; }
/** @} */
/** Property: Character encoding scheme.
 *
 * The character encoding scheme is responsible for converting each code value to a sequence of one or more octets. The
 * code value is part of a sequence of code values generated by the character encoding format for a single code point. For
 * instance, a character encoding scheme for UTF-16 will need to know whether the octets are stored in big- or
 * little-endian order.
 *
 * @{ */
CharacterEncodingScheme::Ptr characterEncodingScheme() const { return ces_; }
void characterEncodingScheme(const CharacterEncodingScheme::Ptr &ces) { ces_ = ces; }
/** @} */
/** Property: Code point predicate.
 *
 * The code point predicate tests whether a specific code point is allowed as part of a string. For instance, when
 * decoding NUL-terminated ASCII strings one might want to consider only those strings that contain printable characters
 * and white space in order to limit the number of false positives when searching for strings in memory.
 *
 * @{ */
CodePointPredicate::Ptr codePointPredicate() const { return cpp_; }
void codePointPredicate(const CodePointPredicate::Ptr &cpp) { cpp_ = cpp; }
/** @} */
};
/** Length-prefixed string encoding scheme.
 *
 * A string encoding where the octets for the characters are prefixed with an encoded length. */
class ROSE_DLL_API LengthEncodedString: public StringEncodingScheme {
LengthEncodingScheme::Ptr les_;
Sawyer::Optional<size_t> declaredLength_; // decoded length
protected:
LengthEncodedString(const LengthEncodingScheme::Ptr &les, const CharacterEncodingForm::Ptr &cef,
const CharacterEncodingScheme::Ptr &ces, const CodePointPredicate::Ptr &cpp)
: StringEncodingScheme(cef, ces, cpp), les_(les) {}
public:
/** Shared ownership pointer to a @ref LengthEncodedString. See @ref heap_object_shared_ownership. */
typedef Sawyer::SharedPointer<LengthEncodedString> Ptr;
static Ptr instance(const LengthEncodingScheme::Ptr &les, const CharacterEncodingForm::Ptr &cef,
const CharacterEncodingScheme::Ptr &ces, const CodePointPredicate::Ptr &cpp) {
return Ptr(new LengthEncodedString(les, cef, ces, cpp));
}
// Deep-copies the stateful sub-encoders and the in-progress decoder state so the clone can continue
// decoding independently of this object.
virtual StringEncodingScheme::Ptr clone() const ROSE_OVERRIDE {
LengthEncodingScheme::Ptr les = les_->clone();
CharacterEncodingForm::Ptr cef = cef_->clone();
CharacterEncodingScheme::Ptr ces = ces_->clone();
CodePointPredicate::Ptr cpp = cpp_; // not cloned since they have no state
LengthEncodedString *inst = new LengthEncodedString(les, cef, ces, cpp);
inst->state_ = state_;
inst->codePoints_ = codePoints_;
inst->nCodePoints_ = nCodePoints_;
inst->declaredLength_ = declaredLength_;
return Ptr(inst);
}
virtual std::string name() const ROSE_OVERRIDE;
virtual Octets encode(const CodePoints&) ROSE_OVERRIDE;
virtual State decode(Octet) ROSE_OVERRIDE;
virtual void reset() ROSE_OVERRIDE;
/** Returns the declared length, if any.
 *
 * The declared length is the value of the decoded length prefix, not necessarily the number of code points that have been
 * decoded. This can be called from any state, though it always returns nothing in the INITIAL_STATE. Therefore, this
 * method should be called prior to the @ref consume call. */
Sawyer::Optional<size_t> declaredLength() const { return declaredLength_; }
/** Property: Length encoding scheme.
 *
 * The length encoding scheme is responsible for encoding the string length as a sequence of octets.
 *
 * @{ */
LengthEncodingScheme::Ptr lengthEncodingScheme() const { return les_; }
void lengthEncodingScheme(const LengthEncodingScheme::Ptr &les) { les_ = les; }
/** @} */
};
/** Returns a new length-prefixed string encoder. */
LengthEncodedString::Ptr lengthEncodedString(const LengthEncodingScheme::Ptr &les, const CharacterEncodingForm::Ptr &cef,
const CharacterEncodingScheme::Ptr &ces, const CodePointPredicate::Ptr &cpp);
/** Returns a new encoder for length-encoded printable ASCII strings. A byte order must be specified for length encodings
 * larger than a single byte. */
LengthEncodedString::Ptr lengthEncodedPrintableAscii(size_t lengthSize,
ByteOrder::Endianness order = ByteOrder::ORDER_UNSPECIFIED);
/** Returns a new encoder for multi-byte length-encoded printable ASCII strings. */
LengthEncodedString::Ptr lengthEncodedPrintableAsciiWide(size_t lengthSize, ByteOrder::Endianness order, size_t charSize);
/** Terminated string encoding scheme.
 *
 * A string whose character octets are followed by octets for a special code point that marks the end of the string but is
 * not included as part of the string's characters. An example is C-style NUL-terminated ASCII. */
class ROSE_DLL_API TerminatedString: public StringEncodingScheme {
    CodePoints terminators_;                            // code points that mark the end of a string
    Sawyer::Optional<CodePoint> terminated_;            // decoded termination
protected:
    // Construction goes through the instance() factories so objects are always owned by a shared pointer.
    TerminatedString(const CharacterEncodingForm::Ptr &cef, const CharacterEncodingScheme::Ptr &ces,
                     const CodePointPredicate::Ptr &cpp, const CodePoints &terminators)
        : StringEncodingScheme(cef, ces, cpp), terminators_(terminators) {}
public:
    /** Shared ownership pointer to a @ref TerminatedString. See @ref heap_object_shared_ownership. */
    typedef Sawyer::SharedPointer<TerminatedString> Ptr;

    /** Allocating constructor taking an explicit list of terminating code points. */
    static Ptr instance(const CharacterEncodingForm::Ptr &cef, const CharacterEncodingScheme::Ptr &ces,
                        const CodePointPredicate::Ptr &cpp, const CodePoints &terminators) {
        return Ptr(new TerminatedString(cef, ces, cpp, terminators));
    }

    /** Allocating constructor with a single terminating code point, NUL by default. */
    static Ptr instance(const CharacterEncodingForm::Ptr &cef, const CharacterEncodingScheme::Ptr &ces,
                        const CodePointPredicate::Ptr &cpp, CodePoint terminator = 0) {
        return Ptr(new TerminatedString(cef, ces, cpp, CodePoints(1, terminator)));
    }

    /** Deep copy of this encoder, including its in-progress decoding state. */
    virtual StringEncodingScheme::Ptr clone() const ROSE_OVERRIDE {
        CharacterEncodingForm::Ptr cef = cef_->clone();
        CharacterEncodingScheme::Ptr ces = ces_->clone();
        CodePointPredicate::Ptr cpp = cpp_;             // not cloned since they have no state
        TerminatedString *inst = new TerminatedString(cef, ces, cpp, terminators_);
        inst->state_ = state_;
        inst->codePoints_ = codePoints_;
        inst->nCodePoints_ = nCodePoints_;
        inst->terminated_ = terminated_;
        return Ptr(inst);
    }

    virtual std::string name() const ROSE_OVERRIDE;
    virtual Octets encode(const CodePoints&) ROSE_OVERRIDE;
    virtual State decode(Octet) ROSE_OVERRIDE;
    virtual void reset() ROSE_OVERRIDE;

    /** Returns the decoded termination character, if any.
     *
     * This can be called from any state except it will always return nothing in the INITIAL_STATE. Therefore, this method
     * should be called prior to the @ref consume call. */
    Sawyer::Optional<CodePoint> terminated() const { return terminated_; }

    /** Property: string termination code points.
     *
     * A list of code points (characters) that cause a string to be terminated. When decoding a string, if a terminating code
     * point is encountered then the string ends at the previous code point even if the terminating code point also satisfies
     * the code point predicate.
     *
     * @{ */
    const CodePoints& terminators() const { return terminators_; }
    CodePoints& terminators() { return terminators_; }
    /** @} */
};
/** Returns a new encoder for NUL-terminated printable ASCII strings (one octet per character). */
TerminatedString::Ptr nulTerminatedPrintableAscii();

/** Returns a new encoder for multi-byte NUL-terminated printable ASCII strings (@p charSize octets per character,
 *  multi-byte values read in the given byte order). */
TerminatedString::Ptr nulTerminatedPrintableAsciiWide(size_t charSize, ByteOrder::Endianness order);
/** An encoder plus interval.
 *
 * Represents a string by specifying the encoding and an interval of virtual addresses where the encoded octets are
 * stored. */
class ROSE_DLL_API EncodedString {
    StringEncodingScheme::Ptr encoder_;                 // how string is encoded
    AddressInterval where_;                             // where encoded string is located
public:
    /** Default constructor: no encoder and an empty address interval. */
    EncodedString() {}

    /** Constructs a description of a string from its encoder and the addresses of its encoded octets. */
    EncodedString(const StringEncodingScheme::Ptr &encoder, const AddressInterval &where)
        : encoder_(encoder), where_(where) {}

    /** Information about the string encoding. */
    StringEncodingScheme::Ptr encoder() const { return encoder_; }

    /** Where the string is located in memory. */
    const AddressInterval& where() const { return where_; }

    /** Starting address of string in memory.
     *  NOTE(review): presumably requires a non-empty interval; confirm AddressInterval::least's precondition. */
    rose_addr_t address() const { return where_.least(); }

    /** Size of encoded string in bytes. */
    size_t size() const { return where_.size(); }

    /** Length of encoded string in code points. */
    size_t length() const { return encoder_->length(); }

    /** Code points associated with the string.
     *
     * If code points have been consumed then they may be partly or fully absent from the decoder. */
    const CodePoints& codePoints() const { return encoder_->codePoints(); }

    /** Return code points as a C++ std::string.
     *
     * This truncates each code point to eight bits. */
    std::string narrow() const;

    /** Return code points as a C++ std::wstring. */
    std::wstring wide() const;

    /** Decodes the string from memory.
     *
     * A string need not store its code points, in which case this method can decode them from memory. The memory should be
     * the same as when the string was originally found, otherwise an std::runtime_error might be thrown. */
    void decode(const MemoryMap&);
};
/** %Analysis to find encoded strings.
 *
 * This analysis searches user-specified parts of a binary specimen's memory space to find strings encoded in various formats
 * specified by the user.
 *
 * See the @ref rose::BinaryAnalysis::Strings "Strings" namespace for details. */
class ROSE_DLL_API StringFinder {
public:
    /** Settings and properties.
     *
     * These properties can be set directly or by the command-line parser. */
    struct Settings {
        /** Minimum length of matched strings.
         *
         * Strings having fewer than this many code points are discarded. If @ref minLength is larger than @ref maxLength then
         * no strings will be matched. */
        size_t minLength;

        /** Maximum length of matched strings.
         *
         * Strings having more than this many code points are discarded. If @ref maxLength is smaller than @ref minLength then
         * no strings will be matched. */
        size_t maxLength;

        /** Whether to allow overlapping strings.
         *
         * The number of strings that can overlap at a single address per encoder. For instance, for C-style NUL-terminated
         * ASCII strings encoded as bytes, if memory contains the consecutive values 'a', 'n', 'i', 'm', 'a', 'l', '\0' then
         * up to seven strings are possible: "animal", "nimal", "imal", "mal", "al", "l", and "". If the maximum overlap is
         * set to three then only "animal", "nimal", and "imal" are found. Setting the maximum overlap to zero has the same
         * effect as setting it to one: no overlapping is allowed. The overlap limits are applied before results are pruned
         * based on length, so if the minimum length is four, the "imal" and shorter strings won't be found even though they
         * are decoded under the covers.
         *
         * A maximum overlap of at least two is recommended if two-byte-per-character encoding is used when detecting
         * NUL-terminated ASCII strings. The reason is that one decoder will be active at one address while another decoder is
         * desired for the next address; then if the first address proves to not be part of a string, the second address can
         * still be detected as a string. Similarly, a maximum overlap of at least four is recommended for
         * four-byte-per-character encodings. Length-encoded strings will have similar issues. */
        size_t maxOverlap;

        /** Whether to keep only longest non-overlapping strings.
         *
         * If set, then only the longest detected strings are kept. The algorithm sorts all detected strings by decreasing
         * length, then removes any string whose memory addresses overlap with any prior string in the list. */
        bool keepingOnlyLongest;

        // maxLength(-1) wraps to the largest size_t value, i.e., effectively unlimited.
        Settings(): minLength(5), maxLength(-1), maxOverlap(8), keepingOnlyLongest(true) {}
    };

private:
    Settings settings_;                                 // command-line settings for this analysis
    bool discardingCodePoints_;                         // whether to store decoded code points
    std::vector<StringEncodingScheme::Ptr> encoders_;   // encodings to use when searching
    std::vector<EncodedString> strings_;                // strings that have been found

public:
    /** Constructor.
     *
     * Initializes the analysis with default settings but no encoders. Encoders will need to be added before this analysis can
     * be used to find any strings. */
    StringFinder(): discardingCodePoints_(false) {}

    /** Property: %Analysis settings often set from a command-line.
     *
     * @{ */
    const Settings& settings() const { return settings_; }
    Settings& settings() { return settings_; }
    /** @} */

    /** Property: Whether to discard code points.
     *
     * If this property is set, then the process of decoding strings does not actually store the code points (characters)
     * of the string. This is useful when searching for lots of strings to reduce the amount of memory required. A string
     * can be decoded again later if the code points are needed.
     *
     * @{ */
    bool discardingCodePoints() const { return discardingCodePoints_; }
    StringFinder& discardingCodePoints(bool b) { discardingCodePoints_=b; return *this; }
    /** @} */

    /** Property: List of string encodings.
     *
     * When searching for strings, this analysis must know what kinds of strings to look for, and does that with a vector of
     * pointers to encoders. The default is an empty vector, in which no strings will be found.
     *
     * @{ */
    const std::vector<StringEncodingScheme::Ptr>& encoders() const { return encoders_; }
    std::vector<StringEncodingScheme::Ptr>& encoders() { return encoders_; }
    /** @} */

    /** Command-line parser for analysis settings.
     *
     * Returns the switch group that describes the command-line switches for this analysis. The caller can provide a @ref
     * Settings object that will be adjusted when the command-line is parsed and applied; if no argument is supplied then the
     * settings of this analysis are affected. In either case, the settings or analysis object must still be allocated when
     * the command-line is parsed.
     *
     * @{ */
    static Sawyer::CommandLine::SwitchGroup commandLineSwitches(Settings&);
    Sawyer::CommandLine::SwitchGroup commandLineSwitches();
    /** @} */

    /** Inserts common encodings.
     *
     * Inserts the following string encodings into the analysis:
     *
     * @li NUL-terminated, byte-encoded, printable ASCII characters.
     * @li NUL-terminated, 16-bit encoded, printable ASCII characters.
     * @li NUL-terminated, 32-bit encoded, printable ASCII characters.
     * @li 2-byte length-prefixed, byte encoded, printable ASCII characters.
     * @li 4-byte length-prefixed, byte encoded, printable ASCII characters.
     * @li 2-byte length-prefixed, 16-bit encoded, printable ASCII characters.
     * @li 4-byte length-prefixed, 16-bit encoded, printable ASCII characters.
     * @li 4-byte length-prefixed, 32-bit encoded, printable ASCII characters.
     *
     * The specified endianness is used for all multi-byte values. */
    StringFinder& insertCommonEncoders(ByteOrder::Endianness);

    /** Inserts less common encodings.
     *
     * Inserts the following string encodings into the analyses:
     *
     * @li Printable ASCII terminated by other code points or non-readable memory. */
    StringFinder& insertUncommonEncoders(ByteOrder::Endianness);

    /** Reset analysis results.
     *
     * Clears analysis results but does not change settings or properties. */
    StringFinder& reset() { strings_.clear(); return *this; }

    /** Finds strings by searching memory.
     *
     * Clears previous analysis results (e.g., @ref reset) and then searches for new strings. The resulting strings can be
     * obtained from the @ref strings method.
     *
     * The memory constraints indicate where to search for strings, and the properties of this StringFinder class determine
     * how to find strings. Specifically, this class must have at least one encoding registered in order to find anything (see
     * @ref encoders).
     *
     * The search progresses by looking at each possible starting address using each registered encoding. The algorithm reads
     * each byte from memory only one time, simultaneously attempting all encoders. If the MemoryMap constraint contains an
     * anchor point (e.g., @ref MemoryMap::at) then only strings starting at the specified address are returned.
     *
     * Example 1: Find all C-style, NUL-terminated, ASCII strings containing only printable characters (no control characters)
     * and containing at least five characters but not more than 31 (not counting the NUL terminator). Make sure that the
     * string is in memory that is readable but not writable, and don't allow strings to overlap one another (i.e., "foobar"
     * and "bar" cannot share their last four bytes):
     *
     * @code
     *  using namespace rose::BinaryAnalysis::Strings;
     *  MemoryMap map = ...;
     *  StringFinder sf;
     *  sf.encoders().push_back(nulTerminatedPrintableAscii());
     *  sf.settings().minLength = 5;
     *  sf.settings().maxLength = 31;
     *  sf.settings().maxOverlap = 1;               // disallow overlapping strings
     *  std::vector<EncodedString> strings = sf.find(map.require(MemoryMap::READABLE).prohibit(MemoryMap::WRITABLE)).strings();
     * @endcode */
    StringFinder& find(const MemoryMap::ConstConstraints&, Sawyer::Container::MatchFlags flags=0);

    /** Obtain strings that were found.
     *
     * @{ */
    const std::vector<EncodedString>& strings() const { return strings_; }
    std::vector<EncodedString>& strings() { return strings_; }
    /** @} */

    /** Print results.
     *
     * Print information about each string, one string per line. Strings are displayed with C/C++ string syntax. */
    std::ostream& print(std::ostream&) const;
};
/** Prints a StringFinder's results; see @ref StringFinder::print. */
std::ostream& operator<<(std::ostream&, const StringFinder&);
} // namespace
} // namespace
} // namespace
#endif
|
<gh_stars>1-10
package Gray_Code;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
public class Solution {
public List<Integer> grayCode(int n) {
if (n == 0) return Arrays.asList(0);
if (n == 1) return Arrays.asList(0, 1);
if (n == 2) return Arrays.asList(0, 1, 3, 2);
List<Integer> res = new ArrayList<>();
res.add(0);res.add(1);res.add(3);res.add(2);
for (int lo = 2, hi = res.size() - 1; n > 2; n--) {
for (int i = lo; i < hi; i += 2) {
res.add((res.get(i) << 1));
res.add((res.get(i) << 1) + 1);
res.add((res.get(i + 1) << 1) + 1);
res.add((res.get(i + 1) << 1));
}
lo = lo << 1;hi = res.size() - 1;
}
return res;
}
public static void main(String[] args) {
Solution s = new Solution();
List<Integer> output1 = s.grayCode(4);
for (Integer i: output1) System.out.print(i + " ");
}
}
|
<reponame>Raziel2244/nord
nord.cartography = {
// regions of the world
regions: [
{
name: "Kisarana",
level: 0,
chance: 95,
advantages: ["chkd", "grfn", "pfwl", "stgl"],
disadvantages: ["crvd", "inks", "orth", "pmkn", "unvs"],
items: [
["bkcl", "fthr", "sstk", "tnrk", "tcng"],
["lstk", "dedr", "frle", "ebtl", "smgb"],
["lpt1", "smcn", "sube", "atgb"],
["scps", "chbs", "drsc"],
["lsdn", "bcps", "lsct", "lshd", "frfx", "frsn", "atss", "tghs"],
],
risk: false,
},
{
name: "<NAME>",
level: 1,
chance: 85,
advantages: ["clpd", "jgur", "mdrn", "tbby", "tigr", "tgrn"],
disadvantages: ["blsp", "chkd", "flmt", "stgl"],
items: [
["bkcl", "fthr", "sstk", "plbk", "tnrk", "trsn", "tcng"],
["frle", "ntvn", "ebtl", "smgb"],
["arpe", "lstk", "smcn", "sube", "gogb"],
["scps", "rahb", "edrc", "drsc"],
["bcps", "tmcl", "lsdn", "prbd", "atss", "tmfl"],
],
risk: { chance: 5, cooldown: "1 days" },
},
{
name: "<NAME>",
level: 2,
chance: 70,
advantages: ["btfl", "embm", "hmbd", "kscl"],
disadvantages: ["cmph", "hnna", "mdrn", "pnda", "wood"],
items: [
["plft", "plst", "tcng"],
["frle", "lstk", "smcn", "smgb"],
["arpe", "scps", "pram", "brsp", "sdst"],
["bcps", "plfr", "atss", "drsc"],
["lshd", "wfsk", "frwl"],
],
risk: { chance: 7, cooldown: "3 days" },
},
{
name: "Amadora: Estrana",
level: 2,
chance: 70,
advantages: ["agis", "fawn", "hnna", "lctb", "zbra"],
disadvantages: [
"cndy",
"chbl",
"chlt",
"egng",
"frst",
"sndp",
"snfl",
"sngb",
],
items: [
["plft", "plst", "tcng"],
["lstk", "smcn", "smgb"],
["elsa", "hoco", "scps", "pram", "sthr"],
["bcps", "mgld", "atss", "fsdh", "drsc"],
["frfx", "fsvl", "obsd", "lgld", "sdtm"],
],
risk: { chance: 7, cooldown: "3 days" },
},
{
name: "Korandyire",
level: 3,
chance: 55,
advantages: ["aglr", "atln", "glmr", "muln", "unvs"],
disadvantages: ["clpd", "jgur", "tbby", "tigr", "tgrn"],
items: [
["rahb", "odgm", "tcng"],
["frle", "sash", "mggm"],
["arpe", "astd", "dngr", "arwt", "sdst"],
["arsh", "vdng", "atss", "drsc"],
["nksk", "spfm", "dndl", "xfth"],
],
risk: {
chance: 25,
cooldown: "5 days",
pchance: 10,
potion: "Auralight",
},
},
{
name: "Kothandyl",
level: 4,
chance: 40,
advantages: ["cmph", "flmt", "pnda", "wood"],
disadvantages: ["aglr", "btfl", "embm", "hmbd", "kscl"],
items: [
["skfg", "sksn", "tcng"],
["dngr", "scps", "smgb"],
["elsa", "vdng", "bcps"],
["bkcm", "hfmp", "atss", "acdh", "drsc"],
["rtgn", "frpl"],
],
risk: { chance: 25, cooldown: "7 days", pchance: 10, potion: "Toxicate" },
},
{
name: "Quendorin",
level: 5,
chance: 24,
advantages: [
"cndy",
"chbl",
"chlt",
"egng",
"frst",
"sndp",
"snfl",
"sngb",
],
disadvantages: ["agis", "atln", "fawn", "lctb", "zbra"],
items: [
["icsh", "dngr"],
["vdng", "rpft"],
["nseg", "aric"],
["mtrt", "xdng", "atss", "icfl", "drsc"],
["tcgl", "fzck"],
],
risk: { chance: 12, cooldown: "10 days" },
},
{
name: "Malgosfrom",
level: 6,
chance: 10,
advantages: ["blsp", "crvd", "inks", "orth", "pmkn"],
disadvantages: ["glmr", "grfn", "muln", "pfwl"],
items: [
["vdng", "pksd"],
["rtgn", "astd"],
["elsa", "mtrt", "xdng", "atss"],
["lsdn", "rdsv", "nchd", "drsc"],
["rtgm"],
],
risk: {
chance: 30,
cooldown: "14 days",
pchance: 15,
potion: "Blood/Nightshade",
},
},
],
// data for horses
horses: [
{ level: "0", name: "No registered level" },
{ level: "1", name: "Basic training" },
{ level: "2", name: "Quality blood" },
{ level: "3", name: "Excellent blood" },
{ level: "4", name: "Supreme blood" },
{ level: "5", name: "Heroic blood" },
{ level: "6", name: "Legendary blood" },
],
// reward tiers
tiers: [
{ name: "Very Common", chance: 50, binoChance: 33 },
{ name: "Common", chance: 29, binoChance: 26 },
{ name: "Uncommon", chance: 13, binoChance: 23 },
{ name: "Rare", chance: 6, binoChance: 13 },
{ name: "Very Rare", chance: 2, binoChance: 5 },
],
// possible rewards
items: {
acdh: { name: "Anaconda Head" },
atgb: { name: "Antique Gold Bridle" },
aric: { name: "Arcane Ice Shard" },
arsh: { name: "Arcane Shard" },
arwt: { name: "Arcane Water" },
arpe: { name: "Aromatic Petals" },
astd: { name: "Asteroid Fragment" },
atss: { name: "Attachment Stone Shard" },
brsp: { name: "Bear Spine" },
bcps: { name: "Bursting Coin Purse" },
bkcl: { name: "Broken Chalice" },
bkcm: { name: "Broken Compass" },
lsct: { name: "Cat Familiar (TI)" },
tmcl: { name: "Cat Familiar (TII)" },
fsvl: { name: "Cat Familiar (TIII)" },
chbs: { name: "Cherry Blossom Seeds" },
obsd: { name: "Chunk of Obsidian" },
dngr: { name: "Dangerous Encounter" },
dndl: { name: "Destabilising Nodule" },
dedr: { name: "Dew Droplets" },
drsc: { name: "Dragon Scales" },
elsa: { name: "Electrified Sand" },
ebtl: { name: "Empty Potion Bottle" },
xfth: { name: "Exquisite Feather" },
edrc: { name: "Extinct Dragon Skull" },
xdng: { name: "Extremely Dangerous Encounter" },
fthr: { name: "Feather" },
frpl: { name: "Fire Opal" },
fsdh: { name: "Fossilised Dragon Horn" },
frfx: { name: "Fox Familiar" },
frle: { name: "Fresh Leaves" },
fzck: { name: "Frozen Cookie" },
gogb: { name: "Golden Grub" },
hoco: { name: "Hot Coals" },
lshd: { name: "Hound Familiar" },
icfl: { name: "Ice Flower" },
icsh: { name: "Ice Shard" },
nksk: { name: "Ink Sack" },
ntvn: { name: "Interesting Vine" },
lgld: { name: "Large Gold Ore" },
lstk: { name: "Large Stick" },
lsdn: { name: "Lost Nordanner" },
lpt1: { name: "Luck Potion" },
mggm: { name: "Magnificently Odd Gemstone" },
mgld: { name: "Medium Gold Nugget" },
mtrt: { name: "Meteorite" },
nchd: { name: "Necromancer's Hand" },
nseg: { name: "Nest of Eggs" },
odgm: { name: "Odd Gemstone" },
plbk: { name: "Peeled Bark" },
pram: { name: "Piece of Rusty Armour" },
plft: { name: "Pile of Feathers (3 Feathers)" },
plfr: { name: "Pile of Furs" },
plst: { name: "Pile of Sticks (3 Sticks)" },
hfmp: { name: "Pirate Map Fragment" },
prbd: { name: "Potent Red Berry Dye" },
pksd: { name: "Pumpkin Seeds" },
rdsv: { name: "Radioactive Sliver" },
frwl: { name: "Raptor Familiar" },
rpft: { name: "Raptor Feathers" },
rahb: { name: "<NAME>" },
rtgm: { name: "Relic of The Gods (Mutation)" },
rtgn: { name: "Relic of The Gods (Natural)" },
sdtm: { name: "Sands of Time" },
sash: { name: "Small Arcane Shard" },
scps: { name: "Small Coin Purse" },
smgb: { name: "Small Grub" },
smcn: { name: "Small Pile of Coins" },
sstk: { name: "Small Stick" },
sthr: { name: "Small Thread" },
skfg: { name: "Snake Fang" },
sksn: { name: "Snake Skin" },
spfm: { name: "Sprite Familiar" },
sdst: { name: "Stardust" },
sube: { name: "Sugar Beet" },
frsn: { name: "Swan Familiar)" },
tcng: { name: "Tiny Copper Nugget" },
tnrk: { name: "Tiny Rock" },
tcgl: { name: "Touch of Glimmer (r)" },
trsn: { name: "Tree Resin" },
tghs: { name: "Tulia's Golden Horse Shoe" },
tmfl: { name: "Tulia's Mane Flower" },
vdng: { name: "Very Dangerous Encounter" },
wfsk: { name: "Wolf Skull" },
},
// nordanner mutations
mutations: {
agis: { name: "Agis" },
aglr: { name: "Angler" },
atln: { name: "Atlanticus" },
blsp: { name: "Bloodsplash" },
btfl: { name: "Butterfly" },
cndy: { name: "<NAME>" },
crvd: { name: "Carved" },
cmph: { name: "Cemophora" },
chkd: { name: "Checkered" },
chbl: { name: "<NAME>" },
chlt: { name: "<NAME>" },
clpd: { name: "<NAME>" },
egng: { name: "Eggnog" },
embm: { name: "Emblem" },
fawn: { name: "Fawn" },
flmt: { name: "Flametouched" },
frst: { name: "Frostsplash" },
glmr: { name: "Glimmer" },
grfn: { name: "<NAME>" },
hnna: { name: "Henna" },
hmbd: { name: "Hummingbird" },
inks: { name: "Inkspill" },
jgur: { name: "Jaguar" },
kscl: { name: "Kascel" },
lctb: { name: "<NAME>" },
mdrn: { name: "Mandarin" },
muln: { name: "Moulin" },
orth: { name: "Ortho" },
pnda: { name: "Panda" },
pfwl: { name: "Peafowl" },
pmkn: { name: "Pumpkin" },
sndp: { name: "Snowdripple" },
snfl: { name: "Snowfall" },
sngb: { name: "Snowglobe" },
stgl: { name: "Stained Glass" },
tbby: { name: "Tabby" },
tigr: { name: "Tiger" },
tgrn: { name: "<NAME>" },
unvs: { name: "Universe" },
wood: { name: "Wooded" },
zbra: { name: "Zebra" },
},
// ======================================================================
// helper functions
// called from roll button handler
// Orchestrates one full roll: clears the previous output, rolls the region,
// and (on success) rolls item rewards, then reveals both output panes.
roll: function () {
  // "this" is module - nord.cartography
  try {
    const output = nord.state.cartography.output,
      statsout = nord.state.cartography.statsout;
    // console.dir(state);//debugger;
    // clear output from previous use
    if (output.firstChild) rzl.destroyChildren(output);
    if (statsout.firstChild) rzl.destroyChildren(statsout);
    // region roll failed: reveal the failure output and stop here
    if (!this.rollRegion()) {
      output.classList.remove("rzl-hidden");
      statsout.classList.remove("rzl-hidden");
      return;
    }
    // region roll succeeded: roll item rewards, then reveal the panes
    this.rollItems();
    output.classList.remove("rzl-hidden");
    statsout.classList.remove("rzl-hidden");
  } catch (e) {
    console.error(e);
    return;
  }
},
// roll for region success
// Computes the success chance from region base chance, horse level, group/compass
// bonuses, and net mutation (dis)advantages, then rolls 1-100 against it.
// Returns true on success (also triggering the region's risk roll, if any).
rollRegion: function () {
  // "this" is module - nord.cartography
  const fields = nord.state.cartography.form.elements,
    output = nord.state.cartography.output,
    statsout = nord.state.cartography.statsout,
    reg = nord.state.cartography.rollState.region;
  // get id of region selected and level of horse selected
  reg.id = fields.region.selectedIndex;
  reg.rlevel = nord.cartography.regions[reg.id].level;
  // NOTE(review): select values are strings; the comparisons below rely on JS
  // numeric coercion — confirm the horse option values are always numeric
  reg.hlevel = fields.horse.value;
  // check horse level is high enough
  if (reg.hlevel < reg.rlevel) {
    console.log("region failure");
    rzl.addDiv(output, { content: "Region : Failure" });
    return false;
  }
  // chance calculation
  reg.chance = nord.cartography.regions[reg.id].chance; // set base chance from region
  console.log(reg.chance);
  for (let i = reg.hlevel; i > reg.rlevel; i--) reg.chance += 15; // +15 per level above the region's requirement
  console.log(reg.chance);
  if (fields.group.checked) reg.chance += 15; // group horse boost
  if (fields.compass.checked) reg.chance += 20; // compass boost
  // apply mutation advantages + disadvantages: net advantage +5, net disadvantage -5, tie 0
  reg.advantages = fields.advantages.selectedOptions.length;
  reg.disadvantages = fields.disadvantages.selectedOptions.length;
  switch (true) {
    case reg.advantages - reg.disadvantages > 0:
      reg.mutationEffect = 5;
      break;
    case reg.advantages - reg.disadvantages < 0:
      reg.mutationEffect = -5;
      break;
    default:
      reg.mutationEffect = 0;
  }
  reg.chance += reg.mutationEffect;
  // limit to 100 percent to avoid possible false negatives
  if (reg.chance > 100) reg.chance = 100;
  reg.rng = rzl.rng1to(100);
  rzl.addDiv(statsout, {
    content: "Region > Chance: " + reg.chance + " Roll: " + reg.rng,
  });
  if (reg.chance === 100 || reg.rng <= reg.chance) {
    console.log("region success");
    rzl.addDiv(output, { content: "Region : Success" });
    // rzl.addDiv(fields.output,{content:"__ is victorious!"});
  } else {
    console.log("region failure");
    rzl.addDiv(output, { content: "Region : Failure" });
    // rzl.addDiv(fields.output,{content:"__ was unsuccessful!"});
    return false;
  }
  // regions with a risk entry also roll for injury/cooldown
  if (this.regions[reg.id].risk) this.rollRisk();
  return true;
},
// roll risk for region
// Rolls 1-100 against the region's risk chance (reduced to pchance when the
// protective potion box is checked) and reports Hurt/Safe to the output pane.
rollRisk: function () {
  // "this" is module - nord.cartography
  const output = nord.state.cartography.output,
    statsout = nord.state.cartography.statsout,
    r = this.regions[nord.state.cartography.rollState.region.id].risk,
    risk = nord.state.cartography.rollState.risk;
  // NOTE(review): if the potion box were checked for a region without pchance,
  // risk.chance would be undefined and the roll would always read as safe;
  // applyRegion hides the box for such regions, so presumably unreachable — confirm.
  risk.potion = nord.state.cartography.form.elements.potion.checked;
  risk.chance = risk.potion ? r.pchance : r.chance;
  risk.cooldown = r.cooldown;
  risk.rng = rzl.rng1to(100);
  rzl.addDiv(statsout, {
    content: `Risk > Chance: ${risk.chance} Roll: ${risk.rng}`,
  });
  if (risk.chance === 100 || risk.rng <= risk.chance) {
    console.log("hurt by risk");
    rzl.addDiv(output, { content: "Risk : Hurt" });
    // rzl.addDiv(output,{content:"__ was hurt by risk! Cooldown: " + risk.cooldown});
  } else {
    console.log("safe from risk");
    rzl.addDiv(output, { content: "Risk : Safe" });
    // rzl.addDiv(output,{content:"__ is safe from risk!"});
    return false;
  }
},
// roll item rewards
// Rolls 1-3 reward items (2-4 with a falcon/raptor). Each item first rolls a
// rarity tier from a weighted pool, then picks uniformly from the selected
// region's item list for that tier. Results are rendered as "A, B and C".
rollItems: function () {
  // "this" is module - nord.cartography
  const fields = nord.state.cartography.form.elements,
    rollState = nord.state.cartography.rollState,
    output = nord.state.cartography.output,
    statsout = nord.state.cartography.statsout,
    item = (rollState.items = {});
  // falcon/raptor grants one extra item (2-4 instead of 1-3)
  item.count = fields.falrap.checked ? rzl.rng1to(3) + 1 : rzl.rng1to(3);
  item.rolled = [];
  item.tiers = [];
  // binoculars swap in the alternative tier weights (binoChance)
  item.binoculars = fields.binoculars.checked;
  item.chanceKey = item.binoculars ? "binoChance" : "chance";
  let text = "";
  for (let i = 1; i <= item.count; i++) {
    // weighted pool of tier indices: each index repeated once per point of
    // its (bino)chance weight; one entry is then drawn uniformly
    const tierArr = this.tiers.reduce((a, c, i) => {
      for (let l = 0; l < c[item.chanceKey]; l++) a.push(i);
      return a;
    }, []);
    let tierId = rzl.randomArrayItem(tierArr);
    item.tiers.push(this.tiers[tierId]);
    // uniform pick from the region's item key list for that tier
    let itemArr = this.regions[rollState.region.id].items[tierId];
    let itemId = rzl.randomArrayItem(itemArr);
    item.rolled.push(this.items[itemId]);
    // assemble "A, B and C" style display text
    switch (true) {
      case i === item.count && item.count > 1:
        text += " and " + this.items[itemId].name;
        break;
      case i === item.count - 1 || item.count === 1:
        text += this.items[itemId].name;
        break;
      case i < item.count - 1:
        text += this.items[itemId].name + ", ";
        break;
    }
  }
  rzl.addDiv(output, { content: "Found : " + text });
  console.log(item);
},
// update the ui based on region
// Shows/hides the potion checkbox and rebuilds the advantage/disadvantage
// mutation selects to match the currently selected region.
applyRegion: function () {
  // "this" is module - nord.cartography: both call sites invoke
  // nord.cartography.applyRegion(), which is what makes this.mutations resolve
  const fields = nord.state.cartography.form.elements,
    rootNode = nord.state.cartography.rootNode,
    region = nord.cartography.regions[fields.region.selectedIndex],
    potionBox = rzl.findChild(rootNode, "div", "potionBox"),
    // size the multi-selects to the longer of the two mutation lists
    size =
      region.advantages.length > region.disadvantages.length
        ? region.advantages.length
        : region.disadvantages.length;
  // potion box is shown only for regions whose risk defines a potion-reduced chance
  if (region.risk && region.risk.pchance) {
    potionBox.classList.remove("rzl-hidden");
    potionBox.firstChild.innerHTML = region.risk.potion + " Potion:";
  } else {
    potionBox.classList.add("rzl-hidden");
    potionBox.firstChild.innerHTML = "";
  }
  // mutations: map region mutation keys to display names and refill both selects
  const adv = {},
    dis = {};
  region.advantages.forEach((m) => {
    adv[m] = this.mutations[m].name;
  });
  rzl.setSelOpts(fields.advantages, adv);
  fields.advantages.size = size;
  region.disadvantages.forEach((m) => {
    dis[m] = this.mutations[m].name;
  });
  rzl.setSelOpts(fields.disadvantages, dis);
  fields.disadvantages.size = size;
},
// ======================================================================
// event handlers
// called on UI build
// Caches DOM nodes on the UI state object, initializes per-roll scratch state,
// fills the region/horse drop-downs from the data tables, and syncs the form
// with the initially selected region.
built: function (ev) {
  // "this" is the UI state object (nord.state.cartography) per the uiDef builtCB
  try {
    this.form = rzl.findChild(this.rootNode, "form", "arena-form");
    this.output = rzl.findChild(this.rootNode, "div", "output");
    this.statsout = rzl.findChild(this.rootNode, "div", "statsout");
    // scratch state written by rollRegion / rollRisk / rollItems
    this.rollState = { region: {}, risk: {}, stats: {} };
    // populate drop-downs with region and horse display names
    this.optsRegion = nord.cartography.regions.map((o) => o.name);
    rzl.setSelOpts(this.form.region, this.optsRegion);
    this.optsHorse = nord.cartography.horses.map((o) => o.name);
    rzl.setSelOpts(this.form.horse, this.optsHorse);
    // sync potion box and mutation lists with the default region selection
    nord.cartography.applyRegion();
  } catch (e) {
    console.error(e);
    return;
  }
},
// click handler for roll button: delegate to the module's roll()
rollClick: function (ev) {
  // "this" is the button element; use the module reference explicitly
  nord.cartography.roll();
},
// change handler for the region drop-down: re-sync the form with the new region
regionChange: function (ev) {
  nord.cartography.applyRegion();
},
// ======================================================================
// SECTION: Definitions
uiDef: {
meta: {
name: "cartography",
domain: "nord",
pnode: "norduiBox",
builtCB: "nord.cartography.built",
displayedCB: "nord.cartography.displayed",
},
view: {
style: {
margin: "auto",
display: "flex",
"flex-flow": "column nowrap",
"align-items": "center",
},
children: [
{ tag: "h1", class: "title", content: "Cartography Roller" },
{
tag: "form",
id: "arena-form",
class: "rzl-form",
children: [
{
class: "rzl-form-row",
children: [
{
class: "rzl-form-item",
children: [
{
tag: "label",
content: "Region:",
props: { htmlFor: "region" },
},
{
tag: "select",
id: "region",
events: { change: "nord.cartography.regionChange" },
},
],
},
{
class: "rzl-form-item",
children: [
{
tag: "label",
content: "Horse:",
props: { htmlFor: "horse" },
},
{ tag: "select", id: "horse" },
],
},
],
},
{
class: "rzl-form-row",
children: [
// 15% chance boost
{
class: "rzl-form-item",
children: [
{
tag: "label",
content: "Group Horse:",
props: { htmlFor: "group" },
},
{ tag: "input", id: "group", props: { type: "checkbox" } },
],
},
// 20% chance boost
{
class: "rzl-form-item",
children: [
{
tag: "label",
content: "Compass:",
props: { htmlFor: "compass" },
},
{
tag: "input",
id: "compass",
props: { type: "checkbox" },
},
],
},
// risk reduction
{
id: "potionBox",
class: "rzl-form-item rzl-hidden",
children: [
{
tag: "label",
content: "Potion:",
props: { htmlFor: "potion" },
},
{ tag: "input", id: "potion", props: { type: "checkbox" } },
],
},
],
},
{
class: "rzl-form-row",
children: [
// +1 item
{
class: "rzl-form-item",
children: [
{
tag: "label",
content: "Falcon/Raptor:",
props: { htmlFor: "falrap" },
},
{ tag: "input", id: "falrap", props: { type: "checkbox" } },
],
},
// tier chance boost
{
class: "rzl-form-item",
children: [
{
tag: "label",
content: "Binoculars:",
props: { htmlFor: "binoculars" },
},
{
tag: "input",
id: "binoculars",
props: { type: "checkbox" },
},
],
},
],
},
{
class: "rzl-form-row",
children: [
{
class: "rzl-form-item",
children: [
{
tag: "label",
content: "Advantages:",
props: { htmlFor: "advantages" },
},
{
tag: "select",
id: "advantages",
props: { multiple: true },
},
// {tag:"select",id:"advantages",props:{multiple:true},style:{"max-height":"32px"}}
],
},
{
class: "rzl-form-item",
children: [
{
tag: "label",
content: "Disadvantages:",
props: { htmlFor: "disadvantages" },
},
{
tag: "select",
id: "disadvantages",
props: { multiple: true },
},
// {tag:"select",id:"disadvantages",props:{multiple:true},style:{"max-height":"32px"}}
],
},
],
},
{
class: "rzl-form-row",
children: [
{
tag: "button",
id: "btnRoll",
class: "rzl-btn",
content: "Roll",
events: { click: "nord.cartography.rollClick" },
props: { type: "button" },
},
],
},
],
},
{
id: "output",
class: "rzl-hidden",
style: { "text-align": "center" },
},
{
id: "statsout",
class: "rzl-hidden",
style: { "text-align": "center" },
},
],
},
},
};
|
#!/usr/bin/env bash
# Stages locally built lgcd binaries into docker/bin and builds the
# development Docker image.
#
# Fixes over the previous version: `rm docker/bin/*` ran before the directory
# existed (failing on a fresh checkout) and `mkdir docker/bin` failed on every
# subsequent run; the script also continued past copy/strip failures.
set -euo pipefail

DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd "$DIR/.."

# Image coordinates and build tree, overridable from the environment.
DOCKER_IMAGE=${DOCKER_IMAGE:-lgcpay/lgcd-develop}
DOCKER_TAG=${DOCKER_TAG:-latest}
BUILD_DIR=${BUILD_DIR:-.}

# Recreate the staging directory idempotently: -p tolerates an existing dir,
# -f tolerates an empty one.
mkdir -p docker/bin
rm -f docker/bin/*

cp "$BUILD_DIR"/src/lgcd docker/bin/
cp "$BUILD_DIR"/src/lgc-cli docker/bin/
cp "$BUILD_DIR"/src/lgc-tx docker/bin/

# Strip debug symbols to keep the image small.
strip docker/bin/lgcd
strip docker/bin/lgc-cli
strip docker/bin/lgc-tx

docker build --pull -t "$DOCKER_IMAGE:$DOCKER_TAG" -f docker/Dockerfile docker
|
#!/usr/bin/env sh
#
# Ping Identity DevOps - Docker Build Hooks
#
#- Creates a message of the day (MOTD) file based on information provided by:
#- * Docker Variables
#- * Github MOTD file from PingIdentity Devops Repo
#- * Server-Profile motd file
#
${VERBOSE} && set -x

# shellcheck source=pingcommon.lib.sh
. "${HOOKS_DIR}/pingcommon.lib.sh"

_motdFile="/etc/motd"
_motdJsonFile="/tmp/motd.json"
_currentDate=$(date +%Y%m%d)

# Header block: image identity and start time.
echo "
##################################################################################
Ping Identity DevOps Docker Image
Version: ${IMAGE_VERSION}
DevOps User: ${PING_IDENTITY_DEVOPS_USER}
Hostname: ${HOSTNAME}
Started: $(date)
##################################################################################" > "${_motdFile}"

#
# Append the MOTD from the server profile if one is staged.
#
if test -f "${STAGING_DIR}/motd" ; then
    cat "${STAGING_DIR}/motd" >> "${_motdFile}"
fi

if test -z "${MOTD_URL}" ; then
    echo "Not pulling MOTD since MOTD_URL is not set"
else
    # -w '%{http_code}' prints only the HTTP status; the body goes to the JSON file.
    _motdCurlResult=$(curl -G -o "${_motdJsonFile}" -w '%{http_code}' "${MOTD_URL}" 2> /dev/null)
    # Quoted operand keeps the numeric test well-formed if curl emitted nothing.
    if test "${_motdCurlResult}" -eq 200 ; then
        echo "Successfully downloaded MOTD from ${MOTD_URL}"
        # Select messages whose validFrom/validTo window covers today and
        # flatten each into "---- SUBJECT: ..." followed by its message lines.
        _jqExpr=".[] | select(.validFrom <= ${_currentDate} and .validTo >= ${_currentDate}) |
\"\n---- SUBJECT: \" + .subject + \"\n\" +
(.message | join(\"\n\")) +
\"\n\""
        # Product name = IMAGE_VERSION up to the first dash.
        _imageName=$(echo "${IMAGE_VERSION}" | sed 's/-.*//')
        # jq reads the file directly; the previous `cat | jq` was redundant.
        jq -r ".devops | ${_jqExpr}" "${_motdJsonFile}" >> "${_motdFile}"
        jq -r ".${_imageName} | ${_jqExpr}" "${_motdJsonFile}" >> "${_motdFile}"
    else
        echo_red "Unable to download MOTD from ${MOTD_URL}"
    fi
fi

echo "##################################################################################" >> "${_motdFile}"

echo "Current ${_motdFile}"
cat_indent "${_motdFile}"
|
#!/bin/bash
# Provisioning script: installs Node.js, unpacks the promotions-manager API
# artifact, exports runtime environment variables, and daemonizes the API
# with pm2 so it restarts on boot.
echo '=============== Starting init script for My-eCommerce-AppAPI ==============='

# save all env for debugging
printenv > /var/log/colony-vars-"$(basename "$BASH_SOURCE" .sh)".txt

echo '==> Installing Node.js and NPM'
sudo apt-get update -y
sudo apt-get install -y curl
curl -sL https://deb.nodesource.com/setup_10.x | sudo bash -
apt-get install -y nodejs

echo '==> Extract api artifact to /var/promotions-manager-api'
# mkdir -p keeps re-runs of this script from failing on existing directories;
# the globs must stay unquoted so the archive name pattern expands.
mkdir -p "$ARTIFACTS_PATH/drop"
tar -xvf "$ARTIFACTS_PATH"/promotions-manager-api.*.tar.gz -C "$ARTIFACTS_PATH/drop/"
mkdir -p /var/promotions-manager-api/
tar -xvf "$ARTIFACTS_PATH"/drop/drop/promotions-manager-api.*.tar.gz -C /var/promotions-manager-api

echo '==> Set the DATABASE_HOST env var to be globally available'
DATABASE_HOST=$DATABASE_HOST.$DOMAIN_NAME
echo "DATABASE_HOST=$DATABASE_HOST" >> /etc/environment
echo "RELEASE_NUMBER=$RELEASE_NUMBER" >> /etc/environment
echo "API_BUILD_NUMBER=$API_BUILD_NUMBER" >> /etc/environment
echo "API_PORT=$API_PORT" >> /etc/environment
source /etc/environment

echo '==> Install PM2, it provides an easy way to manage and daemonize nodejs applications'
npm install -g pm2

echo '==> Start our api and configure as a daemon using pm2'
cd /var/promotions-manager-api
pm2 start /var/promotions-manager-api/index.js
pm2 save
# make the saved process list immutable so nothing clobbers it before startup is wired
chattr +i /root/.pm2/dump.pm2
sudo su -c "env PATH=$PATH:/home/unitech/.nvm/versions/node/v4.3/bin pm2 startup systemd -u root --hp /root"
|
<gh_stars>1-10
/*
*
*/
package net.community.chest.jfree.jfreechart.data;
import net.community.chest.convert.DoubleValueStringConstructor;
import org.jfree.data.Range;
import org.w3c.dom.Element;
/**
* <P>Copyright 2008 as per GPLv2</P>
*
* @author <NAME>.
* @since Jan 27, 2009 2:24:55 PM
*/
public class BaseRange extends Range {
    /** XML attribute names holding the range bounds. */
    public static final String LOWER_ATTR = "lower", UPPER_ATTR = "upper";
    /**
     *
     */
    private static final long serialVersionUID = 1983493529169345309L;
    /** Builds a range from explicit lower/upper bounds. */
    public BaseRange (double lower, double upper)
    {
        super(lower, upper);
    }
    /**
     * Builds a range from the {@code lower}/{@code upper} attributes of the
     * given XML element, parsed via {@link DoubleValueStringConstructor}.
     */
    public BaseRange (Element elem) throws RuntimeException
    {
        this(DoubleValueStringConstructor.DEFAULT.fromString(elem.getAttribute(LOWER_ATTR)),
             DoubleValueStringConstructor.DEFAULT.fromString(elem.getAttribute(UPPER_ATTR)));
    }
}
|
from typing import List
def reorder_columns(col_list: List[str], col_order: List[int]) -> List[str]:
    """Return a new list with the entries of ``col_list`` arranged according
    to the index sequence ``col_order`` (indices may repeat or be omitted)."""
    reordered = []
    for index in col_order:
        reordered.append(col_list[index])
    return reordered
|
package com.pinmi.react.printer.adapter;
/**
* Created by xiesubin on 2017/9/21.
*/
public class USBPrinterDeviceId extends PrinterDeviceId {
    private Integer vendorId;
    private Integer productId;

    // Construction goes through valueOf() only.
    private USBPrinterDeviceId(Integer vendorId, Integer productId) {
        this.vendorId = vendorId;
        this.productId = productId;
    }

    /** Factory method identifying a USB printer by vendor/product pair. */
    public static USBPrinterDeviceId valueOf(Integer vendorId, Integer productId) {
        return new USBPrinterDeviceId(vendorId, productId);
    }

    public Integer getVendorId() {
        return vendorId;
    }

    public Integer getProductId() {
        return productId;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        if (!super.equals(o)) return false;
        USBPrinterDeviceId other = (USBPrinterDeviceId) o;
        return vendorId.equals(other.vendorId) && productId.equals(other.productId);
    }

    @Override
    public int hashCode() {
        return 31 * vendorId.hashCode() + productId.hashCode();
    }
}
|
import tensorflow as tf
def run_training_job(experiment_fn, arguments, output_dir):
    """Build and run a training job via the legacy ``tf.contrib.learn`` API.

    NOTE(review): ``tf.contrib`` was removed in TensorFlow 2.x; this only
    runs under TF 1.x.

    Args:
        experiment_fn: Callable invoked with ``**arguments``; must return an
            object exposing ``model_fn``, ``input_fn`` and ``train_steps``
            (presumably an Experiment-like object — confirm with callers).
        arguments: Dict of keyword arguments for ``experiment_fn``.
        output_dir: Directory where the estimator writes checkpoints.
    """
    # Call the experiment function with the provided arguments
    experiment = experiment_fn(**arguments)
    # Create a run configuration for the training job
    run_config = tf.contrib.learn.RunConfig(model_dir=output_dir)
    # Create an estimator based on the experiment
    estimator = tf.contrib.learn.Estimator(model_fn=experiment.model_fn, config=run_config)
    # Define the input function for the training data
    input_fn = experiment.input_fn
    # Execute the training job
    estimator.fit(input_fn=input_fn, steps=experiment.train_steps)
|
#!/bin/bash
# Validate and normalize the action's INPUT_* environment variables into the
# kustomize_* shell variables used by the rest of the script.
function parse_inputs {
  # required inputs
  if [ "${INPUT_KUSTOMIZE_VERSION}" != "" ]; then
    kustomize_version=${INPUT_KUSTOMIZE_VERSION}
  else
    echo "Input kustomize_version cannot be empty."
    exit 1
  fi
  # optional inputs
  kustomize_build_dir="."
  # Bug fix: the original tested  A != "" || A != "."  which is always true
  # (no value equals both at once), so an empty input clobbered the default.
  if [ "${INPUT_KUSTOMIZE_BUILD_DIR}" != "" ] && [ "${INPUT_KUSTOMIZE_BUILD_DIR}" != "." ]; then
    kustomize_build_dir=${INPUT_KUSTOMIZE_BUILD_DIR}
  fi
  kustomize_comment=0
  if [ "${INPUT_KUSTOMIZE_COMMENT}" == "1" ] || [ "${INPUT_KUSTOMIZE_COMMENT}" == "true" ]; then
    kustomize_comment=1
  fi
  kustomize_output_file=""
  if [ -n "${INPUT_KUSTOMIZE_OUTPUT_FILE}" ]; then
    kustomize_output_file=${INPUT_KUSTOMIZE_OUTPUT_FILE}
  fi
  kustomize_build_options=""
  if [ -n "${INPUT_KUSTOMIZE_BUILD_OPTIONS}" ]; then
    kustomize_build_options=${INPUT_KUSTOMIZE_BUILD_OPTIONS}
  fi
  # kustomize v3 spells this flag with underscores.
  enable_alpha_plugins=""
  if [ "${INPUT_ENABLE_ALPHA_PLUGINS}" == "1" ] || [ "${INPUT_ENABLE_ALPHA_PLUGINS}" == "true" ]; then
    enable_alpha_plugins="--enable_alpha_plugins"
  fi
}
# Locate the requested kustomize release on GitHub and install the binary
# into /usr/bin.
function install_kustomize {
  echo "getting download url for kustomize ${kustomize_version}"
  url=""
  for i in {1..100}; do
    url=$(curl -s "https://api.github.com/repos/kubernetes-sigs/kustomize/releases?per_page=100&page=$i" | jq -r '.[].assets[] | select(.browser_download_url | test("kustomize(_|.)?(v)?'$kustomize_version'_linux_amd64")) | .browser_download_url')
    # Bug fix: the original unquoted [ ! -z $url ] breaks when jq emits
    # several matching URLs (word splitting produces a bad test expression).
    if [ -n "${url}" ]; then
      echo "Download URL found in ${url}"
      break
    fi
  done
  # Fail fast when no release asset matched instead of curling an empty URL.
  if [ -z "${url}" ]; then
    echo "Failed to download kustomize v${kustomize_version}."
    exit 1
  fi
  echo "Downloading kustomize v${kustomize_version}"
  # Escaped dots so e.g. "footargz" cannot accidentally match.
  if [[ "${url}" =~ \.tar\.gz$ ]]; then
    curl -s -S -L ${url} | tar -xz -C /usr/bin
  else
    curl -s -S -L ${url} -o /usr/bin/kustomize
  fi
  if [ "${?}" -ne 0 ]; then
    echo "Failed to download kustomize v${kustomize_version}."
    exit 1
  fi
  echo "Successfully downloaded kustomize v${kustomize_version}."
  echo "Allowing execute privilege to kustomize."
  chmod +x /usr/bin/kustomize
  if [ "${?}" -ne 0 ]; then
    echo "Failed to update kustomize privilege."
    exit 1
  fi
  echo "Successfully added execute privilege to kustomize."
}
# Entry point: loads the kustomize_build helper from the script's own
# directory, validates inputs, installs kustomize, then runs the build.
function main {
  scriptDir=$(dirname ${0})
  source ${scriptDir}/kustomize_build.sh
  parse_inputs
  install_kustomize
  kustomize_build
}
# NOTE(review): "${*}" flattens all arguments into a single word; harmless
# here since main reads no positional parameters, but "$@" is conventional.
main "${*}"
|
<filename>Modules/IO/Carto/test/otbImageToOSMVectorDataGenerator.cxx
/*
* Copyright (C) 2005-2017 Centre National d'Etudes Spatiales (CNES)
*
* This file is part of Orfeo Toolbox
*
* https://www.orfeo-toolbox.org/
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "otbCommandLineArgumentParser.h"
#include "otbImageToOSMVectorDataGenerator.h"
#include "otbVectorDataFileWriter.h"
#include "otbVectorImage.h"
#include "otbImageFileReader.h"
#include "otbVectorDataFileWriter.h"
typedef otb::VectorImage<unsigned int, 2> ImageType;
typedef otb::ImageToOSMVectorDataGenerator<ImageType> FilterType;
typedef FilterType::VectorDataType VectorDataType;
typedef otb::ImageFileReader<ImageType> ReaderType;
typedef otb::VectorDataFileWriter<VectorDataType> VectorDataFileWriterType;
// Test driver: reads an image, queries OSM data over its footprint (from a
// local XML file when --OSM is given, otherwise via URL) and writes the
// vector data matching the optional "key,value" class filter (--Key),
// defaulting to the "highway" class.
int otbImageToOSMVectorDataGenerator(int argc, char * argv[])
{
  // Parse command line parameters
  typedef otb::CommandLineArgumentParser ParserType;
  ParserType::Pointer parser = ParserType::New();
  parser->AddInputImage();
  parser->AddOption("--OutputVectorData","Output VectorData","-out", true);
  parser->AddOption("--Key","Key to search in the XML OSM file","-key", 1, false);
  parser->AddOption("--OSM","OSM XML file to be parsed","-osm", 1, false);
  typedef otb::CommandLineArgumentParseResult ParserResultType;
  ParserResultType::Pointer parseResult = ParserResultType::New();
  try
    {
    parser->ParseCommandLine(argc, argv, parseResult);
    }
  catch ( itk::ExceptionObject & )
    {
    return EXIT_FAILURE;
    }
  // Instantiate the image reader
  ReaderType::Pointer reader = ReaderType::New();
  reader->SetFileName(parseResult->GetInputImage());
  reader->UpdateOutputInformation();
  // VectorData generator instantiation
  FilterType::Pointer vdgenerator = FilterType::New();
  vdgenerator->SetInput(reader->GetOutput());
  if(parseResult->IsOptionPresent("--OSM"))
    {
    vdgenerator->SetUseUrl(false);
    vdgenerator->SetFileName(parseResult->GetParameterString("--OSM"));
    }
  vdgenerator->Update();
  // Write the generated vector data
  VectorDataFileWriterType::Pointer writer = VectorDataFileWriterType::New();
  writer->SetFileName(parseResult->GetParameterString("--OutputVectorData"));
  if(parseResult->IsOptionPresent("--Key"))
    {
    // Bug fix: --Key is optional, but the original read its value (and the
    // dead one-iteration parsing loop ran) before this presence check.
    // Split the "key,value" argument on the first comma; the value part is
    // empty when no comma is present.
    typedef std::pair<std::string, std::string> KeyValueType;
    KeyValueType keyvalue;
    const std::string str = parseResult->GetParameterString("--Key");
    const size_t pos = str.find(",");
    keyvalue.first = str.substr (0, pos);
    if(pos != std::string::npos)
      keyvalue.second = str.substr (pos+1);
    std::cout <<"Searching for class "<<keyvalue.first
              << " and subclass "<< keyvalue.second << std::endl;
    const VectorDataType *vd =
      vdgenerator->GetVectorDataByName(keyvalue.first,
                                       keyvalue.second);
    writer->SetInput(vd);
    }
  else
    {
    const VectorDataType *vd =
      vdgenerator->GetVectorDataByName("highway");
    writer->SetInput(vd);
    }
  // trigger the execution
  writer->Update();
  return EXIT_SUCCESS;
}
|
/* ISC license. */
#include <skabus/rpc.h>
#include "skabus-rpc-internal.h"
/* Convenience wrapper: sends the iovec payload `v` (with `nfds` file
   descriptors) on interface `ifname` by delegating to
   skabus_rpc_sendvq_withfds() with 0 for its two extra leading arguments.
   NOTE(review): the meaning of those zeroed parameters is defined by
   skabus_rpc_sendvq_withfds(); confirm against skabus/rpc.h. */
uint64_t skabus_rpc_sendv_withfds (skabus_rpc_t *a, char const *ifname, struct iovec const *v, unsigned int vlen, int const *fds, unsigned int nfds, unsigned char const *bits, tain_t const *limit, tain_t const *deadline, tain_t *stamp)
{
  return skabus_rpc_sendvq_withfds(a, 0, 0, ifname, v, vlen, fds, nfds, bits, limit, deadline, stamp) ;
}
|
<gh_stars>100-1000
package workflows
import (
res "github.com/pikami/tiktok-dl/resources"
fileio "github.com/pikami/tiktok-dl/utils/fileio"
log "github.com/pikami/tiktok-dl/utils/log"
)
// CanUseDownloadBatchFile reports whether a batch-file path was supplied,
// i.e. whether DownloadBatchFile can be used.
func CanUseDownloadBatchFile(batchFilePath string) bool {
	return len(batchFilePath) > 0
}
// DownloadBatchFile - Download items from batch file.
// Terminates the process (log.LogFatal) when the file at batchFilePath does
// not exist; otherwise feeds each line of the file to downloadItem.
func DownloadBatchFile(batchFilePath string) {
	if !fileio.CheckIfExists(batchFilePath) {
		log.LogFatal(res.ErrorPathNotFound, batchFilePath)
	}
	fileio.ReadFileLineByLine(batchFilePath, downloadItem)
}
// downloadItem dispatches one batch-file line to the matching workflow.
// Blank lines and '#' comment lines are skipped — the original indexed
// batchItem[0] unconditionally and panicked on empty lines.
func downloadItem(batchItem string) {
	if len(batchItem) == 0 || batchItem[0] == '#' {
		return
	}
	StartWorkflowByParameter(batchItem)
}
|
public static boolean isPalindrome(String str) {
str = str.replaceAll("[^a-zA-Z0-9]", "").toLowerCase(); // Remove non-alphanumeric characters and convert to lowercase
int left = 0;
int right = str.length() - 1;
while (left < right) {
if (str.charAt(left) != str.charAt(right)) {
return false; // Not a palindrome
}
left++;
right--;
}
return true; // Palindrome
}
|
#!/bin/bash
# Author: Burak Himmetoglu
# Date : 04-13-2017
# -- AlcNet -- #
# Download data from PubChem repository
wget ftp://ftp.ncbi.nlm.nih.gov/pubchem/Compound_3D/01_conf_per_cmpd/SDF/00000001_00025000.sdf.gz
wget ftp://ftp.ncbi.nlm.nih.gov/pubchem/Compound_3D/01_conf_per_cmpd/SDF/00025001_00050000.sdf.gz
wget ftp://ftp.ncbi.nlm.nih.gov/pubchem/Compound_3D/01_conf_per_cmpd/SDF/00050001_00075000.sdf.gz
wget ftp://ftp.ncbi.nlm.nih.gov/pubchem/Compound_3D/01_conf_per_cmpd/SDF/00075001_00100000.sdf.gz
# Bug fix: mkdir has no -o flag (the original command failed here);
# -p creates the directory and is a no-op when it already exists.
mkdir -p SDF
mv *sdf.gz ./SDF
|
#!/bin/bash
#DM*20151117 NotepadPlusPlusPortable(6.8.6) French version successfully based
#On Host: KUbuntu(15.10), Docker(1.9.0) through KRDC-VNC(4.14.1)
#On Client with Root account: Ubuntu(14.10), Wine(1.17.50)
#DM*20151117 downloadFilePath complex selection files
#Specific Windows PortableApps registry
winProgramName="Notepad++Portable"
pafStandardProgramName="NotepadPlusPlusPortable.paf.exe"
winInstallFilePath='C:\Installers\NotepadPlusPlusPortable.paf.exe -q'
postInstallAliasScript="postInstall_AliasForNotepadPortable.sh"
downloadPattern=~/Downloads/Notepad*.paf.exe
#Common for all Windows PortableApps registry
# Pick the most recently modified download matching the pattern.
# NOTE(review): this script deliberately relies on unquoted expansion
# ($downloadPattern must glob); cut -d" " -f8 assumes the %Tc date format
# of the current locale and breaks on paths with spaces — verify locally.
downloadFilePath=$(find $downloadPattern -printf "%T@ %Tc %p\n" | sort -n | tail -1 | cut -d" " -f8)
installDirectory=~/.wine/drive_c/Installers
installFilePath=$installDirectory/$pafStandardProgramName
portableAppsDirectory=~/.wine/drive_c/PortableApps
targetDirectory=$portableAppsDirectory/$winProgramName
installLogFilePath=$targetDirectory/start$pafStandardProgramName.log.txt
winTargetDirectory="C:\PortableApps\\"$winProgramName
#Common for all Windows PortableApps script
# Abort when no matching download exists.
if [ $downloadFilePath ];
then
  echo "$downloadFilePath new version available ! Preparing installation..."
else
  echo "$downloadPattern required but not found. Installation aborted !"
  exit
fi
#Directories and bash_aliases
# Ensure ~/.bash_aliases exists (used later by the post-install alias script).
if [ -f ~/.bash_aliases ];
then
  echo ""
else
  touch ~/.bash_aliases
fi
if [ -d $installDirectory ];
then
  echo "$installDirectory available."
else
  mkdir $installDirectory && chmod 755 $installDirectory
  echo "$installDirectory created."
fi
if [ -d $portableAppsDirectory ];
then
  echo "$portableAppsDirectory available."
else
  mkdir $portableAppsDirectory && chmod 755 $portableAppsDirectory
  echo "$portableAppsDirectory created."
fi
#Uninstall if present
if [ -f $downloadFilePath ] && [ -f $installFilePath ];
then
  rm $installFilePath
fi
if [ -f $downloadFilePath ] && [ -d $targetDirectory ];
then
  rm -Rf $targetDirectory
  echo "$targetDirectory/ previous version deleted."
fi
#Wine install from download
# Copy the installer into the Wine C: drive and launch it (quiet mode -q),
# logging Wine output next to the target directory.
if [ -f $downloadFilePath ];
then
  cp $downloadFilePath $installFilePath
  chmod 755 $installFilePath
  mkdir $targetDirectory
  echo "$downloadFilePath new version used."
  echo "Please choose to install it into '$winTargetDirectory' directory to prepare alias ready."
  echo "After software installation through Wine, please launch '$postInstallAliasScript' script."
  wine start $winInstallFilePath > $installLogFilePath 2>&1
else
  echo "$downloadFilePath not available. Please download portable application before retry..."
fi
|
/*
Append.
Along with C library
Remarks:
Return the number of copied bytes.
*/
# define CBR
# include <conio.h>
# include <stdio.h>
# include <stdlib.h>
# include "../../../incl/config.h"
/* Append `appendant` into the CLI's current line buffer.
   Copies the base roll buffer into the current index buffer, copies
   `appendant` after it, then re-copies the roll buffer behind the appended
   text. Returns the byte count reported by cpy() for `appendant`, or 0 on
   a null argument.
   NOTE(review): CLI_INDEX / CLI_OFFSET / R() / cpy() come from config.h;
   their exact contracts are not visible here — confirm before changing. */
signed(__cdecl cli_append(signed char(*appendant),CLI_TYPEWRITER(*argp))) {
    /* **** DATA, BSS and STACK */
    auto signed char *b;
    auto signed i,r;
    auto signed short flag; /* NOTE(review): declared but never used */
    /* **** CODE/TEXT */
    if(!appendant) return(0x00);
    if(!argp) return(0x00);
    b = (*(CLI_INDEX+(R(cur,*argp))));
    r = cpy(*(CLI_OFFSET+(R(base,R(roll,*argp)))),b);
    if(!r) {
        /* empty or..
        printf("%s\n","<< Error at fn. cpy()");
        return(0x00);
        //*/
    }
    r = cpy(b,appendant);
    if(!r) {
        /* empty or..
        printf("%s\n","<< Error at fn. cpy()");
        return(0x00);
        //*/
    }
    i = (r);
    /* advance b past the appended bytes */
    while(r) {
        b++;
        --r;
    }
    r = cpy(b,*(CLI_OFFSET+(R(base,R(roll,*argp)))));
    if(!r) {
        /* empty or..
        printf("%s\n","<< Error at fn. cpy()");
        return(0x00);
        //*/
    }
    return(i);
}
|
#include <cmath>
#include "glass/Uniform"
#include "glass/common.h"
#include "glass/SpotLight"
#include "glass/samplerCube"
#include "glass/utils/transform.h"
#include "glass/utils/helper.h"
using namespace glass;
// Default light: 32-unit coverage, then derive the direction vector and
// the attenuation/shadow state from the initial members.
SpotLight::SpotLight()
{
    setCoverage(32);
    update_internal();
    update_mat();
}
// Recompute the facing vector from the spherical angles (__yaw, __pitch)
// and keep the visual shape model aligned with it.
void SpotLight::update_internal()
{
    __direction = vec3(-cos(__pitch)*sin(__yaw),
                        sin(__pitch),
                       -cos(__pitch)*cos(__yaw));
    shape.yawTo(__yaw);
    shape.pitchTo(__pitch);
    sync();
}
// Recompute the effective light radius by solving
//   Kq*r^2 + Kl*r + 1 = brightness*luma(color)/epsilon
// for r (luma weights 0.299/0.587/0.114), then — when shadows are enabled —
// rebuild the six cube-face view-projection matrices around the position.
// NOTE(review): the Rz/Ry/Rx combinations encode the cube-face
// orientations; verify against the samplerCube face convention.
void SpotLight::update_mat()
{
    double epsilon = 0.03;
    double Delta = Kl*Kl - 4*Kq*(1 - __brightness*dot(vec3(0.299, 0.587, 0.114), __color)/epsilon);
    __radius = 0.5 * (sqrt(Delta) - Kl) / Kq;
    if(!__using_shadow)
    {
        shape.moveTo(__position);
        sync();
        return;
    }
    mat4 projection = perspective(PI/2, 1, 1, __radius);
    mat4 view = translate(__position);
    __mat[0] = projection * Rz(PI) * Ry(-PI/2) * view;
    __mat[1] = projection * Rz(PI) * Ry( PI/2) * view;
    __mat[2] = projection * Rx( PI/2) * view;
    __mat[3] = projection * Rx(-PI/2) * view;
    __mat[4] = projection * Rz(PI) * Ry( PI ) * view;
    __mat[5] = projection * Rz(PI) * view;
    shape.moveTo(__position);
    sync();
}
// Current unit facing vector (derived from yaw/pitch).
vec3 SpotLight::direction()const
{
    return __direction;
}
// Point the light along new_direction (normalized). Yaw/pitch are derived
// back from the vector so later yaw()/pitch() reads stay consistent.
void SpotLight::setDirection(const vec3& new_direction)
{
    __direction = normalize(new_direction);
    __yaw = atan2(-__direction.x, -__direction.z);
    __pitch = atan(__direction.y/sqrt(__direction.x*__direction.x + __direction.z*__direction.z));
    shape.yawTo(__yaw);
    shape.pitchTo(__pitch);
    sync();
}
// Component-wise overload; delegates to the vec3 overload so the
// direction -> yaw/pitch derivation lives in exactly one place (the
// original duplicated that code verbatim).
void SpotLight::setDirection(float x, float y, float z)
{
    setDirection(vec3(x, y, z));
}
// Rotate left/right by dyaw radians.
void SpotLight::yaw(float dyaw)
{
    __yaw += dyaw;
    update_internal();
}
// Tilt up/down by dpitch radians; pitch is clamped just inside +/-PI/2 so
// the derived direction vector stays well defined.
void SpotLight::pitch(float dpitch)
{
    __pitch += dpitch;
    if(__pitch > PI/2-1E-6)
    {
        __pitch = PI/2-1E-6;
    }
    else if(__pitch < -PI/2+1E-6)
    {
        __pitch = -PI/2+1E-6;
    }
    update_internal();
}
float SpotLight::yaw()const
{
    return __yaw;
}
float SpotLight::pitch()const
{
    return __pitch;
}
// Absolute-angle variants of yaw()/pitch() above.
void SpotLight::yawTo(float _yaw)
{
    __yaw = _yaw;
    update_internal();
}
void SpotLight::pitchTo(float _pitch)
{
    __pitch = _pitch;
    if(__pitch > PI/2-1E-6)
    {
        __pitch = PI/2-1E-6;
    }
    else if(__pitch < -PI/2+1E-6)
    {
        __pitch = -PI/2+1E-6;
    }
    update_internal();
}
// Cube-face view-projection matrix for face i (0..5); only valid while
// shadow mapping is enabled.
// Throws glass::RuntimeError when shadows are off and glass::IndexError
// for an out-of-range face index.
mat4& SpotLight::mat(int i)
{
    if(!__using_shadow)
    {
        // Typo fix in the user-facing message: "avaiable" -> "available".
        throw glass::RuntimeError("using shadow is false, no mat available!");
    }
    if(i < 0 || i > 5)
    {
        throw glass::IndexError(0, 5, i);
    }
    return __mat[i];
}
// Toggle shadow mapping; derived matrices are rebuilt immediately.
void SpotLight::useShadow(bool flag)
{
    __using_shadow = flag;
    update_mat();
}
// Set the beam coverage (absolute value of d) and refit the linear and
// quadratic attenuation coefficients Kl/Kq.
// NOTE(review): the magic constants look like an empirical curve fit;
// their source is not visible here — verify before changing.
void SpotLight::setCoverage(float d)
{
    __coverage = fabs(d);
    Kl = 3.651720188286232 / pow(__coverage - 1.379181323137789, 0.956790970458513);
    Kq = 27.101525310782399 / pow(__coverage - 2.191989674193149, 1.727016118197271);
    update_mat();
}
float SpotLight::coverage()const
{
    return __coverage;
}
// Effective lighting radius, derived in update_mat().
float SpotLight::radius()const
{
    return __radius;
}
vec3 SpotLight::position()const
{
    return __position;
}
vec3 SpotLight::color()const
{
    return __color;
}
// Color feeds the radius computation, so matrices are refreshed.
void SpotLight::setColor(const vec3& _color)
{
    __color = _color;
    update_mat();
}
void SpotLight::setColor(float r, float g, float b)
{
    setColor(vec3(r, g, b));
}
float SpotLight::brightness()const
{
    return __brightness;
}
// Brightness also affects the radius; refresh matrices.
void SpotLight::setBrightness(float _brightness)
{
    __brightness = _brightness;
    update_mat();
}
// Position changes require rebuilding the shadow view matrices.
void SpotLight::setPosition(const vec3& v)
{
    __position = v;
    update_mat();
}
void SpotLight::setPosition(float x, float y, float z)
{
    __position = vec3(x, y, z);
    update_mat();
}
// Relative (move*) and absolute (move*To) translation helpers; each one
// refreshes the cached matrices via update_mat().
void SpotLight::move(float dx, float dy, float dz)
{
    __position += vec3(dx, dy, dz);
    update_mat();
}
void SpotLight::move(const vec3& v)
{
    __position += v;
    update_mat();
}
void SpotLight::moveTo(float x, float y, float z)
{
    __position = vec3(x, y, z);
    update_mat();
}
void SpotLight::moveTo(const vec3& v)
{
    __position = v;
    update_mat();
}
void SpotLight::moveX(float dx)
{
    __position.x += dx;
    update_mat();
}
void SpotLight::moveY(float dy)
{
    __position.y += dy;
    update_mat();
}
void SpotLight::moveZ(float dz)
{
    __position.z += dz;
    update_mat();
}
void SpotLight::moveXTo(float x)
{
    __position.x = x;
    update_mat();
}
void SpotLight::moveYTo(float y)
{
    __position.y = y;
    update_mat();
}
void SpotLight::moveZTo(float z)
{
    __position.z = z;
    update_mat();
}
// Replace the visual proxy model and align it with the light's pose.
void SpotLight::setShape(const Model& model)
{
    shape = model;
    shape.moveTo(__position);
    shape.yawTo(__yaw);
    shape.pitchTo(__pitch);
}
bool SpotLight::usingShadow()const
{
    return __using_shadow;
}
// Beam cut-off angle; changes are pushed via sync().
float SpotLight::cutoffAngle()const
{
    return __cutoff_angle;
}
void SpotLight::setCutoffAngle(float angle)
{
    __cutoff_angle = angle;
    sync(__cutoff_angle);
}
// Distance over which the beam edge is softened.
float SpotLight::softDistance()const
{
    return __soft_distance;
}
void SpotLight::setSoftDistance(float distance)
{
    __soft_distance = distance;
    sync(__soft_distance);
}
// On/off switch; only flips the flag, no matrix rebuild needed.
void SpotLight::open()
{
    is_open = true;
}
void SpotLight::close()
{
    is_open = false;
}
bool SpotLight::isOpen()const
{
    return is_open;
}
bool SpotLight::isClose()const
{
    return !is_open;
}
|
// Project "Dashboards" landing page: builds the header/breadcrumbs layout
// and lists one <dashboard-summary> per dashboard fetched from the REST API.
class DashboardPortal extends TatorPage {
  constructor() {
    super();
    // Spinner shown until the dashboard list arrives.
    this._loading = document.createElement("img");
    this._loading.setAttribute("class", "loading");
    this._loading.setAttribute("src", "/static/images/tator_loading.gif");
    this._shadow.appendChild(this._loading);
    //
    // Header
    //
    const header = document.createElement("div");
    this._headerDiv = this._header._shadow.querySelector("header");
    header.setAttribute("class", "annotation__header d-flex flex-items-center flex-justify-between px-6 f3");
    const user = this._header._shadow.querySelector("header-user");
    user.parentNode.insertBefore(header, user);
    const div = document.createElement("div");
    div.setAttribute("class", "d-flex flex-items-center");
    header.appendChild(div);
    this._breadcrumbs = document.createElement("analytics-breadcrumbs");
    div.appendChild(this._breadcrumbs);
    this._breadcrumbs.setAttribute("analytics-name", "Dashboards");
    //
    // Main section
    //
    const main = document.createElement("main");
    main.setAttribute("class", "layout-max py-4");
    this._shadow.appendChild(main);
    const title = document.createElement("div");
    title.setAttribute("class", "main__header d-flex flex-items-center flex-justify-between py-6 px-2");
    main.appendChild(title);
    const h1 = document.createElement("h1");
    h1.setAttribute("class", "h1");
    title.appendChild(h1);
    const h1Text = document.createTextNode("Dashboards");
    h1.appendChild(h1Text);
    // Container the dashboard summaries are appended into.
    this._dashboards = document.createElement("div");
    this._dashboards.setAttribute("class", "d-flex flex-column");
    main.appendChild(this._dashboards);
  }
  connectedCallback() {
    TatorPage.prototype.connectedCallback.call(this);
  }
  static get observedAttributes() {
    return ["project-name", "project-id"].concat(TatorPage.observedAttributes);
  }
  // Setting "project-id" triggers the dashboard fetch.
  attributeChangedCallback(name, oldValue, newValue) {
    TatorPage.prototype.attributeChangedCallback.call(this, name, oldValue, newValue);
    switch (name) {
      case "project-name":
        this._breadcrumbs.setAttribute("project-name", newValue);
        break;
      case "project-id":
        this._projectId = newValue;
        this._getDashboards();
        break;
    }
  }
  // GET /rest/Dashboards/:projectId, render one summary per dashboard,
  // then hide the loading spinner.
  _getDashboards() {
    fetch("/rest/Dashboards/" + this._projectId, {
      method: "GET",
      credentials: "same-origin",
      headers: {
        "X-CSRFToken": getCookie("csrftoken"),
        "Accept": "application/json",
        "Content-Type": "application/json"
      },
    })
    .then(response => response.json())
    .then(dashboards => {
      for (let dashboard of dashboards) {
        this._insertDashboardSummary(dashboard);
      }
      this._loading.style.display = "none";
    });
  }
  _insertDashboardSummary(dashboard) {
    const summary = document.createElement("dashboard-summary");
    summary.info = dashboard;
    this._dashboards.appendChild(summary);
  }
}
customElements.define("dashboard-portal", DashboardPortal);
|
import torch
from torch import nn
from models import ResNeXtBottleneck, DownBlock, Flatten
class Discriminator(nn.Module):
    """Convolutional discriminator: stacked DownBlock/ResNeXtBottleneck
    feature extractor followed by a single sigmoid logit."""

    def __init__(self, dim: int):
        super(Discriminator, self).__init__()
        half, one, two, four = dim // 2, dim * 1, dim * 2, dim * 4
        feature_layers = [
            DownBlock(3, half, 4),
            DownBlock(half, half, 3),
            DownBlock(half, one, 4),
            ResNeXtBottleneck(one, one, cardinality=4, dilate=1),
            DownBlock(one, one, 3),
            DownBlock(one, two, 4),
            ResNeXtBottleneck(two, two, cardinality=4, dilate=1),
            DownBlock(two, two, 3),
            DownBlock(two, four, 4),
            ResNeXtBottleneck(four, four, cardinality=4, dilate=1),
            Flatten(),
        ]
        self.main = nn.Sequential(*feature_layers)
        self.last = nn.Linear(256 * 8 * 8, 1, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, tensor: torch.Tensor) -> torch.Tensor:
        """ Feed forward method of Discriminator
        Args:
            tensor (torch.Tensor): 4D(BCHW) RGB image tensor
        Returns:
            torch.Tensor: [description] 2D(BU) sigmoid output tensor
        """
        features = self.main(tensor)
        logits = self.last(features)
        return self.sigmoid(logits)
|
# define the numeric vector
my_list = c(1, 2, 3, 4, 5)
# compute the mean of the vector
mean_list = mean(my_list)
# print the result
# Bug fix: "+" is not defined for character strings in R (the original
# stopped with "non-numeric argument to binary operator"); paste()
# concatenates and coerces the number to text.
print(paste("The mean of the list is:", mean_list))
|
<reponame>MiguelDelPinto/LCOM_TPs
#ifndef _LCOM_SPRITE_H_
#define _LCOM_SPRITE_H_
/** @defgroup sprite sprite
 * @{
 *
 * File for sprite type, holding functions and types to manage sprites
 */
/**
 * @brief struct Sprite
 *
 * @param x the x coordinate of the current position
 * @param y the y coordinate of the current position
 * @param xspeed the speed in the x axis
 * @param yspeed the speed in the y axis
 * @param img struct that stores the information about the image to display
 * @param map address of the allocated memory to where the image was read
 */
typedef struct {
  int x, y;
  int xspeed, yspeed;
  xpm_image_t img;
  uint8_t *map;
} Sprite;
/**
 * @brief Creates a sprite object
 *
 * @param xpm an xpm-like pixmap to display
 * @param x the x coordinate of the starting position
 * @param y the y coordinate of the starting position
 * @param xspeed the speed in the x axis
 * @param yspeed the speed in the y axis
 * @return Sprite*, created sprite. NULL when xpm is invalid
 */
Sprite* create_sprite(xpm_map_t xpm, int x, int y, int xspeed, int yspeed);
/**
 * @brief draws a sprite on the screen
 *
 * @param sp a pointer to the sprite to be drawn
 * @return int, 0 if successful, anything else if otherwise
 */
int draw_sprite(Sprite* sp);
/**
 * @brief draws a sprite on the screen with rotation
 *
 * @param sp a pointer to the sprite to be drawn
 * @param angle the current angle (integer) in degrees (from 0 to 359)
 * @return int, 0 if successful, anything else if otherwise
 */
int draw_sprite_with_rotation(Sprite* sp, uint16_t angle);
/**
 * @brief Clears the screen in the location of a normal sprite, painting the background in its place
 *
 * @param sp a pointer to the sprite to be cleared
 * @param background a pointer to the sprite to be drawn
 * @return int, 0 if successful, anything else if otherwise
 */
int clear_sprite_with_cover(Sprite* sp, Sprite* background);
/**
 * @brief Clears the screen in the location of a rotating sprite, painting the background in its place
 *
 * @param sp a pointer to the sprite to be cleared
 * @param angle angle of the sprite
 * @param background a pointer to the sprite to be drawn
 * @return int, 0 if successful, anything else if otherwise
 */
int clear_sprite_with_rotation(Sprite* sp, uint16_t angle, Sprite* background);
/**
 * @brief Updates the coordinates of a sprite
 *
 * @param sp a pointer to the sprite to be updated
 */
void update_sprite(Sprite* sp);
/**
 * @brief clears the screen in the location of the sprite, that is, paints it black
 *
 * @param sp a pointer to the sprite to be cleared
 */
void clear_sprite(Sprite* sp);
/**
 * @brief checks the collision between two sprites, regarding their size
 *
 * @param sp main sprite
 * @param obstacle obstacle's sprite
 * @return int 0 if there were no collisions, 1 if otherwise
 */
int check_collision_by_size(Sprite *sp, Sprite *obstacle);
/**
 * @brief checks the collision between two sprites, one of them being the ground (uses ground color)
 *
 * @param sp main sprite
 * @param background background sprite
 * @param forbidden_color the color that will determine the collision (ground color)
 * @return int 0 if there were no collisions, 1 if otherwise
 */
int check_collision_with_background(Sprite *sp, Sprite *background, uint32_t forbidden_color);
/**
 * @brief unallocates memory from the sprite
 *
 * @param sp pointer to the sprite to be destroyed
 */
void destroy_sprite(Sprite *sp);
/**
 * @brief converts an angle in degrees to radians
 *
 * @param degrees angle in degrees
 * @return double angle in radians
 */
double degrees_to_radians(double degrees);
/**
 * @}
 */
#endif /* _LCOM_SPRITE_H_ */
|
# Fall back to the pyfrc simulator's wpilib when the real one is absent.
try:
    import wpilib
except ImportError:
    from pyfrc import wpilib
class MyRobot(wpilib.SimpleRobot):
    """Teleop-only FRC robot: drives one CAN Jaguar from joystick 1's Y axis.

    NOTE(review): written against the old (pre-2015) wpilib API
    (SimpleRobot, GetWatchdog, CamelCase methods); it will not run on
    current robotpy releases.
    """
    # Unused state marker (kept for compatibility with older code paths).
    state = 1
    def __init__ (self):
        super().__init__()
        print("Matt the fantastic ultimate wonderful humble person")
        wpilib.SmartDashboard.init()
        #self.digitalInput=wpilib.DigitalInput(4)
        self.CANJaguar = wpilib.CANJaguar(1)
        self.gyro = wpilib.Gyro(1)
        self.joystick=wpilib.Joystick(1)
        self.joystick2=wpilib.Joystick(2)
        self.jaguar=wpilib.Jaguar(1)
        self.accelerometer=wpilib.ADXL345_I2C(1, wpilib.ADXL345_I2C.kRange_2G)
        self.solenoid=wpilib.Solenoid(7)
        self.solenoid2=wpilib.Solenoid(8)
        # PID gains (controller construction itself is commented out below).
        self.p=1
        self.i=0
        self.d=0
        wpilib.SmartDashboard.PutBoolean('Soleinoid 1', False)
        wpilib.SmartDashboard.PutBoolean('Soleinoid 2', False)
        #self.pid = wpilib.PIDController(self.p, self.i, self.d, self.gyro, self.jaguar)
        self.sensor = wpilib.AnalogChannel(5)
        # True while the optical sensor reads a ball present (see OpticalThingy).
        self.ballthere = False
        #self.jaguar2=wpilib.Jaguar(2)
        #self.jaguar3=wpilib.Jaguar(3)
        #self.jaguar4=wpilib.Jaguar(4)
        #self.drive = wpilib.RobotDrive(self.jaguar, self.jaguar2, self.jaguar3, self.jaguar4)#self.jaguar4=wpilib.Jaguar(4)
        #self.drive.SetSafetyEnabled(False)
    def OperatorControl(self):
        # Teleop loop: poll the joystick every 10 ms while enabled.
        #yself.pid.Enable()
        print("MyRobot::OperatorControl()")
        wpilib.GetWatchdog().SetEnabled(False)
        #dog = wpilib.GetWatchdog()
        #dog.setEnabled(True)
        #dog.SetExpiration(10)
        while self.IsOperatorControl() and self.IsEnabled():
            #dog.Feed()
            #self.drive.MecanumDrive_Cartesian(self.Joystick.GetY(), self.Joystick.GetX(), self.Joystick2.GetX(), 0)
            self.FromOperatorControl()
            wpilib.Wait(0.01)
    def FromOperatorControl(self):
        # Drive the CAN Jaguar directly from joystick 1's Y axis.
        self.CANJaguar.Set((self.joystick.GetY()))
    def PIDMove(self):
        # NOTE(review): self.pid is never constructed (commented out above);
        # calling this would raise AttributeError.
        self.pid.SetSetpoint(10)
    ''' This was the old, huge while loop.
    def OldWhileLoop(self):
        wpilib.SmartDashboard.PutNumber('GyroAngle', self.gyro.GetAngle())
        wpilib.SmartDashboard.PutNumber('the getVoltage', self.sensor.GetVoltage())
        wpilib.SmartDashboard.PutNumber('boolean ballthere', self.ballthere)
        wpilib.SmartDashboard.PutNumber('soleinoid 1', self.solenoid.Get())
        wpilib.SmartDashboard.PutNumber('soleinoid 2', self.solenoid2.Get())
        self.solenoid.Set(wpilib.SmartDashboard.GetBoolean('Soleinoid 1'))
        self.solenoid2.Set(wpilib.SmartDashboard.GetBoolean('Soleinoid 2'))
        self.PIDMove()
        self.OpticalThingy()
        axis=self.accelerometer.GetAccelerations()
        wpilib.SmartDashboard.PutNumber('Acceleration Axis X', axis.XAxis)
        wpilib.SmartDashboard.PutNumber('Acceleration Axis Y', axis.YAxis)
        wpilib.SmartDashboard.PutNumber('Acceleration Axis Z', axis.ZAxis)
    '''
    def OpticalThingy(self):
        # Threshold the analog sensor at 1 V; exactly 1 V leaves the flag
        # unchanged.
        if self.sensor.GetVoltage()>1:
            self.ballthere=True
        if self.sensor.GetVoltage()<1:
            self.ballthere=False
def run():
    # Entry point used by the pyfrc test harness.
    robot = MyRobot()
    robot.StartCompetition()
    return robot
if __name__ == '__main__':
    # NOTE(review): newer pyfrc expects wpilib.run(MyRobot) — confirm this
    # entry point against the pyfrc version in use.
    wpilib.run()
|
# Run only the tasks tagged "init" from the kubeadm cluster playbook against
# the hosts listed in the local "inventory" file.
ansible-playbook -i inventory create-kubeadm-cluster.yml --tags=init
|
<gh_stars>1-10
// Generated by script, don't edit it please.
// Wraps the raw "Wait" SVG glyph from @rsuite/icon-font into an accessible
// icon component.
import createSvgIcon from '../createSvgIcon';
import WaitSvg from '@rsuite/icon-font/lib/flow/Wait';
const Wait = createSvgIcon({
  as: WaitSvg,
  ariaLabel: 'wait',
  category: 'flow',
  displayName: 'Wait'
});
export default Wait;
|
#!/bin/bash
## PREREQUISITES:
### install somehow bundletool
# Builds a signed .apks archive from the logger release bundle using the
# DMSL Play keystore, then extracts the APK set for the device described
# in device.json into ./apks/.
DIR_KEYSTORE=~/src/dmsl/play-keystores
KS=$DIR_KEYSTORE/anyplace.jks
KS_PASS=$DIR_KEYSTORE/keystore.pwd
# Key password equals the keystore password file.
KEY_PASS=$KS_PASS
IN=logger-release.aab
APKS=logger.apks
bundletool build-apks \
    --bundle=$IN --output=$APKS \
    --ks=$KS --ks-pass=file:$KS_PASS \
    --ks-key-alias=logger --key-pass=file:$KEY_PASS
mkdir -p apks
bundletool extract-apks \
    --apks=$APKS \
    --output-dir=./apks/ \
    --device-spec=device.json
|
#!/usr/bin/env bash
#
# Copyright (c) 2019-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# CI environment for the native macOS build (x86_64-apple-darwin16):
# GUI enabled, reduced exports, warnings-as-errors, no depends cache.
export LC_ALL=C.UTF-8
export HOST=x86_64-apple-darwin16
export PIP_PACKAGES="zmq"
export GOAL="install"
export DANECOIN_CONFIG="--with-gui --enable-reduce-exports --enable-werror --with-boost-process"
export CI_OS_NAME="macos"
# Build against system libraries instead of the depends tree.
export NO_DEPENDS=1
export OSX_SDK=""
export CCACHE_SIZE=300M
export RUN_SECURITY_TESTS="true"
|
/*jshint -W079*/
var Promise = require('bluebird')
/*jshint +W079*/
var _ = require('lodash')
var fs = require('fs')
var path = require('path')
var rjConfig = require('../config/config').rjsConfig
/**
 * Expand wildcard module paths: for every entry like "dir/*", list the
 * files under basePath/dir and return a map from the wildcard key to the
 * concrete module paths (directory prefix kept, ".js" suffix stripped).
 * NOTE(review): relies on fs.readdirAsync, i.e. fs must have been
 * promisified (bluebird) elsewhere — confirm before reuse.
 */
var detectWildcards = Promise.coroutine(function* (basePath, keys) {
  // Read every file under each wildcard directory
  var fileList = yield Promise.map(keys, function(path) {
    return fs.readdirAsync(basePath + '/' + path.replace('*', ''))
  })
  var ret = _.zipObject(keys, fileList)
  _.each(ret, function(val, key) {
    // Prepend the directory name
    ret[key] = val.map(function(file) {
      // The ".js" suffix must be stripped here or module resolution breaks
      return key.replace('*', '') + file.replace('.js', '')
    })
  })
  return ret
})
/**
 * Replace every wildcard entry in the require.js build config
 * (top-level insertRequire plus each module's include/insertRequire)
 * with the concrete file lists found on disk.
 */
var replaceWildcards = Promise.coroutine(function* () {
  // First collect all the (config object, field) pairs that may hold wildcards
  var list = [
    [rjConfig, 'insertRequire']
  ]
  var wildcards = []
  _.each(rjConfig.modules, function(mod) {
    list.push([mod, 'include'])
    list.push([mod, 'insertRequire'])
  })
  _.each(list, function(item) {
    var mod = item[0]
    var field = item[1]
    if (!Array.isArray(mod[field])) return
    mod[field].forEach(function(path) {
      if (path.indexOf('/*') > -1) {
        wildcards.push(path)
      }
    })
  })
  var jsPath = path.resolve(rjConfig.appDir + '/' + rjConfig.baseUrl + '/')
  console.log('prebuild: 当前JS目录为' + jsPath)
  var pathMap = yield detectWildcards(jsPath, wildcards)
  console.log('prebuild: 通配符配置映射表')
  console.log(pathMap)
  // Walk the config again and splice the real file lists over each
  // wildcard entry in place
  _.each(list, function(item) {
    var mod = item[0]
    var field = item[1]
    if (!Array.isArray(mod[field])) return
    _.each(wildcards, function(path) {
      var index = mod[field].indexOf(path)
      if (index > -1) {
        [].splice.apply(mod[field], _.flatten([index, 1, pathMap[path]]))
      }
    })
  })
})
// Entry point: resolve all wildcard config entries before the r.js build.
module.exports = Promise.coroutine(function* () {
  yield replaceWildcards()
})
|
/**
 * Split the client's pending tweets at `timestampMs`: tweets stamped at or
 * before the cutoff are returned for publishing, the rest stay queued.
 * Mutates `client` (tweets, tweetQueue, lastId).
 *
 * @param {Object} client      holder with a `tweets` array
 * @param {number} timestampMs cutoff timestamp in milliseconds
 * @returns {Array} tweets due for publishing
 */
function publishTweets(client, timestampMs) {
  const tweetsToPublish = [];
  const tweetQueue = [];
  // Guard: the original read tweets[0].id unconditionally, which throws
  // a TypeError whenever the tweet list is empty.
  if (client.tweets.length > 0) {
    client.lastId = client.tweets[0].id;
  }
  for (let idx = 0; idx < client.tweets.length; idx++) {
    const tweet = client.tweets[idx];
    if (tweet.timestamp_ms <= timestampMs) {
      tweetsToPublish.push(tweet);
    } else {
      tweetQueue.push(tweet);
    }
  }
  // tweets and tweetQueue intentionally alias the same array, as in the
  // original implementation.
  client.tweets = tweetQueue;
  client.tweetQueue = tweetQueue;
  return tweetsToPublish;
}
|
package main
// file generated by
// github.com/mh-cbon/http-clienter
// do not edit
import (
"errors"
httper "github.com/mh-cbon/httper/lib"
"net/http"
)
// HTTPClientControllerRPC is an http-clienter of *Controller.
// Controller of some resources.
//
// Base is the base URL prepended to every generated request path.
type HTTPClientControllerRPC struct {
	Base string
}
// NewHTTPClientControllerRPC constructs an http-clienter of *Controller
func NewHTTPClientControllerRPC() *HTTPClientControllerRPC {
	return &HTTPClientControllerRPC{}
}
// GetByID constructs a request to GetByID
// TODO: stub emitted by http-clienter; request construction is not
// implemented yet and always returns an error.
func (t HTTPClientControllerRPC) GetByID(urlID int) (*http.Request, error) {
	return nil, errors.New("todo")
}
// UpdateByID constructs a request to UpdateByID
// TODO: stub emitted by http-clienter; request construction is not
// implemented yet and always returns an error.
func (t HTTPClientControllerRPC) UpdateByID(urlID int, reqBody *Tomate) (*http.Request, error) {
	return nil, errors.New("todo")
}
// DeleteByID constructs a request to DeleteByID
// TODO: stub emitted by http-clienter; request construction is not
// implemented yet and always returns an error.
func (t HTTPClientControllerRPC) DeleteByID(REQid int) (*http.Request, error) {
	return nil, errors.New("todo")
}
// TestVars1 constructs a request to TestVars1
// TODO: stub emitted by http-clienter; request construction is not
// implemented yet and always returns an error.
func (t HTTPClientControllerRPC) TestVars1(w http.ResponseWriter, r *http.Request) (*http.Request, error) {
	return nil, errors.New("todo")
}
// TestCookier constructs a request to TestCookier
// TODO: stub emitted by http-clienter; request construction is not
// implemented yet and always returns an error.
func (t HTTPClientControllerRPC) TestCookier(c httper.Cookier) (*http.Request, error) {
	return nil, errors.New("todo")
}
// TestSessionner constructs a request to TestSessionner
// TODO: stub emitted by http-clienter; request construction is not
// implemented yet and always returns an error.
func (t HTTPClientControllerRPC) TestSessionner(s httper.Sessionner) (*http.Request, error) {
	return nil, errors.New("todo")
}
// TestRPCer constructs a request to TestRPCer
// TODO: stub emitted by http-clienter; request construction is not
// implemented yet and always returns an error.
func (t HTTPClientControllerRPC) TestRPCer(id int) (*http.Request, error) {
	return nil, errors.New("todo")
}
|
#!/usr/bin/env bash
# Stop and remove the IotFrontend container, then list what is running.
container=IotFrontend
echo "stopping ${container}"
docker stop "${container}"
echo "removing ${container}"
docker rm "${container}"
echo "list active docker containers"
docker ps
|
// Money-manager page wiring: username banner, two-state colour theme,
// and expense/income recording with a shared progress bar.
$(() => {
    var totalexpenses = 0
    var totalincome = 0
    var theme = 0
    //var balance = 0

    // Personalise the page title with the entered username.
    $('#usernamebtn').on('click', () => {
        console.log('按到了喔')
        var name = $('#username').val()
        $('#title').text(name + "'s Money Manager")
    })

    // Change the theme (alternates between blue and yellow palettes).
    $('#colorbtn').on('click', () => {
        if (theme == 0) {
            // Blue palette
            $('#bgcolor').css({ "background-color": "#d6e5fa" })
            $('#blncard').css({ "background-color": "#d6e5fa" })
            $('#blncard').css({ "color": "#3c6f9c" })
            //$('#bgcolor').css({ "background-color": "#ffc6c7" })
            $('#title1').css({ "color": "#3c6f9c" })
            $('#title').css({ "color": "#3c6f9c" })
            theme = 1
        } else if (theme == 1) {
            // Yellow palette
            $('#bgcolor').css({ "background-color": "#ffd369" })
            $('#blncard').css({ "background-color": "#ffd369" })
            $('#blncard').css({ "color": "black" })
            $('#title1').css({ "color": "black" })
            $('#title').css({ "color": "black" })
            theme = 0
        }
        // Flip button variants so they stay readable on either theme.
        $('#gsbtn').toggleClass("btn-dark")
        $('#usbtn').toggleClass("btn-dark")
        $('#hebtn').toggleClass("btn-dark")
        $('#colorbtn').toggleClass("btn-outline-dark")
        $('#usernamebtn').toggleClass("btn-outline-dark")
    })

    // Map each category <select> value to the list element it appends to.
    // String keys replace the original numeric literals 01..09 (08/09 are
    // legacy non-octal decimals, only valid in sloppy mode); behaviour is
    // unchanged because .val() returns strings.
    var expenseTargets = {
        '01': '#food', '02': '#bills', '03': '#tax',
        '04': '#transportation', '05': '#entertainment', '06': '#sport',
        '07': '#pet', '08': '#health', '09': '#others'
    }
    var incomeTargets = {
        '11': '#salary', '12': '#awards', '13': '#grants',
        '14': '#refunds', '15': '#investments', '16': '#lottery',
        '17': '#others'
    }

    // Record an expense under the selected category.
    $('#exbtn').on('click', () => {
        console.log('按到了喔')
        // Inputs
        var addtotalexpenses = $('#exmoney').val()
        var expdate = $('#exdate').val()
        var exTarget = expenseTargets[$('#excategory').val()]
        if (exTarget) {
            $(exTarget).append("$ " + addtotalexpenses + " " + "/" + " " + expdate + "<br>")
        }
        addtotalexpenses = Number(addtotalexpenses)
        totalexpenses += addtotalexpenses
        // Guard the division: with both totals still 0 the original
        // rendered "NaN %" in the progress bar.
        var combined = totalexpenses + totalincome
        var exprogress = combined > 0 ? totalexpenses / combined * 100 : 0
        $('#exprog').css({ "width": exprogress + "%" })
        $('#exprog').html("<div>" + exprogress + " %</div>")
        $('#inprog').css({ "width": (100 - exprogress) + "%" })
        $('#inprog').html("<div>" + (100 - exprogress) + " %</div>")
        $('#bln').html("<p>Balance: " + (totalincome - totalexpenses) + "</p>")
        // Outputs / reset the form
        $('#extotal').val(totalexpenses)
        $('#exmoney').val("")
        $('#excategory').val("00")
        // Fix: the original called $('exdate').val() -- missing '#' and no
        // argument, a silent no-op.  Clear the date like the other inputs.
        $('#exdate').val("")
    })

    // Record an income entry under the selected category.
    $('#inbtn').on('click', () => {
        console.log('按到了喔')
        // Inputs
        var addtotalincome = $('#inmoney').val()
        var incdate = $('#indate').val()
        var inTarget = incomeTargets[$('#incategory').val()]
        if (inTarget) {
            $(inTarget).append("$ " + addtotalincome + " " + "/" + " " + incdate + "<br>")
        }
        addtotalincome = Number(addtotalincome)
        totalincome += addtotalincome
        // Same NaN guard as the expense handler.
        var combined = totalincome + totalexpenses
        var inprogress = combined > 0 ? totalincome / combined * 100 : 0
        $('#inprog').css({ "width": inprogress + "%" })
        $('#inprog').html("<div>" + inprogress + " %</div>")
        $('#exprog').css({ "width": (100 - inprogress) + "%" })
        $('#exprog').html("<div>" + (100 - inprogress) + " %</div>")
        $('#bln').html("<p>Balance: " + (totalincome - totalexpenses) + "</p>")
        // Outputs / reset the form
        $('#intotal').val(totalincome)
        $('#inmoney').val("")
        $('#incategory').val("00")
    })
})
|
<filename>SRC/UTILS/LFDS/liblfds6.1.1/liblfds611/src/lfds611_abstraction/lfds611_abstraction_cas.c
/*
* Copyright (c) 2015, EURECOM (www.eurecom.fr)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* The views and conclusions contained in the software and documentation are those
* of the authors and should not be interpreted as representing official policies,
* either expressed or implied, of the FreeBSD Project.
*/
#include "lfds611_abstraction_internal_body.h"
/****************************************************************************/
#if (defined _WIN32 && defined _MSC_VER)
/* TRD : 64 bit and 32 bit Windows (user-mode or kernel) on any CPU with the Microsoft C compiler
_WIN32 indicates 64-bit or 32-bit Windows
_MSC_VER indicates Microsoft C compiler
*/

/* Atomic compare-and-swap: if *destination == compare, store exchange;
   in all cases return the value observed at *destination beforehand. */
static LFDS611_INLINE lfds611_atom_t
lfds611_abstraction_cas (
volatile lfds611_atom_t * destination,
lfds611_atom_t exchange,
lfds611_atom_t compare)
{
lfds611_atom_t rv;
assert (destination != NULL);
// TRD : exchange can be any value in its range
// TRD : compare can be any value in its range
/* Compiler barriers stop the compiler reordering accesses around the
   CAS; the interlocked intrinsic supplies the hardware ordering. */
LFDS611_BARRIER_COMPILER_FULL;
rv = (lfds611_atom_t) _InterlockedCompareExchangePointer ((void *volatile *)destination, (void *)exchange, (void *)compare);
LFDS611_BARRIER_COMPILER_FULL;
return (rv);
}
#endif
/****************************************************************************/
#if (defined __GNUC__ && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)))
/* TRD : any OS on any CPU with GCC 4.1.0 or better
GCC 4.1.0 introduced the __sync_*() atomic intrinsics
__GNUC__ / __GNUC_MINOR__ indicate GCC and which version
Fix: the original test (__GNUC__ >= 4 && __GNUC_MINOR__ >= 1 && __GNUC_PATCHLEVEL__ >= 0)
wrongly rejected compilers such as GCC 5.0/6.0, whose minor version is 0.
*/

/* Atomic compare-and-swap: if *destination == compare, store exchange;
   in all cases return the value observed at *destination beforehand. */
static LFDS611_INLINE lfds611_atom_t
lfds611_abstraction_cas (
volatile lfds611_atom_t * destination,
lfds611_atom_t exchange,
lfds611_atom_t compare)
{
lfds611_atom_t rv;
assert (destination != NULL);
// TRD : exchange can be any value in its range
// TRD : compare can be any value in its range
// TRD : note the different argument order for the GCC instrinsic to the MSVC instrinsic
LFDS611_BARRIER_COMPILER_FULL;
rv = (lfds611_atom_t) __sync_val_compare_and_swap (destination, compare, exchange);
LFDS611_BARRIER_COMPILER_FULL;
return (rv);
}
#endif
|
#!/bin/bash
# Run YOLOv2-tiny detection over every .jpg frame under image_path and
# copy the annotated prediction image plus its detection details file
# into result_path.
image_path="/home/lurps/ba_schraven/datasets/obj_detection_frames"
result_path="/home/lurps/ba_schraven/results/yolo/obj_detection"
WD="/home/lurps/catkin_ws/src/obj_detection/darknet"
cmd="./darknet detector test custom/OfficeHomeDataset_smallv2.data custom/OfficeHomeDataset_smallv2/yolov2-tiny-deploy.cfg custom/OfficeHomeDataset_smallv2/yolov2-tiny_190000.weights"
# Quote the -name pattern: unquoted *.jpg would be glob-expanded by the
# shell against the current directory before find ever sees it.
find "$image_path" -name '*.jpg' | while read -r line; do
    file_name=$(basename "$line")
    # Suffix replacement instead of sed 's/.jpg/.txt/g', which replaced
    # every "<any-char>jpg" occurrence anywhere in the name.
    result_file=${file_name%.jpg}.txt
    result="$result_path/$result_file"
    cd "$WD" || exit 1
    pwd
    echo "$result_file"
    # $cmd is intentionally unquoted: it holds a command plus arguments.
    $cmd "$line"
    cp -v "$WD/predictions.jpg" "$result_path/$file_name"
    cp -v "$WD/prediction_details.txt" "$result"
done
|
const net = require('net');
const pg = require('pg');
// PORT IN WHICH THE SERVER IS LISTENING
const PORT = 3000;
// DATABASE CONFIGURATION
// NOTE(review): credentials are hard-coded placeholders -- consider
// loading them from environment variables before deploying.
const config = {
user: 'username',
password: 'password',
database: 'mydb',
host: 'localhost',
port: 5432
};
let pool;

/**
 * Create the shared pg connection pool and verify connectivity once.
 */
const initializePool = () => {
  // CREATE A CONNECTION POOL
  pool = new pg.Pool(config);
  // Verify we can reach the database, then release the client back to
  // the pool.  The original never invoked done(), permanently leaking
  // one pooled client for the lifetime of the process.
  pool.connect((err, client, done) => {
    if (err) throw err;
    console.log('Connected to the postgres server!');
    done();
  });
};
// tcp server: each incoming chunk is expected to be one JSON document
// describing the row to insert.
const server = net.createServer((socket) => {
  console.log('Incoming connection!');
  socket.on('data', (data) => {
    // PARSE THE INCOMING DATA -- guard against malformed JSON; an
    // uncaught throw inside this callback would crash the whole server.
    let jsonData;
    try {
      jsonData = JSON.parse(data);
    } catch (parseErr) {
      console.error('Discarding malformed payload:', parseErr.message);
      return;
    }
    // INSERT INTO THE DATABASE (parameterized query -- values are never
    // interpolated into the SQL string).
    pool.query(`INSERT INTO mytable (name, age) VALUES ($1, $2) RETURNING *`, [jsonData.name, jsonData.age], (err, res) => {
      // Log instead of throwing: a throw in an async callback is fatal.
      if (err) {
        console.error('Insert failed:', err);
        return;
      }
      console.log(`Data inserted with ID: ${res.rows[0].id}`);
    });
  });
});
// START LISTENING FOR CONNECTIONS
// The pool is created once the listener is actually bound.
const onListening = () => {
  initializePool();
  console.log(`Server listening for connections on port ${PORT}`);
};
server.listen({ port: PORT }, onListening);
|
import { COLORS, TRANSPARENT_BLACK } from '../colors';
// Snapshot-pin the exported palette so accidental edits are flagged.
describe('COLORS', () => {
  test('should have expected values', () => {
    expect(COLORS).toMatchSnapshot();
  });
});
// Snapshot-pin the transparent-black constant as well.
describe('TRANSPARENT_BLACK', () => {
  test('should have expected values', () => {
    expect(TRANSPARENT_BLACK).toMatchSnapshot();
  });
});
|
# Exercise the arcyon query command 100 times, discarding its output;
# the loop index $i is intentionally unused.
for i in {1..100}; do
../../bin/arcyon query > /dev/null
done
# -----------------------------------------------------------------------------
# Copyright (C) 2013-2014 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
|
#!/usr/bin/env bats
# Load shared bats helpers (provides check_runtime and docker_run).
load helpers
# Fully-qualified image reference under test, assembled from CI env vars.
image="${IMAGE_NAME}:${CUDA_VERSION}-devel-${OS}${IMAGE_TAG_SUFFIX}"
# Runs before every test case; check_runtime is expected to abort the
# test when the required container runtime is unavailable (defined in
# the loaded helpers -- confirm there).
function setup() {
check_runtime
}
# Verify the container reports the machine architecture expected for ${ARCH}.
@test "check_architecture" {
narch=${ARCH}
# Docker calls it "arm64" while uname -m reports "aarch64"; normalise.
if [[ ${ARCH} == "arm64" ]]; then
narch="aarch64"
fi
docker pull ${image}
# docker_run is presumed to wrap bats' `run`, populating $status -- the
# assertion below relies on that (see helpers).
docker_run --rm --gpus 0 ${image} bash -c "[[ \$(uname -m) == ${narch} ]] || false"
[ "$status" -eq 0 ]
}
|
import requests
import re
def fetch_emails(url):
    """Download ``url`` and print every e-mail address found in its body.

    Args:
        url: Address of the page to scan.

    Returns:
        list[str]: The matched addresses (also printed one per line,
        preserving the original behaviour).
    """
    page = requests.get(url)
    # Case-insensitive match: the original lowercase-only character
    # classes silently skipped Mixed-Case addresses.
    emails = re.findall(
        r"[a-z0-9\.\-+_]+@[a-z0-9\.\-+_]+\.[a-z]+", page.text, re.IGNORECASE
    )
    for email in emails:
        print(email)
    return emails
|
<reponame>brunohaveroth/leads2b-project
/**
* 400 (Bad Request) Response
*
* Usage:
* return res.badRequest();
* return res.badRequest(data);
*
* @param {String|Object} data
**/
module.exports = function sendBadRequest(data) {
return this.res.status(400).send(data);
};
|
#!/bin/bash
# Run a single dieharder randomness test: -d selects the test id,
# -g the generator id, and -S fixes the seed so the run is reproducible.
dieharder -d 3 -g 403 -S 994591671
|
<filename>src/pcam5c/gpio.cpp
/*
* MIT License
*
* Copyright (c) 2021 <NAME>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "gpio.hpp"
#include <boost/filesystem.hpp>
#include <boost/program_options.hpp>
#include <boost/system/system_error.hpp>
#include <filesystem>
#include <iostream>
#include <optional>
// RAII cleanup: unexport the sysfs pin on destruction when that was
// requested at construction time.
gpio::~gpio()
{
    if ( auto_unexport_ )
        unexport();
}
// Export sysfs GPIO `num`; when `unexport` is true the pin is
// unexported again in the destructor.
// NOTE(review): the result of __export() is ignored, so a failed export
// leaves path_ empty -- confirm callers tolerate read()/operator<<
// failing afterwards.
gpio::gpio( uint32_t num, bool unexport ) : num_( num )
                                          , auto_unexport_( unexport )
{
    __export( num );
}
/*
 * Export GPIO `num` via /sys/class/gpio and configure it as an output.
 * On success path_ is set to the pin's "value" file and true is
 * returned; on failure path_ is left unchanged and false is returned.
 */
bool
gpio::__export( uint32_t num )
{
    boost::filesystem::path dir = std::string( "/sys/class/gpio/gpio" ) + std::to_string( num );
    boost::system::error_code ec;
    auto path = dir / "value";

    if ( boost::filesystem::exists( path, ec ) ) {
        // Pin is already exported; just record its value file.
        path_ = path.string();
        return true;
    }

    if ( !boost::filesystem::exists( dir, ec ) ) {
        // Ask the kernel to export the pin.  Fix: write the `num`
        // parameter, not the member num_ -- the original only worked
        // because the constructor happens to pass num_ as num.
        std::ofstream of ( "/sys/class/gpio/export" );
        of << num;
    }
    if ( boost::filesystem::exists( dir, ec ) ) {
        auto direction = dir / "direction";
        if ( boost::filesystem::exists( direction, ec ) ) {
            std::ofstream of ( direction );
            of << "out";
        }
        if ( boost::filesystem::exists( path, ec ) ) {
            path_ = path.string();
            return true;
        }
    }
    return false;
}
bool
gpio::unexport()
{
boost::system::error_code ec;
boost::filesystem::path dir = std::string( "/sys/class/gpio/gpio" ) + std::to_string( num_ );
if ( boost::filesystem::exists( dir, ec ) ) {
std::ofstream of ( "/sys/class/gpio/unexport" );
of << num_;
}
return !boost::filesystem::exists( dir, ec );
}
/*
 * Write a logic level to the GPIO value file: "1" for true, "0" for
 * false.  Returns false when the file could not be opened.
 */
bool
gpio::operator << ( bool flag )
{
    std::ofstream outf( path_ );
    if ( !outf )
        return false;
    outf << ( flag ? "1" : "0" ) << std::endl;
    return true;
}
/*
 * Read the first character of the GPIO value file.
 * Returns the digit value for characters >= '0', the raw character
 * code for anything below, and -1 when the file cannot be opened or
 * yields no character.
 */
int
gpio::read()
{
    auto inf = std::ifstream( path_ );
    if ( inf ) {
        char value{};
        // Fix: check extraction success explicitly -- the original used
        // `value` uninitialized when the stream opened but the read
        // itself failed (empty file), which is undefined behaviour.
        if ( inf >> value ) {
            if ( value >= '0' )
                return value - '0';
            return value;
        }
    }
    return -1;
}
#if 0
int
main( int argc, char **argv )
{
namespace po = boost::program_options;
po::variables_map vm;
po::options_description description( argv[ 0 ] );
{
description.add_options()
( "help,h", "Display this help message" )
( "gpio,d", po::value< uint32_t >()->default_value(960), "gpio number" )
( "replicates,r", po::value< uint32_t >(), "toggle replicates" )
( "set", po::value< bool >(), "gpio set to value[0|1]" )
( "read", "read value" )
;
po::positional_options_description p;
p.add( "args", -1 );
po::store( po::command_line_parser( argc, argv ).options( description ).positional(p).run(), vm );
po::notify(vm);
}
if ( vm.count( "help" ) ) {
std::cerr << description;
return 0;
}
uint32_t num = vm[ "gpio" ].as< uint32_t >();
boost::filesystem::path dir = std::string( "/sys/class/gpio/gpio" ) + std::to_string( num );
boost::system::error_code ec;
if ( !boost::filesystem::exists( dir, ec ) ) {
std::ofstream of ( "/sys/class/gpio/export" );
of << num;
}
boost::filesystem::path direction = dir / "direction";
if ( boost::filesystem::exists( direction, ec ) ) {
std::ofstream of ( direction );
of << "out";
}
boost::filesystem::path value = dir / "value";
gpio_pin gpio( num, value.string().c_str() );
bool readback = vm.count( "read" );
bool flag( false );
if ( vm.count( "set" ) ) {
flag = vm[ "set" ].as< bool >();
gpio << flag;
}
if ( readback ) {
int f = gpio.read();
if ( f >= '0' && f < '1' )
std::cout << static_cast< char >(f) << std::endl;
else
std::cout << value << " = " << f << std::endl;
}
if ( vm.count( "replicates" ) ) {
auto replicates = vm[ "replicates" ].as< uint32_t >();
while ( replicates-- ) {
gpio << flag;
flag = !flag;
}
}
return 0;
}
#endif
|
// Paint the element's background red whenever the pointer enters it.
$("element").on("mouseover", function () {
    $(this).css("background-color", "red");
});
|
# Packaging script for the connectome_atrophy toolbox.
from setuptools import setup, find_packages

# NOTE(review): maintainer / maintainer_email still hold template
# placeholders (<NAME>, <EMAIL>); fill these in before publishing.
setup(
    name='connectome_atrophy',
    version='0.1',
    description='A toolbox for mapping brain image analysis to a connectome',
    license='Apache License',
    maintainer='<NAME>, <NAME>, <NAME>',
    maintainer_email='<EMAIL>; <EMAIL>; <EMAIL>',
    include_package_data=True,
    # Ship only the Main package and its subpackages.
    packages = find_packages(include=('Main', 'Main.*')),
    install_requires=[
        'nibabel',
        'numpy',
        'pandas',
        'nilearn',
        'matplotlib'
    ],
)
|
import requests
def toggle_telemetry(domain, token, telemetry=True, timeout=30):
    """Enable or disable the tenant's ``enable_telemetry`` flag.

    Sends a PATCH to ``/api/v2/tenants/settings`` on the given domain.

    Args:
        domain: Tenant domain, e.g. ``"example.example.com"``.
        token: Bearer token authorized to update tenant settings.
        telemetry: Desired value of the ``enable_telemetry`` flag.
        timeout: Seconds to wait for the HTTP round-trip.  New, defaulted
            parameter -- the original request had no timeout and could
            hang forever.

    Returns:
        requests.Response: The API response, for callers that want it.

    Raises:
        Exception: If the API does not answer with a 2xx status.
    """
    headers = {
        'Authorization': f'Bearer {token}',
        'Content-Type': 'application/json'
    }
    url = f'https://{domain}/api/v2/tenants/settings'
    data = {
        'flags': {
            'enable_telemetry': telemetry
        }
    }
    response = requests.patch(url, headers=headers, json=data, timeout=timeout)
    # Accept any 2xx success: the original `!= 200` mis-reported e.g. a
    # 204 No Content as a failure.
    if not response.ok:
        raise Exception(f'Failed to toggle telemetry. Status code: {response.status_code}, Response: {response.text}')
    return response
|
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: edge.proto
package com.ciat.bim.server.edge.gen;
/**
* Protobuf type {@code edge.ConnectRequestMsg}
*/
public final class ConnectRequestMsg extends
com.google.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:edge.ConnectRequestMsg)
ConnectRequestMsgOrBuilder {
private static final long serialVersionUID = 0L;
// Use ConnectRequestMsg.newBuilder() to construct.
private ConnectRequestMsg(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private ConnectRequestMsg() {
edgeRoutingKey_ = "";
edgeSecret_ = "";
}
@Override
@SuppressWarnings({"unused"})
protected Object newInstance(
UnusedPrivateParameter unused) {
return new ConnectRequestMsg();
}
@Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private ConnectRequestMsg(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new NullPointerException();
}
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10: {
String s = input.readStringRequireUtf8();
edgeRoutingKey_ = s;
break;
}
case 18: {
String s = input.readStringRequireUtf8();
edgeSecret_ = s;
break;
}
default: {
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return EdgeProtos.internal_static_edge_ConnectRequestMsg_descriptor;
}
@Override
protected FieldAccessorTable
internalGetFieldAccessorTable() {
return EdgeProtos.internal_static_edge_ConnectRequestMsg_fieldAccessorTable
.ensureFieldAccessorsInitialized(
ConnectRequestMsg.class, Builder.class);
}
public static final int EDGEROUTINGKEY_FIELD_NUMBER = 1;
private volatile Object edgeRoutingKey_;
/**
* <code>string edgeRoutingKey = 1;</code>
* @return The edgeRoutingKey.
*/
@Override
public String getEdgeRoutingKey() {
Object ref = edgeRoutingKey_;
if (ref instanceof String) {
return (String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
String s = bs.toStringUtf8();
edgeRoutingKey_ = s;
return s;
}
}
/**
* <code>string edgeRoutingKey = 1;</code>
* @return The bytes for edgeRoutingKey.
*/
@Override
public com.google.protobuf.ByteString
getEdgeRoutingKeyBytes() {
Object ref = edgeRoutingKey_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(String) ref);
edgeRoutingKey_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
public static final int EDGESECRET_FIELD_NUMBER = 2;
private volatile Object edgeSecret_;
/**
* <code>string edgeSecret = 2;</code>
* @return The edgeSecret.
*/
@Override
public String getEdgeSecret() {
Object ref = edgeSecret_;
if (ref instanceof String) {
return (String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
String s = bs.toStringUtf8();
edgeSecret_ = s;
return s;
}
}
/**
* <code>string edgeSecret = 2;</code>
* @return The bytes for edgeSecret.
*/
@Override
public com.google.protobuf.ByteString
getEdgeSecretBytes() {
Object ref = edgeSecret_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(String) ref);
edgeSecret_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
private byte memoizedIsInitialized = -1;
@Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
@Override
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (!getEdgeRoutingKeyBytes().isEmpty()) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 1, edgeRoutingKey_);
}
if (!getEdgeSecretBytes().isEmpty()) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 2, edgeSecret_);
}
unknownFields.writeTo(output);
}
@Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (!getEdgeRoutingKeyBytes().isEmpty()) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, edgeRoutingKey_);
}
if (!getEdgeSecretBytes().isEmpty()) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, edgeSecret_);
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@Override
public boolean equals(final Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof ConnectRequestMsg)) {
return super.equals(obj);
}
ConnectRequestMsg other = (ConnectRequestMsg) obj;
if (!getEdgeRoutingKey()
.equals(other.getEdgeRoutingKey())) return false;
if (!getEdgeSecret()
.equals(other.getEdgeSecret())) return false;
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
hash = (37 * hash) + EDGEROUTINGKEY_FIELD_NUMBER;
hash = (53 * hash) + getEdgeRoutingKey().hashCode();
hash = (37 * hash) + EDGESECRET_FIELD_NUMBER;
hash = (53 * hash) + getEdgeSecret().hashCode();
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static ConnectRequestMsg parseFrom(
java.nio.ByteBuffer data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static ConnectRequestMsg parseFrom(
java.nio.ByteBuffer data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static ConnectRequestMsg parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static ConnectRequestMsg parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static ConnectRequestMsg parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static ConnectRequestMsg parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static ConnectRequestMsg parseFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static ConnectRequestMsg parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static ConnectRequestMsg parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static ConnectRequestMsg parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static ConnectRequestMsg parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static ConnectRequestMsg parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(ConnectRequestMsg prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@Override
protected Builder newBuilderForType(
BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code edge.ConnectRequestMsg}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:edge.ConnectRequestMsg)
ConnectRequestMsgOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return EdgeProtos.internal_static_edge_ConnectRequestMsg_descriptor;
}
@Override
protected FieldAccessorTable
internalGetFieldAccessorTable() {
return EdgeProtos.internal_static_edge_ConnectRequestMsg_fieldAccessorTable
.ensureFieldAccessorsInitialized(
ConnectRequestMsg.class, Builder.class);
}
// Construct using ConnectRequestMsg.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
}
}
@Override
public Builder clear() {
super.clear();
edgeRoutingKey_ = "";
edgeSecret_ = "";
return this;
}
@Override
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return EdgeProtos.internal_static_edge_ConnectRequestMsg_descriptor;
}
@Override
public ConnectRequestMsg getDefaultInstanceForType() {
return ConnectRequestMsg.getDefaultInstance();
}
@Override
public ConnectRequestMsg build() {
ConnectRequestMsg result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@Override
public ConnectRequestMsg buildPartial() {
ConnectRequestMsg result = new ConnectRequestMsg(this);
result.edgeRoutingKey_ = edgeRoutingKey_;
result.edgeSecret_ = edgeSecret_;
onBuilt();
return result;
}
@Override
public Builder clone() {
return super.clone();
}
@Override
public Builder setField(
com.google.protobuf.Descriptors.FieldDescriptor field,
Object value) {
return super.setField(field, value);
}
@Override
public Builder clearField(
com.google.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@Override
public Builder clearOneof(
com.google.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@Override
public Builder setRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
int index, Object value) {
return super.setRepeatedField(field, index, value);
}
@Override
public Builder addRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
Object value) {
return super.addRepeatedField(field, value);
}
@Override
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof ConnectRequestMsg) {
return mergeFrom((ConnectRequestMsg)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(ConnectRequestMsg other) {
if (other == ConnectRequestMsg.getDefaultInstance()) return this;
if (!other.getEdgeRoutingKey().isEmpty()) {
edgeRoutingKey_ = other.edgeRoutingKey_;
onChanged();
}
if (!other.getEdgeSecret().isEmpty()) {
edgeSecret_ = other.edgeSecret_;
onChanged();
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@Override
public final boolean isInitialized() {
return true;
}
@Override
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
ConnectRequestMsg parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (ConnectRequestMsg) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// Backing field for edgeRoutingKey. Holds either a String or a
// ByteString; conversions between the two are performed lazily and the
// result is cached back into the field.
private Object edgeRoutingKey_ = "";
/**
* <code>string edgeRoutingKey = 1;</code>
* @return The edgeRoutingKey.
*/
public String getEdgeRoutingKey() {
Object ref = edgeRoutingKey_;
if (!(ref instanceof String)) {
// Field currently holds a ByteString: decode as UTF-8 and cache.
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
String s = bs.toStringUtf8();
edgeRoutingKey_ = s;
return s;
} else {
return (String) ref;
}
}
/**
* <code>string edgeRoutingKey = 1;</code>
* @return The bytes for edgeRoutingKey.
*/
public com.google.protobuf.ByteString
getEdgeRoutingKeyBytes() {
Object ref = edgeRoutingKey_;
if (ref instanceof String) {
// Field currently holds a String: encode as UTF-8 and cache.
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(String) ref);
edgeRoutingKey_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* <code>string edgeRoutingKey = 1;</code>
* @param value The edgeRoutingKey to set.
* @return This builder for chaining.
*/
public Builder setEdgeRoutingKey(
String value) {
if (value == null) {
throw new NullPointerException();
}
edgeRoutingKey_ = value;
onChanged();
return this;
}
/**
* Resets edgeRoutingKey to its default (the empty string).
* <code>string edgeRoutingKey = 1;</code>
* @return This builder for chaining.
*/
public Builder clearEdgeRoutingKey() {
edgeRoutingKey_ = getDefaultInstance().getEdgeRoutingKey();
onChanged();
return this;
}
/**
* <code>string edgeRoutingKey = 1;</code>
* @param value The bytes for edgeRoutingKey to set.
* @return This builder for chaining.
*/
public Builder setEdgeRoutingKeyBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
// proto3 strings must be valid UTF-8; reject invalid bytes early.
checkByteStringIsUtf8(value);
edgeRoutingKey_ = value;
onChanged();
return this;
}
// Backing field for edgeSecret. Same lazy String/ByteString caching
// scheme as edgeRoutingKey_ above.
private Object edgeSecret_ = "";
/**
* <code>string edgeSecret = 2;</code>
* @return The edgeSecret.
*/
public String getEdgeSecret() {
Object ref = edgeSecret_;
if (!(ref instanceof String)) {
// Field currently holds a ByteString: decode as UTF-8 and cache.
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
String s = bs.toStringUtf8();
edgeSecret_ = s;
return s;
} else {
return (String) ref;
}
}
/**
* <code>string edgeSecret = 2;</code>
* @return The bytes for edgeSecret.
*/
public com.google.protobuf.ByteString
getEdgeSecretBytes() {
Object ref = edgeSecret_;
if (ref instanceof String) {
// Field currently holds a String: encode as UTF-8 and cache.
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(String) ref);
edgeSecret_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* <code>string edgeSecret = 2;</code>
* @param value The edgeSecret to set.
* @return This builder for chaining.
*/
public Builder setEdgeSecret(
String value) {
if (value == null) {
throw new NullPointerException();
}
edgeSecret_ = value;
onChanged();
return this;
}
/**
* Resets edgeSecret to its default (the empty string).
* <code>string edgeSecret = 2;</code>
* @return This builder for chaining.
*/
public Builder clearEdgeSecret() {
edgeSecret_ = getDefaultInstance().getEdgeSecret();
onChanged();
return this;
}
/**
* <code>string edgeSecret = 2;</code>
* @param value The bytes for edgeSecret to set.
* @return This builder for chaining.
*/
public Builder setEdgeSecretBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
// proto3 strings must be valid UTF-8; reject invalid bytes early.
checkByteStringIsUtf8(value);
edgeSecret_ = value;
onChanged();
return this;
}
// Generated delegating overrides for unknown-field handling; they only
// narrow the return type to this Builder for fluent chaining.
@Override
public final Builder setUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@Override
public final Builder mergeUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:edge.ConnectRequestMsg)
}
// @@protoc_insertion_point(class_scope:edge.ConnectRequestMsg)
// Singleton default instance; proto3 uses it to represent "all fields
// at their default values" and as the sentinel for merge no-ops.
private static final ConnectRequestMsg DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new ConnectRequestMsg();
}
public static ConnectRequestMsg getDefaultInstance() {
return DEFAULT_INSTANCE;
}
// Stateless parser shared by all instances; delegates to the
// stream-reading constructor.
private static final com.google.protobuf.Parser<ConnectRequestMsg>
PARSER = new com.google.protobuf.AbstractParser<ConnectRequestMsg>() {
@Override
public ConnectRequestMsg parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new ConnectRequestMsg(input, extensionRegistry);
}
};
public static com.google.protobuf.Parser<ConnectRequestMsg> parser() {
return PARSER;
}
@Override
public com.google.protobuf.Parser<ConnectRequestMsg> getParserForType() {
return PARSER;
}
@Override
public ConnectRequestMsg getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
|
/*
TKGTOOLS stands for Tgpp Key Generator Tools(MILENAGE defined in 3GPP TS 35.205).
It implemente f1 - f5, f1*, f5* functions defined in 3GPP TS 35.205/35.206.
These functions are also known as MILENAGE Algorithm Set.
Test data could be find in TS 35.208.
Specification is here: https://www.3gpp.org/ftp/Specs/archive/35_series
*/
package tkgtools
import "bytes"
import "encoding/binary"
import "crypto/hmac"
import "crypto/sha256"
// S is the Rijndael (AES) forward S-box, written in decimal.
// Used for byte substitution in encryption and in the key schedule.
var S [256]byte = [256]byte{
99,124,119,123,242,107,111,197, 48, 1,103, 43,254,215,171,118,
202,130,201,125,250, 89, 71,240,173,212,162,175,156,164,114,192,
183,253,147, 38, 54, 63,247,204, 52,165,229,241,113,216, 49, 21,
4,199, 35,195, 24,150, 5,154, 7, 18,128,226,235, 39,178,117,
9,131, 44, 26, 27,110, 90,160, 82, 59,214,179, 41,227, 47,132,
83,209, 0,237, 32,252,177, 91,106,203,190, 57, 74, 76, 88,207,
208,239,170,251, 67, 77, 51,133, 69,249, 2,127, 80, 60,159,168,
81,163, 64,143,146,157, 56,245,188,182,218, 33, 16,255,243,210,
205, 12, 19,236, 95,151, 68, 23,196,167,126, 61,100, 93, 25,115,
96,129, 79,220, 34, 42,144,136, 70,238,184, 20,222, 94, 11,219,
224, 50, 58, 10, 73, 6, 36, 92,194,211,172, 98,145,149,228,121,
231,200, 55,109,141,213, 78,169,108, 86,244,234,101,122,174, 8,
186,120, 37, 46, 28,166,180,198,232,221,116, 31, 75,189,139,138,
112, 62,181,102, 72, 3,246, 14, 97, 53, 87,185,134,193, 29,158,
225,248,152, 17,105,217,142,148,155, 30,135,233,206, 85, 40,223,
140,161,137, 13,191,230, 66,104, 65,153, 45, 15,176, 84,187, 22,
}
// Xtime[b] is b multiplied by x (i.e. by 2) in GF(2^8) with the AES
// reduction polynomial x^8+x^4+x^3+x+1: a left shift, XORed with 0x1B
// when the high bit was set. Used by MixColumn and the key schedule's
// round-constant update.
var Xtime[256]byte = [256]byte{
0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30,
32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62,
64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94,
96, 98,100,102,104,106,108,110,112,114,116,118,120,122,124,126,
128,130,132,134,136,138,140,142,144,146,148,150,152,154,156,158,
160,162,164,166,168,170,172,174,176,178,180,182,184,186,188,190,
192,194,196,198,200,202,204,206,208,210,212,214,216,218,220,222,
224,226,228,230,232,234,236,238,240,242,244,246,248,250,252,254,
27, 25, 31, 29, 19, 17, 23, 21, 11, 9, 15, 13, 3, 1, 7, 5,
59, 57, 63, 61, 51, 49, 55, 53, 43, 41, 47, 45, 35, 33, 39, 37,
91, 89, 95, 93, 83, 81, 87, 85, 75, 73, 79, 77, 67, 65, 71, 69,
123,121,127,125,115,113,119,117,107,105,111,109, 99, 97,103,101,
155,153,159,157,147,145,151,149,139,137,143,141,131,129,135,133,
187,185,191,189,179,177,183,181,171,169,175,173,163,161,167,165,
219,217,223,221,211,209,215,213,203,201,207,205,195,193,199,197,
251,249,255,253,243,241,247,245,235,233,239,237,227,225,231,229,
}
/*
TKGTOOLS holds the MILENAGE algorithm parameters: the rotation amounts
r1-r5 (in bits) and the 128-bit constants c1-c5 used by the f1-f5,
f1* and f5* functions, plus the expanded AES round keys computed from
the subscriber key K before each function evaluation.
*/
type TKGTOOLS struct{
R1 uint8
R2 uint8
R3 uint8
R4 uint8
R5 uint8
C1 [16]byte
C2 [16]byte
C3 [16]byte
C4 [16]byte
C5 [16]byte
// roundKeys is the Rijndael key schedule (11 round keys, each a
// 4x4 column-major state) derived from K by _rijndaelKeySchedule.
roundKeys [11][4][4]byte
}
/*
NewTKGTOOLS creates a TKGTOOLS instance preloaded with the default
MILENAGE parameters from 3GPP TS 35.206:

	r1 = 64, r2 = 0, r3 = 32, r4 = 64, r5 = 96
	c1 = 0,  c2 = 1, c3 = 2,  c4 = 4,  c5 = 8

With the returned instance you can call the F1, F2345, F1star and
F5star functions. Adjust the R1-R5 / C1-C5 fields after this call if
your deployment uses customized values. All functions take byte-array
pointers rather than copies, which keeps them fast.
*/
func NewTKGTOOLS() *TKGTOOLS {
	// c1 is all zeroes; c2-c5 set a single bit in the last byte
	// (indexed composite literals leave the other bytes zero).
	return &TKGTOOLS{
		R1: 64,
		R2: 0,
		R3: 32,
		R4: 64,
		R5: 96,
		C1: [16]byte{},
		C2: [16]byte{15: 0x01},
		C3: [16]byte{15: 0x02},
		C4: [16]byte{15: 0x04},
		C5: [16]byte{15: 0x08},
	}
}
// _ringShiftLeft128Bits rotates the 128-bit value stored big-endian in
// data left (towards index 0) by nbit bits, in place.
func _ringShiftLeft128Bits(data *[16]byte, nbit uint8) {
	byteShift := nbit / 8
	bitShift := nbit % 8
	// Whole-byte rotation: the byte at index i+byteShift moves to index i.
	src := *data
	var i uint8
	for i = 0; i < 16; i++ {
		(*data)[i] = src[(i+byteShift)%16]
	}
	if bitShift == 0 {
		return
	}
	// Sub-byte rotation: each byte keeps its low bits shifted up and
	// receives the top bits of the byte to its right; the final byte
	// wraps around and receives the top bits of byte 0.
	carry := (*data)[0] >> (8 - bitShift)
	for i = 15; ; i-- {
		nextCarry := (*data)[i] >> (8 - bitShift)
		(*data)[i] = (*data)[i]<<bitShift | carry
		carry = nextCarry
		if i == 0 {
			break
		}
	}
}
// _rijndaelKeySchedule expands the 128-bit key into the 11 AES round
// keys stored in tp.roundKeys. Each round key is kept as a 4x4 state
// in column-major order (byte i of the key goes to [i%4][i/4]).
func (tp *TKGTOOLS)_rijndaelKeySchedule(key *[16]byte){
// First round key is the key itself.
for i:=0; i<16; i++{
tp.roundKeys[0][i&0x03][i>>2] = key[i]
}
roundConst := byte(1)
for i:=1; i<11; i++{
// Column 0: RotWord + SubWord of the previous round's last
// column, XOR the previous column 0, XOR the round constant
// (applied to the first byte only).
tp.roundKeys[i][0][0] = S[tp.roundKeys[i-1][1][3]] ^ tp.roundKeys[i-1][0][0] ^ roundConst
tp.roundKeys[i][1][0] = S[tp.roundKeys[i-1][2][3]] ^ tp.roundKeys[i-1][1][0]
tp.roundKeys[i][2][0] = S[tp.roundKeys[i-1][3][3]] ^ tp.roundKeys[i-1][2][0]
tp.roundKeys[i][3][0] = S[tp.roundKeys[i-1][0][3]] ^ tp.roundKeys[i-1][3][0]
// Columns 1-3: XOR of the previous round's column with the
// freshly computed preceding column.
for j:=0; j<4; j++{
tp.roundKeys[i][j][1] = tp.roundKeys[i-1][j][1] ^ tp.roundKeys[i][j][0]
tp.roundKeys[i][j][2] = tp.roundKeys[i-1][j][2] ^ tp.roundKeys[i][j][1]
tp.roundKeys[i][j][3] = tp.roundKeys[i-1][j][3] ^ tp.roundKeys[i][j][2]
}
// Next round constant: multiply by x in GF(2^8).
roundConst = Xtime[roundConst]
}
}
// _keyAdd XORs round key number `round` into the state, in place
// (the AES AddRoundKey step).
func _keyAdd(state *[4][4]byte, roundKeys *[11][4][4]byte, round int){
	rk := &roundKeys[round]
	for row := range state {
		for col := range state[row] {
			state[row][col] ^= rk[row][col]
		}
	}
}
// _byteSub applies the AES S-box to every byte of the state in place
// (the SubBytes step). The return value is always 0; it is kept only
// so existing call sites remain valid.
func _byteSub(state *[4][4]byte) int{
	for row := range state {
		for col := range state[row] {
			state[row][col] = S[state[row][col]]
		}
	}
	return 0
}
// _shiftRow performs the AES ShiftRows step in place: row r of the
// state is rotated left by r positions (row 0 is untouched).
func _shiftRow(state *[4][4]byte){
	for r := 1; r < 4; r++ {
		// Copy the row first so the rotation reads original values.
		original := state[r]
		for c := 0; c < 4; c++ {
			state[r][c] = original[(c+r)%4]
		}
	}
}
/* MixColumn transformation: multiplies each state column by the fixed
   AES polynomial {03}x^3+{01}x^2+{01}x+{02} over GF(2^8), in place.
   Uses the identity 2a^3b = 2(a^b)^a^b together with the XOR of all
   four column bytes, so only the Xtime table is needed. The statement
   order matters: each row update reads the value already updated on
   the previous line. */
func _mixColumn(state *[4][4]byte){
var temp, tmp, tmp0 byte
/* do one column at a time */
for i:=0; i<4;i++{
// temp = XOR of the whole column; tmp0 saves the original top
// byte because state[0][i] is overwritten before row 3 needs it.
temp = state[0][i] ^ state[1][i] ^ state[2][i] ^ state[3][i]
tmp0 = state[0][i]
/* Xtime array does multiply by x in GF2^8 */
tmp = Xtime[state[0][i] ^ state[1][i]]
state[0][i] ^= temp ^ tmp
tmp = Xtime[state[1][i] ^ state[2][i]]
state[1][i] ^= temp ^ tmp
tmp = Xtime[state[2][i] ^ state[3][i]]
state[2][i] ^= temp ^ tmp
tmp = Xtime[state[3][i] ^ tmp0]
state[3][i] ^= temp ^ tmp
}
return
}
// _rijndaelEncrypt encrypts one 16-byte block with AES-128 using the
// round keys previously expanded by _rijndaelKeySchedule, writing the
// ciphertext into output. The state is kept column-major: byte i maps
// to state[i%4][i/4].
func (tp *TKGTOOLS)_rijndaelEncrypt(input *[16]byte, output *[16]byte){
var state [4][4]byte
/* initialise state array from input byte string */
for i:=0; i<16; i++{
state[i & 0x3][i>>2] = input[i]
}
/* add first round_key */
_keyAdd(&state, &tp.roundKeys, 0);
/* do lots of full rounds */
r := 1
for r=1; r<=9; r++{
_byteSub(&state)
_shiftRow(&state)
_mixColumn(&state)
_keyAdd(&state, &tp.roundKeys, r)
}
/* final round: no MixColumn, uses round key 10 (r == 10 here) */
_byteSub(&state);
_shiftRow(&state);
_keyAdd(&state, &tp.roundKeys, r)
/* produce output byte string from state array */
for i:=0; i<16; i++{
output[i] = state[i & 0x3][i>>2]
}
return
}
// _computeOPc derives OPc from the operator variant configuration
// field OP: OPc = E[OP]K XOR OP (TS 35.206). Requires that the key
// schedule has already been run for the subscriber key K.
func (tp *TKGTOOLS)_computeOPc(op *[16]byte, op_c *[16]byte){
tp._rijndaelEncrypt(op, op_c)
for i:=0; i<16; i++{
op_c[i] ^= op[i]
}
return
}
/*
F1 computes the network authentication code MAC-A (TS 35.206 f1).
Inputs are the subscriber key, RAND, SQN, AMF and either OP or OPc.
Call it like:
	tkg = tkgtools.NewTKGTOOLS()
	tkg.F1(&key, &rand, &sqn , &amf, &mac_a, &op, nil)
	// tkg.F1(&key, &rand, &sqn , &amf, &mac_a, nil, &opc)
Pass op if you have it, or opc if you have it; leave the other nil.
The result is written into mac_a.
*/
func (tp *TKGTOOLS)F1(key *[16]byte, rand *[16]byte, sqn *[6]byte, amf *[2]byte, mac_a *[8]byte, op *[16]byte, opc *[16]byte){
var op_c [16]byte
var temp [16]byte
var in1 [16]byte
var out1 [16]byte
var rijndaelInput [16]byte
var i uint8
tp._rijndaelKeySchedule(key)
// Derive OPc from OP unless the caller supplied OPc directly.
if opc==nil{
tp._computeOPc(op, &op_c)
}else{
op_c = *opc
}
// TEMP = E[RAND XOR OPc]K
for i=0; i<16; i++{
rijndaelInput[i] = rand[i] ^ op_c[i]
}
tp._rijndaelEncrypt(&rijndaelInput, &temp)
// IN1 = SQN || AMF || SQN || AMF (TS 35.206 layout)
for i=0; i<6; i++{
in1[i] = sqn[i]
in1[i+8] = sqn[i]
}
for i=0; i<2; i++{
in1[i+6] = amf[i]
in1[i+14] = amf[i]
}
/* XOR op_c and in1, rotate by r1=64, and XOR *
 * on the constant c1 (which is all zeroes) */
for i=0; i<16; i++{
rijndaelInput[i] = in1[i] ^ op_c[i]
}
_ringShiftLeft128Bits(&rijndaelInput, tp.R1)
for i=0; i<16; i++{
rijndaelInput[i] ^= tp.C1[i]
}
/* XOR on the value temp computed before */
for i=0; i<16; i++{
rijndaelInput[i] ^= temp[i]
}
// OUT1 = E[...]K XOR OPc; MAC-A is the first 8 bytes of OUT1.
tp._rijndaelEncrypt(&rijndaelInput, &out1)
for i=0; i<16; i++{
out1[i] ^= op_c[i]
}
for i=0; i<8; i++{
mac_a[i] = out1[i]
}
return
}
/*
F2345 computes RES (f2), CK (f3), IK (f4) and AK (f5) in one pass.
Inputs are the subscriber key, RAND, and either OP or OPc.
Call it like:
	tkg = tkgtools.NewTKGTOOLS()
	tkg.F2345(&key, &rand, &res, &ck, &ik, &ak, &op, nil)
	// tkg.F2345(&key, &rand, &res, &ck, &ik, &ak, nil, &opc)
Pass op if you have it, or opc if you have it; leave the other nil.
*/
func (tp *TKGTOOLS)F2345 (key *[16]byte, rand *[16]byte, res *[8]byte, ck *[16]byte, ik *[16]byte, ak *[6]byte, op *[16]byte, opc *[16]byte){
var op_c [16]byte
var temp [16]byte
var out [16]byte
var rijndaelInput [16]byte
var i uint8
tp._rijndaelKeySchedule(key)
// Derive OPc from OP unless the caller supplied OPc directly.
if opc==nil{
tp._computeOPc(op, &op_c)
}else{
op_c = *opc
}
// TEMP = E[RAND XOR OPc]K, shared by all three output blocks.
for i=0; i<16; i++{
rijndaelInput[i] = rand[i] ^ op_c[i]
}
tp._rijndaelEncrypt(&rijndaelInput, &temp);
/* To obtain output block OUT2: XOR OPc and TEMP, *
 * rotate by r2=0, and XOR on the constant c2 (which *
 * is all zeroes except that the last bit is 1). */
for i=0; i<16; i++{
rijndaelInput[i] = temp[i] ^ op_c[i]
}
_ringShiftLeft128Bits(&rijndaelInput, tp.R2)
for i=0; i<16; i++{
rijndaelInput[i] ^= tp.C2[i]
}
tp._rijndaelEncrypt(&rijndaelInput, &out)
for i=0; i<16; i++{
out[i] ^= op_c[i]
}
// RES = OUT2[8..15] (f2), AK = OUT2[0..5] (f5).
for i=0; i<8; i++{
res[i] = out[i+8]
}
for i=0; i<6; i++{
ak[i] = out[i]
}
/* To obtain output block OUT3: XOR OPc and TEMP, *
 * rotate by r3=32, and XOR on the constant c3 (which *
 * is all zeroes except that the next to last bit is 1). */
for i=0; i<16; i++{
rijndaelInput[i] = temp[i] ^ op_c[i]
}
_ringShiftLeft128Bits(&rijndaelInput, tp.R3)
for i=0; i<16; i++{
rijndaelInput[i] ^= tp.C3[i]
}
tp._rijndaelEncrypt(&rijndaelInput, &out)
for i=0; i<16; i++{
out[i] ^= op_c[i]
}
// CK = OUT3 (f3).
for i=0; i<16; i++{
ck[i] = out[i]
}
/* To obtain output block OUT4: XOR OPc and TEMP, *
 * rotate by r4=64, and XOR on the constant c4 (which *
 * is all zeroes except that the 2nd from last bit is 1). */
for i=0; i<16; i++{
rijndaelInput[i] = temp[i] ^ op_c[i]
}
_ringShiftLeft128Bits(&rijndaelInput, tp.R4)
for i=0; i<16; i++{
rijndaelInput[i] ^= tp.C4[i]
}
tp._rijndaelEncrypt(&rijndaelInput, &out)
for i=0; i<16; i++{
out[i] ^= op_c[i]
}
// IK = OUT4 (f4).
for i=0; i<16; i++{
ik[i] = out[i]
}
return
}
/*
F1star computes the resynchronisation code MAC-S (TS 35.206 f1*).
It is identical to F1 except that the result is taken from the LAST
8 bytes of OUT1 rather than the first 8.
Inputs are the subscriber key, RAND, SQN, AMF and either OP or OPc.
Call it like:
	tkg = tkgtools.NewTKGTOOLS()
	tkg.F1star(&key, &rand, &sqn , &amf, &mac_s, &op, nil)
	// tkg.F1star(&key, &rand, &sqn , &amf, &mac_s, nil, &opc)
Pass op if you have it, or opc if you have it; leave the other nil.
*/
func (tp *TKGTOOLS)F1star(key *[16]byte, rand *[16]byte, sqn *[6]byte, amf *[2]byte, mac_s *[8]byte, op *[16]byte, opc *[16]byte){
var op_c [16]byte
var temp[16]byte
var in1[16]byte
var out1[16]byte
var rijndaelInput[16]byte
var i uint8
tp._rijndaelKeySchedule(key);
// Derive OPc from OP unless the caller supplied OPc directly.
if opc==nil{
tp._computeOPc(op, &op_c)
}else{
op_c = *opc
}
// TEMP = E[RAND XOR OPc]K
for i=0; i<16; i++{
rijndaelInput[i] = rand[i] ^ op_c[i]
}
tp._rijndaelEncrypt(&rijndaelInput, &temp)
// IN1 = SQN || AMF || SQN || AMF (TS 35.206 layout)
for i=0; i<6; i++{
in1[i] = sqn[i]
in1[i+8] = sqn[i]
}
for i=0; i<2; i++{
in1[i+6] = amf[i]
in1[i+14] = amf[i]
}
/* XOR op_c and in1, rotate by r1=64, and XOR *
 * on the constant c1 (which is all zeroes) */
for i=0; i<16; i++{
rijndaelInput[i] = in1[i] ^ op_c[i]
}
_ringShiftLeft128Bits(&rijndaelInput, tp.R1)
for i=0; i<16; i++{
rijndaelInput[i] ^= tp.C1[i]
}
/* XOR on the value temp computed before */
for i=0; i<16; i++{
rijndaelInput[i] ^= temp[i]
}
tp._rijndaelEncrypt(&rijndaelInput, &out1)
for i=0; i<16; i++{
out1[i] ^= op_c[i]
}
// MAC-S is the last 8 bytes of OUT1 (f1* vs f1's first 8).
for i=0; i<8; i++{
mac_s[i] = out1[i+8]
}
return
}
/*
F5star computes the resynchronisation anonymity key AK (TS 35.206 f5*).
Inputs are the subscriber key, RAND, and either OP or OPc.
Call it like:
	tkg = tkgtools.NewTKGTOOLS()
	tkg.F5star(&key, &rand, &ak, &op, nil)
	// tkg.F5star(&key, &rand, &ak, nil, &opc)
Pass op if you have it, or opc if you have it; leave the other nil.
*/
func (tp *TKGTOOLS)F5star(key *[16]byte, rand *[16]byte, ak *[6]byte, op *[16]byte, opc *[16]byte){
var op_c [16]byte
var temp [16]byte
var out [16]byte
var rijndaelInput [16]byte
var i uint8
tp._rijndaelKeySchedule(key)
// Derive OPc from OP unless the caller supplied OPc directly.
if opc==nil{
tp._computeOPc(op, &op_c)
}else{
op_c = *opc
}
// TEMP = E[RAND XOR OPc]K
for i=0; i<16; i++{
rijndaelInput[i] = rand[i] ^ op_c[i]
}
tp._rijndaelEncrypt(&rijndaelInput, &temp)
/* To obtain output block OUT5: XOR OPc and TEMP, *
 * rotate by r5=96, and XOR on the constant c5 (which *
 * is all zeroes except that the 3rd from last bit is 1). */
for i=0; i<16; i++{
rijndaelInput[i] = temp[i] ^ op_c[i]
}
_ringShiftLeft128Bits(&rijndaelInput, tp.R5)
for i=0; i<16; i++{
rijndaelInput[i] ^= tp.C5[i]
}
tp._rijndaelEncrypt(&rijndaelInput, &out)
for i=0; i<16; i++{
out[i] ^= op_c[i]
}
// AK = OUT5[0..5].
for i=0; i<6; i++{
ak[i] = out[i]
}
return
}
/*
ResStar is for 5G AKA.
Check 3GPP TS 33.501 A.4 for detail.
*/
// _intToBytes encodes n as an 8-byte big-endian value.
// Callers slice the result to take only the low-order bytes.
func _intToBytes(n int) []byte {
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, uint64(n))
	return buf
}
// _bytesCombine concatenates the given byte slices into a single slice,
// in order, with nothing between them.
func _bytesCombine(pBytes ...[]byte) []byte {
	out := []byte{}
	for _, part := range pBytes {
		out = append(out, part...)
	}
	return out
}
// ResStar derives the 5G AKA RES* value (3GPP TS 33.501 Annex A.4):
// the last 16 bytes of HMAC-SHA256 keyed with CK||IK over the input
// string FC(0x6B) || SNN || L(SNN) || RAND || L(RAND)=0x0010 ||
// RES || L(RES)=0x0008, where each length is a 2-byte big-endian
// value taken from the low bytes of _intToBytes.
func (tp *TKGTOOLS)ResStar(serviceNwName string, rand *[16]byte, res *[8]byte, ck *[16]byte, ik *[16]byte)[]byte{
secret := _bytesCombine((*ck)[:], (*ik)[:])
snnBytes := []byte(serviceNwName)
lsnn := len(snnBytes)
// Keep only the two low-order bytes of the 8-byte encoding.
lsnnBytes := _intToBytes(lsnn)[6:]
message := _bytesCombine([]byte{0x6B}, snnBytes, lsnnBytes, (*rand)[:], []byte{0x00, 0x10}, (*res)[:], []byte{0x00, 0x08})
hash := hmac.New(sha256.New, secret)
hash.Write(message)
sum := hash.Sum(nil)
// RES* is the 128 least-significant bits of the 256-bit MAC.
return sum[len(sum)-16:]
}
|
// Migration "up": creates the schema in dependency order —
// auth (login credentials), users (profile data, FK -> auth),
// tables (restaurant bills), tableUsersPaid (join table linking a
// bill to the users splitting it, FK -> tables and auth).
// Creation order matters: referenced tables must exist before the
// tables that declare foreign keys to them.
exports.up = function(knex, Promise) {
return knex.schema.createTable('auth', users => {
users.increments('userId');
users.string('username', 128)
.notNullable()
.unique();
users.string('password', 128).notNullable();
})
// Profile info kept separate from credentials; cascades follow auth.
.createTable('users', users => {
users.increments('infoKey');
users.integer('userId')
.unsigned()
.notNullable()
.references('userId')
.inTable('auth')
.onDelete('CASCADE')
.onUpdate('CASCADE');
users.string('firstName', 128).notNullable();
users.string('lastName', 128).notNullable();
users.string('email', 128).notNullable();
users.string('phoneNumber', 12).notNullable();
})
.createTable('tables', table => {
table.increments('tableId');
table.string('restaurant', 128).notNullable();
table.decimal('amountDue').notNullable();
})
// Join table: one row per (bill, user) pair with payment status.
.createTable('tableUsersPaid', table => {
table.integer('tableId')
.unsigned()
.notNullable()
.references('tableId')
.inTable('tables')
.onDelete('CASCADE')
.onUpdate('CASCADE');
table.integer('userId')
.unsigned()
.notNullable()
.references('userId')
.inTable('auth')
.onDelete('CASCADE')
.onUpdate('CASCADE');
table.boolean('paid').notNullable().defaultTo(false);
});
};
// Migration "down": drops tables in reverse creation order so tables
// holding foreign keys are removed before the tables they reference.
exports.down = function(knex, Promise) {
return knex.schema
.dropTableIfExists('tableUsersPaid')
.dropTableIfExists('tables')
.dropTableIfExists('users')
.dropTableIfExists('auth');
};
|
/**
 * A catalogue product with physical attributes and stock information.
 * Optional fields: `id` is absent before the record is persisted
 * (presumably assigned by the backend — TODO confirm); `actual_stock`
 * and the image URLs may be missing.
 */
export interface Product {
id?: number;
name: string;
franchise: string;
price: number;
description: string;
// Physical dimensions — units are not specified here; confirm with the API.
height: number;
width: number;
weight: number;
distributor: string;
reference: string;
stock: number;
actual_stock?: number;
// NOTE(review): image appears to be a thumbnail and imagefull the
// full-size asset — verify against the consuming components.
image?: string;
imagefull?: string;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.