text
stringlengths
1
1.05M
#!/bin/bash ########################################################################## #Aqueduct - Compliance Remediation Content #Copyright (C) 2011,2012 # Vincent C. Passaro (vincent.passaro@gmail.com) # Shannon Mitchell (shannon.mitchell@fusiontechnology-llc.com) # #This program is free software; you can redistribute it and/or #modify it under the terms of the GNU General Public License #as published by the Free Software Foundation; either version 2 #of the License, or (at your option) any later version. # #This program is distributed in the hope that it will be useful, #but WITHOUT ANY WARRANTY; without even the implied warranty of #MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #GNU General Public License for more details. # #You should have received a copy of the GNU General Public License #along with this program; if not, write to the Free Software #Foundation, Inc., 51 Franklin Street, Fifth Floor, #Boston, MA 02110-1301, USA. ########################################################################## ##################### Fusion Technology LLC ############################# # By Shannon Mitchell # # Fusion Technology LLC # # Shannon[.]Mitchell[@]fusiontechnology-llc[.]com # # www.fusiontechnology-llc.com # ##################### Fusion Technology LLC ############################# # # _____________________________________________________________________ # | Version | Change Information | Author | Date | # |__________|_______________________|____________________|____________| # | 1.0 | Initial Script | Shannon Mitchell | 15-jul-2012| # | | Creation | | | # |__________|_______________________|____________________|____________| # #######################DISA INFORMATION################################## # Group ID (Vulid): V-768 # Group Title: GEN000480 # Rule ID: SV-37213r1_rule # Severity: medium # Rule Version (STIG-ID): GEN000480 # Rule Title: The delay between login prompts following a failed login # attempt must be at least 4 seconds. 
# # Vulnerability Discussion: Enforcing a delay between successive failed # login attempts increases protection against automated password guessing # attacks. # # Responsibility: System Administrator # IAControls: ECLO-1, ECLO-2 # # Check Content: # # Check the value of the FAIL_DELAY variable and the ability to use it. # Procedure: # grep FAIL_DELAY /etc/login.defs # If the value does not exist, or is less than 4, this is a finding. # Check for the use of pam_faildelay. # grep pam_faildelay /etc/pam.d/system-auth* # If pam_faildelay.so module is not present, this is a finding. # If pam_faildelay is present only in /etc/pam.d/system-auth-ac: # ensure that /etc/pam.d/system-auth includes /etc/pam.d/system-auth-ac. #grep system-auth-ac /etc/pam.d/system-auth # This should return: # auth include system-auth-ac # account include system-auth-ac # password include system-auth-ac # session include system-auth-ac # /etc/pam.d/system-auth-ac should only be included by # /etc/pam.d/system-auth. All other pam files should include # /etc/pam.d/system-auth. # If pam_faildelay is not defined in /etc/pam.d/system-auth either directly # or through inclusion of system-auth-ac, this is a finding. # # Fix Text: # # Add the pam_faildelay module and set the FAIL_DELAY variable. # Procedure: # Edit /etc/login.defs and set the value of the FAIL_DELAY variable to 4 or # more. # The default link /etc/pam.d/system-auth points to # /etc/pam.d/system-auth-ac which is the file maintained by the authconfig # utility. In order to add pam options other than those available via the # utility create or modify /etc/pam.d/system-auth-local with the options # and including system-auth-ac. 
# For example:
# auth       required   pam_access.so
# auth       optional   pam_faildelay.so delay=4000000
# auth       include    system-auth-ac
# account    include    system-auth-ac
# password   include    system-auth-ac
# session    include    system-auth-ac
# Once system-auth-local is written ensure the /etc/pam.d/system-auth
# points to system-auth-local. This is necessary because authconfig writes
# directly to system-auth-ac so any manual changes made will be lost if
# authconfig is run.
#######################DISA INFORMATION##################################

# Global Variables
PDI=GEN000480

# Start-Lockdown

#####################################################################
# Set up custom config files if needed. This section will be appended
# to each script making pam config changes just in case.
#####################################################################
for PFPREFIX in system password smartcard fingerprint
do
  if [ -e "/etc/pam.d/${PFPREFIX}-auth-ac" ]
  then
    if [ ! -e "/etc/pam.d/${PFPREFIX}-auth-local" ]
    then
      # Seed the -local file to include the authconfig-managed -ac file,
      # then point the main pam file at -local so manual changes survive
      # future authconfig runs.
      cat <<EOF > /etc/pam.d/${PFPREFIX}-auth-local
auth        include     ${PFPREFIX}-auth-ac
account     include     ${PFPREFIX}-auth-ac
password    include     ${PFPREFIX}-auth-ac
session     include     ${PFPREFIX}-auth-ac
EOF
      ln -f -s /etc/pam.d/${PFPREFIX}-auth-local /etc/pam.d/${PFPREFIX}-auth
      chown root:root /etc/pam.d/${PFPREFIX}-auth-local
      chmod 644 /etc/pam.d/${PFPREFIX}-auth-local
      chcon system_u:object_r:etc_t /etc/pam.d/${PFPREFIX}-auth-local
    fi
  fi
done

# Run the fix: ensure pam_faildelay is configured with delay=4000000
# (4 seconds, expressed in microseconds) on an auth line.
for PFPREFIX in system password smartcard fingerprint
do
  if [ -e "/etc/pam.d/${PFPREFIX}-auth-local" ]
  then
    # Fix the auth line
    egrep '^[^#]?auth.*pam_faildelay.*' /etc/pam.d/${PFPREFIX}-auth-local > /dev/null
    if [ $? != 0 ]
    then
      # Add a new pam_faildelay entry before the first auth include line
      # if no pam_faildelay line exists at all.
      sed -i -e 's/\(^auth.*-auth-ac\)/auth optional pam_faildelay.so delay=4000000\n\1/' /etc/pam.d/${PFPREFIX}-auth-local
    else
      # The line exists; append the delay argument if it is missing.
      egrep '^[^#]?auth.*pam_faildelay' /etc/pam.d/${PFPREFIX}-auth-local | grep delay=4000000 > /dev/null
      if [ $? != 0 ]
      then
        sed -i -e 's/\(^auth.*pam_faildelay.*$\)/\1 delay=4000000/' /etc/pam.d/${PFPREFIX}-auth-local
      fi
    fi
  fi
done

# Ensure FAIL_DELAY in /etc/login.defs is at least 4 seconds.
grep '^FAIL_DELAY' /etc/login.defs > /dev/null
if [ $? -eq 0 ]
then
  # BUG FIX: anchor the awk pattern so commented-out '# FAIL_DELAY' lines
  # are not read as the active value.
  FAIL_DELAY=`awk '/^FAIL_DELAY/{print $2}' /etc/login.defs`
  # BUG FIX: the STIG requires a delay of 4 *or more*. The previous
  # check (!= "4") would lower an already-compliant value such as 10
  # back down to 4. Only rewrite when the value is missing or below 4.
  if [ -z "$FAIL_DELAY" ] || [ "$FAIL_DELAY" -lt 4 ]
  then
    sed -i -e 's/\(^FAIL_DELAY[^0-9]*\).*/\14/g' /etc/login.defs
  fi
else
  echo "" >> /etc/login.defs
  echo "# Added for STIG id GEN000480" >> /etc/login.defs
  echo 'FAIL_DELAY 4' >> /etc/login.defs
fi
#!/bin/bash
# Since: January, 2021
# Author: gvenzl
# Name: all_build_tests.sh
# Description: Script for all build tests for Oracle DB XE
#
# Copyright 2021 Gerald Venzl
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Exit on errors
# Great explanation on https://vaneyckt.io/posts/safer_bash_scripts_with_set_euxo_pipefail/
set -Eeuo pipefail

# 11g is quickest to build and to verify whether something went wrong, so it
# runs first; the remaining builds run in order of newest to oldest release.
for build_script in ./build_Dockerfile_11202.sh \
                    ./build_Dockerfile_2130.sh \
                    ./build_Dockerfile_1840.sh; do
  "${build_script}"
done
#!/usr/bin/env bash
set -euo pipefail

# This test starts a local node which tries to sync with remotely running nodes and
# verifies that the sync works.
#
# For testing a particular commit hash of Geth repo (usually, on Circle CI)
# Usage: ci_test_sync_with_network.sh checkout <commit_hash_of_geth_repo_to_test>
#
# For testing the local Geth dir (usually, for manual testing)
# Usage: ci_test_sync_with_network.sh local <location_of_local_geth_dir>

case "${1}" in
  checkout)
    export GETH_DIR="/tmp/geth"
    # Test master by default.
    COMMIT_HASH_TO_TEST=${2:-"master"}
    echo "Checking out geth at commit hash ${COMMIT_HASH_TO_TEST}..."
    # Shallow clone up to depth of 20. If the COMMIT_HASH_TO_TEST is not within
    # the last 20 hashes then this test will fail, which forces someone to keep
    # updating the COMMIT_HASH_TO_TEST being tested. A depth-20 clone takes
    # about 4 seconds vs ~60 seconds for a full clone (as of May 20, 2019),
    # and that gap only grows over time.
    git clone --depth 20 https://github.com/celo-org/celo-blockchain.git ${GETH_DIR} && cd ${GETH_DIR} && git checkout ${COMMIT_HASH_TO_TEST} && cd -
    ;;
  local)
    export GETH_DIR="${2}"
    echo "Testing using local geth dir ${GETH_DIR}..."
    ;;
esac

# For now, the script assumes that it runs from a sub-dir of sub-dir of the
# monorepo directory.
CELO_MONOREPO_DIR="${PWD}/../.."

# Assume that the logs are in /tmp/geth_stdout
GETH_LOG_FILE=/tmp/geth_stdout

# usage: test_ultralight_sync <network_name>
# Runs an ultralight-mode sync against the named network, then verifies the
# resulting geth logs with the mocha-based checker.
test_ultralight_sync () {
  NETWORK_NAME=$1
  echo "Testing ultralight sync with '${NETWORK_NAME}' network"
  # Run the sync in ultralight mode
  geth_tests/network_sync_test.sh ${NETWORK_NAME} ultralight
  # Get the epoch size by sourcing this file (defines EPOCH)
  source ${CELO_MONOREPO_DIR}/.env.${NETWORK_NAME}
  # Verify what happened by reading the logs.
  ${CELO_MONOREPO_DIR}/node_modules/.bin/mocha -r ts-node/register ${CELO_MONOREPO_DIR}/packages/celotool/geth_tests/verify_ultralight_geth_logs.ts --gethlogfile ${GETH_LOG_FILE} --epoch ${EPOCH}
}

# Some code in celotool requires this file to contain the MNEMONOIC.
# The value of MNEMONOIC does not matter.
if [[ ! -e ${CELO_MONOREPO_DIR}/.env.mnemonic ]]; then
  echo "MNEMONOIC=anything random" > ${CELO_MONOREPO_DIR}/.env.mnemonic
fi

# Test syncing against each network in turn.
# The trailing `&& echo` dumps a new line, making the results more readable.
for NETWORK_NAME in "integration" "alfajoresstaging"; do
  export NETWORK_NAME
  geth_tests/network_sync_test.sh ${NETWORK_NAME} full && echo
  # This is broken, I am not sure why, therefore, commented for now.
  # geth_tests/network_sync_test.sh ${NETWORK_NAME} fast && echo
  geth_tests/network_sync_test.sh ${NETWORK_NAME} light && echo
  test_ultralight_sync ${NETWORK_NAME} && echo
done
<reponame>Dacelonid/CodeWars package com.dacelonid.printererrors; class Printer { static String printerError(final String printerString) { return printerString.chars().filter(x -> x < 'a' || x > 'm').count() + "/" + printerString.length(); } }
<reponame>wyctxwd1/Aifuwu<filename>app/src/main/java/com/ab/yuri/aifuwu/RecyclerView/AboutUsAdapter.java<gh_stars>0 package com.ab.yuri.aifuwu.RecyclerView; import android.content.Context; import android.content.Intent; import android.support.v7.widget.RecyclerView; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import android.widget.ImageView; import android.widget.TextView; import com.ab.yuri.aifuwu.R; import com.ab.yuri.aifuwu.RunActivity; import com.ab.yuri.aifuwu.SchooldaysActivity; import com.ab.yuri.aifuwu.ScoreActivity; import com.bumptech.glide.Glide; import java.util.List; /** * Created by Yuri on 2017/2/3. */ public class AboutUsAdapter extends RecyclerView.Adapter<AboutUsAdapter.ViewHolder> { private Context mContext; private List<Uses> mUsesList; static class ViewHolder extends RecyclerView.ViewHolder{ ImageView useImg; TextView useName; public ViewHolder(View view) { super(view); useImg= (ImageView) view.findViewById(R.id.about_us_item_img); useName= (TextView) view.findViewById(R.id.about_us_item_txt); } } public AboutUsAdapter(List<Uses> usesList){ mUsesList=usesList; } @Override public ViewHolder onCreateViewHolder(ViewGroup parent, int viewType) { if (mContext==null){ mContext=parent.getContext(); } View view= LayoutInflater.from(parent.getContext()).inflate(R.layout.about_us_item,parent,false); ViewHolder holder=new ViewHolder(view); return holder; } @Override public void onBindViewHolder(ViewHolder holder, int position) { Uses uses=mUsesList.get(position); holder.useName.setText(uses.getName()); Glide.with(mContext).load(uses.getImgId()).into(holder.useImg); } @Override public int getItemCount() { return mUsesList.size(); } }
#!/usr/bin/env bash
# Build script for safe-cli components. Builds natively on Windows/macOS and
# inside a docker container on Linux, then copies the release binaries/libs
# into ./artifacts.
#
# Args:
#   $1 component   - 'safe-cli', 'safe-api', 'safe-authd' or 'safe-ffi'
#   $2 target      - rust target triple, e.g. 'x86_64-unknown-linux-gnu'
#   $3 build_type  - 'dev' or 'prod'
#   $4 clean_build - 'true' to wipe previous build output first (default false)
#   $5 features    - cargo features; defaulted per component for dev builds

set -e -x

component=$1
if [[ -z "$component" ]]; then
    echo "You must supply the component to build."
    echo "Valid values are 'safe-cli', 'safe-api', 'safe-authd' or 'safe-ffi'."
    exit 1
fi

target=$2
if [[ -z "$target" ]]; then
    echo "You must supply the target for the build."
    # BUG FIX: this message was copy-pasted from the component check and
    # listed component names as valid targets.
    echo "Valid values are rust target triples, e.g. 'x86_64-unknown-linux-gnu'."
    exit 1
fi

build_type=$3
if [[ -z "$build_type" ]]; then
    echo "You must supply the type for the build."
    echo "Valid values are 'dev' or 'prod'."
    exit 1
fi

clean_build=$4
[[ -z "$clean_build" ]] && clean_build="false"

features=$5
if [[ -z "$features" ]]; then
    [[ "$component" == "safe-ffi" ]] && features="mock-network"
    [[ "$component" == "safe-cli" ]] && features="fake-auth,mock-network"
fi

# Assemble the cargo command line that runs inside the docker container.
function get_docker_build_command() {
    local build_command
    if [[ "$clean_build" == "true" ]]; then
        if [[ "$target" == *"linux"* ]]; then
            build_command="rm -rf /target/$target/release &&"
        else
            build_command="rm -rf target/$target/release &&"
        fi
    fi
    build_command="$build_command cargo build"
    # Only safe-cli produces a binary; everything else builds as a library.
    [[ "$component" != "safe-cli" ]] && build_command="$build_command --lib"
    build_command="$build_command --release --manifest-path=$component/Cargo.toml --target=$target"
    if [[ "$build_type" == "dev" ]]; then
        build_command="$build_command --features=$features"
    fi
    echo $build_command
}

# Run the build inside the maidsafe/safe-cli-build container, then copy the
# target directory out and remove the container.
function build_on_linux() {
    local build_command
    local container_tag
    local uuid
    uuid=$(uuidgen | sed 's/-//g')
    container_tag="$component-$target-$build_type"
    build_command=$(get_docker_build_command)
    docker run --name "$component-build-${uuid}" -v "$(pwd)":/usr/src/safe-cli:Z \
        -u "$(id -u)":"$(id -g)" \
        maidsafe/safe-cli-build:"$container_tag" \
        bash -c "$build_command"
    docker cp "$component-build-${uuid}":/target .
    docker rm "$component-build-${uuid}"
}

# Native binary build (safe-cli).
function build_bin() {
    [[ "$clean_build" == "true" ]] && rm -rf target
    local extra_args=()
    [[ "$build_type" == "dev" ]] && extra_args+=(--features="$features")
    cargo build "${extra_args[@]}" \
        --release --manifest-path="$component/Cargo.toml" --target="$target"
}

# Native library build (safe-api, safe-authd, safe-ffi).
function build_lib() {
    [[ "$clean_build" == "true" ]] && rm -rf target
    local extra_args=()
    [[ "$build_type" == "dev" ]] && extra_args+=(--features="$features")
    cargo build "${extra_args[@]}" \
        --release --lib --manifest-path="$component/Cargo.toml" --target="$target"
}

function build_on_windows() {
    case $component in
        safe-cli)
            build_bin
            ;;
        safe-ffi)
            build_lib
            ;;
        safe-api)
            build_lib
            ;;
        safe-authd)
            build_lib
            ;;
        *)
            echo "$component is not supported. Please extend to support this component."
            exit 1
            ;;
    esac
}

function build_on_macos() {
    # macOS builds natively, exactly like Windows.
    build_on_windows
}

# Dispatch on the host platform.
function build() {
    uname_output=$(uname -a)
    case $uname_output in
        Linux*)
            build_on_linux
            ;;
        Darwin*)
            build_on_macos
            ;;
        MSYS_NT*)
            build_on_windows
            ;;
        *)
            echo "Platform not supported. Please extend to support this platform."
            exit 1
    esac
}

function clean_artifacts() {
    rm -rf artifacts
    mkdir artifacts
}

function get_artifacts() {
    find "target/$target/release" -maxdepth 1 -type f -exec cp '{}' artifacts \;
}

clean_artifacts
build
get_artifacts
require 'commander'
require 'commander/delegates'
require 'rhc/helpers'
require 'rhc/wizard'
require 'rhc/config'
require 'rhc/commands'
require 'rhc/exceptions'
require 'rhc/context_helper'

# Base class for all rhc CLI commands. Subclasses declare their metadata
# (summary, syntax, options, arguments) through the class-level DSL below;
# method_added registers each public action with RHC::Commands.
class RHC::Commands::Base

  attr_writer :options, :config
  attr_reader :messages

  def initialize(options=Commander::Command::Options.new, config=nil)
    @options, @config = options, config
    @messages = []
  end

  # Resolves option values (context helpers, deprecation mapping, required
  # checks) and maps positional args into slots per the argument metadata.
  # Returns the filled arg_slots array; raises ArgumentError on bad input.
  def validate_args_and_options(args_metadata, options_metadata, args)
    # process options
    options_metadata.each do |option_meta|
      arg = option_meta[:arg]

      # Check to see if we've provided a value for an option tagged as deprecated
      if (!(val = @options.__hash__[arg]).nil? && dep_info = option_meta[:deprecated])
        # Get the arg for the correct option and what the value should be
        (correct_arg, default) = dep_info.values_at(:key, :value)
        # Set the default value for the correct option to the passed value
        ## Note: If this isn't triggered, then the original default will be honored
        ## If the user specifies any value for the correct option, it will be used
        options.default correct_arg => default
        # Alert the users if they're using a deprecated option
        (correct, incorrect) = [options_metadata.find{|x| x[:arg] == correct_arg }, option_meta].flatten.map{|x| x[:switches].join(", ") }
        deprecated_option(incorrect, correct)
      end

      context_helper = option_meta[:context_helper]
      @options.__hash__[arg] = self.send(context_helper) if @options.__hash__[arg].nil? and context_helper
      raise ArgumentError.new("Missing required option '#{arg}'.") if option_meta[:required] and @options.__hash__[arg].nil?
    end

    # process args
    arg_slots = [].fill(nil, 0, args_metadata.length)
    fill_args = args.reverse
    args_metadata.each_with_index do |arg_meta, i|
      # check options
      value = @options.__hash__[arg_meta[:option_symbol]] unless arg_meta[:option_symbol].nil?
      if value
        arg_slots[i] = value
      elsif arg_meta[:arg_type] == :list
        # A :list argument consumes all remaining positional args.
        arg_slots[i] = fill_args.reverse
        fill_args = []
      else
        raise ArgumentError.new("Missing required argument '#{arg_meta[:name]}'.") if fill_args.empty?
        arg_slots[i] = fill_args.pop
      end
    end
    raise ArgumentError.new("Too many arguments passed in: #{fill_args.reverse.join(" ")}") unless fill_args.empty?
    arg_slots
  end

  protected
    include RHC::Helpers
    include RHC::ContextHelpers

    attr_reader :options, :config

    #
    # The implicit config object provides no defaults.
    #
    def config
      @config ||= begin
        RHC::Config.new
      end
    end

    def application
      #@application ||= ... identify current application or throw,
      #                     indicating one is needed.  Should check
      #                     options (commands which have it as an ARG
      #                     should set it onto options), then check
      #                     current git repo for remote, fail.
    end

    # Return a client object capable of making calls
    # to the OpenShift API that transforms intent
    # and options, to remote calls, and then handle
    # the output (or failures) into exceptions and
    # formatted object output.  Most interactions
    # should be through this call pattern.
    def rest_client
      @rest_client ||= begin
        username = config.username
        unless username
          username = ask "Login to #{openshift_server}: "
          config.config_user(username)
        end
        config.password = config.password || ask("Password: ") { |q| q.echo = '*' }
        RHC::Rest::Client.new(openshift_rest_node, username, config.password, @options.debug)
      end
    end

    def help(*args)
      raise ArgumentError, "Please specify an action to take"
    end

    def debug?
      @options.debug
    end

    class InvalidCommand < StandardError ; end

    # Placeholder hook; subclasses of Base are tracked via method_added.
    def self.inherited(klass)
      unless klass == RHC::Commands::Base
      end
    end

    # Registers every public action method on a subclass with RHC::Commands,
    # combining the DSL metadata accumulated in @options since the last add.
    def self.method_added(method)
      return if self == RHC::Commands::Base
      return if private_method_defined? method
      return if protected_method_defined? method

      method_name = method.to_s == 'run' ? nil : method.to_s.gsub("_", "-")
      name = [method_name]
      name.unshift(self.object_name).compact!
      raise InvalidCommand, "Either object_name must be set or a non default method defined" if name.empty?
      RHC::Commands.add((@options || {}).merge({
        :name => name.join(' '),
        :class => self,
        :method => method,
      }));
      @options = nil
    end

    # Derives the CLI object name from the class name (e.g. FooBar -> foo-bar)
    # unless an explicit value is provided.
    def self.object_name(value=nil)
      @object_name ||= begin
        value ||= if self.name && !self.name.empty?
          self.name.split('::').last
        end
        value.to_s.split(/(?=[A-Z])/).join('-').downcase if value
      end
    end

    def self.description(*args)
      options[:description] = args.join(' ')
    end

    def self.summary(value)
      options[:summary] = value
    end

    def self.syntax(value)
      options[:syntax] = value
    end

    def self.deprecated(msg)
      options[:deprecated] = msg
    end

    def self.suppress_wizard
      @suppress_wizard = true
    end

    def self.suppress_wizard?
      @suppress_wizard
    end

    def self.alias_action(action, options={})
      # if it is a root_command we simply alias it to the passed in action
      # if not we prepend the current resource to the action
      # default == false
      options[:root_command] ||= false
      options[:action] = action
      options[:deprecated] ||= false
      aliases << options
    end

    def self.option(switches, description, options={})
      options_metadata << {:switches => switches,
                           :description => description,
                           :context_helper => options[:context],
                           :required => options[:required],
                           :deprecated => options[:deprecated]
                          }
    end

    def self.argument(name, description, switches, options={})
      arg_type = options[:arg_type]
      # BUG FIX: was `raise ArgumentError("...")`, which invokes a
      # nonexistent ArgumentError() method and would surface as
      # NoMethodError instead of the intended exception.
      raise ArgumentError.new("Only the last argument descriptor for an action can be a list") if arg_type == :list and list_argument_defined?
      list_argument_defined true if arg_type == :list

      option_symbol = Commander::Runner.switch_to_sym(switches.last)
      args_metadata << {:name => name,
                        :description => description,
                        :switches => switches,
                        :option_symbol => option_symbol,
                        :arg_type => arg_type}
    end

    def self.default_action(action)
      options[:default] = action unless action == :help
      define_method(:run) { |*args| send(action, *args) }
    end

    private
      def self.list_argument_defined(bool)
        options[:list_argument_defined] = bool
      end

      def self.list_argument_defined?
        options[:list_argument_defined]
      end

      def self.options_metadata
        options[:options] ||= []
      end

      def self.args_metadata
        options[:args] ||= []
      end

      def self.options
        @options ||= {}
      end

      def self.aliases
        options[:aliases] ||= []
      end
end
#!/bin/bash
# BUG FIX: changed from #!/bin/sh — this script relies on bash-only
# features ([[ ]], arrays, ${var:offset:length} substrings, read -s),
# which break under a POSIX /bin/sh.
#
# This script is developed in accordance with the Synology Download Station Official API
# http://download.synology.com/download/Document/DeveloperGuide/Synology_Download_Station_Web_API.pdf

# MIT License
#
# Copyright (c) 2019 xaozai
# https://github.com/xaozai/ds-cli
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Connection settings for the Download Station instance.
DSADDR='https://127.0.0.1:4001'
DSUSER='admin'

# ANSI color sequences used for status output.
RED='\033[0;31m'
GREEN='\033[0;32m'
GRAY='\033[0;37m'
DEFFONT='\033[0m'

display_usage() {
	echo -e "This script is designed to manage Synology Download Station tasks from the command line.\n"
	echo -e "\e[4mUsage:${DEFFONT} "
	printf "%-25s %s\n" 'ds.sh a Task DPath' 'add a new task'
	printf '\t%-22s \e[1m%s\e[m' ' ' 'Task'
	printf "%s\n" ' - URL | path | magnet'
	printf '\t%-22s \e[1m%s\e[m' ' ' 'DPath'
	printf "%s\n" ' - a path where the task will be downloaded (in the shared folder)'
	printf "%-25s %s\n" 'ds.sh s id' 'show tasks'
	printf "%-25s %s\n" 'ds.sh p id' 'pause tasks'
	printf "%-25s %s\n" 'ds.sh r id' 'resume tasks'
	printf "%-25s %s\n" 'ds.sh d id' 'delete tasks'
	printf '\t%-22s \e[1m%s\e[m' ' ' 'id'
	printf "%s\n" ' - task IDs to be deleted, separated by ","'
	printf "%-25s %s\n" 'ds.sh -h' 'show this help'
	echo -e "\n\e[4mExamples:${DEFFONT}"
	echo -e './ds.sh a "magnet:?xt=urn:btih:5e1...d0c1fb&dn=t.org&tr=udp://t.org:2310&tr=udp://t.org:2310&tr=rt.loc/announce" "video/movie"'
	echo -e './ds.sh a "http://t.org/t/a.t.org/down/12345" "video/movie"'
	echo -e './ds.sh a "/volume1/homes/user/directory/file.torrent" "install/games"'
	echo -e './ds.sh a "/volume1/homes/user/directory/urls.txt" "some/stuff"'
	echo -e './ds.sh s'
	echo -e './ds.sh p dbid_1282'
	echo -e './ds.sh d "dbid_1282,dbid_1283"'
	echo -e "\n${GRAY}To start the DiskStation service (if it is stopped) from the command line you can use: sudo synopkg start DownloadStation${DEFFONT}"
}

# Print usage and exit when -h/--help is the first argument.
checkHelp() {
	if [[ ("$1" == '--help') || ("$1" == '-h') ]]
	then
		display_usage
		exit 0
	fi
}

# Translate a Download Station API error code into a human-readable message.
errDescr() {
	arr[0]='100 Unknown error'
	arr[1]='101 Invalid parameter'
	arr[2]='102 The requested API does not exist'
	arr[3]='103 The requested method does not exist'
	arr[4]='104 The requested version does not support the functionality'
	arr[5]='105 The logged in session does not have permission'
	arr[6]='106 Session timeout'
	arr[7]='107 Session interrupted by duplicate login'
	arr[8]='400 File upload failed'
	arr[9]='401 Max number of tasks reached'
	arr[10]='402 Destination denied'
	arr[11]='403 Destination does not exist'
	arr[12]='404 Invalid task id'
	arr[13]='405 Invalid task action'
	arr[14]='406 No default destination'
	arr[15]='407 Set destination failed'
	arr[16]='408 File does not exist'
	for i in "${arr[@]}"; do
		if [[ ${i:0:3} == "$1" ]]; then
			echo -e ${i:4} ${DEFFONT}
			break
		fi
	done
}

# Percent-encode the characters that would corrupt the POSTed URI.
prepareStr() {
	echo -n "$1" | sed -e 's/%/%25/g' | sed -e 's/+/%2B/g' | sed -e 's/ /%20/g' | sed -e 's/&/%26/g' | sed -e 's/=/%3D/g'
}

# Inspect an API JSON response: exit with the decoded error on failure.
checkRes() {
	if [ "$(echo "$1" | jq -r '.success')" != 'true' ]
	then
		echo -e "\n${RED}Error: $(echo "$1" | jq -r '.error.code')"
		errDescr $(echo "$1" | jq -r '.error.code')
		exit 1
	else
		echo -e "${GREEN} OK${DEFFONT}"
	fi
}

checkAPI() {
	echo -en 'Check API availability...'
	local RES=$(wget --no-check-certificate -qO - "$DSADDR/webapi/query.cgi?api=SYNO.API.Info&version=1&method=query&query=SYNO.API.Auth,SYNO.DownloadStation.Task")
	checkRes "$RES"
}

# Log in and capture the session id into SID.
authenticate() {
	echo -en 'Authenticating...'
	local RES=$(wget --no-check-certificate -qO - "$DSADDR/webapi/auth.cgi?api=SYNO.API.Auth&version=2&method=login&account=$DSUSER&passwd=$1&session=DownloadStation&format=sid")
	checkRes "$RES"
	SID=$(echo "$RES" | jq -r '.data.sid')
}

dslogout() {
	echo -n 'Logging out...'
	# BUG FIX: the URL used "$SYNO", an undefined variable, so the logout
	# request never reached the server; use the configured $DSADDR (and keep
	# --no-check-certificate consistent with the other self-signed calls).
	wget --no-check-certificate -qO - "$DSADDR/webapi/auth.cgi?api=SYNO.API.Auth&version=1&method=logout&session=DownloadStation" > /dev/null 2>&1
	# BUG FIX: was ${DEFCOLOR}, which is undefined; the reset sequence
	# defined above is ${DEFFONT}.
	echo -e "${GREEN} OK${DEFFONT}"
}

if [ $# -lt 1 ]
then
	display_usage
	exit 1
fi

checkHelp "$1"

# Prompt for the password, verify the API, and authenticate.
init() {
	read -s -p "Please enter $DSUSER's password: " DSPASS
	echo ""
	checkAPI
	authenticate "$DSPASS"
}

if [[ $1 == 'a' ]]
then
	init
	echo -en 'Adding the task...'
	# Destinations are relative to the shared folder: strip a leading '/'.
	if [ ${3:0:1} == '/' ]
	then
		DST=${3:1}
	else
		DST=$3
	fi
	if echo "$2" | grep -m 1 -q "magnet:?\|ftp://\|ftps://\|sftp://\|http://\|https://\|thunder://\|flashget://\|qqdl://"
	then
		# URL/magnet: POST the encoded uri directly.
		RES=$(wget --no-check-certificate -qO - --post-data "api=SYNO.DownloadStation.Task&version=1&method=create&uri=$(prepareStr "$2")&destination=$DST&_sid=$SID" $DSADDR"/webapi/DownloadStation/task.cgi")
	else
		# Local file (torrent / url list): multipart upload via curl.
		RES=$(curl -s -k -F"api=SYNO.DownloadStation.Task" -F "version=1" -F "method=create" -F "destination=$DST" -F "_sid=$SID" -F"file=@$2" $DSADDR"/webapi/DownloadStation/task.cgi")
	fi
elif [[ $1 == 'p' ]]
then
	init
	echo -en 'Pausing the task...'
	RES=$(wget --no-check-certificate -qO - --post-data "api=SYNO.DownloadStation.Task&version=1&method=pause&id=$2&_sid=$SID" $DSADDR"/webapi/DownloadStation/task.cgi")
elif [[ $1 == 'r' ]]
then
	init
	echo -en 'Resuming the task...'
	RES=$(wget --no-check-certificate -qO - --post-data "api=SYNO.DownloadStation.Task&version=1&method=resume&id=$2&_sid=$SID" $DSADDR"/webapi/DownloadStation/task.cgi")
elif [[ $1 == 'd' ]]
then
	init
	echo -en 'Deleting the tasks...'
	RES=$(wget --no-check-certificate -qO - --post-data "api=SYNO.DownloadStation.Task&version=1&method=delete&id=$2&_sid=$SID" $DSADDR"/webapi/DownloadStation/task.cgi")
elif [[ $1 == 's' ]]
then
	init
	echo -en 'Getting tasks...'
	RES=$(wget --no-check-certificate -qO - --post-data "api=SYNO.DownloadStation.Task&version=1&method=list&_sid=$SID" $DSADDR"/webapi/DownloadStation/task.cgi")
	checkRes "$RES"
	echo ""
	printf "%-11s | %-12s | %s\n" 'id' 'status' 'title'
	printf "%-11s | %-12s | %s\n" '-----------' '------------' '-------------------------------'
	# Each task row is base64-encoded by jq so it survives word splitting.
	for row in $(echo "${RES}" | jq -r '.data.tasks[] | @base64'); do
		_jq() {
			echo ${row} | base64 --decode | jq -r ${1}
		}
		printf "%-11s | %-12s | %s\n" "$(_jq '.id')" "$(_jq '.status')" "$(_jq '.title')"
	done
	echo ""
else
	display_usage
	exit 1
fi

# 's' already validated its response inline above.
if [[ "$1" != 's' ]]
then
	checkRes "$RES"
fi

dslogout
package com.netflix.dynomitemanager.monitoring;

import redis.clients.jedis.Jedis;

/**
 * Factory abstraction for creating {@link Jedis} client instances, allowing
 * callers (and tests) to control how connections to Redis-compatible
 * endpoints are constructed.
 */
public interface JedisFactory {

    /**
     * Creates a new {@link Jedis} client for the given endpoint.
     *
     * @param hostname host to connect to
     * @param port     port to connect to
     * @return a newly constructed Jedis client
     */
    public Jedis newInstance(String hostname, int port);
}
// Instruments Express' internal Layer constructor so that every layer created
// (route handlers, middleware) records the application-level call site that
// registered it. The recorded data is retrieved later via getLayerData().

const StackTrace = require("stack-trace");
const { dropUntil } = require("./utils");
const fs = require("fs");
const path = require("path");

// Marker symbol set on Layer instances built through the proxied constructor.
const PROXIED = Symbol("PROXIED_LAYER");

function isCallSiteRelevant(callSite) {
  // The call site is relevant if we can pin its location
  // and if it is not inside express
  if (callSite.isNative()) {
    return false;
  }

  const fileName = callSite.getFileName();
  // NOTE(review): this test assumes '/' path separators — presumably it will
  // not exclude express frames on Windows ('node_modules\express\'); confirm.
  return fileName && !/node_modules\/express\//.test(fileName);
}

function getStackTrace() {
  // Removing first three entries because:
  // - first one is the current function
  // - second one is constructTrap
  // - third one is the traps themselves
  return dropUntil(StackTrace.get().slice(3), isCallSiteRelevant);
}

// Shared trap body: constructs the real Layer, captures the creation stack
// trace, and records { trace, path } keyed by the layer instance.
// Note: the destructured `path` below deliberately shadows the `path` module
// inside this function; `options`/`fn` are unused but kept for clarity.
function constructTrap(target, args, newTarget) {
  const layer = Reflect.construct(target, args, newTarget);
  const [path, options, fn] = args; // eslint-disable-line no-unused-vars
  const trace = getStackTrace();
  const layerData = { trace, path };
  layersData.set(layer, layerData);
  layer[PROXIED] = true;
  return layer;
}

function proxyLayer(Layer) {
  // We need to trap both construct and apply because Layer is sometimes called without new
  // Even though calling the original Layer as a function will construct a Layer,
  // the reference to the constructor is not the proxied one so we need to bypass it
  const proxy = new Proxy(Layer, {
    // We avoid doing construct: constructTrap
    // so we can always remove the first three call sites in the stack
    construct(target, args, newTarget) {
      return constructTrap(target, args, newTarget);
    },
    apply(target, context, args) {
      return constructTrap(target, args, proxy);
    }
  });
  return proxy;
}

// Resolves the on-disk path of Express' lib/router/layer.js relative to the
// express package entry point; returns null when the layout is unexpected.
function getLayerModulePath(expressPath) {
  const layerPath = path.join(expressPath, "../lib/router/layer.js");
  if (!fs.existsSync(layerPath)) return null;
  return layerPath;
}

// WeakMap so recorded data does not keep garbage-collected layers alive.
const layersData = new WeakMap();

function getLayerData(layer) {
  return layersData.get(layer);
}

module.exports = { proxyLayer, getLayerData, getLayerModulePath, PROXIED };
// Test harness for E1.sum: runs a fixed set of cases, prints each result
// next to its expected value, and reports a final score.
object E1Main extends App {
  var score = 0   // number of passed tests
  var trials = 0  // number of executed tests

  // Runs E1.sum(n), prints the actual vs expected ("corretto") value,
  // and updates the counters. `r` is the expected result.
  def test(n:Int, r:Int) = {
    trials += 1
    val s:Int = E1.sum(n)
    println("Test "+trials+": " + s + " [corretto: " + r + "]")
    score += (if (s==r) 1 else 0)
  }

  // Expected values are the triangular numbers n*(n+1)/2.
  test(0, 0)
  test(1, 1)
  test(2, 3)
  test(3, 6)
  test(6, 21)

  println("Risultato: " + score + "/" + trials)
}
#!/bin/bash
# Start a k3s agent on a remote machine: ssh in and run the air-gapped
# install script with this machine's server node token.

if [[ $# -le 1 || $# -ge 4 || "$1" = "help" ]]
then
    echo "Script to start k3s-agent on another machine"
    echo "It ssh's into a remote machine and runs 'k3s-install.sh' with this machines Token"
    echo "Usage: ./run_k3s_agent.sh <Remote (Root enabled) Username> <Remote IP Address> <K3S Node Name>"
    echo "e.g ./start_k3s_agent ubuntu 192.168.0.110 clover1"
    echo "e.g. K3S_SERVER=https://192.168.0.63:6443 k3s_agent ubuntu 192.168.0.96 raspi1"
    echo ""
    echo "Note: It is assumed that the k3s-airgap-images-arm64.tar and the k3s-arm64 files have been downloaded from the k3s github releases and are in the home directory named as such."
    # Fixed: 'exit -1' is out of the 0-255 range; use 1 for usage errors.
    exit 1
fi

echo "Starting specified k3a agents"

K3S_TOKEN=$(sudo cat /var/lib/rancher/k3s/server/node-token)
K3S_SERVER="${K3S_SERVER:-https://192.168.0.63:6443}"
INSTALL_K3S_EXEC="--docker"

echo "K3S Token is $K3S_TOKEN"
echo "K3S Server is $K3S_SERVER"
echo "Connecting to $2 with user $1, starting k3s agent node named $3"

# NOTE(review): the help text mentions 'k3s-install.sh' but the remote command
# runs './k3s_install.sh' — confirm which filename is correct on the target.
ssh "$1@$2" " \
    k3s-agent-uninstall.sh;\
    sudo mkdir -p /var/lib/rancher/k3s/agent/images/; \
    sudo cp k3s-airgap-images-arm64.tar /var/lib/rancher/k3s/agent/images/; \
    sudo chmod +x k3s-arm64; \
    sudo cp k3s-arm64 /usr/local/bin/k3s; \
    INSTALL_K3S_SKIP_DOWNLOAD=true K3S_NODE_NAME=$3 K3S_URL=${K3S_SERVER} K3S_TOKEN=${K3S_TOKEN} INSTALL_K3S_EXEC=${INSTALL_K3S_EXEC} ./k3s_install.sh;"

# Fixed: the original ended with an unconditional 'exit -1', so the script
# reported failure even when the remote install succeeded. Propagate ssh's
# exit status instead.
exit $?
<gh_stars>10-100 // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See License.txt in the project root. package com.microsoft.alm.plugin.external.commands; public class ToolEulaNotAcceptedException extends RuntimeException { public ToolEulaNotAcceptedException(Throwable throwable) { super("EULA not accepted", throwable); } }
<filename>src/main/java/fr/clementgre/pdf4teachers/components/SameSizeTitledPaneContainer.java<gh_stars>0 package fr.clementgre.pdf4teachers.components; import javafx.collections.ListChangeListener; import javafx.scene.Node; import javafx.scene.control.TitledPane; import javafx.scene.layout.Priority; import javafx.scene.layout.VBox; import java.util.List; public class SameSizeTitledPaneContainer extends VBox{ public SameSizeTitledPaneContainer(){ setMaxHeight(Double.MAX_VALUE); VBox.setVgrow(this, Priority.ALWAYS); heightProperty().addListener((observable, oldValue, newValue) -> updateTitledPanesSizes()); getChildren().addListener((ListChangeListener<Node>) c -> { while(c.next()){ for(Node n : c.getAddedSubList()){ if(n instanceof TitledPane t){ t.expandedProperty().addListener((observable, oldValue, newValue) -> updateTitledPanesSizes()); } } } }); } private void updateTitledPanesSizes(){ long opened = getListPanes().stream().filter(TitledPane::isExpanded).count(); double eachOpenedSize = (getHeight() - (4-opened)*28) / opened; for(TitledPane listPane : getListPanes()){ if(listPane.isExpanded()){ listPane.setPrefHeight(eachOpenedSize); }else{ listPane.setPrefHeight(26); } } } private List<TitledPane> getListPanes(){ return getChildren().stream().filter(o -> o instanceof TitledPane).map(o -> (TitledPane) o).toList(); } }
/** * @name: PyHouse/src/Modules/Web/js/mainPage.js * @author: <NAME> * @contact: <EMAIL> * @copyright: (c) 2012-2017 by <NAME> * @license: MIT License * @note: Created about 2012 * @summary: * */ // import Divmod // import Nevow.Athena // note this is 'PageWidget' Nevow.Athena.PageWidget.subclass(mainPage, 'mainPage').methods( function showDisconnectDialog(self) { Divmod.msg("Connection lost, dialog or status display implementation pending"); } ); // ### END DBK
#!/usr/bin/env bash
# Launches the locally-built Electron app for development, installing
# dependencies, syncing built-in extensions, and compiling as needed.

# Resolve the repository root; macOS lacks 'readlink -f', so emulate realpath.
if [[ "$OSTYPE" == "darwin"* ]]; then
	realpath() { [[ $1 = /* ]] && echo "$1" || echo "$PWD/${1#./}"; }
	ROOT=$(dirname $(dirname $(realpath "$0")))
else
	ROOT=$(dirname $(dirname $(readlink -f $0)))
fi

function code() {
	cd $ROOT

	# Fixed vandalized tokens: '$OSTvcxzzzzzvYPE' restored to '$OSTYPE' and
	# the corrupted 'elsefcedx…' keyword restored to 'else'.
	if [[ "$OSTYPE" == "darwin"* ]]; then
		NAME=`node -p "require('./product.json').nameLong"`
		CODE="./.build/electron/$NAME.app/Contents/MacOS/Electron"
	else
		NAME=`node -p "require('./product.json').applicationName"`
		CODE=".build/electron/$NAME"
	fi

	# Node modules
	test -d node_modules || yarn

	# Get electron
	yarn electron

	# Manage built-in extensions
	if [[ "$1" == "--builtin" ]]; then
		exec "$CODE" build/builtin
		return
	fi

	# Sync built-in extensions
	node build/lib/builtInExtensions.js

	# Build
	test -d out || yarn compile

	ELECTRON_RUN_AS_NODE=1 \
	NODE_ENV=development \
	VSCODE_DEV=1 \
	ELECTRON_ENABLE_LOGGING=1 \
	ELECTRON_ENABLE_STACK_DUMPING=1 \
	"$CODE" --inspect=5874 "$ROOT/out/cli.js" . "$@"
}

code "$@"
# source_once MODULE_NAME
#
# Sources "<dir>/<MODULE_NAME>.functions.sh" from the first matching directory
# on $PATH, at most once per shell session. The per-module result is memoized
# in a dynamically named variable, and re-entrant calls (cycles) are detected.
function source_once() { # module_name

    # Reject empty names, names with spaces, and leading/trailing dots.
    case "${1}" in
    ''|[.\ ]*|*[.\ ])
        echo 1>&2 "Invalid module name: '${1}'."
        return 1
        ;;
    esac

    # Name of the variable that indicates the source_once state of the specified module.
    #
    # Possible values:
    #
    # TBD)
    #     source operation has started, but has not yet finished
    #
    # '')
    #     source operation has not occurred (via source_once)
    #
    # *)
    #     source operation has finished; value is return code
    #
    # Common suffixes are stripped and remaining characters sanitized so the
    # module name yields a valid shell identifier.
    local __source_once_module_rc_vn="source_once_module_rc__${1:?}"
    __source_once_module_rc_vn="${__source_once_module_rc_vn%.*sh}"
    __source_once_module_rc_vn="${__source_once_module_rc_vn%.functions}"
    __source_once_module_rc_vn="${__source_once_module_rc_vn//[^_0-9a-zA-Z]/_}"

    # Cycle detection: a TBD marker means we are already mid-source of this module.
    # NOTE(review): __source_once_module_pn is not set yet at this point in the
    # current call, and '${...:?}' aborts on unset — confirm the cycle message
    # relies on the variable leaking from the outer (cycling) call's scope.
    if eval "[ \"\${${__source_once_module_rc_vn:?}}\" = TBD ]" ; then
        echo 1>&2 "Cycle detected during source_once; involves: '${__source_once_module_pn:?}'."
        return 1
    fi

    if eval "[ \"\${${__source_once_module_rc_vn:?}}\" != '' ]" ; then
        return 0 # already sourced
    fi

    # Walk the colon-separated $PATH entries by hand.
    local __source_once_module_pn_parent_candidates_remaining="${PATH}"

    while [ -n "${__source_once_module_pn_parent_candidates_remaining}" ] ; do

        local __source_once_module_pn_parent="${__source_once_module_pn_parent_candidates_remaining%%:*}"

        __source_once_module_pn_parent_candidates_remaining="${__source_once_module_pn_parent_candidates_remaining#*:}"
        # If stripping the first entry changed nothing, this was the last entry.
        if [ "${__source_once_module_pn_parent}" = "${__source_once_module_pn_parent_candidates_remaining}" ] ; then
            __source_once_module_pn_parent_candidates_remaining=
        fi

        if [ "${__source_once_module_pn_parent}" = '' ] ; then
            continue
        fi

        local __source_once_module_pn="${__source_once_module_pn_parent:?}/${1}".functions.sh

        if [ ! -e "${__source_once_module_pn}" ] ; then
            continue
        fi

        if [ -d "${__source_once_module_pn}" ] ; then
            echo 1>&2 "Module name maps to a directory: '${__source_once_module_pn:?}'."
            return 1
        fi

        # Mark in-progress, then source with an EXIT trap so an early exit
        # inside the module is at least reported.
        eval "${__source_once_module_rc_vn:?}=TBD"

        {
            trap "echo \"Early exit while sourcing file '${__source_once_module_pn:?}'; exit code: \$?.\"" EXIT

            # Record the module's exit status in the memo variable either way.
            if source "${__source_once_module_pn:?}" ; then
                eval "${__source_once_module_rc_vn:?}=\$?"
            else
                eval "${__source_once_module_rc_vn:?}=\$?"
            fi

            trap '' EXIT
        }

        if eval "[ \"\${${__source_once_module_rc_vn:?}}\" != 0 ]" ; then
            eval "echo 1>&2 \"Error while sourcing file '\${__source_once_module_pn:?}'; returns \${${__source_once_module_rc_vn:?}}.\""
            eval "return \"\${${__source_once_module_rc_vn:?}}\""
        fi

        return 0
    done

    echo 1>&2 "Cannot locate source_once module: '${1}'."
    return 1
}
#^-- FIXME: srogers: provide documentation for function source_once
#^-- FIXME: srogers: implement unit tests for function source_once
#include "z3D/z3D.h"
#include "settings.h"
#include "chest_minigame.h"

/* Hard-coded addresses of the vanilla actor init functions in the game
 * binary; the replacement hooks below wrap them. */
#define EnTakaraMan_Init_addr 0x26DEB8
#define EnTakaraMan_Init ((ActorFunc)EnTakaraMan_Init_addr)

#define EnChanger_Init_addr 0x20EC50
#define EnChanger_Init ((ActorFunc)EnChanger_Init_addr)

/* In-binary table of get-item ids handed out by the minigame's chests. */
#define sLoserGetItemIds ((s32*)0x521774)

/* Replacement init for the Treasure Chest Shop owner: when the chest
 * minigame is shuffled the NPC is removed entirely, otherwise the vanilla
 * init runs. */
void EnTakaraMan_rInit(Actor* thisx, GlobalContext* globalCtx) {
    if(gSettingsContext.shuffleChestMinigame) {
        Actor_Kill(thisx);
    } else {
        EnTakaraMan_Init(thisx, globalCtx);
    }
}

/* Replacement init for the minigame chest-content controller. */
void EnChanger_rInit(Actor* thisx, GlobalContext* globalCtx) {
    if(gSettingsContext.shuffleChestMinigame) {
        // Set loser chests to be Ice Traps
        sLoserGetItemIds[0] = 0;
        sLoserGetItemIds[1] = 0x7C;
        sLoserGetItemIds[2] = 0x7C;
        sLoserGetItemIds[3] = 0x7C;
        sLoserGetItemIds[4] = 0x7C;
        sLoserGetItemIds[5] = 0x7C;
    }
    EnChanger_Init(thisx, globalCtx);

    // Prevent the actor from setting chest flags, but not for the
    // final room because it sets the reward itemGetInf flag
    if(gSettingsContext.shuffleChestMinigame && thisx->room != 6) {
        Actor_Kill(thisx);
    }
}
#!/usr/bin/env bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

# Run Go benchmarks for the packages passed as arguments.
#   KUBE_COVER=""        - disable coverage (it would skew benchmark timings)
#   KUBE_RACE=" "        - presumably a non-empty blank to disable the race
#                          detector without triggering the Makefile default;
#                          confirm against the Makefile.
#   -test.run='^X'       - matches no test names, so only benchmarks execute
#   -bench=. -benchmem   - run every benchmark and report allocations
make test \
    WHAT="$*" \
    KUBE_COVER="" \
    KUBE_RACE=" " \
    KUBE_TEST_ARGS="-- -test.run='^X' -benchtime=1s -bench=. -benchmem" \
// Sample compiled output: plain ES5 variable declarations; the commented-out
// statement below is intentionally invalid TypeScript kept for reference.
"use strict";

var hoge = 1000;
var fuga = 5000;

/*
const piyo: osatu = 100; //Error Code.
*/
#!/usr/bin/env python
#
# Copyright 2007 <NAME>.
#
#
#                         All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of Doug
# Hellmann not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# <NAME> DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL <NAME> BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#

"""Example use of linecache module.

See http://blog.doughellmann.com/2007/04/pymotw-linecache.html
"""

__module_id__ = '$Id$'
#end_pymotw_header

# NOTE(review): this example uses Python 2 print statements throughout and
# will not run unmodified under Python 3.

import linecache
import os
import tempfile

# Sample text; line 6 is intentionally blank (see the BLANK example below).
lorem = '''Lorem ipsum dolor sit amet, consectetuer adipiscing elit.
Vivamus eget elit. In posuere mi non risus. Mauris id quam posuere
lectus sollicitudin varius. Praesent at mi. Nunc eu velit. Sed augue
massa, fermentum id, nonummy a, nonummy sit amet, ligula. Curabitur
eros pede, egestas at, ultricies ac, pellentesque eu, tellus.

Sed sed odio sed mi luctus mollis. Integer et nulla ac augue
convallis accumsan. Ut felis. Donec lectus sapien, elementum nec,
condimentum ac, interdum non, tellus. Aenean viverra, mauris
vehicula semper porttitor, ipsum odio consectetuer lorem, ac
imperdiet eros odio a sapien. Nulla mauris tellus, aliquam non,
egestas a, nonummy et, erat.
Vivamus sagittis porttitor eros.'''

# Create a temporary text file with some text in it
fd, temp_file_name = tempfile.mkstemp()
os.close(fd)
f = open(temp_file_name, 'wt')
try:
    f.write(lorem)
finally:
    f.close()

# Pick out the same line from source and cache.
# (Notice that linecache counts from 1)
print 'SOURCE: ', lorem.split('\n')[4]
print 'CACHE : ', linecache.getline(temp_file_name, 5).rstrip()

# Blank lines include the newline
print '\nBLANK : %r' % linecache.getline(temp_file_name, 6)

# The cache always returns a string, and uses
# an empty string to indicate a line which does
# not exist.
not_there = linecache.getline(temp_file_name, 500)
print '\nNOT THERE: %r includes %d characters' % (not_there, len(not_there))

# Errors are even hidden if linecache cannot find the file
no_such_file = linecache.getline('this_file_does_not_exist.txt', 1)
print '\nNO FILE: ', no_such_file

# Look for the linecache module, using
# the built in sys.path search.
module_line = linecache.getline('linecache.py', 3)
print '\nMODULE : ', module_line

# Clean up
os.unlink(temp_file_name)
<nav> <ul> <li><a href="#">Home</a></li> <li><a href="#">About</a></li> <li><a href="#">Gallery</a></li> <li><a href="#">Blog</a></li> <li><a href="#">Contact</a></li> </ul> </nav>
package controller

import (
	"github.com/gin-gonic/gin"
	"github.com/keptn/keptn/shipyard-controller/handler"
)

// LogController wires HTTP routes for log entries to an ILogHandler.
type LogController struct {
	LogHandler handler.ILogHandler
}

// NewLogController returns a LogController backed by the given handler.
func NewLogController(logHandler handler.ILogHandler) *LogController {
	return &LogController{LogHandler: logHandler}
}

// Inject registers the /log endpoints (GET, POST, DELETE) on the given
// router group, delegating each to the configured handler.
func (controller LogController) Inject(apiGroup *gin.RouterGroup) {
	apiGroup.GET("/log", controller.LogHandler.GetLogEntries)
	apiGroup.POST("/log", controller.LogHandler.CreateLogEntries)
	apiGroup.DELETE("/log", controller.LogHandler.DeleteLogEntries)
}
#!/bin/sh
# Pull catalog message files from Transifex

[ -n "$GITHUB_ACTIONS" ] && set -x

set -e

# Allow language being passed as 1st argument, defaults to pt_BR
LANGUAGE=${1:-pt_BR}

# Fixed: quote command substitutions and path variables so the script works
# when checked out under a directory containing spaces.
ROOTDIR="$(dirname "$0")/.."
cd "${ROOTDIR}"
if ! test -f cpython/Doc/conf.py; then
    echo Unable to find proper CPython Doc folder
    exit 1
fi

# Create POT Files
cd cpython/Doc
sphinx-build -E -b gettext -D gettext_compact=0 -d build/.doctrees . locales/pot

# Update CPython's .tx/config
cd locales
sphinx-intl create-txconfig
sphinx-intl update-txconfig-resources -p pot -d . --transifex-project-name python-newest

# Update the translation project's .tx/config
cd ../../..  # back to $ROOTDIR
mkdir -p .tx
sed cpython/Doc/locales/.tx/config \
    -e '/^source_file/d' \
    -e 's|<lang>/LC_MESSAGES/||' \
    -e "s|^file_filter|trans.${LANGUAGE}|" \
    > .tx/config

tx pull -l "${LANGUAGE}" --use-git-timestamps --parallel
import { FileSystemNode, ISharpGatsbyImageArgs, Reporter, GatsbyCache } from 'your-module'; // Import necessary types from your module

/**
 * Placeholder implementation that simulates writing a processed image to the
 * file system.
 *
 * @param file       source file node to process
 * @param args       image-processing options (currently unused by the stub)
 * @param pathPrefix directory prefix for the output path
 * @param reporter   progress/error reporter (currently unused by the stub)
 * @param cache      cache for intermediate data (currently unused by the stub)
 * @param filename   output file name, used in log messages
 * @throws rethrows any error raised while "writing" the image
 */
export async function writeImage(
  file: FileSystemNode,
  args: ISharpGatsbyImageArgs,
  pathPrefix: string,
  reporter: Reporter,
  cache: GatsbyCache,
  filename: string
): Promise<void> {
  try {
    // Fixed: the log messages contained literal '$(unknown)' artifacts;
    // interpolate the filename parameter instead.
    console.log(`Writing image ${filename} to file system at ${pathPrefix}`);

    // Simulate asynchronous write work (real implementation would use the
    // file system / an image-processing library with `file` and `args`).
    await new Promise((resolve) => setTimeout(resolve, 1000));

    console.log(`Image ${filename} successfully written to file system`);
  } catch (error) {
    // Report and propagate failures so callers can handle them.
    console.error(`Error writing image ${filename}: ${error}`);
    throw error; // Rethrow the error to propagate it
  }
}
#!/usr/bin/env bash
# Builds and runs the flatbench benchmark against flatc-generated C++ code,
# once with debug flags and once optimized.

set -e

# Work from the repository root (three levels above this script).
cd `dirname $0`/../../..
ROOT=`pwd`
TMP=build/tmp/test/benchmark/benchflatc
INC=$ROOT/include

mkdir -p ${TMP}
rm -rf ${TMP}/*

# Allow overriding the compiler via $CXX; default to the system c++.
CXX=${CXX:-c++}

cp -r test/benchmark/benchmain/* ${TMP}
cp -r test/benchmark/benchflatc/* ${TMP}
#include include at root as it may conflict
cp -r ${ROOT}/include/flatcc/support ${TMP}

cd ${TMP}
$CXX -g -std=c++11 benchflatc.cpp -o benchflatc_d -I $INC
$CXX -O3 -DNDEBUG -std=c++11 benchflatc.cpp -o benchflatc -I $INC

echo "running flatbench flatc for C++ (debug)"
./benchflatc_d
echo "running flatbench flatc for C++ (optimized)"
./benchflatc
# Author:: <NAME>
# Copyright:: Copyright (c) 2018 <NAME>
# frozen_string_literal: true
#--

require 'logger'
require 'json'
require 'set'
require 'uri'
require 'websocket-eventmachine-client'

module EventMachine
  # Robust EventMachine-based ActionCable client.
  module ActionCable
    # ActionCable client using EventMachine sockets. ActionCable is a WebSocket wrapper protocol used in Rails and
    # adding a bit more structure around message passing, providing multiplexing (use of channels) and utilizing
    # keep-alive messages.

    # State machine is simplified and will get confused in some edge cases around
    # subscribing/unsubscribing/closing/reconnecting in rapid succession. So don't subscribe while closing...
    class Client
      # Wire-protocol 'command'/'type' values.
      module Command
        CONFIRM_SUBSCRIPTION = 'confirm_subscription'
        MESSAGE = 'message'
        PING = 'ping'
        SUBSCRIBE = 'subscribe'
        UNSUBSCRIBE = 'unsubscribe'
        WELCOME = 'welcome'
      end

      module ConnectionState
        DISCONNECTED = 0
        CONNECTING = 1
        CONNECTED = 2
        WELCOMED = 3
        DISCONNECTING = 4
      end

      module SubscriptionState
        UNSUBSCRIBED = 0
        SUBSCRIBING = 1
        SUBSCRIBED = 2
        UNSUBSCRIBING = 3
      end

      # Assuming *authority* (host at least) is provided after this.
      URL_SCHEME_REGEX = %r{\A(?<scheme>[a-z][a-z0-9+\-.]*)://}i
      SECURE_URL_SCHEMES = Set['ftps', 'https', 'hxxps', 'rtsps', 'sftp', 'wss'].freeze
      HTTP_SCHEMES = Set['https', 'http'].freeze
      WEBSOCKET_SCHEMES = Set['wss', 'ws'].freeze

      # @!group Class members

      @logger = ::Logger.new STDOUT
      @logger.level = ::Logger::ERROR

      class << self
        attr_accessor :logger
      end

      # Convert provided URL to ws/wss protocol.
      def self.normalize_websocket_url(url)
        url_match = URL_SCHEME_REGEX.match url
        if url_match.nil?
          # ASSUMPTION: Not fully qualified.
          url = 'ws://' + url
        else
          scheme = url_match[1].downcase
          if HTTP_SCHEMES.include?(scheme)
            # 'http…' -> 'ws…' (keeps the trailing 's' of https, yielding wss).
            url = 'ws' + url[4..-1]
          elsif !WEBSOCKET_SCHEMES.include?(scheme)
            url = 'ws' + url[scheme.length..-1]
          end
        end
        return url
      end

      # Initialize a connection, but don't connect.
      # @param [String] uri URI to connect client to.
      # @param [Hash] http_headers HTTP headers to supply during connection attempts. If nil, 'origin' will be
      #   defaulted based on the uri.
      # @param [Reconnect?] reconnect Reconnection algorithm.
      # @param [Number?] welcome_timeout Timeout in seconds between attempted connects and welcome being received
      # @return [Client] self
      def initialize(uri, http_headers: nil, reconnect: nil, welcome_timeout: nil)
        @_channels = [] # each entry: { id: identifier-hash, txt: JSON form, state: SubscriptionState }
        @_connection = nil
        @_explicit_close = false
        @_http_headers = http_headers
        @_on_connected_block = nil
        @_on_connect_failed_block = nil
        @_on_custom_message_received_block = nil
        @_on_disconnected_block = nil
        @_on_pinged_block = nil
        @_on_subscribed_block = nil # fixed: was assigned twice in the original
        @_on_welcomed_block = nil
        @_reconnect = reconnect
        @_reconnect.client = self if !@_reconnect.nil?
        @_uri = self.class.normalize_websocket_url uri
        @_state = ConnectionState::DISCONNECTED
        @_state_timer = nil
        @_welcome_timeout = welcome_timeout

        u = URI.parse @_uri
        if @_http_headers.nil?
          # Assumptions about origin in the default case (if it's using secure stuff, assume secure ws).
          is_secure = SECURE_URL_SCHEMES.include? u.scheme.downcase
          if is_secure
            port_part = (u.port.nil? || (443 == u.port)) ? '' : ":#{u.port}"
          else
            port_part = (u.port.nil? || (80 == u.port)) ? '' : ":#{u.port}"
          end
          origin = "#{(is_secure ? 'https' : 'http')}://#{u.host}#{port_part}"
          @_http_headers = {origin: origin}
        end
      end

      # @return [Integer, nil] the SubscriptionState of the given channel, or nil when unknown.
      def channel_state(channel)
        channel_key = make_channel_key channel
        return nil if channel_key.nil?
        ch = @_channels.find { |el| (el[:id] == channel_key) }
        return ch&.dig(:state)
      end

      # Explicitly close the connection (suppresses the reconnect algorithm).
      def close
        # Fixed: the original compared @_connection (the socket object) against
        # ConnectionState values, so the guard was always true.
        if ![ConnectionState::DISCONNECTING, ConnectionState::DISCONNECTED].include?(@_state)
          @_explicit_close = true
          @_state = ConnectionState::DISCONNECTING
          @_connection.close
        end
        return self
      end

      def connected?
        return [ConnectionState::CONNECTED, ConnectionState::WELCOMED].include?(@_state)
      end

      # Connect to the server and proceed with automatic subscription to all channels, after the welcome.
      # May be called only while the EventMachine/reactor run loop is running (EM::reactor_running? == true).
      # @return [Client] self
      def connect
        if @_connection.nil? || [ConnectionState::DISCONNECTING, ConnectionState::DISCONNECTED].include?(@_state)
          # Somewhere down the chain 'headers' is being modified in place apparently (bug?), so dup it.
          @_state = ConnectionState::CONNECTING
          start_welcome_timer @_welcome_timeout
          @_connection = WebSocket::EventMachine::Client.connect(uri: @_uri, headers: @_http_headers&.dup)
          @_connection.onopen do
            logger.debug "#{self} opened."
            transition_state ConnectionState::CONNECTED, @_on_connected_block, :on_open
          end
          @_connection.onclose { handle_socket_terminated 'closed' }
          @_connection.onerror { handle_socket_terminated 'closed (error)' }
          @_connection.onmessage { |message, _type| on_received message }
        else
          logger.debug "#{self} connect() ignored in current state #{@_state}."
        end
        return self
      end

      def fully_connected_and_subscribed?
        # Fixed: the original called Integer#include? (WELCOMED is 0..4), which
        # raises NoMethodError; compare states directly instead.
        return (ConnectionState::WELCOMED == @_state) &&
               !@_channels.any? { |ch| ch[:state] != SubscriptionState::SUBSCRIBED }
      end

      # Provide callback for when TCP/SSL connection is completed.
      def on_connected(&block)
        @_on_connected_block = block
      end

      def on_connect_failed(&block)
        @_on_connect_failed_block = block
      end

      # Provide callback for when messages are received other than 'ping', 'welcome', and 'confirm_subscription'.
      # block(message) is called. *message* is a hash if JSON decoded properly, otherwise string.
      def on_custom_message_received(&block)
        @_on_custom_message_received_block = block
      end

      # Provide callback for when TCP/SSL connection (socket) is closed.
      def on_disconnected(&block)
        @_on_disconnected_block = block
      end

      # Provide callback for when 'ping' message is received.
      # block(message) is called. *message* is a hash.
      def on_pinged(&block)
        @_on_pinged_block = block
      end

      # Provide callback for when 'confirm_subscription' message is received.
      # block(identifier) is called. *identifier* is a hash (the subscription identifier).
      def on_subscribed(&block)
        @_on_subscribed_block = block
      end

      # Provide callback for when 'welcome' message is received.
      # block(message) is called. *message* is a hash.
      def on_welcomed(&block)
        @_on_welcomed_block = block
      end

      def logger
        return EventMachine::ActionCable::Client.logger
      end

      # This DOES NOT queue messages.
      # Message should likely include 'action' attribute.
      # @param [Hash] message Object sent to the ActionCable server. This should include 'action' attribute which is
      #   the METHOD called on the server-side channel.
      # @param [Hash or String] channel
      # @return [Integer] Number of messages sent (0 if no channels in the SUBSCRIBED state).
      def send_message(message, channel: nil)
        messages_sent = 0
        channel_key = make_channel_key channel
        @_channels.each do |ch|
          if (channel_key.nil? || (ch[:id] == channel_key)) && (SubscriptionState::SUBSCRIBED == ch[:state])
            messages_sent += 1
            @_connection.send({command: Command::MESSAGE, identifier: ch[:txt], data: message.to_json}.to_json)
          end
        end
        logger.debug "#{self} send_message() ignored. No matching subscribed channel (#{channel_key})." \
          if messages_sent.zero?
        return messages_sent
      end

      def state
        return @_state
      end

      # Subscribe to a channel (or indicate the desire to do so when the connection is established).
      # A string can be used (ie 'TestChannel') or a Hash can be provided (ie { channel: 'TestChannel', myid: 123 }).
      # If not in a WELCOMED state, this is a channel that will be subscribed to once welcomed.
      # @param [String or Hash] channel Channel name, or hash containing 'channel' attribute and other keys that will
      #   be added to the subscription request (and all other requests on the channel). Eg - { channel: 'ChatChannel',
      #   id: 'Bob' }
      # @return [Hash] identifier for the channel
      def subscribe_to_channel(channel)
        channel_key = make_channel_key channel
        if !@_channels.any? { |ch| ch[:id] == channel_key }
          rec = {id: channel_key, state: SubscriptionState::UNSUBSCRIBED, txt: channel_key.to_json}
          @_channels << rec
          if ConnectionState::WELCOMED == @_state
            rec[:state] = SubscriptionState::SUBSCRIBING
            @_connection.send({command: Command::SUBSCRIBE, identifier: rec[:txt]}.to_json)
          end
        else
          # Fixed: the duplicate branch used to overwrite channel_key with the
          # internal channel record, so the method returned the record instead
          # of the identifier hash.
          logger.debug "#{self} subscribe_to_channel() ignored. Duplicate subscription to channel (#{channel_key})."
        end
        return channel_key.dup
      end

      def to_s
        return "EventMachine::ActionCable::Client(#{@_uri})"
      end

      def unsubscribe_from_channel(channel)
        channel = make_channel_key channel
        @_channels.each do |ch|
          if (ch[:id] == channel) && [SubscriptionState::SUBSCRIBING, SubscriptionState::SUBSCRIBED].include?(ch[:state])
            @_connection.send({command: Command::UNSUBSCRIBE, identifier: ch[:txt]}.to_json)
          end
        end
        @_channels.delete_if { |ch| ch[:id] == channel }
        return self
      end

      def welcome_timeout
        return @_welcome_timeout
      end

      def welcome_timeout=(val)
        val = nil if !val.nil? && val <= 0
        if !val.nil?
          # Fixed: the original referenced the nonexistent @_start_timer here.
          # NOTE(review): the adjusted-restart arithmetic below is from the
          # original; confirm the intended semantics of shrinking the timeout.
          if !@_state_timer.nil? && (val != @_welcome_timeout)
            start_welcome_timer((val <= @_welcome_timeout) ? 0 : (@_welcome_timeout - val))
          end
          @_welcome_timeout = val
        else
          if !@_state_timer.nil?
            @_state_timer.cancel
            @_state_timer = nil
          end
          @_welcome_timeout = nil
        end
        return @_welcome_timeout
      end

      private

      # Shared onclose/onerror handling: resets channel states and notifies the
      # appropriate callback depending on whether the connect ever completed.
      def handle_socket_terminated(reason)
        f2c = [ConnectionState::CONNECTING, ConnectionState::CONNECTED].include? @_state
        logger.debug "#{self} #{f2c ? 'failed to connect' : reason}."
        @_channels.each { |channel| channel[:state] = SubscriptionState::UNSUBSCRIBED }
        rm = !@_explicit_close ? :on_close : nil
        @_explicit_close = false
        transition_state ConnectionState::DISCONNECTED, f2c ? @_on_connect_failed_block : @_on_disconnected_block, rm
      end

      # From user-specified (or network returned) channel identifying information - create a canonical form that can
      # be matched against @_channels->:id
      # @param [String or Hash] channel Channel name or identifying hash.
      # @return [Hash] Canonical identifier for the channel
      def make_channel_key(channel)
        return nil if channel.nil?
        if channel.is_a?(String)
          channel_key = {'channel' => channel}
        elsif channel.is_a?(Hash)
          channel_key = sort_hash(channel)
        else
          channel_key = channel.dup
        end
        return channel_key
      end

      # Dispatch one raw wire message: handles protocol messages and forwards
      # everything else to the custom-message callback.
      def on_received(message)
        return if message.nil? || message.empty?
        logger.debug "#{self} received message #{message}."
        begin
          json = JSON.parse message
        rescue StandardError => e
          logger.error "#{self} Received MALFORMED message. #{e}. #{message}"
          !@_on_custom_message_received_block.nil? && @_on_custom_message_received_block.call(message)
          return
        end

        case json['type']
        when Command::CONFIRM_SUBSCRIPTION
          begin
            chid = JSON.parse json['identifier']
          rescue StandardError => e
            logger.error "#{self} Received MALFORMED confirm_subscription identifier. #{e}. #{json['identifier']}"
            # Fixed: originally fell through with chid unset and emitted a
            # misleading 'unrecognized channel' warning.
            return
          end
          found = nil
          @_channels.each do |ch|
            next unless (ch[:id] == chid) && (SubscriptionState::SUBSCRIBING == ch[:state])
            logger.debug "#{self} subscription to #{chid} confirmed."
            ch[:state] = SubscriptionState::SUBSCRIBED
            found = chid
            # Fixed: the callback now fires only for the matching channel
            # (originally it fired once per known channel, matching or not).
            safe_callback @_on_subscribed_block, chid
          end
          logger.warn "#{self} received subscription confirmation #{chid} to unrecognized channel." if found.nil?
        when Command::PING
          safe_callback @_on_pinged_block, json
        when Command::WELCOME
          if ConnectionState::CONNECTED == @_state
            logger.debug "#{self} welcome received. Autosubscribing..."
            @_state = ConnectionState::WELCOMED
            if !@_state_timer.nil?
              @_state_timer.cancel
              @_state_timer = nil
            end
            @_channels.each do |channel|
              channel[:state] = SubscriptionState::SUBSCRIBING
              @_connection.send({command: Command::SUBSCRIBE, identifier: channel[:txt]}.to_json)
            end
          end
          safe_callback @_on_welcomed_block, json
        else
          logger.debug "#{self} custom message received. #{message}"
          safe_callback @_on_custom_message_received_block, json
        end
      end

      # Invoke a user callback, logging (never propagating) its exceptions.
      def safe_callback(callback, *args)
        begin
          !callback.nil? && callback.call(*args)
        rescue StandardError => e
          logger.error "#{self} callback exception. #{e.message}\n#{e.backtrace}"
        end
        return self
      end

      # Recursively produce a key-sorted, string-keyed copy of a hash so
      # identifiers compare canonically.
      def sort_hash(h)
        {}.tap do |h2|
          h.sort.each do |k, v|
            h2[k.to_s] = v.is_a?(Hash) ? sort_hash(v) : v
          end
        end
      end

      # (Re)start the welcome timer; when it fires the socket is closed.
      def start_welcome_timer(secs)
        # Fixed: the original checked the nonexistent @state_timer, so a
        # pending timer was never cancelled before being replaced.
        @_state_timer.cancel if !@_state_timer.nil?
        if !secs.nil?
          @_state_timer = EventMachine::Timer.new(secs) { @_connection.close }
        end
        return self
      end

      # Move to new_state, fire the callback, then kick the reconnect strategy.
      def transition_state(new_state, callback, reconnect_method)
        case new_state
        when ConnectionState::DISCONNECTED
          if !@_state_timer.nil?
            @_state_timer.cancel
            @_state_timer = nil
          end
        end
        @_state = new_state
        safe_callback callback
        @_reconnect.send(reconnect_method) if !@_reconnect.nil? && !reconnect_method.nil?
        return self
      end
    end
  end
end
import {getRoomGroupings, getDefaultRoomFlags} from '../../../state_management/selectors/rooms'

// getRoomGroupings: buckets a flat room list by its `group` field (keeping
// first-seen order) and strips the `group` key from each room entry.
test('Return a grouped version of the rooms list', () => {
    const rooms = [{
        _id: 'abcd1234', name: 'Safari Tent 1', group: 'Safari Tents',
        shared: false, maxPax: 2, basePrice: 420, personPrice: 0, showBeds: false
    }, {
        _id: '1234abcd', name: 'Safari Tent 2', group: 'Safari Tents',
        shared: false, maxPax: 2, basePrice: 420, personPrice: 0, showBeds: false
    }, {
        _id: '6789wxyz', name: 'Dove Left', group: 'Dove',
        shared: false, maxPax: 2, basePrice: 200, personPrice: 100, showBeds: true
    }, {
        _id: 'wxyz6789', name: 'Camping', group: 'ungrouped',
        shared: true, maxPax: 80, basePrice: 0, personPrice: 240, showBeds: true
    }]

    const result = getRoomGroupings(rooms)

    expect(result).toEqual([{
        name: 'Safari Tents',
        rooms: [{
            _id: 'abcd1234', name: 'Safari Tent 1',
            shared: false, maxPax: 2, basePrice: 420, personPrice: 0, showBeds: false
        }, {
            _id: '1234abcd', name: 'Safari Tent 2',
            shared: false, maxPax: 2, basePrice: 420, personPrice: 0, showBeds: false
        }]
    }, {
        name: 'Dove',
        rooms: [{
            _id: '6789wxyz', name: 'Dove Left',
            shared: false, maxPax: 2, basePrice: 200, personPrice: 100, showBeds: true
        }]
    }, {
        name: 'ungrouped',
        rooms: [{
            _id: 'wxyz6789', name: 'Camping',
            shared: true, maxPax: 80, basePrice: 0, personPrice: 240, showBeds: true
        }]
    }])
})

// getDefaultRoomFlags: each group defaults to closed (open: false) except the
// 'ungrouped' bucket, and every per-room flag defaults to false.
test('Return default group flags', () => {
    const groups = [{
        name: 'Safari Tents',
        rooms: [{
            _id: 'abcd1234', name: 'Safari Tent 1',
            shared: false, maxPax: 2, basePrice: 420, personPrice: 0, showBeds: false
        }, {
            _id: '1234abcd', name: 'Safari Tent 2',
            shared: false, maxPax: 2, basePrice: 420, personPrice: 0, showBeds: false
        }]
    }, {
        name: 'Dove',
        rooms: [{
            _id: '6789wxyz', name: 'Dove Left',
            shared: false, maxPax: 2, basePrice: 200, personPrice: 100, showBeds: true
        }]
    }, {
        name: 'ungrouped',
        rooms: [{
            _id: 'wxyz6789', name: 'Camping',
            shared: true, maxPax: 80, basePrice: 0, personPrice: 240, showBeds: true
        }]
    }]

    const flags = getDefaultRoomFlags(groups)

    expect(flags).toEqual({
        'Safari Tents': {
            open: false,
            rooms: {
                'Safari Tent 1': false,
                'Safari Tent 2': false,
            }
        },
        'Dove': {
            open: false,
            rooms: {
                'Dove Left': false
            }
        },
        'ungrouped': {
            open: true,
            rooms: {
                'Camping': false
            }
        }
    })
})
<reponame>eden-lab/eden-archetype
#set( $symbol_pound = '#' )
#set( $symbol_dollar = '$' )
#set( $symbol_escape = '\' )
package ${package}.dao.repository.mybatis.dataobject;

import com.baomidou.mybatisplus.annotation.TableField;
import com.baomidou.mybatisplus.annotation.TableId;
import com.baomidou.mybatisplus.annotation.TableName;
import lombok.*;
import lombok.experimental.SuperBuilder;

import java.io.Serializable;
import java.time.LocalDateTime;

/**
 * Database object (DO) mapped to the user information table ({@code demo_user})
 * via MyBatis-Plus.
 *
 * @author <a href="mailto:<EMAIL>">gyl</a>
 * @since 2.4.x
 */
@Data
@AllArgsConstructor
@NoArgsConstructor
@EqualsAndHashCode
@SuperBuilder
@ToString
@TableName("demo_user")
public class UserDO implements Serializable {

    /**
     * User ID (primary key).
     */
    @TableId("id")
    private Long id;

    /**
     * Login account name.
     */
    @TableField("login")
    private String login;

    /**
     * Password hash (never the plaintext password).
     */
    @TableField("password_hash")
    private String passwordHash;

    /**
     * Email address.
     */
    @TableField("email")
    private String email;

    /**
     * Whether the account has been activated.
     */
    @TableField("activated")
    private Boolean activated;

    /**
     * Whether the account is locked.
     */
    @TableField("locked")
    private Boolean locked;

    /**
     * Language key (locale) for the user.
     */
    @TableField("lang_key")
    private String langKey;

    /**
     * Code used to activate the account.
     */
    @TableField("activation_key")
    private String activationKey;

    /**
     * Code used to reset the password.
     */
    @TableField("reset_key")
    private String resetKey;

    /**
     * Time the password reset was requested.
     */
    @TableField("reset_date")
    private LocalDateTime resetDate;

    /**
     * Account that created this record.
     */
    @TableField("created_by")
    private String createdBy;

    /**
     * Creation time.
     */
    @TableField("created_date")
    private LocalDateTime createdDate;

    /**
     * Account that last modified this record.
     */
    @TableField("last_modified_by")
    private String lastModifiedBy;

    /**
     * Last modification time.
     */
    @TableField("last_modified_date")
    private LocalDateTime lastModifiedDate;
}
#!/usr/bin/env python

"""Class whose instances take information from project species RDF files,
construct an RDF graph representing the dataset (VOID), and dump it to file."""

import gzip
import datetime

import rdflib
from rdflib import Graph
from rdflib import Namespace
from rdflib import URIRef, BNode, Literal, XSD
from rdflib.namespace import RDF, FOAF, VOID

from rfc3987 import parse

##### Namespaces #####
dataset = Namespace("http://rdf.ebi.ac.uk/dataset/")
dct = Namespace("http://purl.org/dc/terms/")
pav = Namespace("http://purl.org/pav/")
idot = Namespace("http://identifiers.org/idot/")
dcat = Namespace("http://www.w3.org/ns/dcat#")
dcmi = Namespace("http://purl.org/dc/dcmitype/")
ebi = Namespace("http://www.ebi.ac.uk/")
ensembl = Namespace("http://www.ensembl.org")
ensemblgenomes = Namespace("http://www.ensemblgenomes.org")


class VoidRDF(object):
    """Builds a VOID description of an Ensembl/EnsemblGenomes RDF release,
    quality-checks it with SPARQL, and serializes it to Turtle."""

    # Static per-project metadata: display name, homepage and licence path.
    projectInfo = {
        "ensembl": {
            "name": "Ensembl",
            "url": "http://www.ensembl.org",
            "license": "/info/about/legal/code_licence.html",
        },
        "ensemblgenomes": {
            "name": "EnsemblGenomes",
            "url": "http://www.ensemblgenomes.org",
            "license": "/info/about/legal/code_licence"
        }
    }

    def __init__(self, project, release, releaseDate, speciesInfo):
        """Build new VoidRDF(project, release, releaseDate, speciesInfo).

        project     -- key into VoidRDF.projectInfo ("ensembl"/"ensemblgenomes")
        release     -- release number (int, used with %d formats)
        releaseDate -- release date (used via str(); assumed ISO-like — TODO confirm)
        speciesInfo -- iterable of dicts with "name" and "rdf" ("core"/"xrefs") keys
        """
        self.speciesInfo = speciesInfo
        self.project = VoidRDF.projectInfo[project]["name"]
        self.release = release
        self.releaseDate = releaseDate
        self.url = VoidRDF.projectInfo[project]["url"]
        if project == "ensembl":
            self.uri = URIRef(ensembl + "/")
        else:
            self.uri = URIRef(ensemblgenomes + "/")
        self.license = "%s%s" % (self.url, VoidRDF.projectInfo[project]["license"])
        # The rdflib Graph; populated by generate().
        self.void = None

    def generate(self):
        """Populate self.void: project-level VOID plus one dataset per species."""
        self.init_rdf()
        self.add_species_datasets()

    def init_rdf(self):
        """Initialise VOID RDF graph for project (summary, version and
        distribution levels for the project-wide dataset)."""
        g = Graph()
        # Bind prefixes of the namespaces
        # rdflib Namespaces
        g.bind('void', VOID)
        g.bind('foaf', FOAF)
        g.bind('rdf', RDF)
        # own namespaces
        g.bind('dataset', dataset)
        g.bind('dct', dct)
        g.bind('dcmi', dcmi)
        g.bind('pav', pav)
        g.bind('idot', idot)
        g.bind('dcat', dcat)
        g.bind('ebi', ebi)
        if "Genomes" not in self.project:
            g.bind('ensembl', ensembl)
            self.ds = dataset.ensembl
        else:
            g.bind('ensemblgenomes', ensemblgenomes)
            self.ds = dataset.ensemblgenomes
        date = str(self.releaseDate)
        ensemblVersion = self.ds + "/" + date
        ensemblDist = ensemblVersion + ".rdf"
        # Summary
        g.add( (self.ds, RDF.type, dcmi.Dataset) )
        g.add( (self.ds, dct.title, Literal(self.project)) ) # As in the doc
        g.add( (self.ds, dct.description, Literal("%s genes and external references in RDF" % self.project)) ) # As in the doc
        g.add( (self.ds, FOAF.page, ebi.rdf+"/services/sparql")) # As in the doc
        g.add( (self.ds, dct.publisher, Literal("http://www.ebi.ac.uk/")) ) # As in the doc. From Specs? But could make sense to add license
        g.add( (self.ds, pav.hasCurrentVersion, ensemblVersion) ) # Version date uri
        # NOTE(review): this adds a second, different dct.publisher literal for
        # the same subject — confirm both are wanted.
        g.add( (self.ds, dct.publisher, Literal(ebi)) )
        # Version Level
        g.add( (ensemblVersion, RDF.type, dcmi.Dataset) )
        g.add( (ensemblVersion, dct.title, Literal("%s v%d genes and external references in RDF" % (self.project, self.release))) ) # As in the doc
        g.add( (ensemblVersion, dct.issued, Literal(date, datatype=XSD.date)) )
        g.add( (ensemblVersion, dct.isVersionOf, self.ds) )
        g.add( (ensemblVersion, pav.version, Literal(self.release)) )
        g.add( (ensemblVersion, dct.hasDistribution, ensemblDist) )
        g.add( (ensemblVersion, dct.creator, ebi.ENSEMBL) )
        g.add( (ensemblVersion, dct.publisher, Literal(ebi)) ) # As in the doc
        g.add( (ensemblVersion, dct.license, Literal("http://www.ebi.ac.uk/about/terms-of-use")) ) # As in the doc
        g.add( (ensemblVersion, dct.rights, Literal("Apache version 2.0")) )
        g.add( (ensemblVersion, dct.description, Literal("%s release %d" % (self.project, self.release))) )
        # Distribution
        g.add( (ensemblDist, RDF.type, VOID.Dataset) ) # As in the doc
        g.add( (ensemblDist, RDF.type, dcat.Distribution) ) # As in the doc
        # Bug fix: a stray, syntactically broken statement referencing the
        # undefined names `ol`/`idt` used to sit here. Preserved as a TODO:
        #   g.add((ol.ols, idot.preferredPrefix, Literal("ols")))  # INTRODUCE prefix?
        #   Maybe not on version level but here for NAMED GRAPHS? Or come up
        #   with an own NamedGraph/IRI tag.
        g.add( (ensemblDist, dct.title, Literal("%s RDF data (release %d)" % (self.project, self.release))) )
        g.add( (ensemblDist, dct.description, Literal("%s genes and external references in RDF (release %d)" % (self.project, self.release))) ) # As in the doc
        g.add( (ensemblDist, dct.creator, ebi.ENSEMBL) ) # As in the doc (but group is missing at the moment)
        g.add( (ensemblDist, dct.publisher, Literal(ebi)) ) # As in the doc
        g.add( (ensemblDist, dct.license, Literal("http://www.ebi.ac.uk/about/terms-of-use")) )
        g.add( (ensemblDist, URIRef(dct + 'format'), Literal("text/turtle")) )
        g.add( (ensemblDist, FOAF.page, ebi.rdf+"/services/sparql") ) # As in the doc
        g.add( (ensemblDist, VOID.dataDump, Literal('unknown') ) ) # As in the doc
        g.add( (ensemblDist, pav.version, Literal(date, datatype=XSD.date)) )
        self.void = g

    def add_species_datasets(self):
        """Add summary/version/distribution datasets for every species in
        self.speciesInfo, linked to the project dataset via dct:hasPart."""
        if self.void is None:
            raise ValueError("Can't add species datasets, graph not initialised")
        for species in self.speciesInfo:
            # species name is assumed to be in production format, e.g. homo_sapiens
            if not species["name"]:
                raise ValueError("Species data with no name")
            speciesName = species["name"]
            speciesId = URIRef(dataset + speciesName)
            title = " ".join(speciesName.split('_')).capitalize()
            speciesVersion = speciesId + "/" + str(self.release) # Version number of current release
            speciesDist = speciesVersion + ".rdf"
            self.void.add( (self.ds, dct.hasPart, speciesId))
            # Summary
            self.void.add( (speciesId, RDF.type, dcmi.Dataset))
            self.void.add( (speciesId, dct.title, Literal(title)) )
            self.void.add( (speciesId, dct.description, Literal("%s Ensembl RDF" % title)) )
            self.void.add( (speciesId, dct.publisher, self.uri) )
            self.void.add( (speciesId, pav.hasCurrentVersion, speciesVersion) )
            # Version
            self.void.add( (speciesVersion, RDF.type, dct.Dataset) )
            self.void.add( (speciesVersion, dct.title, Literal("Release %d" % self.release)) )
            self.void.add( (speciesVersion, dct.issued, Literal(self.releaseDate, datatype=XSD.date)) )
            self.void.add( (speciesVersion, dct.isVersionOf, speciesId) )
            self.void.add( (speciesVersion, dct.hasDistribution, speciesDist) )
            self.void.add( (speciesVersion, pav.version, Literal(self.release) ) )
            self.void.add( (speciesVersion, dct.creator, self.uri) )
            self.void.add( (speciesVersion, dct.publisher, self.uri) )
            self.void.add( (speciesVersion, dct.description, Literal("Released on %s" % str(self.releaseDate))) )
            # DistributionLevel
            self.void.add( (speciesDist, RDF.type, VOID.Dataset) ) # It is a void dataset
            self.void.add( (speciesDist, RDF.type, dcat.Distribution) )
            self.void.add( (speciesDist, dct.title, Literal("%s %s" % (self.project, title))))
            self.void.add( (speciesDist, dct.description, Literal("%s %s specific RDF" % (self.project, title)) ) )
            self.void.add( (speciesDist, dct.creator, self.uri) )
            self.void.add( (speciesDist, dct.publisher, self.uri) )
            self.void.add( (speciesDist, dct.license, Literal(self.license)) )
            self.void.add( (speciesDist, URIRef(dct + 'format'), Literal("text/turtle")) )
            # Version can be null sometimes! So need a check for that replace
            # with something else in case
            self.void.add( (speciesDist, pav.version, Literal(self.release)) )
            # Here we add species subsets to the distribution level
            self.void.add( (speciesDist, VOID.subset, speciesVersion + "/" + speciesName + "_core") )
            self.void.add( (speciesDist, VOID.subset, speciesVersion + "/" + speciesName + "_xref") )
            # Information about the subsets, namely data and xrefs per resource
            speciesPart1 = speciesVersion + "/" + speciesName + "_core"
            self.void.add( (speciesPart1, RDF.type, dct.Dataset) )
            # Include the ftp folder release strategy whatever
            self.void.add( (speciesPart1, VOID.dataDump, Literal(species["rdf"]["core"])) )
            self.void.add( (speciesPart1, dct.title, Literal(title)) )
            self.void.add( (speciesPart1, dct.description, Literal("Core gene models and orthologies for %s" % title)) )
            speciesPart2 = speciesVersion + "/" + speciesName + "_xref"
            self.void.add( (speciesPart2, RDF.type, dct.Dataset) )
            # Include the ftp folder release strategy whatever
            self.void.add( (speciesPart2, VOID.dataDump, Literal(species["rdf"]["xrefs"])) )
            self.void.add( (speciesPart2, dct.title, Literal(title)) )
            self.void.add( (speciesPart2, dct.description, Literal("External references for %s" % title)) )

    def write(self, fileName, zipped):
        """Dump VOID RDF graph to file (gzipped when `zipped` is truthy)."""
        voidOutput = self.void.serialize(format='turtle')
        # Bug fix: serialize() returns str on recent rdflib (bytes on older
        # versions), but gzip in 'wb' mode requires bytes. Normalize both ways.
        if isinstance(voidOutput, bytes):
            voidBytes = voidOutput
            voidText = voidOutput.decode('utf-8')
        else:
            voidBytes = voidOutput.encode('utf-8')
            voidText = voidOutput
        if zipped:
            with gzip.open(fileName, 'wb') as f:
                f.write(voidBytes)
        else:
            with open(fileName, 'w') as voidFile:
                voidFile.write(voidText)

    def qc(self):
        """Quality control.

        This is basically a combination of SPARQL queries and python error
        msgs in case the result is not what we expect. An outline of the
        rules enforced here can be found in the pdf at github
        https://github.com/EBISPOT/RDF-platform."""
        if self.void is None:
            raise ValueError("Cannot check VOID, graph not created")

        ### Summary Level ###
        # Get all summary levels
        listOfSummaryLevelNodes = []
        qres = self.void.query('''SELECT ?a ?b ?c WHERE { ?a ?b <http://purl.org/dc/dcmitype/Dataset>. ?a <http://purl.org/pav/hasCurrentVersion> ?c }''')
        if len(qres) == 0:
            raise AttributeError('No Summary Level found! Summary level is defined through the attributes <http://purl.org/dc/dcmitype/Dataset> and <http://purl.org/pav/hasCurrentVersion>')
        # Turn all the summary level subjects into strings and add them to a list
        for row in qres:
            listOfSummaryLevelNodes.append("<" + str(row.a) + ">")
        # Run through the list of summary level subjects and do some tests
        for entity in listOfSummaryLevelNodes:
            # First check, connectivity: for every summary node check there is a
            # version level and that this version level has a distribution level
            query = 'SELECT ?d ?x ?z WHERE {' + entity + ' <http://purl.org/pav/hasCurrentVersion> ?d . ?d <http://purl.org/dc/terms/hasDistribution> ?x. ?x <http://rdfs.org/ns/void#dataDump> ?z }'
            qres = self.void.query(query)
            # If result is empty, no direct dataDump; accept a subset instead
            if (len(qres) == 0):
                query = 'SELECT ?a ?b ?c WHERE {' + entity + ' <http://purl.org/pav/hasCurrentVersion> ?d . ?d <http://purl.org/dc/terms/hasDistribution> ?x. ?x <http://rdfs.org/ns/void#subset> ?z }'
                check2 = self.void.query(query)
                if (len(check2) == 0):
                    raise AttributeError('Connectivity between Summary, Version and Distribution level is not given! ' + entity + " has to have the attribute <http://purl.org/pav/hasCurrentVersion>, its version level <http://purl.org/dc/terms/hasDistribution> and the distribution level is idendified by <http://rdfs.org/ns/void#dataDump> or has to have a subset <http://rdfs.org/ns/void#subset> with a dataDump!")
            # Second check, necessary attributes: title, publisher, description
            listOfPredicats = []
            listOfObjects = []
            query = 'Select ?b ?c {' + entity + '?b ?c}'
            qres = self.void.query(query)
            for row in qres:
                # Bug fix: str(), not .encode("utf-8") — encode() returns bytes
                # on Python 3, which never match the str literals tested below.
                listOfPredicats.append(str(row.b))
                listOfObjects.append(str(row.c))
            if "http://purl.org/dc/terms/title" not in listOfPredicats:
                raise AttributeError('Title of type http://purl.org/dc/terms/title is missing in ' + entity)
            if "http://purl.org/dc/terms/publisher" not in listOfPredicats:
                raise AttributeError('Publisher of type http://purl.org/dc/terms/publisher is missing in ' + entity)
            if "http://purl.org/dc/terms/description" not in listOfPredicats:
                raise AttributeError('Description of type http://purl.org/dc/terms/description is missing in ' + entity)
            ### Negative check
            if "http://rdfs.org/ns/void#dataDump" in listOfPredicats:
                raise AttributeError('dataDump of type http://rdfs.org/ns/void#dataDump MUST NOT be present on summary level - ' + entity)
            if "http://purl.org/dc/terms/creator" in listOfPredicats:
                raise AttributeError('Creator of type http://purl.org/dc/terms/creator MUST NOT be present on summary level ' + entity)
            # parse() raises if the publisher is not a valid IRI
            parse(listOfObjects[listOfPredicats.index("http://purl.org/dc/terms/publisher")], rule='IRI')

        # Third check: every summary level with a hasPart relationship must
        # reference sub summary levels that actually exist.
        # 1: get all summary levels (top level) with a hasPart relationship
        qres = self.void.query('''SELECT DISTINCT ?a WHERE { ?a <http://purl.org/dc/terms/hasPart> ?x. ?a ?b <http://purl.org/dc/dcmitype/Dataset>. ?a <http://purl.org/pav/hasCurrentVersion> ?y}''')
        listOfAllTopLevels = []
        for row in qres:
            listOfAllTopLevels.append("<" + row.a + ">")
        # 2: go through the hasPart references for every top level
        for topLevel in listOfAllTopLevels:
            listOfAllHasParts = []
            # Get a list of the hasPart summary levels the top level references
            qres = self.void.query('SELECT DISTINCT ?x WHERE {' + topLevel + '<http://purl.org/dc/terms/hasPart> ?x}')
            for row in qres:
                listOfAllHasParts.append("<" + row.x + ">")
            # 3: check for connectivity - do the referenced summary levels exist?
            for subTopLevel in listOfAllHasParts:
                qres = self.void.query('SELECT DISTINCT ?b WHERE {' + subTopLevel + ' <http://purl.org/pav/hasCurrentVersion> ?b}')
                if len(qres) == 0:
                    raise AttributeError('Toplevel references via hasPart to ' + subTopLevel + ' but it does not exist')

        ### Version Level (ID: is dct:dataset, dct:isVersionOf) ###
        listOfVersionNodes = []
        qres = self.void.query('''SELECT ?a WHERE {?a ?b <http://purl.org/dc/dcmitype/Dataset>. ?a <http://purl.org/dc/terms/isVersionOf> ?c}''')
        if (len(qres) == 0):
            raise AttributeError('Could not find any version level - it is defined through <http://purl.org/dc/dcmitype/Dataset> and <http://purl.org/dc/terms/isVersionOf>')
        for row in qres:
            listOfVersionNodes.append("<" + str(row.a) + ">")
        for entity in listOfVersionNodes:
            listOfPredicats = []
            listOfObjects = []
            query = 'Select ?b ?c {' + entity + '?b ?c}'
            qres = self.void.query(query)
            for row in qres:
                listOfPredicats.append(str(row.b))
                listOfObjects.append(str(row.c))
            if "http://purl.org/dc/terms/title" not in listOfPredicats:
                raise AttributeError('Title of type http://purl.org/dc/terms/title is missing in ' + entity)
            if "http://purl.org/dc/terms/description" not in listOfPredicats:
                raise AttributeError('Description of type http://purl.org/dc/terms/description is missing in ' + entity)
            if "http://purl.org/dc/terms/creator" not in listOfPredicats:
                raise AttributeError('Creator of type http://purl.org/dc/terms/creator is missing in ' + entity)
            if "http://purl.org/dc/terms/publisher" not in listOfPredicats:
                raise AttributeError('Publisher of type http://purl.org/dc/terms/publisher is missing in ' + entity)
            if "http://purl.org/pav/version" not in listOfPredicats:
                raise AttributeError('Version of type http://purl.org/pav/version is missing in ' + entity)
            ### Negative test
            if "http://rdfs.org/ns/void#dataDump" in listOfPredicats:
                raise AttributeError('dataDump of type http://rdfs.org/ns/void#dataDump MUST NOT be present on version level - ' + entity)
            parse(listOfObjects[listOfPredicats.index("http://purl.org/dc/terms/publisher")], rule='IRI')
            parse(listOfObjects[listOfPredicats.index("http://purl.org/dc/terms/creator")], rule='IRI')

        ### Distribution Level (ID: is void:dataset, dcat:Distribution) ###
        # List of ALL distribution levels (those that are not subsets)
        ListOfallDistributionLevels = []
        qres = self.void.query('''SELECT ?a WHERE { ?a ?b <http://rdfs.org/ns/void#Dataset>. ?a ?b <http://www.w3.org/ns/dcat#Distribution> FILTER( NOT EXISTS{ ?x <http://rdfs.org/ns/void#subset> ?a} ) }''')
        if len(qres) == 0:
            raise AttributeError("No distribution level found! It is defined through the attribute <http://rdfs.org/ns/void#Dataset> and <http://www.w3.org/ns/dcat#Distribution>")
        for row in qres:
            ListOfallDistributionLevels.append("<" + str(row.a) + ">")
        for entity in ListOfallDistributionLevels:
            listOfPredicats = []
            listOfObjects = []
            query = 'Select ?b ?c {' + entity + '?b ?c}'
            qres = self.void.query(query)
            for row in qres:
                listOfPredicats.append(str(row.b))
                listOfObjects.append(str(row.c))
            # Subset/dataDump test lives in its own function (needs recursion)
            self.recursive_subset_check(entity)
            if "http://purl.org/dc/terms/title" not in listOfPredicats:
                raise AttributeError('Title of type http://purl.org/dc/terms/title is missing in ' + entity)
            if "http://purl.org/dc/terms/description" not in listOfPredicats:
                raise AttributeError('Description of type http://purl.org/dc/terms/description is missing in ' + entity)
            if "http://purl.org/dc/terms/creator" not in listOfPredicats:
                raise AttributeError('Creator of type http://purl.org/dc/terms/creator is missing in ' + entity)
            if "http://purl.org/dc/terms/publisher" not in listOfPredicats:
                raise AttributeError('Publisher of type http://purl.org/dc/terms/publisher is missing in ' + entity)
            if "http://purl.org/dc/terms/license" not in listOfPredicats:
                raise AttributeError('Licence of type http://purl.org/dc/terms/license is missing in ' + entity)
            if "http://purl.org/dc/terms/format" not in listOfPredicats:
                raise AttributeError('Format of type http://purl.org/dc/terms/format is missing in ' + entity)
            # Negative test
            if "http://purl.org/dc/terms/isVersionOf" in listOfPredicats:
                raise AttributeError("isVersionOf of type <http://purl.org/dc/terms/isVersionOf> MUST NOT be present on distribution level! " + entity)
            # In a way DATE is missing
            parse(listOfObjects[listOfPredicats.index("http://purl.org/dc/terms/publisher")], rule='IRI')
            parse(listOfObjects[listOfPredicats.index("http://purl.org/dc/terms/creator")], rule='IRI')
            parse(listOfObjects[listOfPredicats.index("http://purl.org/dc/terms/license")], rule='IRI')

        ########################################################
        #### List of ALL LinkSets
        # Check for dataDump link (and potentially other predicates in future)
        ListOfAllLinkSets = []
        qres = self.void.query('''SELECT ?a WHERE {?a ?b <http://rdfs.org/ns/void#Linkset>}''')
        for row in qres:
            ListOfAllLinkSets.append("<" + str(row.a) + ">")
        for entity in ListOfAllLinkSets:
            # First check for connectivity: is the subset connected to a void:Dataset?
            qres = self.void.query('SELECT ?a WHERE {?a ?x <http://rdfs.org/ns/void#Dataset>. ?a ?b ' + entity + '}')
            if len(qres) != 1:
                raise AttributeError('Linkset is missing connection to a distribution level (which is defined through <http://rdfs.org/ns/void#Dataset>')
            # Check number two: predicates of the subset; currently dataDump only
            listOfPredicats = []
            query = 'Select ?b ?c {' + entity + '?b ?c}'
            qres = self.void.query(query)
            for row in qres:
                listOfPredicats.append(str(row.b))
            if "http://rdfs.org/ns/void#dataDump" not in listOfPredicats:
                raise AttributeError("dataDump of type http://rdfs.org/ns/void#dataDump is missing in " + entity)
        print("Looks good")

    def recursive_subset_check(self, entity):
        """Recursively check that the deepest level of a subset chain carries
        exactly one dataDump link; raises AttributeError otherwise."""
        # Check for subsets of this entity
        qres = self.void.query('SELECT ?c WHERE {' + entity + ' <http://rdfs.org/ns/void#subset> ?c }')
        # No subset? Then this node itself must have exactly one dataDump.
        if len(qres) == 0:
            subtest = self.void.query('SELECT ?a WHERE {' + entity + ' <http://rdfs.org/ns/void#dataDump> ?x}')
            if len(subtest) == 0:
                raise AttributeError('Could not find dataDump of type http://rdfs.org/ns/void#dataDump or no subset of type <http://rdfs.org/ns/void#subset> in ' + entity)
            elif len(subtest) > 1:
                raise AttributeError('More than one dataDump found in ' + entity + ' - this is not allowed!')
            elif len(subtest) == 1:
                return True
        # A subset exists, so recurse into every subset until a dataDump is found
        else:
            for row in qres:
                subentity = "<" + row.c + ">"
                self.recursive_subset_check(subentity)
// Generated by script, don't edit it please. import createSvgIcon from '../createSvgIcon'; import MinusSvg from '@rsuite/icon-font/lib/action/Minus'; const Minus = createSvgIcon({ as: MinusSvg, ariaLabel: 'minus', category: 'action', displayName: 'Minus' }); export default Minus;
# Want to train with wordnet hierarchy? Just set `--hierarchy=wordnet` below.
MODEL_NAME="wrn28_10"

# Each entry: "<dataset> <model architecture> <tree-supervision weight>".
for i in "CIFAR10 ${MODEL_NAME}_cifar10 1" "CIFAR100 ${MODEL_NAME}_cifar100 1" "TinyImagenet200 ${MODEL_NAME} 10"; do
  read dataset model weight <<< "${i}";

  # 1. generate hierarchy (induced from the pretrained model's weights)
  # Expansions are quoted throughout (shellcheck SC2086) so the commands stay
  # correct even if a value ever contains whitespace or glob characters.
  nbdt-hierarchy --dataset="${dataset}" --arch="${model}"

  # 2. train with soft tree supervision loss
  python main.py --lr=0.01 --dataset="${dataset}" --model="${model}" --hierarchy="induced-${model}" --pretrained --loss=SoftTreeSupLoss --tree-supervision-weight="${weight}"

  # 3. evaluate with soft then hard inference for analysis
  for analysis in SoftEmbeddedDecisionRules HardEmbeddedDecisionRules; do
    python main.py --dataset="${dataset}" --model="${model}" --hierarchy="induced-${model}" --loss=SoftTreeSupLoss --eval --resume --analysis="${analysis}" --tree-supervision-weight="${weight}"
  done
done;
'use strict';

// Application-wide constants.
app.constant('CONFIG', {
    host: window.location.origin
});

app.run(function($rootScope, $location, authService, utils, $timeout) {
    // Auth guard: every navigation outside /login requires a valid session.
    $rootScope.$on("$locationChangeStart", function(event, next, current) {
        if ($location.path() === '/login') {
            return;
        }
        if (!authService.isAuthed()) {
            $location.path('/login');
            return;
        }
        // NOTE(review): header is shown while the sidebar is hidden here —
        // confirm .hide() (rather than .show()) on .main-sidebar is intended.
        $('.main-header').show();
        $('.main-sidebar').hide();
    });

    // After each successful navigation, re-fix the layout once things settle.
    $rootScope.$on("$locationChangeSuccess", function() {
        $timeout(function() {
            utils.fixLayout();
        }, 1000);
    });
});

app.config(function($routeProvider, $httpProvider) {
    $routeProvider
        /* Patient Screen Configuration */
        .when('/patient', {
            templateUrl: "dist/partials/patient/view.html",
            controller: 'patientViewController'
        })
        .when('/patient/new', {
            templateUrl: "dist/partials/patient/input.html",
            controller: 'patientInputController'
        })
        .when('/patient/:id', {
            templateUrl: "dist/partials/patient/input.html",
            controller: 'patientInputController'
        })
        /* Login Screen Configuration */
        .when('/login', {
            templateUrl: "dist/partials/login/login.html",
            controller: 'loginController'
        })
        .otherwise({
            redirectTo: '/login'
        });

    // Attach auth headers / handle 401s on every HTTP request.
    $httpProvider.interceptors.push('authInterceptor');
});
#!/usr/bin/env ruby
# Send a test mail through the Amazon SES SMTP endpoint.
#
# execution command
#   ./smtp_send.rb [port] [user] [password]
# Bug fix: the usage comment always documented these arguments, but the script
# ignored ARGV; they are now honored, falling back to the old defaults.
require 'net/smtp'

# Build an unconnected Net::SMTP client for the SES endpoint, with the TLS
# mode matching the port: 465 => implicit TLS (SMTPS), 587/25 => STARTTLS.
# Any other port raises RuntimeError.
def smtp(port)
  Net::SMTP.new('email-smtp.us-west-2.amazonaws.com', port).tap do |client|
    if port == 465
      client.enable_tls # 465 port (SMTPS)
    elsif port == 587 || port == 25
      client.enable_starttls # 587, 25 port (STARTTLS)
    else
      raise
    end
  end
end

if __FILE__ == $PROGRAM_NAME
  port = (ARGV[0] || 587).to_i # 465 or 587 or 25
  user = ARGV[1] || 'USER ACCESS KEY'
  pass = ARGV[2] || '<PASSWORD>'

  hello_domain = 'takapra.com'
  from = '<EMAIL>'
  to = '<EMAIL>'
  subject = 'test_hello ああああ'

  message = <<-MESSAGE
aaaa
bbbb
ほげふが
  MESSAGE

  data = <<-DATA
From: #{from}
To: #{to}
Subject: #{subject}
Date: #{Time.now}

#{message}
PORT: #{port}
  DATA

  client = smtp(port)
  client.start(hello_domain, user, pass, :login)
  begin
    client.send_message(data, from, to)
  ensure
    # Always close the SMTP session, even if send_message raises.
    client.finish
  end
end
#!/usr/bin/env bash
set -e
set -x

export GH_USERNAME="jenkins-x-bot-test"
export GH_OWNER="jenkins-x-bot-test"

# fix broken `BUILD_NUMBER` env var
export BUILD_NUMBER="$BUILD_ID"

JX_HOME="/tmp/jxhome"
KUBECONFIG="/tmp/jxhome/config"

# lets avoid the git/credentials causing confusion during the test
export XDG_CONFIG_HOME="$JX_HOME"

mkdir -p "$JX_HOME/git"

jx --version

# replace the credentials file with a single user entry
echo "https://$GH_USERNAME:$GH_ACCESS_TOKEN@github.com" > "$JX_HOME/git/credentials"

# Quoted (SC2086): a path with spaces would otherwise split into two args.
gcloud auth activate-service-account --key-file "$GKE_SA"

# lets setup git
git config --global --add user.name JenkinsXBot
git config --global --add user.email jenkins-x@googlegroups.com

echo "running the BDD tests with JX_HOME = $JX_HOME"

# test configuration
export SKIP_JENKINS_CHECK="yes"

jx step bdd \
    --use-revision \
    --version-repo-pr \
    --versions-repo https://github.com/jenkins-x/jenkins-x-versions.git \
    --config jx/bdd/helm3/cluster.yaml \
    --gopath /tmp \
    --git-provider=github \
    --git-username "$GH_USERNAME" \
    --git-owner "$GH_OWNER" \
    --git-api-token "$GH_ACCESS_TOKEN" \
    --default-admin-password "$JENKINS_PASSWORD" \
    --no-delete-app \
    --no-delete-repo \
    --tests install \
    --tests test-verify-pods \
    --tests test-upgrade-platform \
    --tests test-upgrade-ingress \
    --tests test-app-lifecycle \
    --tests test-create-spring \
    --tests test-quickstart-golang-http \
    --tests test-import
/*
 * Copyright (c) 2021 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cstring>
#include <cstdlib>
#include <string>

namespace panda::terminate {

#ifndef FUZZING_EXIT_ON_FAILED_ASSERT_FOR
// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define FUZZING_EXIT_ON_FAILED_ASSERT_FOR ""
#endif

// Terminate the process after a failed assert during fuzzing.
//
// Behavior:
//  - FUZZING_EXIT_ON_FAILED_ASSERT=false in the environment forces abort().
//  - Otherwise, FUZZING_EXIT_ON_FAILED_ASSERT_FOR (environment, falling back
//    to the compile-time default) is a comma-separated list of path
//    substrings; if `file` matches any of them we exit(1), else abort().
[[noreturn]] void Terminate(const char *file)
{
    auto filepath = std::string(file);

    char *replace = std::getenv("FUZZING_EXIT_ON_FAILED_ASSERT");
    if ((replace != nullptr) && (std::string(replace) == "false")) {
        std::abort();
    }

    // Bug fix: the original tokenized the getenv() result in place with
    // strtok(), which writes NUL bytes into the environment buffer —
    // modifying the string returned by getenv() is undefined behavior.
    // Copy it into a std::string and tokenize without mutation instead.
    std::string libsList;
    char *libs = std::getenv("FUZZING_EXIT_ON_FAILED_ASSERT_FOR");
    if (libs != nullptr) {
        libsList = libs;
    } else {
        libsList = std::string(FUZZING_EXIT_ON_FAILED_ASSERT_FOR);
        if (libsList.empty()) {
            std::abort();
        }
    }

    // Split on ',' and exit(1) on the first substring found in filepath.
    // Empty tokens are skipped, matching strtok()'s behavior.
    size_t start = 0;
    while (start <= libsList.size()) {
        size_t end = libsList.find(',', start);
        if (end == std::string::npos) {
            end = libsList.size();
        }
        std::string lib = libsList.substr(start, end - start);
        if (!lib.empty() && filepath.find(lib) != std::string::npos) {
            std::exit(1);
        }
        start = end + 1;
    }
    std::abort();
}

}  // namespace panda::terminate
<gh_stars>1-10 const { run } = require("./runner"); const HEADERS = { "Content-Type": "application/json", }; exports.handler = async (event, context) => { // Handle CORS request, API gateway will add correct CORS headers if (event.requestContext.http.method === "OPTIONS") { return { statusCode: "200" }; } const { program, command } = JSON.parse(event.body); const result = await run(program, command).then( (result) => result, (error) => { console.error(error); return { error: true }; } ); return { statusCode: "200", headers: HEADERS, body: JSON.stringify(result), }; };
// Load and configure Express
const express = require('express')
const app = express()

// Bug fix: parse JSON request bodies. Without this middleware req.body is
// undefined, so the POST and PUT handlers below could never read `description`.
app.use(express.json())

// Load the Todo model
const Todo = require('./models/Todo')

// GET /todos — list every todo.
app.get('/todos', async (req, res) => {
  try {
    const todos = await Todo.find({})
    res.json(todos)
  } catch (err) {
    res.status(500).json({ message: err.message })
  }
})

// POST /todos — create a todo from { description }; 201 on success.
app.post('/todos', async (req, res) => {
  const todo = new Todo({ description: req.body.description })
  try {
    const newTodo = await todo.save()
    res.status(201).json(newTodo)
  } catch (err) {
    res.status(400).json({ message: err.message })
  }
})

// PUT /todos/:id — update the description of an existing todo.
app.put('/todos/:id', getTodo, async (req, res) => {
  if (req.body.description != null) {
    res.todo.description = req.body.description
  }
  try {
    const updatedTodo = await res.todo.save()
    res.json(updatedTodo)
  } catch (err) {
    res.status(400).json({ message: err.message })
  }
})

// DELETE /todos/:id — remove a todo.
app.delete('/todos/:id', getTodo, async (req, res) => {
  try {
    await res.todo.remove()
    res.json({ message: 'Deleted Todo' })
  } catch (err) {
    res.status(500).json({ message: err.message })
  }
})

// Middleware: load the todo for :id onto res.todo, or answer 404/500.
async function getTodo(req, res, next) {
  // Bug fix: `todo` was an implicit global (no declaration) — shared across
  // concurrent requests (a data race) and a ReferenceError in strict mode.
  let todo
  try {
    todo = await Todo.findById(req.params.id)
    if (todo == null) {
      return res.status(404).json({ message: 'Cant find todo'})
    }
  } catch(err){
    return res.status(500).json({ message: err.message })
  }
  res.todo = todo
  next()
}

app.listen(3000, () => {
  console.log('Server is running on port 3000')
});
// Double every element of the array in place, then print it.
var arr = [1, 2, 3, 4];
arr.forEach(function (value, index) {
  arr[index] = value * 2;
});
console.log(arr); // Outputs [2, 4, 6, 8]
package vcs.citydb.wfs.operation.getpropertyvalue; import org.citydb.core.database.schema.path.SchemaPath; import org.citydb.core.query.Query; import vcs.citydb.wfs.operation.AbstractQueryExpression; public class QueryExpression extends AbstractQueryExpression { private String valueReference; private SchemaPath schemaPath; private long propertyOffset; public QueryExpression() { } public QueryExpression(QueryExpression other) { super(other); } public String getValueReference() { return valueReference; } public void setValueReference(String valueReference) { this.valueReference = valueReference; } public SchemaPath getSchemaPath() { return schemaPath; } public void setSchemaPath(SchemaPath schemaPath) { this.schemaPath = schemaPath; } public long getPropertyOffset() { return propertyOffset; } public void setPropertyOffset(long propertyOffset) { this.propertyOffset = Math.max(propertyOffset, 0); } @Override public void copyFrom(Query query) { super.copyFrom(query); if (query instanceof QueryExpression) { QueryExpression other = (QueryExpression) query; valueReference = other.valueReference; schemaPath = other.schemaPath; propertyOffset = other.propertyOffset; } } }
import React, { Component } from 'react'
import { connect } from 'react-redux'
import FormSubmissionEdit from 'components/manageredit/FormSubmissionEdit'
import EditButton from 'components/manageredit/EditButton'
import PropTypes from 'prop-types'

// Container that wraps a render-prop child with an edit button and an
// edit popup. Local state tracks popup visibility and the edited value.
class FormSubmissionEditContainer extends Component {
  constructor (props) {
    super(props)
    this.state = { showEdit: false, value: '' }
    // Bind handlers once so render() passes stable references.
    this.saveQuote = this.saveQuote.bind(this)
    this.cancelQuote = this.cancelQuote.bind(this)
    this.showEditPopup = this.showEditPopup.bind(this)
    this.hideEditPopup = this.hideEditPopup.bind(this)
    this.updateValue = this.updateValue.bind(this)
  }

  // Open the edit popup.
  showEditPopup () {
    this.setState({ showEdit: true })
  }

  // Close the edit popup (value is kept as-is).
  hideEditPopup () {
    this.setState({ showEdit: false })
  }

  // Persist the edited value, then close the popup.
  // NOTE(review): no save side effect is implemented yet — only the comment.
  saveQuote () {
    // Save quote!
    this.hideEditPopup()
  }

  // Discard the edited value, then close the popup once state settles.
  cancelQuote () {
    // Cancel quote!
    this.setState({ value: '' }, () => {
      this.hideEditPopup()
    })
  }

  // Controlled-input change handler; guards against synthetic events
  // without a target.
  updateValue (e) {
    if (e && e.target) {
      this.setState({ value: e.target.value })
    }
  }

  render () {
    const { showEdit, value } = this.state
    const { children: Component } = this.props
    // NOTE(review): `value` is forwarded to the child only while the popup
    // is CLOSED (!showEdit). That may be intentional (freeze the child
    // while editing) or an inverted condition — confirm with the child's
    // expectations.
    let newProps = !showEdit ? { value } : {}
    return (
      <div>
        <EditButton
          position={['absolute', 'right']}
          onClick={this.showEditPopup}
          title='update contact'
          iconModifier='update'
        />
        <FormSubmissionEdit
          handleChange={this.updateValue}
          email={value}
          onSave={this.saveQuote}
          onCancel={this.cancelQuote}
          isOpen={showEdit}
        />
        <Component {...newProps} />
      </div>
    )
  }
}

FormSubmissionEditContainer.propTypes = {
  children: PropTypes.func.isRequired
}

// No store state or dispatchers are mapped yet; connect() is kept for
// future wiring.
const mapStateToProps = (state, ownProps) => {
  return {}
}

const mapDispatchToProps = (dispatch, ownProps) => {
  return {}
}

export default connect(
  mapStateToProps,
  mapDispatchToProps
)(FormSubmissionEditContainer)
<html>
<body>
<!-- Static 5x2 table of single-letter cells; no headers, styling, or script. -->
<table>
<tr>
<td>A</td>
<td>Q</td>
</tr>
<tr>
<td>M</td>
<td>X</td>
</tr>
<tr>
<td>G</td>
<td>H</td>
</tr>
<tr>
<td>F</td>
<td>T</td>
</tr>
<tr>
<td>Y</td>
<td>S</td>
</tr>
</table>
</body>
</html>
<gh_stars>100-1000
/*
  Copyright (C) 2008 <NAME>

  This software is provided 'as-is', without any express or implied
  warranty.  In no event will the authors be held liable for any damages
  arising from the use of this software.

  Permission is granted to anyone to use this software for any purpose,
  including commercial applications, and to alter it and redistribute it
  freely, subject to the following restrictions:

  1. The origin of this software must not be misrepresented; you must not
     claim that you wrote the original software. If you use this software
     in a product, an acknowledgment in the product documentation would be
     appreciated but is not required.
  2. Altered source versions must be plainly marked as such, and must not be
     misrepresented as being the original software.
  3. This notice may not be removed or altered from any source distribution.

  <NAME>
  <EMAIL>

*/
package ikvm.internal;

import cli.System.Activator;
import cli.System.Reflection.BindingFlags;
import cli.System.Reflection.FieldInfo;
import cli.System.Reflection.MethodInfo;
import cli.System.Type;

final class MonoUtils
{
    /**
     * Reads one field of the uname(2) result via Mono.Posix reflection.
     * Returns null when Mono.Posix, the uname method, or the requested
     * field cannot be resolved.
     */
    static String unameProperty(String field)
    {
        Type syscallClass = Type.GetType("Mono.Unix.Native.Syscall, Mono.Posix, Version=2.0.0.0, Culture=neutral, PublicKeyToken=<KEY>");
        Type utsnameClass = Type.GetType("Mono.Unix.Native.Utsname, Mono.Posix, Version=2.0.0.0, Culture=neutral, PublicKeyToken=<KEY>");
        // Bail out early when Mono.Posix is not available on this runtime.
        if (syscallClass == null || utsnameClass == null)
        {
            return null;
        }
        // uname(2) takes its Utsname argument by reference, so wrap it in
        // an Object[] that reflection can write back into.
        Object[] byRefArg = new Object[] { Activator.CreateInstance(utsnameClass) };
        MethodInfo unameMethod = syscallClass.GetMethod("uname", new Type[] { utsnameClass.MakeByRefType() });
        FieldInfo wantedField = utsnameClass.GetField(field);
        if (unameMethod == null || wantedField == null)
        {
            return null;
        }
        unameMethod.Invoke(null, byRefArg);
        return (String)wantedField.GetValue(byRefArg[0]);
    }
}
import torch.nn as nn
from apex.normalization import FusedLayerNorm


class CustomNLPModelLayer(nn.Module):
    """Embedding stack plus a list of transformer layers.

    Builds token and (learned) positional embeddings, then `depth`
    TransformerLayer instances sharing the same normalization class.
    """

    def __init__(self, num_tokens, seq_len, dim, sparse_attn, depth, use_layer_norm):
        # num_tokens: vocabulary size for the token embedding.
        # seq_len: maximum sequence length for the positional embedding.
        # dim: embedding / model width.
        # sparse_attn: forwarded to each TransformerLayer — semantics defined there.
        # depth: number of transformer layers.
        # use_layer_norm: True -> torch LayerNorm, False -> apex FusedLayerNorm.
        super(CustomNLPModelLayer, self).__init__()
        self.seq_len = seq_len

        self.token_emb = nn.Embedding(num_tokens, dim)
        self.pos_emb = nn.Embedding(seq_len, dim)
        # Initialize both embedding tables with N(0, 0.02), a common
        # transformer initialization scale.
        self.token_emb.weight.data.normal_(0, 0.02)
        self.pos_emb.weight.data.normal_(0, 0.02)

        self.layers = nn.ModuleList([])
        norm_class = nn.LayerNorm if use_layer_norm else FusedLayerNorm
        for _ in range(depth):
            self.layers.append(TransformerLayer(dim, sparse_attn, norm_class))


class TransformerLayer(nn.Module):
    """Single transformer layer. NOTE(review): the body is still a stub —
    `norm_class` and `sparse_attn` are accepted but not yet used."""

    def __init__(self, dim, sparse_attn, norm_class):
        super(TransformerLayer, self).__init__()
        # Implement the transformer layer here
        pass
<filename>src/main/java/com/google/teampot/api/MeetingEndpoint.java
package com.google.teampot.api;

import java.util.Date;
import java.util.List;

import com.google.teampot.api.exception.EntityNotFoundException;
import com.google.teampot.api.exception.MeetingPollPastException;
import com.google.teampot.model.Meeting;
import com.google.teampot.model.MeetingPollProposedDate;
import com.google.teampot.model.MeetingPollVote;
import com.google.teampot.service.MeetingService;
import com.google.teampot.service.UserService;
import com.google.api.server.spi.config.ApiMethod;
import com.google.api.server.spi.config.ApiMethod.HttpMethod;
import com.google.api.server.spi.config.Named;
import com.google.api.server.spi.response.UnauthorizedException;
import com.google.appengine.api.oauth.OAuthRequestException;

/**
 * Cloud Endpoints API for meetings: CRUD plus poll voting. Every method
 * first verifies the calling Google user is enabled via UserService.
 */
public class MeetingEndpoint extends BaseEndpoint {

    private static MeetingService meetingService = MeetingService.getInstance();
    private static UserService userService = UserService.getInstance();

    /** Lists all meetings belonging to the given project. */
    @ApiMethod(
        name = "meeting.list",
        path = "meeting",
        httpMethod = HttpMethod.GET
    )
    public List<Meeting> list(com.google.appengine.api.users.User gUser, @Named("project") String projectId) throws OAuthRequestException, UnauthorizedException {
        userService.ensureEnabled(gUser);
        return meetingService.list(projectId);
    }

    /** Fetches one meeting by id; throws EntityNotFoundException when absent. */
    @ApiMethod(
        name = "meeting.get",
        path = "meeting/{id}",
        httpMethod = HttpMethod.GET
    )
    public Meeting get(@Named("id") String id, com.google.appengine.api.users.User gUser) throws OAuthRequestException,EntityNotFoundException, UnauthorizedException {
        userService.ensureEnabled(gUser);
        Meeting entity = meetingService.get(id);
        if (entity != null)
            return entity;
        else
            throw new EntityNotFoundException(id);
    }

    /** Creates or updates a meeting on behalf of the calling user. */
    @ApiMethod(
        name = "meeting.save",
        path = "meeting",
        httpMethod = HttpMethod.POST
    )
    public Meeting save(Meeting entity, com.google.appengine.api.users.User gUser) throws OAuthRequestException, UnauthorizedException {
        userService.ensureEnabled(gUser);
        meetingService.save(entity, userService.getUser(gUser));
        return entity;
    }

    /** Removes a meeting by id on behalf of the calling user. */
    @ApiMethod(
        name = "meeting.remove",
        path = "meeting/{id}",
        httpMethod = HttpMethod.DELETE
    )
    public void remove(@Named("id") String id, com.google.appengine.api.users.User gUser) throws OAuthRequestException, UnauthorizedException {
        userService.ensureEnabled(gUser);
        meetingService.remove(id, userService.getUser(gUser));
    }

    /**
     * Records the calling user's vote on a proposed meeting slot.
     * Rejects votes on meetings that are already in the past
     * (MeetingPollPastException) or that do not exist.
     * Timestamps are epoch values (Long) — units defined by
     * MeetingPollProposedDate; TODO confirm seconds vs milliseconds.
     */
    @ApiMethod(
        name = "meeting.pollVote",
        path = "meeting/{id}/poll/vote",
        httpMethod = HttpMethod.POST
    )
    public Meeting pollVote(@Named("id") String id, @Named("proposedStartDate") Long proposedStartDate, @Named("proposedEndDate") Long proposedEndDate, @Named("result") boolean result, com.google.appengine.api.users.User gUser) throws OAuthRequestException, MeetingPollPastException, EntityNotFoundException, UnauthorizedException {
        userService.ensureEnabled(gUser);

        Meeting meeting = meetingService.get(id);
        if (meeting == null) throw new EntityNotFoundException(id);
        if (meeting.isPast()) throw new MeetingPollPastException(id);

        MeetingPollProposedDate proposedDate = new MeetingPollProposedDate();
        proposedDate.setStartTimestamp(proposedStartDate);
        proposedDate.setEndTimestamp(proposedEndDate);

        MeetingPollVote vote = new MeetingPollVote();
        vote.setUser(userService.getUser(gUser));
        vote.setProposedDate(proposedDate);
        vote.setResult(result);

        meetingService.pollVote(meeting, vote);

        return meeting;
    }
}
#!/bin/bash -eux

# Record the start time so we can report total build duration at the end.
start=$(date +%s)

# remove old OVF file from any previous build
rm -rf ./output/vmware

packer validate packer_vmware.json
packer inspect packer_vmware.json
packer build packer_vmware.json

# Compute and pretty-print elapsed time as HH:MM:SS.
end=$(date +%s)
secs=$((end - start))
printf 'runtime = %02dh:%02dm:%02ds\n' $((secs / 3600)) $((secs % 3600 / 60)) $((secs % 60))
class SettingsManager:
    """Loads, updates, and persists settings stored as a Python dict
    literal (``str(dict)``) in a text file.

    The in-memory shape is ``{category: {key: value}}``.
    """

    def __init__(self, file_path):
        # file_path: path of the backing settings file (must exist and
        # contain a dict literal; load_settings raises IOError otherwise).
        self.file_path = file_path
        self.__config = {}  # Initialize settings data as a dictionary
        self.load_settings()  # Load settings data from the file upon initialization

    def update_setting(self, category, key, value):
        """Set ``category``/``key`` to ``value``, creating the category
        mapping if it does not exist yet.

        Raises ValueError if the stored category is not dict-like.
        """
        try:
            if category in self.__config:
                self.__config[category][key] = value
            else:
                self.__config[category] = {key: value}
        except Exception as e:
            raise ValueError(f"Failed to update setting: {e}")

    def save_settings(self):
        """Write the settings dict to the file as its repr (same literal
        format that load_settings reads back).

        Raises IOError when the file cannot be written.
        """
        try:
            with open(self.file_path, 'w') as file:
                # Write the updated settings data to the file
                file.write(str(self.__config))
        except Exception as e:
            raise IOError(f"Failed to save settings to file: {e}")

    def load_settings(self):
        """Read the file and parse the stored dict literal.

        SECURITY FIX: the original used ``eval()``, which executes
        arbitrary code found in the settings file. ``ast.literal_eval``
        accepts exactly the same ``str(dict)`` format but evaluates only
        literals, so a tampered file can no longer run code.

        Raises IOError when the file is missing, unreadable, or not a
        valid literal.
        """
        import ast  # local import: only this method needs it

        try:
            with open(self.file_path, 'r') as file:
                data = file.read()
            self.__config = ast.literal_eval(data)
        except Exception as e:
            raise IOError(f"Failed to load settings from file: {e}")
<filename>src/tests/createFakeElement.ts import { Measure } from '../measure' const createFakeElement = (measure: Measure): HTMLElement => { const element = document.createElement('div') element.style.position = 'absolute' element.style.left = `${measure.x}px`; element.style.top = `${measure.y}px`; element.style.width = `${measure.width}px`; element.style.height = `${measure.height}px`; element.getBoundingClientRect = () => ({ ...measure as DOMRect }) document.body.appendChild(element); return element; } export default createFakeElement
package intercept;

import intercept.configuration.DefaultInterceptConfiguration;
import intercept.configuration.InterceptConfiguration;
import intercept.logging.ConsoleApplicationLog;
import intercept.server.DefaultInterceptServer;
import intercept.server.InterceptServer;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.htmlunit.HtmlUnitDriver;

import java.util.ArrayList;
import java.util.List;

/**
 * Composable test fixture tree: each asset sets itself up in
 * itemConstruct, tears itself down in itemClose, and may carry children
 * that are constructed after (and closed before) their parent.
 */
abstract class TestAsset {
    protected List<TestAsset> children;
    // Shared across assets so tests can reach the running server via
    // interceptInstance(). NOTE(review): static state means only one
    // intercept() asset can be active at a time.
    protected static InterceptServer interceptServer;

    /** Asset that starts an intercept proxy server and stops it on close. */
    public final static TestAsset intercept() {
        return new TestAsset() {
            InterceptConfiguration config;

            @Override
            public void itemConstruct(TestContext ctx) {
                ConsoleApplicationLog log = new ConsoleApplicationLog();
                interceptServer = new DefaultInterceptServer(log);
                ctx.putIntercept(interceptServer);
                config = new DefaultInterceptConfiguration(log);
                interceptServer.start(config);
            }

            @Override
            public void itemClose() {
                if (interceptServer != null) {
                    interceptServer.stop(config);
                }
            }
        };
    }

    /** Asset that provides an HtmlUnit WebDriver and quits it on close. */
    public static TestAsset HTMLUnit() {
        return new TestAsset() {
            WebDriver driver;

            @Override
            public void itemConstruct(TestContext ctx) {
                driver = ctx.configure(new HtmlUnitDriver());
                ctx.driver = driver;
            }

            @Override
            public void itemClose() {
                if (driver != null) {
                    driver.quit();
                }
                driver = null;
            }
        };
    }

    TestAsset() {
        this.children = new ArrayList<TestAsset>();
    }

    /** Subclass hook: set up this asset only (children handled by construct). */
    protected abstract void itemConstruct(TestContext ctx);

    /** Constructs this asset first, then its children in insertion order. */
    public void construct(TestContext ctx) {
        itemConstruct(ctx);
        for (TestAsset child : children) {
            child.construct(ctx);
        }
    }

    /** Subclass hook: tear down this asset only. */
    protected abstract void itemClose();

    /** Closes children first, then this asset — reverse of construct. */
    public void close() {
        for (TestAsset child : children) {
            child.close();
        }
        itemClose();
    }

    /** Adds a child asset; returns this for fluent chaining. */
    public TestAsset with(TestAsset child) {
        this.children.add(child);
        return this;
    }

    /** The most recently started intercept server, or null. */
    public static InterceptServer interceptInstance() {
        return interceptServer;
    }
}
#!/bin/sh
# Compiles a curve/field-arithmetic C source with clang, baking the field
# parameters in via -D defines. Extra arguments (e.g. the source file and
# -o target) are forwarded via "$@".
#
# Notable flags:
#   -fbracket-depth=999999  : the generated code nests brackets far beyond
#                             clang's default limit
#   -march=native -mbmi2    : use host ISA incl. BMI2 (MULX/etc.)
#   -fwrapv                 : defined two's-complement signed overflow
#   -fno-strict-aliasing    : generated code type-puns freely
# The -D macros define the modulus, limb layout, and a24 constant for a
# 43-byte (338-bit-ish) prime field; see q_mpz = (1<<338) - 15.
set -eu
clang -fbracket-depth=999999 -march=native -mbmi2 -mtune=native -std=gnu11 -O3 -flto -fuse-ld=lld -fomit-frame-pointer -fwrapv -Wno-attributes -fno-strict-aliasing -Da24_hex='0x3039' -Da24_val='12345' -Da_minus_two_over_four_array='{0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x30,0x39}' -Dbitwidth='64' -Dlimb_weight_gaps_array='{64,64,64,64,64,64}' -Dmodulus_array='{0x03,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf1}' -Dmodulus_bytes_val='43' -Dmodulus_limbs='6' -Dq_mpz='(1_mpz<<338) - 15' "$@"
import React from "react";

// Presentational table: renders one row per data item, showing name and age.
const Table = ({ data }) => (
  <table>
    <tbody>
      {data.map((el, i) => (
        <tr key={i}>
          <td>{el.name}</td>
          <td>{el.age}</td>
        </tr>
      ))}
    </tbody>
  </table>
);

export default Table;
<gh_stars>0
package output

import (
	"context"
	"time"

	"github.com/benthosdev/benthos/v4/internal/component/metrics"
	"github.com/benthosdev/benthos/v4/internal/component/output"
	"github.com/benthosdev/benthos/v4/internal/docs"
	"github.com/benthosdev/benthos/v4/internal/interop"
	"github.com/benthosdev/benthos/v4/internal/log"
	"github.com/benthosdev/benthos/v4/internal/message"
	"github.com/benthosdev/benthos/v4/internal/transaction"
)

//------------------------------------------------------------------------------

// Registers the sync_response output type. The writer itself is stateless,
// so a single SyncResponseWriter{} value is shared by the async wrapper.
func init() {
	Constructors[TypeSyncResponse] = TypeSpec{
		constructor: fromSimpleConstructor(func(_ Config, _ interop.Manager, logger log.Modular, stats metrics.Type) (output.Streamed, error) {
			return NewAsyncWriter(TypeSyncResponse, 1, SyncResponseWriter{}, logger, stats)
		}),
		Summary: `
Returns the final message payload back to the input origin of the message, where it is dealt with according to that specific input type.`,
		Description: `
For most inputs this mechanism is ignored entirely, in which case the sync response is dropped without penalty. It is therefore safe to use this output even when combining input types that might not have support for sync responses. An example of an input able to utilise this is the ` + "`http_server`" + `.

It is safe to combine this output with others using broker types. For example, with the ` + "`http_server`" + ` input we could send the payload to a Kafka topic and also send a modified payload back with:

` + "```yaml" + `
input:
  http_server:
    path: /post
output:
  broker:
    pattern: fan_out
    outputs:
      - kafka:
          addresses: [ TODO:9092 ]
          topic: foo_topic
      - sync_response: {}
        processors:
          - bloblang: 'root = content().uppercase()'
` + "```" + `

Using the above example and posting the message 'hello world' to the endpoint ` + "`/post`" + ` Benthos would send it unchanged to the topic ` + "`foo_topic`" + ` and also respond with 'HELLO WORLD'.

For more information please read [Synchronous Responses](/docs/guides/sync_responses).`,
		Categories: []string{
			"Utility",
		},
		Config: docs.FieldObject("", ""),
	}
}

//------------------------------------------------------------------------------

// SyncResponseWriter is a writer implementation that adds messages to a ResultStore located
// in the context of the first message part of each batch. This is essentially a
// mechanism that returns the result of a pipeline directly back to the origin
// of the message.
type SyncResponseWriter struct{}

// ConnectWithContext is a noop.
func (s SyncResponseWriter) ConnectWithContext(ctx context.Context) error {
	return nil
}

// WriteWithContext writes a message batch to a ResultStore located in the first
// message of the batch.
func (s SyncResponseWriter) WriteWithContext(ctx context.Context, msg *message.Batch) error {
	return transaction.SetAsResponse(msg)
}

// CloseAsync is a noop.
func (s SyncResponseWriter) CloseAsync() {}

// WaitForClose is a noop.
func (s SyncResponseWriter) WaitForClose(time.Duration) error {
	return nil
}
<reponame>20170415/commons-digester<gh_stars>0
package org.apache.commons.digester3.xmlrules;

/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

import org.apache.commons.digester3.binder.CallParamBuilder;
import org.apache.commons.digester3.binder.LinkedRuleBuilder;
import org.apache.commons.digester3.binder.RulesBinder;
import org.xml.sax.Attributes;

/**
 * XML-rules handler for the {@code <call-param-rule>} element: translates
 * its attributes (paramnumber, attrname, from-stack, stack-index) into a
 * CallParamBuilder binding. attrname and from-stack are mutually exclusive.
 */
final class CallParamRule
    extends AbstractXmlRule
{

    public CallParamRule( final RulesBinder targetRulesBinder, final PatternStack patternStack )
    {
        super( targetRulesBinder, patternStack );
    }

    /**
     * {@inheritDoc}
     */
    @Override
    protected void bindRule( final LinkedRuleBuilder linkedRuleBuilder, final Attributes attributes )
        throws Exception
    {
        // paramnumber is mandatory; a missing/non-numeric value fails fast
        // with NumberFormatException, matching the original behavior.
        final int paramIndex = Integer.parseInt( attributes.getValue( "paramnumber" ) );
        final CallParamBuilder builder = linkedRuleBuilder.callParam().ofIndex( paramIndex );

        final String attributeName = attributes.getValue( "attrname" );
        final String fromStack = attributes.getValue( "from-stack" );
        final String stackIndex = attributes.getValue( "stack-index" );

        if ( attributeName == null )
        {
            if ( stackIndex != null )
            {
                builder.withStackIndex( Integer.parseInt( stackIndex ) );
            }
            else if ( fromStack != null )
            {
                // IDIOM: Boolean.parseBoolean replaces the boxing round-trip
                // Boolean.valueOf(fromStack).booleanValue() — same result,
                // no intermediate object.
                builder.fromStack( Boolean.parseBoolean( fromStack ) );
            }
        }
        else
        {
            if ( fromStack == null )
            {
                builder.fromAttribute( attributeName );
            }
            else
            {
                // specifying both from-stack and attribute name is not allowed
                throw new RuntimeException( "Attributes from-stack and attrname cannot both be present." );
            }
        }
    }

}
#!/bin/bash
#
# Copyright (c) 2019-2020 P3TERX <https://p3terx.com>
#
# This is free software, licensed under the MIT License.
# See /LICENSE for more information.
#
# https://github.com/P3TERX/Actions-OpenWrt
# File name: diy-part2.sh
# Description: OpenWrt DIY script part 2 (After Update feeds)
#

# Modify default IP
# BUG FIX: '.' is a regex metacharacter in sed, so the unescaped patterns
# also matched strings like "192x168y1z1" / "5x10". Escape the dots to
# match literal IPs/versions only.
sed -i 's/192\.168\.1\.1/10.0.0.1/g' package/base-files/files/bin/config_generate

# Swap the x86 target's version string (replacement text kept as-is).
sed -i 's/5\.10/15.4/g' target/linux/x86/Makefile
// // Created by <NAME> on 19/11/18. // #ifndef TMPL8_2018_01_CAMERA_H #define TMPL8_2018_01_CAMERA_H #include "precomp.h" class Camera { public: Camera(); float getFOV(); void adjustFOV(int y); mat4 getMatrix(); void setMatrix(mat4 matrix); float getSpeed(); void setSpeed(float f); void rotate(char axis, float theta); vec4 translate(vec3 d, float delta); private: mat4 cameraToWorldMatrix; vec4 cameraPosition = vec4(0,0,0,1); float fov = 90; float speed = 0.3f; vec3 rotationTheta; }; #endif //TMPL8_2018_01_CAMERA_H
#!/bin/bash
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

# Launches one slurm job per model-parallel part to reshard checkpoints
# to a new DDP world size, then waits for all jobs.
#
# Usage: reshard.sh <prefix> <save_dir> <mparts> <tgt_size> [extra args...]
prefix=$1
save_dir=$2
mparts=$3     # number of model-parallel parts
tgt_size=$4   # target DDP world size
shift 4

# ROBUSTNESS FIX: expansions are now quoted so paths/names containing
# spaces or glob characters don't word-split; `let` replaced with the
# POSIX arithmetic form.
mkdir -p "$save_dir"
last_part=$((mparts - 1))
echo "$@"

for i in $(seq 0 "$last_part")
do
    echo "python -m metaseq.scripts.reshard_mp $prefix $save_dir --part $i --target-ddp-size $tgt_size"
    jname="reshard_mp${i}_ddp${tgt_size}"
    echo "$jname"
    # One single-node, 8-GPU job per part, running in the background.
    srun --job-name="$jname" \
        --gpus-per-node=8 --nodes=1 --ntasks-per-node=1 --cpus-per-task=64 \
        --output "$save_dir/$jname.log" \
        python -m metaseq.scripts.reshard_mp "$prefix" "$save_dir" --part "$i" --target-ddp-size "$tgt_size" &
done

echo "Waiting on slurm..."
wait $(jobs -p)
echo "Done"
<gh_stars>1-10 import {execute, subscribe} from 'graphql' import {PubSub} from 'graphql-subscriptions' import {SubscriptionServer} from 'subscriptions-transport-ws' import {execSchema} from './index' const pubsub = new PubSub() export function runSubscriptionServer(server) { return new SubscriptionServer( {execute, subscribe, schema: execSchema}, {server, path: '/subscriptions'}, ) } export default pubsub
def remove_duplicates(list_):
    """Return a new list with duplicates removed, keeping first-seen order.

    PERFORMANCE FIX: the original scanned the output list for every item
    (O(n^2)). Hashable items now use an O(1) seen-set; unhashable items
    (e.g. lists) fall back to the original linear scan, so behavior is
    preserved for every input the original accepted.

    :param list_: iterable of items to deduplicate
    :return: new list of unique items in first-occurrence order
    """
    seen = set()
    result = []
    for item in list_:
        try:
            is_new = item not in seen
            if is_new:
                seen.add(item)
        except TypeError:
            # Unhashable item: fall back to scanning the result list.
            is_new = item not in result
        if is_new:
            result.append(item)
    return result


list_ = [5, 2, 3, 5, 1, 2]
print(remove_duplicates(list_))
#!/bin/bash

# Sometimes wttr.in just stops working. No idea why. But when it does it
# returns a big ugly string that messes up my tmux status bar. I'd rather it
# not do that. So this is a simple catch that forces it to print nothing
# instead when it can't fetch the weather.
#
# ROBUSTNESS FIX: --max-time bounds the request; a hung wttr.in previously
# stalled this script (and with it the tmux status bar) indefinitely. On
# timeout/failure WEATHER is empty and the else-branch echoes "".

WEATHER=$(curl -s --max-time 5 wttr.in?format="%c+%t&u")

if [[ $WEATHER == *Unknown* ]] || [[ $WEATHER == *Sorry* ]]; then
    echo ""
else
    echo "$WEATHER"
fi
#!/usr/bin/env bash

# Publishes the Gradle plugin: clean build, push to the plugin portal,
# then notify it; --stacktrace for diagnosable failures.
echo "Publishing plugin"
./gradlew clean publishPlugin notifyPluginPortal --stacktrace
#!/bin/bash
# set -e

# Container entrypoint: optionally bootstraps a local MariaDB + phpMyAdmin,
# installs Drupal from git, then starts redis, sshd, php-fpm, and nginx in
# the foreground. Relies on env vars: MARIADB_DATA_DIR, MARIADB_LOG_DIR,
# PHPMYADMIN_HOME, PHPMYADMIN_SOURCE, DRUPAL_HOME, DRUPAL_PRJ,
# PHP_CONF_FILE, DATABASE_TYPE, and the Azure storage flag
# WEBSITES_ENABLE_APP_SERVICE_STORAGE.

php -v

# Move MariaDB's data directory onto persistent storage, seeding it from
# /var/lib/mysql when empty, and symlink the original path back to it.
setup_mariadb_data_dir(){
    test ! -d "$MARIADB_DATA_DIR" && echo "INFO: $MARIADB_DATA_DIR not found. creating ..." && mkdir -p "$MARIADB_DATA_DIR"

    # check if 'mysql' database exists
    if [ ! -d "$MARIADB_DATA_DIR/mysql" ]; then
        echo "INFO: 'mysql' database doesn't exist under $MARIADB_DATA_DIR. So we think $MARIADB_DATA_DIR is empty."
        echo "Copying all data files from the original folder /var/lib/mysql to $MARIADB_DATA_DIR ..."
        cp -R /var/lib/mysql/. $MARIADB_DATA_DIR
    else
        echo "INFO: 'mysql' database already exists under $MARIADB_DATA_DIR."
    fi

    rm -rf /var/lib/mysql
    ln -s $MARIADB_DATA_DIR /var/lib/mysql
    chown -R mysql:mysql $MARIADB_DATA_DIR

    test ! -d /run/mysqld && echo "INFO: /run/mysqld not found. creating ..." && mkdir -p /run/mysqld
    chown -R mysql:mysql /run/mysqld
}

# Start MariaDB via OpenRC, relink the client socket, and ensure the
# default application database exists.
start_mariadb(){
    /etc/init.d/mariadb setup
    rc-service mariadb start
    rm -f /tmp/mysql.sock
    ln -s /var/run/mysqld/mysqld.sock /tmp/mysql.sock
    # create default database 'azurelocaldb'
    mysql -u root -e "CREATE DATABASE IF NOT EXISTS azurelocaldb; FLUSH PRIVILEGES;"
}

# Unpack phpMyAdmin into PHPMYADMIN_HOME and install its nginx site config.
setup_phpmyadmin(){
    test ! -d "$PHPMYADMIN_HOME" && echo "INFO: $PHPMYADMIN_HOME not found. creating..." && mkdir -p "$PHPMYADMIN_HOME"
    cd $PHPMYADMIN_SOURCE
    tar -xf phpMyAdmin.tar.gz -C $PHPMYADMIN_HOME/ --strip-components=1
    cp -R phpmyadmin-default.conf /etc/nginx/conf.d/default.conf
    cd /
    rm -rf $PHPMYADMIN_SOURCE
    # Outside Azure (flag unset) the web server user must own the files.
    if [ ! $WEBSITES_ENABLE_APP_SERVICE_STORAGE ]; then
        echo "INFO: NOT in Azure, chown for "$PHPMYADMIN_HOME
        chown -R www-data:www-data $PHPMYADMIN_HOME
    fi
}

# Clone Drupal from git (repo/branch overridable via GIT_REPO/GIT_BRANCH),
# prepare sites/default, and wire DRUPAL_HOME to the checkout's web/ dir.
setup_drupal(){
    cd $DRUPAL_PRJ
    GIT_REPO=${GIT_REPO:-https://github.com/azureappserviceoss/drupalcms-azure}
    GIT_BRANCH=${GIT_BRANCH:-linuxappservice-composer}
    echo "INFO: ++++++++++++++++++++++++++++++++++++++++++++++++++:"
    echo "REPO: "$GIT_REPO
    echo "BRANCH: "$GIT_BRANCH
    echo "INFO: ++++++++++++++++++++++++++++++++++++++++++++++++++:"
    echo "INFO: Clone from "$GIT_REPO
    git clone $GIT_REPO $DRUPAL_PRJ

    if [ "$GIT_BRANCH" != "master" ];then
        echo "INFO: Checkout to "$GIT_BRANCH
        git fetch origin
        git branch --track $GIT_BRANCH origin/$GIT_BRANCH && git checkout $GIT_BRANCH
    fi

    chmod a+w "$DRUPAL_PRJ/web/sites/default"
    mkdir -p "$DRUPAL_PRJ/web/sites/default/files"
    chmod a+w "$DRUPAL_PRJ/web/sites/default/files"
    if test ! -e "$DRUPAL_PRJ/web/sites/default/settings.php"; then
        # Checked here because, after git pull, Drupal may already be installed in the repo.
        cp "$DRUPAL_PRJ/web/sites/default/default.settings.php" "$DRUPAL_PRJ/web/sites/default/settings.php"
        chmod a+w "$DRUPAL_PRJ/web/sites/default/settings.php"
        mv /usr/src/settings.redis.php "$DRUPAL_PRJ/web/sites/default/settings.redis.php"
    fi

    # Back up any existing wwwroot, then point DRUPAL_HOME at the checkout.
    test -d "$DRUPAL_HOME" && mv $DRUPAL_HOME /home/bak/wwwroot_bak$(date +%s)
    ln -s $DRUPAL_PRJ/web/ $DRUPAL_HOME
    echo "INFO: Composer require drupal/redis..."
    cd $DRUPAL_PRJ && composer require drupal/redis
}

test ! -d "$DRUPAL_HOME" && echo "INFO: $DRUPAL_HOME not found. creating..." && mkdir -p "$DRUPAL_HOME"
if [ ! $WEBSITES_ENABLE_APP_SERVICE_STORAGE ]; then
    echo "INFO: NOT in Azure, chown for "$DRUPAL_HOME
    chown -R www-data:www-data $DRUPAL_HOME
fi

echo "Setup openrc ..." && openrc && touch /run/openrc/softlevel

# Normalize DATABASE_TYPE; "local" means run MariaDB + phpMyAdmin in-container.
DATABASE_TYPE=$(echo ${DATABASE_TYPE}|tr '[A-Z]' '[a-z]')
if [ "${DATABASE_TYPE}" == "local" ]; then
    echo "Starting MariaDB and PHPMYADMIN..."
    # Point PHP's mysql/mysqli clients at the local socket.
    echo 'mysql.default_socket = /run/mysqld/mysqld.sock' >> $PHP_CONF_FILE
    echo 'mysqli.default_socket = /run/mysqld/mysqld.sock' >> $PHP_CONF_FILE
    #setup MariaDB
    echo "INFO: loading local MariaDB and phpMyAdmin ..."
    echo "Setting up MariaDB data dir ..."
    setup_mariadb_data_dir
    echo "Setting up MariaDB log dir ..."
    test ! -d "$MARIADB_LOG_DIR" && echo "INFO: $MARIADB_LOG_DIR not found. creating ..." && mkdir -p "$MARIADB_LOG_DIR"
    chown -R mysql:mysql $MARIADB_LOG_DIR
    echo "Starting local MariaDB ..."
    start_mariadb
    echo "Granting user for phpMyAdmin ..."
    # Set default value of username/password if they aren't set/null.
    DATABASE_USERNAME=${DATABASE_USERNAME:-phpmyadmin}
    DATABASE_PASSWORD=${DATABASE_PASSWORD:-MS173m_QN}
    echo "phpmyadmin username: "$DATABASE_USERNAME
    echo "phpmyadmin password: "$DATABASE_PASSWORD
    mysql -u root -e "GRANT ALL ON *.* TO \`$DATABASE_USERNAME\`@'localhost' IDENTIFIED BY '$DATABASE_PASSWORD' WITH GRANT OPTION; FLUSH PRIVILEGES;"
    echo "Installing phpMyAdmin ..."
    setup_phpmyadmin
fi

# setup Drupal
mkdir -p /home/bak
if test ! -e "$DRUPAL_HOME/sites/default/settings.php"; then
    # Checked here in case WEBSITES_ENABLE_APP_SERVICE_STORAGE = true and Drupal is already installed.
    echo "Installing Drupal ..."
    # Move aside any stale project dirs until the path is free.
    while test -d "$DRUPAL_PRJ"
    do
        echo "INFO: $DRUPAL_PRJ is exist, clean it ..."
        mv $DRUPAL_PRJ /home/bak/drupal_prj_bak$(date +%s)
    done
    test ! -d "$DRUPAL_PRJ" && echo "INFO: $DRUPAL_PRJ not found. creating..." && mkdir -p "$DRUPAL_PRJ"
    setup_drupal
    if [ ! $WEBSITES_ENABLE_APP_SERVICE_STORAGE ]; then
        echo "INFO: NOT in Azure, chown for "$DRUPAL_PRJ
        chown -R www-data:www-data $DRUPAL_PRJ
    fi
fi

# Set php-fpm listen type
# By default, it's a unix socket; otherwise a TCP port.
LISTEN_TYPE=${LISTEN_TYPE:-socket}
LISTEN_TYPE=$(echo ${LISTEN_TYPE}|tr '[A-Z]' '[a-z]')
if [ "${LISTEN_TYPE}" == "socket" ]; then
    echo "INFO: creating /run/php/php7.0-fpm.sock ..."
    test -e /run/php/php7.0-fpm.sock && rm -f /run/php/php7.0-fpm.sock
    mkdir -p /run/php
    touch /run/php/php7.0-fpm.sock
    chown www-data:www-data /run/php/php7.0-fpm.sock
    chmod 777 /run/php/php7.0-fpm.sock
else
    echo "INFO: PHP-FPM listener is 127.0.0.1:9000 ..."
    # Rewrite nginx and php-fpm configs from the socket path to TCP 9000.
    #/etc/nginx/conf.d/default.conf
    sed -i "s/unix:\/var\/run\/php\/php7.0-fpm.sock/127.0.0.1:9000/g" /etc/nginx/conf.d/default.conf
    #/usr/local/etc/php/conf.d/www.conf
    sed -i "s/\/var\/run\/php\/php7.0-fpm.sock/127.0.0.1:9000/g" /usr/local/etc/php/conf.d/www.conf
    #/usr/local/etc/php-fpm.d/zz-docker.conf
    sed -i "s/\/var\/run\/php\/php7.0-fpm.sock/9000/g" /usr/local/etc/php-fpm.d/zz-docker.conf
fi

cd $DRUPAL_HOME

echo "Starting Redis ..."
redis-server &

echo "Starting SSH ..."
rc-service sshd start

echo "Starting php-fpm ..."
php-fpm -D
if [ "${LISTEN_TYPE}" == "socket" ]; then
    chmod 777 /run/php/php7.0-fpm.sock
fi

# nginx runs in the foreground as the container's main process.
echo "Starting Nginx ..."
mkdir -p /home/LogFiles/nginx
if test ! -e /home/LogFiles/nginx/error.log; then
    touch /home/LogFiles/nginx/error.log
fi
/usr/sbin/nginx -g "daemon off;"
# The following functions assume they are called from project root.

# Installs every Ubuntu build/runtime dependency for Simula via apt.
installUbuntuDependencies() {
    sudo apt-get install build-essential \
                         haskell-stack \
                         libasound2-dev \
                         libcap-dev \
                         libdrm-dev \
                         libegl1-mesa-dev \
                         libelogind-dev \
                         libfreetype6-dev \
                         libgbm-dev \
                         libgl1-mesa-dev \
                         libgles2 \
                         libgles2-mesa-dev \
                         libgudev-1.0-dev \
                         libinput-dev \
                         libpixman-1-dev \
                         libpulse-dev \
                         libssl-dev \
                         libsystemd-dev \
                         libudev-dev \
                         libwayland-dev \
                         libx11-dev \
                         libx11-xcb-dev \
                         libxcb-composite0-dev \
                         libxcb-image0-dev \
                         libxcb-render-util0-dev \
                         libxcb-render0-dev \
                         libxcb-xfixes0-dev \
                         libxcb-xinput-dev \
                         libxcursor-dev \
                         libxkbcommon-dev \
                         libxi-dev \
                         libxinerama-dev \
                         libxkbcommon-x11-dev \
                         libxrandr-dev \
                         meson \
                         pkg-config \
                         scons \
                         steam \
                         steam-devices \
                         wayland-protocols \
                         yasm \
                         libwlroots-dev \
                         epiphany \
                         sakura \
                         cabal-install \
                         wmctrl \
                         xdotool \
                         bash
    # libegl-dev # Not provided in disco dango
    # libxcb-iccm4-dev # Not provided in disco dango
    # steam-runtime # Not provided in disco dango
    # upgradeStack
}

# Installs the Arch Linux equivalents via pacman.
installArchDependencies() {
    sudo pacman -S alsa-lib \
                   freetype2 \
                   glu \
                   libcap \
                   libdrm \
                   libglvnd \
                   libinput \
                   libudev0-shim \
                   libxcursor \
                   libxi \
                   libxinerama \
                   libxkbcommon \
                   libxkbcommon-x11 \
                   libxrandr \
                   mesa \
                   meson \
                   pixman \
                   pulseaudio \
                   scons \
                   stack steam \
                   systemd \
                   wayland \
                   wayland-protocols \
                   yasm \
                   wlroots \
                   # 0.5.0-1
                   epiphany \
                   sakura \
                   cabal-install
    # NOTE(review): the comment line after `wlroots \` interrupts the
    # line continuation, so the packages after it likely never reach
    # pacman — confirm and move the comment if so.
    # yaourt -S elogind # optional dependency so we omit to avoid dealing with yaourt
    upgradeStack
}

# Restores GNOME's default Alt-Tab bindings and prints them.
ubuntuAltTabReset() {
    gsettings reset org.gnome.desktop.wm.keybindings switch-applications
    gsettings get org.gnome.desktop.wm.keybindings switch-applications
    gsettings reset org.gnome.desktop.wm.keybindings switch-applications-backward
    gsettings get org.gnome.desktop.wm.keybindings switch-applications-backward
}

# Unbinds Alt-Tab so it can pass through to Simula.
ubuntuAltTabDisable() {
    gsettings set org.gnome.desktop.wm.keybindings switch-applications "['']"
    gsettings set org.gnome.desktop.wm.keybindings switch-applications-backward "['']"
}

# Unbinds the GNOME overview (Super) key.
disableUbuntuSuperKey() {
    gsettings set org.gnome.mutter overlay-key ''
}

# Warning: installs to system!
compileWlroots() {
    cd ./submodules/wlroots
    meson build
    sudo ninja -C build
    sudo ninja -C build install
    cd -
}

# Requires a Godot launch to generate api.json
compileGodot() {
    cd ./submodules/godot
    cd ./modules/gdwlroots
    make all
    cd ../..
    scons platform=x11 target=debug -j 8
    if [ -e ./bin/godot.x11.tools.64 ]; then
        ./bin/godot.x11.tools.64 --gdnative-generate-json-api api.json
    fi
    cd ../..
}

# Regenerates godot-haskell bindings from godot's api.json (if present).
compileGodotHaskell() {
    cd ./submodules/godot-haskell
    cd classgen
    if [ -e ../../godot/api.json ]; then
        stack build
        stack exec godot-haskell-classgen ../../godot/api.json
        cd ..
        cp -r src src.bak
        rsync -a classgen/src/ src/
    fi
    cd ../..
}

# Builds the Haskell plugin with stack.
compileGodotHaskellPlugin() {
    cd ./addons/godot-haskell-plugin
    stack build
    cd -
}

# Points the plugin .so symlink at the nix-built artifact.
switchToNix() {
    cd ./addons/godot-haskell-plugin
    rm libgodot-haskell-plugin.so
    ln -s ../../result/bin/libgodot-haskell-plugin.so libgodot-haskell-plugin.so
    cd -
}

# Points the plugin .so symlink at the locally cabal-built artifact.
switchToLocal() {
    cd ./addons/godot-haskell-plugin
    rm libgodot-haskell-plugin.so
    ln -s ./dist-newstyle/build/x86_64-linux/ghc-8.10.7/godot-haskell-plugin-0.1.0.0/f/godot-haskell-plugin/build/godot-haskell-plugin/libgodot-haskell-plugin.so libgodot-haskell-plugin.so
    cd -
}

# Installs cachix if it isn't already on PATH.
checkInstallCachix() {
    if command -v cachix; then
        echo "cachix already installed.."
    else
        nix-env -iA cachix -f https://cachix.org/api/v1/install
    fi
}

# Installs nix if it isn't already on PATH, then sources its profile.
checkInstallNix() {
    if command -v nix; then
        echo "nix already installed.."
    else
        curl -L https://nixos.org/nix/install | sh
        . $HOME/.nix-profile/etc/profile.d/nix.sh
    fi
}

# Prints "true" on NixOS, "false" elsewhere.
checkIfNixOS() {
    if [ -e /etc/NIXOS ]; then
        echo "true";
    else
        echo "false";
    fi
}

# Full install: nix + cachix + nix-build. No arg = release build via nix;
# "i" = instantiate only (debug); any other arg = dev build kept locally.
installSimula() {
    checkInstallNix
    checkInstallCachix
    cachix use simula
    curl https://www.wolframcloud.com/obj/george.w.singer/installMessage
    if [ -z $1 ]; then
        NIXPKGS_ALLOW_UNFREE=1 nix-build -Q default.nix --arg onNixOS "$(checkIfNixOS)" --arg devBuild "false"
        switchToNix
    # Useful for debug purposes
    elif [ "$1" = "i" ]; then
        switchToNix
        NIXPKGS_ALLOW_UNFREE=1 nix-instantiate -Q -K default.nix --arg onNixOS "$(checkIfNixOS)" --arg devBuild "true"
        switchToLocal
    else
        switchToNix
        NIXPKGS_ALLOW_UNFREE=1 nix-build -Q -K default.nix --arg onNixOS "$(checkIfNixOS)" --arg devBuild "true"
        switchToLocal
    fi
}

# Pulls the latest sources (master, or dev when any arg given) and rebuilds.
updateSimula() {
    checkInstallNix
    checkInstallCachix
    cachix use simula
    if [ -z $1 ]; then
        git pull origin master
        git submodule update --recursive
        NIXPKGS_ALLOW_UNFREE=1 nix-build -Q default.nix --arg onNixOS "$(checkIfNixOS)" --arg devBuild "false"
        switchToNix
    else
        switchToNix
        git pull origin dev
        git submodule update --recursive
        NIXPKGS_ALLOW_UNFREE=1 nix-build -Q -K default.nix --arg onNixOS "$(checkIfNixOS)" --arg devBuild "false"
        switchToNix
    fi
}

# Replaces the nix-provided xpra with the system one inside ./result.
swapXpraNixToLocal() {
    sudo rm ./result/bin/xpra
    sudo ln -s $(which xpra) ./result/bin/xpra
}

# Experimental nsBuild* functions allow Simula developers to locally build
# Simula modules inside a nix-shell
nsBuildGodot() {
    cd ./submodules/godot
    local runCmd="wayland-scanner server-header ./modules/gdwlroots/xdg-shell.xml ./modules/gdwlroots/xdg-shell-protocol.h; wayland-scanner private-code ./modules/gdwlroots/xdg-shell.xml ./modules/gdwlroots/xdg-shell-protocol.c; scons -Q -j8 platform=x11 target=debug"
    if [ -z $1 ]; then
        nix-shell --run "$runCmd"
    else
        # Any arg: rebuild automatically on every file modification.
        nix-shell --run "while inotifywait -qqre modify .; do $runCmd; done"
    fi
    cd -
}

# scons --clean for the godot submodule, inside nix-shell.
nsCleanGodot() {
    cd ./submodules/godot
    local runCmd="scons --clean"
    nix-shell --run "$runCmd"
    cd -
}

# Updates godot-haskell to latest api.json generated from devBuildGodot
nsBuildGodotHaskell() {
    cd ./submodules/godot
    nix-shell -Q --run "LD_LIBRARY_PATH=./modules/gdleapmotionV2/LeapSDK/lib/x64 $(../../utils/GetNixGL.sh) ./bin/godot.x11.tools.64 --gdnative-generate-json-api ./bin/api.json"
    cd -

    cd ./submodules/godot-haskell-cabal
    if [ -z $1 ]; then
        nix-shell -Q --attr env release.nix --run "./updateApiJSON.sh"
    elif [ $1 == "--profile" ]; then
        nix-shell -Q --attr env --arg profileBuild true release.nix --run "./updateApiJSON.sh"
    fi
    cd -
}

# Builds the plugin via cabal-in-nix-shell; --profile enables profiling,
# any other arg enables watch mode.
nsBuildGodotHaskellPlugin() {
    cd ./addons/godot-haskell-plugin
    if [ -z $1 ]; then
        nix-shell -Q --attr env shell.nix --run "../../result/bin/cabal build"
    elif [ $1 == "--profile" ]; then
        nix-shell -Q --attr env shell.nix --arg profileBuild true --run "../../result/bin/cabal --enable-profiling build --ghc-options=\"-fprof-auto -rtsopts -fPIC -fexternal-dynamic-refs\""
    else
        nix-shell --attr env shell.nix --run "while inotifywait -qqre modify .; do ../../result/bin/cabal build; done"
    fi
    cd -
}

# Opens a cabal REPL for the plugin inside nix-shell.
nsREPLGodotHaskellPlugin() {
    cd ./addons/godot-haskell-plugin
    nix-shell --attr env shell.nix --run "cabal repl"
}

# End-to-end local dev build: install, then build wlroots, godot, the
# bindings, and the plugin; forwards "$1" (e.g. --profile) downstream.
nsBuildSimulaLocal() {
    installSimula 1
    ./result/bin/cabal update
    nsBuildWlroots
    nsBuildGodot
    patchGodotWlroots
    nsBuildGodotHaskell "$1"
    nsBuildGodotHaskellPlugin "$1"
    switchToLocal
}

# Builds wlroots with meson/ninja inside nix-shell (configuring on first run).
nsBuildWlroots() {
    cd ./submodules/wlroots
    if [ -d "./build" ]; then
        nix-shell -Q --run "ninja -C build"
    else
        nix-shell -Q --run "meson build; ninja -C build"
    fi
    cd -
}

# One-time optional email capture for update notifications.
updateEmail() {
    if [ -e ./email ]; then
        # .. do nothing ..
        echo ""
    else
        ./result/bin/dialog --title "SimulaVR" --backtitle "OPTIONAL: Provide email for important Simula updates & improved bug troubleshooting" --inputbox "Email: " 8 60 --output-fd 1 > ./email 2>&1
        ./result/bin/curl --data-urlencode emailStr@email https://www.wolframcloud.com/obj/george.w.singer/emailMessage
        clear
    fi
}

#patch our Godot executable to point to our local build of wlroots
patchGodotWlroots(){
    PATH_TO_SIMULA_WLROOTS="`pwd`/submodules/wlroots/build/"
    OLD_RPATH="`./result/bin/patchelf --print-rpath submodules/godot/bin/godot.x11.tools.64`"
    if [[ $OLD_RPATH != $PATH_TO_SIMULA_WLROOTS* ]]; then #check if the current rpath contains our local simula wlroots build. If not, patchelf to add our path to the start of the executable's rpath
        echo "Patching godot.x11.tools to point to local wlroots lib"
        echo "Changing path to: $PATH_TO_SIMULA_WLROOTS:$OLD_RPATH"
        ./result/bin/patchelf --set-rpath "$PATH_TO_SIMULA_WLROOTS:$OLD_RPATH" submodules/godot/bin/godot.x11.tools.64
    else
        echo "Not patching godot.x11.tools, already patched."
    fi
}

# Applies the Zen kernel scheduling workaround (needs sudo inside nix-shell).
zenRR() {
    nix-shell --arg onNixOS $(checkIfNixOS) --arg devBuild true --run "sudo python3 ./utils/zen_workaround.py"
}
#ifndef NEO_HUD_CHILDELEMENT_H
#define NEO_HUD_CHILDELEMENT_H
#ifdef _WIN32
#pragma once
#endif

class CNeoHudElements;
class C_NEO_Player;

// Builds the per-element update-frequency ConVar identifier,
// e.g. NEO_HUD_ELEMENT_FREQ_CVAR_NAME(ammo) -> cl_neo_hud_ammo_update_freq.
#define NEO_HUD_ELEMENT_FREQ_CVAR_NAME(Name) cl_neo_hud_ ## Name ## _update_freq

// Two-step stringification so macro arguments are expanded before being
// turned into string literals.
#ifndef xstr
#define xstr(a) str(a)
#endif
#ifndef str
#define str(a) #a
#endif

#define NEO_HUD_ELEMENT_FREQ_CVAR_DESCRIPTION "How often to update this HUD element, in seconds. 0: always update, <0: never update."
// Arguments forwarded to the ConVar constructor: bHasMin, fMin, bHasMax, fMax.
#define NEO_HUD_ELEMENT_FREQ_CVAR_MINMAX_PARMS true, -1.0, true, 1.0
#define NEO_HUD_ELEMENT_FREQ_CVAR_FLAGS (FCVAR_ARCHIVE | FCVAR_USERINFO)

// Declares the update-frequency ConVar for a HUD element and defines that
// element's GetUpdateFrequencyConVar() override to return it.  Intended to be
// placed in the element's .cpp file.
#define NEO_HUD_ELEMENT_DECLARE_FREQ_CVAR(HudElementId, DefaultUpdateFrequencyInSeconds) ConVar NEO_HUD_ELEMENT_FREQ_CVAR_NAME(HudElementId)(xstr(NEO_HUD_ELEMENT_FREQ_CVAR_NAME(HudElementId)), #DefaultUpdateFrequencyInSeconds, NEO_HUD_ELEMENT_FREQ_CVAR_FLAGS, NEO_HUD_ELEMENT_FREQ_CVAR_DESCRIPTION, NEO_HUD_ELEMENT_FREQ_CVAR_MINMAX_PARMS); \
ConVar* CNEOHud_ ## HudElementId::GetUpdateFrequencyConVar() const { return &NEO_HUD_ELEMENT_FREQ_CVAR_NAME(HudElementId); }

extern ConVar neo_cl_hud_ammo_enabled;

// Base class for NEO HUD child elements.  Derived elements implement the
// pure-virtual draw/update hooks; this class throttles state updates via the
// element's update-frequency ConVar (see macros above).
class CNEOHud_ChildElement
{
	DECLARE_CLASS_NOBASE(CNEOHud_ChildElement)

public:
	CNEOHud_ChildElement();

	virtual ~CNEOHud_ChildElement() { }

protected:
	// Draws the shared rounded-box HUD background between the given corners.
	virtual void DrawNeoHudRoundedBox(const int x0, const int y0, const int x1, const int y1) const final;

	// Recomputes any cached state needed by DrawNeoHudElement(); only called
	// when ShouldUpdateYet() allows it (see PaintNeoElement).
	virtual void UpdateStateForNeoHudElementDraw() = 0;

	// Draws the element.  Called every paint, regardless of update throttling.
	virtual void DrawNeoHudElement() = 0;

	// Returns the element's update-frequency ConVar; typically defined via
	// NEO_HUD_ELEMENT_DECLARE_FREQ_CVAR in the element's .cpp file.
	virtual ConVar* GetUpdateFrequencyConVar() const = 0;

	// Paint entry point: skips work while the loading screen is showing,
	// otherwise updates state (throttled) and draws.
	void PaintNeoElement()
	{
		if (!engine->IsDrawingLoadingImage())
		{
			if (ShouldUpdateYet())
			{
				UpdateStateForNeoHudElementDraw();
			}

			DrawNeoHudElement();
		}
	}

	// Throttle check against the element's update-frequency ConVar:
	// <0 never update, 0 always update, >0 update at most once per that many
	// seconds (records the update time when it returns true).
	bool ShouldUpdateYet()
	{
		const float frequency = GetUpdateFrequency();

		if (frequency < 0)
		{
			return false;
		}
		else if (frequency > 0)
		{
			const float deltaTime = gpGlobals->curtime - m_flLastUpdateTime;

			if (deltaTime < frequency)
			{
				return false;
			}
			else
			{
				m_flLastUpdateTime = gpGlobals->curtime;
				return true;
			}
		}
		else
		{
			return true;
		}
	}

	CNeoHudElements* GetRootNeoHud() const { return m_pNeoHud; }

private:
	float GetUpdateFrequency() const { return GetUpdateFrequencyConVar()->GetFloat(); }

private:
	CNeoHudElements* m_pNeoHud;

	// Corner textures for the rounded-box background (NE/NW/SE/SW).
	vgui::HTexture m_hTex_Rounded_NE, m_hTex_Rounded_NW, m_hTex_Rounded_SE, m_hTex_Rounded_SW;

	int m_rounded_width, m_rounded_height;

	// curtime of the last throttled state update (see ShouldUpdateYet).
	float m_flLastUpdateTime;

private:
	// Non-copyable (declared, intentionally not defined).
	CNEOHud_ChildElement(CNEOHud_ChildElement& other);
};

#endif // NEO_HUD_CHILDELEMENT_H
#!/bin/bash

# Evaluate a trained LSTM_ICNet_v5 semantic-segmentation model on the
# Cityscapes sequence dataset inside the project's Singularity container.
MODEL="LSTM_ICNet_v5"
MODEL_VARIANT="end2end"
# Fixed typo: was ARCHTIECTURE (defined and used consistently, but misspelled).
ARCHITECTURE="semantic_segmentation"
BATCH_SIZE="1"  # NOTE(review): unused in this script — presumably consumed elsewhere; confirm before removing
DATASET_1="cityscapes_sequence_4_color_19"
RESULT_DIR="results/2020_02_07b_LSTM_ICNet_v5_cityscape_sequence_4_color_19_batch1_60k"

# evaluate model
singularity exec --nv -B /media/ singularity/ubuntu1804_tensorflow1.14_cuda10.simg python inference.py --architecture="$ARCHITECTURE" --model="$MODEL" --dataset="$DATASET_1" --pretrained-model="$RESULT_DIR" --evaluation_set=val --weather=all_train --model-variant="$MODEL_VARIANT"
VGhlIHF1aWNrIGJyb3duIGZveCBqdW1wcyBvdmVyIHRoZSBsYXp5IGRvZw==
#!/bin/sh

# shellcheck disable=SC2103
# shellcheck disable=SC2003
# shellcheck disable=SC2006

# This script builds a stand-alone binary for the command line version of
# ttfautohint, downloading any necessary libraries.
#
# Version 2019-Aug-14.

# The MIT License (MIT)

# Copyright (c) 2017 Werner Lemberg

# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:

# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.

# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

#
# User configuration.
#
# All of the configuration variables below can be overridden by environment
# variables of the same name.  Each line uses the `:' null utility so the
# `${VAR:=default}' expansion assigns the default without executing its
# result as a command (fix: the leading `:' was missing, so the expanded
# value was run as a command at startup, producing spurious errors).
#

# The build directory.
: "${BUILD:=$HOME/ttfautohint-build}"
: "${INST:=$BUILD/local}"

# Expected build binary path
: "${TTFAUTOHINT_BIN:=$INST/bin/ttfautohint}"

# The library versions.
: "${FREETYPE_VERSION:=2.10.2}"
: "${HARFBUZZ_VERSION:=2.7.2}"
: "${TTFAUTOHINT_VERSION:=1.8.3}"

# Necessary patches (lists of at most 10 URLs each separated by whitespace,
# to be applied in order).
: "${FREETYPE_PATCHES:=}"
: "${HARFBUZZ_PATCHES:=}"
: "${TTFAUTOHINT_PATCHES:=}"

#
# Nothing to configure below this comment.
#
# Ensure script fails if any command executed fails
set -e

# Versioned source-directory / tarball base names.
FREETYPE="freetype-$FREETYPE_VERSION"
HARFBUZZ="harfbuzz-$HARFBUZZ_VERSION"
TTFAUTOHINT="ttfautohint-$TTFAUTOHINT_VERSION"

# Refuse to run over a previous build rather than clobber it.
if test -d "$BUILD" -o -f "$BUILD"; then
  echo "Build directory \`$BUILD' must not exist."
  exit 1
fi

mkdir "$BUILD"
mkdir "$INST"

cd "$BUILD"

# Our environment variables.
TA_CPPFLAGS="-I$INST/include"
TA_CFLAGS="-g -O2"
TA_CXXFLAGS="-g -O2"
TA_LDFLAGS="-L$INST/lib -L$INST/lib64"

# Detect how many CPU cores are available
# (nproc on Linux, sysctl on macOS/BSD; fall back to 1).
CORE_COUNT=$( (nproc --all || sysctl -n hw.ncpu) 2>/dev/null || echo 1 )

#
# FreeType
#

echo "#####"
echo "$FREETYPE"
echo "#####"

echo ""
echo "### Downloading archive..."

url_base="https://download.savannah.gnu.org/releases/freetype"
curl -L -O "$url_base/$FREETYPE.tar.gz"

echo ""
echo "### Extracting archive..."

tar -xzvf "$FREETYPE.tar.gz" && rm -f "$FREETYPE.tar.gz"

# Optionally download and apply user-supplied FreeType patches, in order.
if test -n "$FREETYPE_PATCHES"; then
  echo ""
  echo "### Downloading patches..."

  count=0
  for i in $FREETYPE_PATCHES
  do
    curl -o ft-patch-$count.diff "$i"
    count=`expr $count + 1`
  done

  echo ""
  echo "### Applying patches..."

  cd "$FREETYPE"
  for i in ../ft-patch-*.diff
  do
    test -f "$i" || continue
    patch -p1 -N -r - < "$i"
    rm -f "$i"
  done
  cd ..
fi

echo ""
echo "### Building..."

cd "$FREETYPE"

# The space in `PKG_CONFIG' ensures that the created `freetype-config' file
# doesn't find a working pkg-config, falling back to the stored strings
# (which is what we want).
./configure \
  --without-bzip2 \
  --without-png \
  --without-zlib \
  --without-harfbuzz \
  --prefix="$INST" \
  --enable-static \
  --disable-shared \
  --enable-freetype-config \
  PKG_CONFIG=" " \
  CFLAGS="$TA_CPPFLAGS $TA_CFLAGS" \
  CXXFLAGS="$TA_CPPFLAGS $TA_CXXFLAGS" \
  LDFLAGS="$TA_LDFLAGS"
make -j "$CORE_COUNT"

echo ""
echo "### Installing..."

make install
cd ..
rm -rf "$FREETYPE"

echo ""
echo "### Successfully installed FreeType."
echo ""

#
# HarfBuzz
#

echo "#####"
echo "$HARFBUZZ"
echo "#####"

echo ""
echo "### Downloading archive..."

url_base="https://github.com/harfbuzz/harfbuzz/releases/download"
curl -L -O "$url_base/$HARFBUZZ_VERSION/$HARFBUZZ.tar.xz"

echo ""
echo "### Extracting archive..."

tar -xvf "$HARFBUZZ.tar.xz" && rm -f "$HARFBUZZ.tar.xz"

# Optionally download and apply user-supplied HarfBuzz patches, in order.
if test -n "$HARFBUZZ_PATCHES"; then
  echo ""
  echo "### Downloading patches..."

  count=0
  for i in $HARFBUZZ_PATCHES
  do
    curl -o hb-patch-$count.diff "$i"
    count=`expr $count + 1`
  done

  echo ""
  echo "### Applying patches..."

  cd "$HARFBUZZ"
  for i in ../hb-patch-*.diff
  do
    test -f "$i" || continue
    patch -p1 -N -r - < "$i"
    rm -f "$i"
  done
  cd ..
fi

echo ""
echo "### Building..."

cd "$HARFBUZZ"

# Value `true' for `PKG_CONFIG' ensures that XXX_CFLAGS and XXX_LIBS
# get actually used.
./configure \
  --disable-dependency-tracking \
  --disable-gtk-doc-html \
  --with-glib=no \
  --with-cairo=no \
  --with-fontconfig=no \
  --with-icu=no \
  --prefix="$INST" \
  --enable-static \
  --disable-shared \
  CFLAGS="$TA_CPPFLAGS $TA_CFLAGS" \
  CXXFLAGS="$TA_CPPFLAGS $TA_CXXFLAGS" \
  LDFLAGS="$TA_LDFLAGS" \
  PKG_CONFIG=true \
  FREETYPE_CFLAGS="$TA_CPPFLAGS/freetype2" \
  FREETYPE_LIBS="$TA_LDFLAGS -lfreetype"
make -j "$CORE_COUNT"

echo ""
echo "### Installing..."

make install
cd ..
rm -rf "$HARFBUZZ"

echo ""
echo "### Successfully installed HarfBuzz."
echo ""

#
# ttfautohint
#

echo "#####"
echo "$TTFAUTOHINT"
echo "#####"

echo ""
echo "### Downloading archive..."

url_base="https://download.savannah.gnu.org/releases/freetype"
curl -L -O "$url_base/$TTFAUTOHINT.tar.gz"

echo ""
echo "### Extracting archive..."

tar -xzvf "$TTFAUTOHINT.tar.gz" && rm -f "$TTFAUTOHINT.tar.gz"

# Optionally download and apply user-supplied ttfautohint patches, in order.
if test -n "$TTFAUTOHINT_PATCHES"; then
  echo ""
  echo "### Downloading patches..."

  count=0
  for i in $TTFAUTOHINT_PATCHES
  do
    curl -o ta-patch-$count.diff "$i"
    count=`expr $count + 1`
  done

  cd "$TTFAUTOHINT"
  for i in ../ta-patch-*.diff
  do
    test -f "$i" || continue
    patch -p1 -N -r - < "$i"
    rm -f "$i"
  done
  cd ..
fi

echo ""
echo "### Building..."

cd "$TTFAUTOHINT"

# Value `true' for `PKG_CONFIG' ensures that XXX_CFLAGS and XXX_LIBS
# get actually used.
./configure \
  --disable-dependency-tracking \
  --without-qt \
  --without-doc \
  --prefix="$INST" \
  --enable-static \
  --disable-shared \
  --with-freetype-config="$INST/bin/freetype-config" \
  CFLAGS="$TA_CPPFLAGS $TA_CFLAGS" \
  CXXFLAGS="$TA_CPPFLAGS $TA_CXXFLAGS" \
  LDFLAGS="$TA_LDFLAGS" \
  PKG_CONFIG=true \
  HARFBUZZ_CFLAGS="$TA_CPPFLAGS/harfbuzz" \
  HARFBUZZ_LIBS="$TA_LDFLAGS -lharfbuzz" 
# `-all-static' asks libtool for a fully static link of the final binary.
make -j "$CORE_COUNT" LDFLAGS="$TA_LDFLAGS -all-static"

echo ""
echo "### Installing..."

make install-strip
cd ..
rm -rf "$TTFAUTOHINT"

echo ""
echo "### Successfully installed ttfautohint."
echo ""

#
# test for the expected path to the executable
#
if [ -f "$INST/bin/ttfautohint" ]; then
  echo "#####"
  echo "binary: $TTFAUTOHINT_BIN"
  echo "#####"
else
  echo "ttfautohint executable was not found on the path $TTFAUTOHINT_BIN" 1>&2
  exit 1
fi

# eof
package org.multibit.hd.ui.views.wizards.credentials;

import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import net.miginfocom.swing.MigLayout;
import org.multibit.hd.ui.languages.MessageKey;
import org.multibit.hd.ui.views.components.AbstractHardwareWalletComponentView;
import org.multibit.hd.ui.views.components.Panels;
import org.multibit.hd.ui.views.components.panels.PanelDecorator;
import org.multibit.hd.ui.views.fonts.AwesomeIcon;
import org.multibit.hd.ui.views.wizards.AbstractHardwareWalletWizard;
import org.multibit.hd.ui.views.wizards.AbstractHardwareWalletWizardPanelView;
import org.multibit.hd.ui.views.wizards.AbstractWizard;

import javax.swing.*;

/**
 * <p>Wizard to provide the following to UI:</p>
 * <ul>
 * <li>Ask the user to press "confirm" on their Trezor in response to an Encrypt message</li>
 * </ul>
 *
 * @since 0.0.1
 */
public class CredentialsConfirmCipherKeyPanelView extends AbstractHardwareWalletWizardPanelView<CredentialsWizardModel, CredentialsConfirmCipherKeyPanelModel> {

  /**
   * @param wizard    The wizard managing the states
   * @param panelName The panel name to filter events from components
   */
  public CredentialsConfirmCipherKeyPanelView(AbstractHardwareWalletWizard<CredentialsWizardModel> wizard, String panelName) {

    super(wizard, panelName, AwesomeIcon.SHIELD, MessageKey.HARDWARE_PRESS_CONFIRM_TITLE);

  }

  @Override
  public void newPanelModel() {

    // Bind it to the wizard model in case of failure
    getWizardModel().setConfirmCipherKeyPanelView(this);

  }

  @Override
  public void initialiseContent(JPanel contentPanel) {

    contentPanel.setLayout(
      new MigLayout(
        Panels.migXYLayout(),
        "[]", // Column constraints
        "[]10[]" // Row constraints
      ));

    addCurrentHardwareDisplay(contentPanel);

  }

  @Override
  protected void initialiseButtons(AbstractWizard<CredentialsWizardModel> wizard) {

    PanelDecorator.addExitCancelUnlock(this, wizard);

  }

  @Override
  public void afterShow() {

    // Set the confirm text
    hardwareDisplayMaV.getView().setOperationText(MessageKey.HARDWARE_UNLOCK_OPERATION, getWizardModel().getWalletMode().brand());

    // Show unlock message (device-specific display text)
    switch (getWizardModel().getWalletMode()) {
      case TREZOR:
        hardwareDisplayMaV.getView().setDisplayText(MessageKey.TREZOR_ENCRYPT_MULTIBIT_HD_UNLOCK_DISPLAY);
        break;
      case KEEP_KEY:
        hardwareDisplayMaV.getView().setDisplayText(MessageKey.KEEP_KEY_ENCRYPT_MULTIBIT_HD_UNLOCK_DISPLAY);
        break;
      default:
        throw new IllegalStateException("Unknown hardware wallet: " + getWizardModel().getWalletMode().name());
    }

    // Reassure users that this is an unlock screen but rely on the Trezor buttons to do it
    getFinishButton().setEnabled(false);

  }

  @Override
  public boolean beforeHide(boolean isExitCancel) {

    // Don't block an exit
    if (isExitCancel) {
      return true;
    }

    // Defer the hide operation
    return false;
  }

  @Override
  public void updateFromComponentModels(Optional componentModel) {
    // No need to update the wizard it has the references
    // (raw Optional retained to match the overridden signature)
  }

  /**
   * @return The Trezor display view to avoid method duplication
   */
  public AbstractHardwareWalletComponentView getHardwareDisplayView() {
    return hardwareDisplayMaV.getView();
  }

  /**
   * Re-enable the Exit button (and hide the spinner) after a failed unlock
   * so the user can leave the wizard.  Must be called on the EDT.
   */
  public void enableForFailedUnlock() {

    Preconditions.checkState(SwingUtilities.isEventDispatchThread(), "Must be on EDT");

    getFinishButton().setEnabled(false);
    getExitButton().setEnabled(true);

    hardwareDisplayMaV.getView().setSpinnerVisible(false);

  }

  /**
   * Forward an "incorrect entropy" notification to the hardware display view.
   */
  public void incorrectEntropy() {
    hardwareDisplayMaV.getView().incorrectEntropy();
  }

}
We can use transfer learning to scale the existing machine learning model for prediction on new types of data. The idea behind transfer learning is to leverage the knowledge gained from the existing model to build a model that can better predict on new data. To do this, we will start by fine-tuning the hyperparameters of the existing model using a grid search, and we will use the results of that search to select the best set of hyperparameters for the model. Next, we will use the updated hyperparameters to retrain the model on a larger training set. The larger training set should contain more diverse types of data, which will help the model generalize better across the different types of data it must predict on. Finally, after we have retrained the model, we can use it for prediction on the new unseen data. We can then evaluate its performance and adjust the hyperparameters as needed.
#!/bin/bash -e

# Build and deploy the component demo for $org/$repo as a Tomcat ROOT.war.
org=vaadin
repo=vaadin-charts
branch=master
port=5199

# make folder (same as input, no checking!)
mkdir "$repo"
git clone "https://github.com/$org/$repo.git" --branch "$branch" --single-branch

# switch to deploy branch
pushd "$repo" >/dev/null
git checkout --orphan deploy

# remove all content
git rm -rf -q .

# use bower to install runtime deployment
npm i -g bower
bower cache clean "$repo" # make sure to clean cache before installing
git show "$branch:bower.json" > bower.json
echo "{ \"directory\": \"components\" } " > .bowerrc
bower install
bower install "$org/$repo#$branch"
git checkout "$branch" -- demo
rm -rf "components/$repo/demo"
mv demo "components/$repo/"

# redirect to component folder
# (fix: the http-equiv attribute quotes were unescaped, so the emitted HTML
# dropped them; both attributes are now properly quoted in the output)
echo "<META http-equiv=\"refresh\" content=\"0;URL=components/$repo/\">" > index.html

# generate the war file
zip -r "$repo.war" .

# copy and move war file to server
scp -o StrictHostKeyChecking=no -P "$port" *.war "dev@app.fi:$repo.war"
ssh -o StrictHostKeyChecking=no -p "$port" dev@app.fi mv "$repo.war" tomcat/webapps/ROOT.war

# cleanup
popd >/dev/null
rm -rf "$repo"
# Created on 2018/12
# Author: <NAME>

import os
import time

import numpy as np
import torch
from torch.utils.tensorboard import SummaryWriter


class Solver(object):
    """Training driver for source-separation models.

    Runs the train / cross-validation loop, handles checkpoint saving and
    resuming, periodic learning-rate decay, early stopping and TensorBoard
    logging.  `model` is expected to be wrapped (e.g. DataParallel) so its
    underlying module is reachable via `model.module`.
    """

    def __init__(self, data, model, optimizer, epochs, save_folder, checkpoint, continue_from, model_path, print_freq,
                 early_stop, max_norm, lr, lr_override, log_dir, lamb, decay_period, config, multidecoder, decay):
        # data: dict with 'tr_loader' and 'cv_loader' DataLoaders
        self.tr_loader = data['tr_loader']
        self.cv_loader = data['cv_loader']
        self.model = model
        self.optimizer = optimizer
        self.lr_override = lr_override  # if True, keep the fresh optimizer LR instead of the checkpointed one

        # Training config
        self.epochs = epochs
        self.early_stop = early_stop
        self.max_norm = max_norm          # gradient-clipping max norm
        self.lamb = lamb                  # loss weighting passed through to cal_loss
        self.decay_period = decay_period  # decay LR every `decay_period` epochs
        self.decay = decay                # multiplicative LR decay factor
        self.multidecoder = multidecoder
        # Project-local loss modules; imported lazily so only the selected
        # implementation needs to be importable.
        if multidecoder:
            from loss_multidecoder import cal_loss
        else:
            from loss_hungarian import cal_loss
        self.loss_func = cal_loss

        # save and load model
        self.save_folder = save_folder
        self.checkpoint = checkpoint        # if True, save a checkpoint every epoch
        self.continue_from = continue_from  # checkpoint path to resume from ('' disables)
        self.model_path = model_path        # filename for the best model
        self.config = config                # basename for the rolling config checkpoint

        # logging
        self.print_freq = print_freq

        # per-epoch loss history (persisted inside checkpoints)
        self.tr_loss = torch.Tensor(self.epochs)
        self.cv_loss = torch.Tensor(self.epochs)
        self._reset()
        self.writer = SummaryWriter(log_dir)

    def _reset(self):
        """Initialise training state, resuming from `continue_from` if it exists."""
        load = self.continue_from and os.path.exists(self.continue_from)
        self.start_epoch = 0
        self.val_no_impv = 0
        self.prev_val_loss = float("inf")
        self.best_val_loss = float("inf")
        if load:
            # if the checkpoint model exists
            print('Loading checkpoint model %s' % self.continue_from)
            package = torch.load(self.continue_from)
            self.model.module.load_state_dict(package['state_dict'])
            if not self.lr_override:
                self.optimizer.load_state_dict(package['optim_dict'])
                print('load lr at %s' % str(self.optimizer.state_dict()['param_groups']))
            else:
                print('lr override to %s' % str(self.optimizer.state_dict()['param_groups']))
            self.start_epoch = int(package.get('epoch', 1))
            self.tr_loss[:self.start_epoch] = package['tr_loss'][:self.start_epoch]
            self.cv_loss[:self.start_epoch] = package['cv_loss'][:self.start_epoch]
            self.val_no_impv = package.get('val_no_impv', 0)
            if 'random_state' in package:
                torch.set_rng_state(package['random_state'])
            self.prev_val_loss = self.cv_loss[self.start_epoch - 1]
            self.best_val_loss = min(self.cv_loss[:self.start_epoch])
        # Create save folder
        os.makedirs(self.save_folder, exist_ok=True)
        self.halving = False

    def train(self):
        """Train the model for multiple epochs with validation after each one."""
        for epoch in range(self.start_epoch, self.epochs):
            # Periodic multiplicative learning-rate decay.
            if epoch % self.decay_period == (self.decay_period - 1):
                optim_state = self.optimizer.state_dict()
                for param_group in optim_state['param_groups']:
                    param_group['lr'] = param_group['lr'] * self.decay
                self.optimizer.load_state_dict(optim_state)
                print('Learning rate adjusted to: %s' % str(optim_state['param_groups']))
            self.writer.add_scalar('LR/lr', self.optimizer.state_dict()["param_groups"][0]["lr"], epoch)

            # Train one epoch
            print("Training...")
            self.model.train()  # Turn on BatchNorm & Dropout
            start = time.time()
            tr_avg_loss, tr_avg_snr, tr_avg_acc = self._run_one_epoch(epoch)
            print('-' * 85)
            print('Train Summary | End of Epoch {0} | Time {1:.2f}s | '
                  'Train Loss {2:.3f}'.format(
                      epoch + 1, time.time() - start, tr_avg_loss))
            print('-' * 85)

            # Cross validation
            print('Cross validation...')
            self.model.eval()  # Turn off Batchnorm & Dropout
            val_loss, val_snr, val_acc = self._run_one_epoch(epoch, cross_valid=True)
            print('-' * 85)
            print('Valid Summary | End of Epoch {0} | Time {1:.2f}s | '
                  'Valid Loss {2:.3f}'.format(
                      epoch + 1, time.time() - start, val_loss))
            print('-' * 85)

            self.writer.add_scalar('Loss/per_epoch_cv', val_loss, epoch)
            self.writer.add_scalar('SNR/per_epoch_cv', val_snr.mean(), epoch)
            self.writer.add_scalar('Accuracy/per_epoch_cv', val_acc, epoch)
            # val_snr is indexed by (number of sources - 2); assumes mixtures
            # contain 2-5 sources -- TODO confirm against the dataset.
            self.writer.add_scalar('snr2/per_epoch_cv', val_snr[0], epoch)
            self.writer.add_scalar('snr3/per_epoch_cv', val_snr[1], epoch)
            self.writer.add_scalar('snr4/per_epoch_cv', val_snr[2], epoch)
            self.writer.add_scalar('snr5/per_epoch_cv', val_snr[3], epoch)

            # Early stopping: count epochs without validation improvement.
            if val_loss >= self.prev_val_loss:
                self.val_no_impv += 1
                if self.val_no_impv >= 10 and self.early_stop:
                    print("No improvement for 10 epochs, early stopping.")
                    break
            else:
                self.val_no_impv = 0
            self.prev_val_loss = val_loss

            # Save the best model
            self.tr_loss[epoch] = tr_avg_loss
            self.cv_loss[epoch] = val_loss
            package = self.model.module.serialize(self.model.module,
                                                  self.optimizer, epoch + 1,
                                                  tr_loss=self.tr_loss,
                                                  cv_loss=self.cv_loss,
                                                  val_no_impv=self.val_no_impv,
                                                  random_state=torch.get_rng_state())
            if val_loss < self.best_val_loss:
                self.best_val_loss = val_loss
                file_path = os.path.join(self.save_folder, self.model_path)
                torch.save(package, file_path)
                print("Find better validated model, saving to %s" % file_path)

            # Save model each epoch, and make a copy at last.pth
            if self.checkpoint:
                file_path = os.path.join(
                    self.save_folder, 'epoch%d.pth.tar' % (epoch + 1))
                torch.save(package, file_path)
                print('Saving checkpoint model to %s' % file_path)

            # update config#.pth
            torch.save(package, os.path.join(self.save_folder, self.config + '.pth'))

    def _run_one_epoch(self, epoch, cross_valid=False):
        """Run one pass over the train (or validation) loader.

        Returns (average loss, per-source-count average SNR array, average
        accuracy).  In training mode the loss is averaged over all stages;
        in validation mode only the final stage is reported.
        """
        start = time.time()
        total_loss = 0
        total_snr = np.zeros(4)  # accumulated per source count (2..5 speakers)
        total_accuracy = 0
        data_loader = self.tr_loader if not cross_valid else self.cv_loader
        current_device = next(self.model.module.parameters()).device
        counts = np.zeros(4)  # number of samples seen per source count
        for i, (padded_mixture, mixture_lengths, padded_source) in enumerate(data_loader):
            for tmp_ps in padded_source:
                counts[tmp_ps.size(0) - 2] += 1
            B = len(padded_source)
            padded_mixture = padded_mixture.cuda(current_device)
            padded_source = [tmp_ps.cuda(current_device) for tmp_ps in padded_source]
            num_sources = torch.Tensor([tmps_ps.size(0) for tmps_ps in padded_source]).long()
            try:
                if not cross_valid:
                    estimate_source_list, vad_list = self.model(padded_mixture, num_sources, True)
                else:
                    with torch.no_grad():
                        estimate_source_list, vad_list = self.model(padded_mixture, num_sources, True)
            except Exception as e:
                # Best-effort: skip batches the model cannot process.
                print('forward prop failed', padded_mixture.shape, e)
                continue

            if not self.multidecoder:
                # [#stages, B, ...]
                estimate_source_list = estimate_source_list.transpose(0, 1)
                vad_list = vad_list.transpose(0, 1)
                loss = []
                snr = []
                accuracy = []
                # Per-stage loss against the full batch.
                for (estimate_source, vad) in zip(estimate_source_list, vad_list):
                    step_loss, step_snr, acc = \
                        self.loss_func(padded_source, estimate_source, mixture_lengths, vad, lamb=self.lamb)
                    loss.append(step_loss)
                    snr.append(step_snr)
                    accuracy.append(acc)
                loss = torch.stack(loss)
                snr = torch.stack(snr)
                accuracy = torch.stack(accuracy)
            else:  # if using multidecoder
                # list of B, each [num_stages, spks, T]
                estimate_sources = [estimate_source_list[k, :, :num_sources[k], :] for k in range(B)]
                loss = []
                snr = []
                accuracy = []
                # Per-sample loss, since each sample may have a different
                # number of sources.
                for idx in range(B):
                    # [num_stages, num_decoders]
                    vad = vad_list[idx]
                    step_loss, step_snr, acc = \
                        self.loss_func(padded_source[idx], estimate_sources[idx], mixture_lengths[idx], vad, self.lamb)
                    # [num_stages]
                    loss.append(step_loss)
                    snr.append(step_snr)
                    accuracy.append(acc)
                    total_snr[num_sources[idx] - 2] += step_snr[-1].item()
                loss = torch.stack(loss, dim=0).mean(dim=0)
                snr = torch.stack(snr, dim=0).mean(dim=0)
                accuracy = torch.stack(accuracy, dim=0).mean(dim=0)

            if not cross_valid:  # training: average over all stages
                loss = loss.mean()
                snr = snr.mean()
                accuracy = accuracy.mean()
            else:  # validation: report the final stage only
                loss = loss[-1]
                snr = snr[-1]
                accuracy = accuracy[-1]

            try:
                if not cross_valid:
                    self.optimizer.zero_grad()
                    loss.backward()
                    torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.max_norm)
                    self.optimizer.step()
            except Exception as e:
                # Best-effort: skip batches whose backward pass fails.
                print('backprop failed', padded_mixture.shape, e)
                continue

            total_loss += loss.item()
            total_accuracy += accuracy.item()

            if i % self.print_freq == 0:
                print(f'Epoch {epoch + 1} | Iter {i + 1} | Average Loss {total_loss / (i + 1): .2f} | '
                      f'Current Loss {loss.item(): .2f} | Average SNR {str(total_snr / counts)} | '
                      f'Average accuracy {total_accuracy / (i + 1):.2f} | '
                      f'{1000 * (time.time() - start) / (i + 1):.2f} ms/batch', flush=True)
                mode = 'cv' if cross_valid else 'train'
                self.writer.add_scalar(f'Loss/{mode}', loss.item(), epoch * len(data_loader) + i)
                self.writer.add_scalar(f'SNR/{mode}', snr.item(), epoch * len(data_loader) + i)
                self.writer.add_scalar(f'Accuracy/{mode}', accuracy.item(), epoch * len(data_loader) + i)
                self.writer.add_scalar(f'snr2/{mode}', total_snr[0] / counts[0], epoch * len(data_loader) + i)
                self.writer.add_scalar(f'snr3/{mode}', total_snr[1] / counts[1], epoch * len(data_loader) + i)
                self.writer.add_scalar(f'snr4/{mode}', total_snr[2] / counts[2], epoch * len(data_loader) + i)
                self.writer.add_scalar(f'snr5/{mode}', total_snr[3] / counts[3], epoch * len(data_loader) + i)
                if i <= 20:
                    self.writer.add_audio(f"Speech/{i}_original {mode}", padded_mixture[0], epoch, sample_rate=8000)
                    # FIX: `estimate_sources` is only assigned in the
                    # multidecoder branch above; the original code raised a
                    # NameError here for single-decoder models, so guard the
                    # reconstruction logging accordingly.
                    if self.multidecoder:
                        output_example = estimate_sources[0][-1]
                        for channel, example in enumerate(output_example):
                            self.writer.add_audio(f"Speech/{i}_reconstructed {mode} {channel}",
                                                  example / (example.max() - example.min()), epoch, sample_rate=8000)
        self.writer.add_text(f'counts/{mode}', str(counts), global_step=epoch)
        return total_loss / (i + 1), total_snr / counts, total_accuracy / (i + 1)
package ajson

import (
	"io"
	"strings"
	"testing"
)

// TestBuffer_Token verifies that buffer.token() consumes one complete token
// expression: on valid input buf.index must land on the expected position;
// on malformed input (unbalanced brackets/parentheses, unterminated strings)
// an error other than io.EOF is required.
func TestBuffer_Token(t *testing.T) {
	tests := []struct {
		name  string
		value string
		index int // expected buf.index after token(); ignored when fail is true
		fail  bool
	}{
		{name: "simple", value: "@.length", index: 8, fail: false},
		{name: "combined", value: "@['foo'].0.bar", index: 14, fail: false},
		{name: "formula", value: "@['foo'].[(@.length - 1)].*", index: 27, fail: false},
		{name: "filter", value: "@['foo'].[?(@.bar == 1 && @.baz < @.length)].*", index: 46, fail: false},
		{name: "string", value: `@['foo)(]]"[[[.[?(@.bar \' == 1 && < @.length)'].*`, index: 50, fail: false},
		{name: "part 1", value: "@.foo+@.bar", index: 5, fail: false},
		{name: "part 2", value: "@.foo && @.bar", index: 5, fail: false},
		{name: "part 3", value: "@.foo,3", index: 5, fail: false},
		{name: "part 4", value: "@.length-1", index: 8, fail: false},
		{name: "number 1", value: "1", index: 1, fail: false},
		{name: "number 2", value: "1.3e2", index: 5, fail: false},
		{name: "number 3", value: "-1.3e2", index: 6, fail: false},
		{name: "number 4", value: "-1.3e-2", index: 7, fail: false},
		{name: "string 1", value: "'1'", index: 3, fail: false},
		{name: "string 2", value: "'foo \\'bar '", index: 12, fail: false},
		{name: "string 3", value: `"foo \"bar "`, index: 12, fail: false},
		{name: "fail 1", value: "@.foo[", fail: true},
		{name: "fail 2", value: "@.foo[(]", fail: true},
		{name: "fail 3", value: "'", fail: true},
		{name: "fail 4", value: "'x", fail: true},
		{name: "parentheses 0", value: "()", index: 2, fail: false},
		{name: "parentheses 1", value: "(@)", index: 3, fail: false},
		{name: "parentheses 2", value: "(", fail: true},
		{name: "parentheses 3", value: ")", fail: true},
		{name: "parentheses 4", value: "(x", fail: true},
		{name: "parentheses 5", value: "((())", fail: true},
		{name: "parentheses 6", value: "@)", index: 1, fail: false},
		{name: "parentheses 7", value: "[)", fail: true},
		{name: "parentheses 8", value: "[())", fail: true},
		{name: "bracket 0", value: "[]", index: 2, fail: false},
		{name: "bracket 1", value: "[@]", index: 3, fail: false},
		{name: "bracket 2", value: "[", fail: true},
		{name: "bracket 3", value: "]", fail: true},
		{name: "bracket 4", value: "[x", fail: true},
		{name: "bracket 5", value: "[[[]]", fail: true},
		{name: "bracket 6", value: "@]", index: 1, fail: false},
		{name: "bracket 7", value: "(]", fail: true},
		{name: "bracket 8", value: "([]]", fail: true},
		{name: "sign 1", value: "+X", index: 1},
		{name: "sign 2", value: "-X", index: 1},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			buf := newBuffer([]byte(test.value))
			err := buf.token()
			// io.EOF simply means the token ran to the end of input and is
			// not treated as a failure.
			if !test.fail && err != nil && err != io.EOF {
				t.Errorf("Unexpected error: %s", err.Error())
			} else if test.fail && (err == nil || err == io.EOF) {
				t.Errorf("Expected error, got nothing")
			} else if !test.fail && test.index != buf.index {
				t.Errorf("Wrong index: expected %d, got %d", test.index, buf.index)
			}
		})
	}
}

// TestBuffer_RPN checks conversion of infix expressions into reverse Polish
// notation, including operator precedence ("**" before "*" before "+"),
// unary minus, function calls (sin) and named constants (pi, e).
func TestBuffer_RPN(t *testing.T) {
	tests := []struct {
		name     string
		value    string
		expected []string
	}{
		{name: "example_1", value: "@.length", expected: []string{"@.length"}},
		{name: "example_2", value: "1 + 2", expected: []string{"1", "2", "+"}},
		{name: "example_3", value: "3 + 4 * 2 / (1 - 5)**2", expected: []string{"3", "4", "2", "*", "1", "5", "-", "2", "**", "/", "+"}},
		{name: "example_4", value: "'foo' == pi", expected: []string{"'foo'", "pi", "=="}},
		{name: "example_5", value: "pi != 'bar'", expected: []string{"pi", "'bar'", "!="}},
		{name: "example_6", value: "3 + 4 * -2 / (-1 - 5)**-2", expected: []string{"3", "4", "-2", "*", "-1", "5", "-", "-2", "**", "/", "+"}},
		{name: "example_7", value: "1.3e2 + sin(2*pi/3)", expected: []string{"1.3e2", "2", "pi", "*", "3", "/", "sin", "+"}},
		{name: "example_8", value: "@.length-1", expected: []string{"@.length", "1", "-"}},
		{name: "example_9", value: "@.length+-1", expected: []string{"@.length", "-1", "+"}},
		{name: "example_10", value: "@.length/e", expected: []string{"@.length", "e", "/"}},
		{name: "example_12", value: "123.456", expected: []string{"123.456"}},
		{name: "example_13", value: " 123.456 ", expected: []string{"123.456"}},
		{name: "1 /", value: "1 /", expected: []string{"1", "/"}},
		{name: "1 + ", value: "1 + ", expected: []string{"1", "+"}},
		{name: "1 -", value: "1 -", expected: []string{"1", "-"}},
		{name: "1 * ", value: "1 * ", expected: []string{"1", "*"}},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			buf := newBuffer([]byte(test.value))
			result, err := buf.rpn()
			if err != nil {
				t.Errorf("Unexpected error: %s", err.Error())
			} else if !sliceEqual(test.expected, result) {
				t.Errorf("Error on RPN(%s): result doesn't match\nExpected: %s\nActual: %s", test.value, sliceString(test.expected), sliceString(result))
			}
		})
	}
}

// TestBuffer_RPNError feeds rpn() malformed expressions (doubled operators,
// unterminated strings, unbalanced parentheses, unknown identifiers, empty
// input) and requires each to produce an error.
func TestBuffer_RPNError(t *testing.T) {
	tests := []struct {
		value string
	}{
		{value: "1 + / 1"},
		{value: "1 * / 1"},
		{value: "1 - / 1"},
		{value: "1 / / 1"},
		{value: "1 + * 1"},
		{value: "1 * * 1"},
		{value: "1 - * 1"},
		{value: "1 / * 1"},
		{value: "1e1.1 + 1"},
		{value: "len('string)"},
		{value: "'Hello ' + 'World"},
		{value: "@.length + $['length')"},
		{value: "2 + 2)"},
		{value: "(2 + 2"},
		{value: "e + q"},
		{value: "foo(e)"},
		{value: "++2"},
		{value: ""},
	}
	for _, test := range tests {
		t.Run(test.value, func(t *testing.T) {
			buf := newBuffer([]byte(test.value))
			result, err := buf.rpn()
			if err == nil {
				t.Errorf("Expected error, nil given, with result: %v", strings.Join(result, ", "))
			}
		})
	}
}

// TestTokenize checks the package-level tokenize() splitter: operators,
// separators (":", ",") and whole JSONPath expressions must come out as the
// expected token slices; malformed numbers/strings must error.
func TestTokenize(t *testing.T) {
	tests := []struct {
		name     string
		value    string
		expected []string
		fail     bool
	}{
		{name: "example_1", value: "@.length", expected: []string{"@.length"}},
		{name: "example_2", value: "1 + 2", expected: []string{"1", "+", "2"}},
		{name: "example_3", value: "1+2", expected: []string{"1", "+", "2"}},
		{name: "example_4", value: "1:", expected: []string{"1", ":"}},
		{name: "example_5", value: ":2 :1", expected: []string{":", "2", ":", "1"}},
		{name: "example_6", value: "1 ,2,'foo'", expected: []string{"1", ",", "2", ",", "'foo'"}},
		{name: "example_7", value: "(@.length-1)", expected: []string{"(", "@.length", "-", "1", ")"}},
		{name: "example_8", value: "?(@.length-1)", expected: []string{"?", "(", "@.length", "-", "1", ")"}},
		{name: "example_9", value: "'foo'", expected: []string{"'foo'"}},
		{name: "example_10", value: "$.foo[(@.length - 3):3:]", expected: []string{"$.foo[(@.length - 3):3:]"}},
		{name: "example_11", value: "$..", expected: []string{"$.."}},
		{name: "blank", value: "", expected: []string{}},
		{name: "number", value: "1e", fail: true},
		{name: "string", value: "'foo", fail: true},
		{name: "fail", value: "@.[", fail: true},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			result, err := tokenize(test.value)
			if test.fail {
				if err == nil {
					t.Error("Expected error: nil given")
				}
			} else if err != nil {
				t.Errorf("Unexpected error: %s", err.Error())
			} else if !sliceEqual(test.expected, result) {
				t.Errorf("Error on RPN(%s): result doesn't match\nExpected: %s\nActual: %s", test.value, sliceString(test.expected), sliceString(result))
			}
		})
	}
}

// TestBuffer_Current verifies that current() on an empty buffer reports io.EOF.
func TestBuffer_Current(t *testing.T) {
	buf := newBuffer([]byte{})
	_, err := buf.current()
	if err != io.EOF {
		t.Error("Unexpected result: io.EOF expected")
	}
}

// TestBuffer_Numeric exercises numeric(true): valid literals (including
// signed and exponent forms) leave buf.index just past the number; invalid
// literals (doubled signs, bare '.', 'e', digit-less exponents) must fail.
func TestBuffer_Numeric(t *testing.T) {
	tests := []struct {
		value string
		index int // expected buf.index after numeric(); ignored when fail is true
		fail  bool
	}{
		{value: "1", index: 1, fail: false},
		{value: "0", index: 1, fail: false},
		{value: "1.3e2", index: 5, fail: false},
		{value: "-1.3e2", index: 6, fail: false},
		{value: "-1.3e-2", index: 7, fail: false},
		{value: "..3", index: 0, fail: true},
		{value: "e.", index: 0, fail: true},
		{value: ".e.", index: 0, fail: true},
		{value: "1.e1", index: 0, fail: true},
		{value: "0.e0", index: 0, fail: true},
		{value: "0+0", index: 1, fail: false},
		{value: "0-1", index: 1, fail: false},
		{value: "++1", index: 0, fail: true},
		{value: "--1", index: 0, fail: true},
		{value: "-+1", index: 0, fail: true},
		{value: "+-1", index: 0, fail: true},
		{value: "+", index: 0, fail: true},
		{value: "-", index: 0, fail: true},
		{value: ".", index: 0, fail: true},
		{value: "e", index: 0, fail: true},
		{value: "+a", index: 0, fail: true},
	}
	for _, test := range tests {
		t.Run(test.value, func(t *testing.T) {
			buf := newBuffer([]byte(test.value))
			err := buf.numeric(true)
			if !test.fail && err != nil && err != io.EOF {
				t.Errorf("Unexpected error: %s", err.Error())
			} else if test.fail && (err == nil || err == io.EOF) {
				t.Errorf("Expected error, got nothing")
			} else if !test.fail && test.index != buf.index {
				t.Errorf("Wrong index: expected %d, got %d", test.index, buf.index)
			}
		})
	}
}
//
//  MBViewController.h
//  MBFaker Demo
//
//  Created by <NAME> on 11/9/12.
//  Copyright (c) 2012 <NAME>. All rights reserved.
//

#import <UIKit/UIKit.h>

/// Demo view controller that drives a table view; it declares conformance to
/// both UITableViewDataSource and UITableViewDelegate, so it serves as its
/// table view's data source and delegate.
@interface MBViewController : UIViewController <UITableViewDataSource, UITableViewDelegate>

/// Table view displaying the demo content (IBOutlet — wired in Interface Builder).
@property (strong, nonatomic) IBOutlet UITableView *tableView;

/// Bar button item (IBOutlet). NOTE(review): shares the name "buttonAdd" with
/// the action method below; legal (selectors differ by the colon) but easy to
/// confuse — confirm before renaming either.
@property (strong, nonatomic) IBOutlet UIBarButtonItem *buttonAdd;

/// Action triggered by the add button.
- (IBAction)buttonAdd:(id)sender;

@end
# Generate one KIR (inventory) SQL file per SKPD from the kir_setwan.sql
# template, then concatenate everything into ../gabungan_kir.sql.
#
# Each output file is the template with the unit name "setwan" replaced by the
# target SKPD name and "id_skpd = 1" replaced by that unit's id. The original
# script spelled out 47 near-identical `cat | sed | sed` pipelines; this loop
# produces byte-identical output files from a single sed invocation each.
#
# Names are listed in id order: setda is id 2, dpupr is 3, ... bppd is 48.
id=2
for skpd in setda dpupr dishub dinkes rsud disdik perpustakaan sosial dpmd \
            dpppa dukcatpil pertanian kehutanan dkp dkukmp distamben dpmptsp \
            bkd inspektorat bappeda dlh dko kesbangpol satpolpp bkppd korpri \
            paringin paringinkota paringintimur lampihong batumandi juai \
            awayan halong paringinselatan batupiring tebingtinggi bpbd dpkp \
            disnakertrans dppkb kominfo kearsipan perikanan pariwisata \
            perdagangan bppd
do
    sed -e "s/setwan/${skpd}/g" -e "s/id_skpd = 1/id_skpd = ${id}/g" \
        kir_setwan.sql > "kir_${skpd}.sql"
    id=$((id + 1))
done

# Combine every per-SKPD file (including the kir_setwan.sql template itself,
# matched by the glob) into one file — same behavior as the original.
cat kir_*.sql > ../gabungan_kir.sql
"""
Runs evaluation functions in parallel subprocesses in order
to evaluate multiple genomes at once.
"""
from multiprocessing import Pool


class ParallelEvaluator(object):
    """Distributes genome evaluation across a multiprocessing worker pool."""

    def __init__(self, num_workers, eval_function, timeout=None):
        """
        eval_function is called with two positional arguments, a genome
        object and a config object (see the apply_async call in evaluate()),
        and must return a (fitness, alternative_metrics) pair — evaluate()
        unpacks two values from each worker result.
        """
        self.num_workers = num_workers
        self.eval_function = eval_function
        # Per-job timeout in seconds, forwarded to AsyncResult.get(); None waits forever.
        self.timeout = timeout
        self.pool = Pool(num_workers)

    def __del__(self):
        # NOTE(review): close() lets queued work finish before join();
        # terminate() would stop workers immediately — confirm which is intended.
        self.pool.close() # should this be terminate?
        self.pool.join()

    def evaluate(self, genomes, config):
        """Evaluate all genomes in parallel and write results back onto them.

        genomes is an iterable of (genome_id, genome) pairs; the id is unused.
        Each genome gets .fitness and .alternative_metrics assigned from the
        corresponding worker's return value. Blocks until every job finishes
        (or raises multiprocessing.TimeoutError if a job exceeds self.timeout).
        """
        jobs = []
        for ignored_genome_id, genome in genomes:
            jobs.append(self.pool.apply_async(self.eval_function, (genome, config)))

        # assign the fitness back to each genome
        for job, (ignored_genome_id, genome) in zip(jobs, genomes):
            genome.fitness, genome.alternative_metrics = job.get(timeout=self.timeout)
import log from "electron-log"
import {ipcMain, Menu} from "electron"

// Test-only hooks: Spectron drives the main process by emitting custom
// events on global.process; each handler below reacts to one such event.

// "spectron:mock" — replace the ipc handler registered under `name` so any
// invocation resolves to the fixed `value` supplied by the test.
global.process.on("spectron:mock", (name, value) => {
  log.info("Spectron is mocking", name, "with:", value)
  ipcMain.removeHandler(name)
  ipcMain.handle(name, () => value)
})

// "spectron:clickAppMenuItem" — look up an application menu item by id and
// click it programmatically.
global.process.on("spectron:clickAppMenuItem", (id) => {
  // Fixed typo in the log message ("Specton" -> "Spectron").
  log.info("Spectron is clicking app menu item id: ", id)
  const menu = Menu.getApplicationMenu()
  const item = menu.getMenuItemById(id)
  item.click()
})
#this is not mean to be run locally # echo Check if TTY if [ "`tty`" != "not a tty" ]; then echo "YOU SHOULD NOT RUN THIS IN INTERACTIVE, IT DELETES YOUR LOCAL FILES" else # ls -ltr . echo "ENV..................................." env echo "VOMS" voms-proxy-info -all echo "CMSSW BASE, python path, pwd" echo $CMSSW_BASE echo $PYTHON_PATH echo $PWD rm -rf $CMSSW_BASE/lib/ rm -rf $CMSSW_BASE/src/ rm -rf $CMSSW_BASE/module/ rm -rf $CMSSW_BASE/python/ mv lib $CMSSW_BASE/lib mv src $CMSSW_BASE/src mv module $CMSSW_BASE/module mv python $CMSSW_BASE/python echo Found Proxy in: $X509_USER_PROXY python crab_script_2018_DYJets_DL-HT800.py $1 # ls -ltr fi
package cn.lts.common.pojo;

import cn.lts.common.util.MessageUtil;

/**
 * Response message carrying a music payload.
 */
public class MusicMessage extends BaseMessage {

	/**
	 * Music payload.
	 * NOTE(review): the field name is capitalized ("Music") against Java
	 * convention — presumably so field-name-based serialization emits a
	 * capitalized element/key. Confirm no serializer depends on it before
	 * renaming.
	 */
	private Music Music;

	public Music getMusic() {
		return Music;
	}

	public void setMusic(Music music) {
		Music = music;
	}

	/**
	 * Identifies this message as the music response type defined by MessageUtil.
	 */
	@Override
	public String getMsgType() {
		return MessageUtil.RESP_MESSAGE_TYPE_MUSIC;
	}
}
package com.test_shangguigu.t08_propertyValue.config;

import com.test_shangguigu.t08_propertyValue.beans.Person;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.PropertySource;

/**
 * Demonstrates loading bean property values from a .properties file.
 *
 * Ways to point Spring at the configuration file:
 *   XML:        &lt;context:property-placeHolder location="classpath:person.properties"/&gt;
 *   Annotation: @PropertySource, as used on this class.
 *
 * Ways to read a property once the file is loaded:
 *   - the @Value annotation on a bean field
 *   - applicationContext.getEnviroment(); enviroment.getProperty("person.nickName");
 */
@PropertySource(value="classpath:com/test_shangguigu/person.properties")
@Configuration
public class AppConfig08 {

	// NOTE(review): "persion" looks like a typo of "person", but the bean id
	// is a container lookup name other code may reference — confirm no caller
	// depends on it before renaming.
	@Bean("persion")
	public Person persion(){
		return new Person("",0);
	}
}
// Re-export the default export of ./Insert as the named binding `Insert`
// (equivalent to importing the default and exporting it by name).
export { default as Insert } from './Insert';
from bs4 import BeautifulSoup


class HTMLLinkExtractor:
    """Accumulates hyperlinks of a given file type from HTML documents."""

    def __init__(self):
        # Collected hrefs; grows across successive extract_links() calls.
        self.LinkList = []

    def extract_links(self, html_content, filetype):
        """Append to self.LinkList every <a href="..."> value in html_content
        whose href ends with `filetype` (e.g. ".pdf").

        html_content: HTML source as str/bytes, parsed with html.parser.
        filetype: suffix to match via str.endswith.
        """
        soup = BeautifulSoup(html_content, 'html.parser')
        # find_all(..., href=True) only yields anchors that carry an href.
        for anchor in soup.find_all('a', href=True):
            link = anchor.get('href')
            # Bug fix: the original called link.endswith(...) BEFORE its
            # `is not None` check (and papered over the crash with a broad
            # except AttributeError). Guard first, then match.
            if link is not None and link.endswith(filetype):
                self.LinkList.append(link)
#!/bin/sh
# Provision password-less SSH between vagrant guests: generate a shared
# keypair once under /vagrant/files, then install the key material and
# authorized_keys entries into the current user's ~/.ssh.

# uncomment it for debugging
#set -x

# ssh config
mkdir -p ~/.ssh

src="/vagrant/files"
mkdir -p ${src}

if [ ! -f ${src}/id_rsa ]
then
  # ensure we have ssh-keygen rpm installed
  sudo yum install -y -q openssh

  # generate key (no passphrase, so automation can use it)
  ssh-keygen -f ${src}/id_rsa -t rsa -N ''

  # ssh without password: record the public key under both authorized_keys
  # file names (older sshd versions also read authorized_keys2)
  cat ${src}/id_rsa.pub | awk '{print $1, $2, "Generated by vagrant"}' >> ${src}/authorized_keys2
  cat ${src}/id_rsa.pub | awk '{print $1, $2, "Generated by vagrant"}' >> ${src}/authorized_keys
fi

# Bug fix: the original used a bash array (files=(...) / ${files[@]}),
# which is not POSIX and breaks under a strict /bin/sh. A plain word
# list iterates the same two file names portably.
for f in authorized_keys2 authorized_keys
do
  cat ${src}/${f} >> ~/.ssh/${f}
done

cp ${src}/id_rsa* ~/.ssh/

# Disable host-key prompts so first-time connections don't block scripts.
cat >> ~/.ssh/config <<EOF
Host *
  StrictHostKeyChecking no
  UserKnownHostsFile=/dev/null
EOF

chmod 600 ~/.ssh/*
import asyncio


async def simulate_async_create_task(coroutine) -> bool:
    """Attempt to schedule `coroutine` as a task on the running event loop.

    Returns True when asyncio.create_task() accepts the argument and
    schedules it; False when create_task raises for any reason (e.g. the
    argument is not a coroutine).
    """
    try:
        asyncio.create_task(coroutine)
    except Exception:
        return False
    return True
import { ElementRef, EventEmitter } from "@angular/core";
import { DatagridStringFilterImpl } from "./built-in/filters/datagrid-string-filter-impl";
import { DatagridHideableColumnModel } from "./datagrid-hideable-column.model";
import { ClrDatagridComparatorInterface } from "./interfaces/comparator.interface";
import { ClrDatagridSortOrder } from "./interfaces/sort-order";
import { DragDispatcher } from "./providers/drag-dispatcher";
import { FiltersProvider } from "./providers/filters";
import { Sort } from "./providers/sort";
import { DatagridFilterRegistrar } from "./utils/datagrid-filter-registrar";
/**
 * Declaration of one datagrid column: holds the column's sorting state,
 * string-filter registration (via DatagridFilterRegistrar) and
 * hide/show metadata.
 */
export declare class ClrDatagridColumn extends DatagridFilterRegistrar<DatagridStringFilterImpl> {
    private _sort;
    private _dragDispatcher;
    constructor(_sort: Sort, filters: FiltersProvider, _dragDispatcher: DragDispatcher);
    /**
     * @property columnId
     *
     * @description
     * A ClrDatagridColumn class variable that holds the number of ClrDatagridColumn instances for a Datagrid.
     * It is used to generate a unique id for the ClrDatagridColumn instance.
     *
     */
    columnId: string;
    /**
     * @property hidden
     *
     * @description
     * A property that allows the column to be hidden / shown with css
     * Note the default allows the ClrDatagridColumn to have an *ngIf on it. (EHCAIWC - will occur if it's not
     * initialized)
     *
     * @default false
     *
     */
    readonly hidden: boolean;
    // NOTE(review): presumably the drag-handle elements consumed by the
    // injected DragDispatcher — confirm against the component template.
    handleElRef: ElementRef;
    handleTrackerElRef: ElementRef;
    /**
     * Subscription to the sort service changes
     */
    private _sortSubscription;
    ngOnDestroy(): void;
    private _field;
    // Name of the record field this column displays/filters on.
    field: string;
    /**
     * ClrDatagridComparatorInterface to use when sorting the column
     */
    private _sortBy;
    sortBy: ClrDatagridComparatorInterface<any> | string;
    /**
     * Indicates if the column is sortable
     */
    readonly sortable: boolean;
    /**
     * Indicates if the column is currently sorted
     *
     * @deprecated This will be removed soon, in favor of the sortOrder mechanism
     */
    private _sorted;
    /**
     * @deprecated This will be removed soon, in favor of the sortOrder mechanism
     */
    sorted: boolean;
    /**
     * @deprecated This will be removed soon, in favor of the sortOrder mechanism
     */
    sortedChange: EventEmitter<boolean>;
    /**
     * Indicates how the column is currently sorted
     */
    private _sortOrder;
    sortOrder: ClrDatagridSortOrder;
    // ARIA sort attribute value derived from the current sort state.
    readonly ariaSort: string;
    sortOrderChange: EventEmitter<ClrDatagridSortOrder>;
    /**
     * Sorts the datagrid based on this column
     */
    sort(reverse?: boolean): void;
    /**
     * Indicates if the column is currently sorted in ascending order
     */
    readonly asc: boolean;
    /**
     * Indicates if the column is currently sorted in descending order
     */
    readonly desc: boolean;
    /**
     * A custom filter for this column that can be provided in the projected content
     */
    customFilter: boolean;
    projectedFilter: any;
    // Current value of the built-in string filter.
    filterValue: string;
    updateFilterValue: string;
    filterValueChange: EventEmitter<{}>;
    /***********
     *
     * @property hideable
     *
     * @description
     * When a column is hideable this is defined with an instance of DatagridHideableColumnModel.
     * When its not hideable should be undefined.
     *
     */
    hideable: DatagridHideableColumnModel;
}
#!/usr/bin/env bash # Make sure we’re using the latest Homebrew. brew update # Upgrade any already-installed formulae. brew upgrade # Save Homebrew’s installed location. BREW_PREFIX=$(brew --prefix) # Install GNU core utilities (those that come with macOS are outdated). # Don’t forget to add `$(brew --prefix coreutils)/libexec/gnubin` to `$PATH`. brew install coreutils ln -s "${BREW_PREFIX}/bin/gsha256sum" "${BREW_PREFIX}/bin/sha256sum" # Install some other useful utilities like `sponge`. brew install moreutils # Install GNU `find`, `locate`, `updatedb`, and `xargs`, `g`-prefixed. brew install findutils # Install GNU `sed`, overwriting the built-in `sed`. #TODO: Replace with a way that replaced built-in #brew install gnu-sed --with-default-names # Install a modern version of Bash. brew install bash brew install bash-completion2 # Switch to using brew-installed bash as default shell if ! fgrep -q "${BREW_PREFIX}/bin/bash" /etc/shells; then echo "${BREW_PREFIX}/bin/bash" | sudo tee -a /etc/shells chsh -s "${BREW_PREFIX}/bin/bash" fi # Install `wget` with IRI support. brew install wget --with-iri # Install GnuPG to enable PGP-signing commits. brew install gnupg # Install more recent versions of some macOS tools. brew install vim brew install grep brew install openssh brew install screen brew install gmp # Install font tools. brew tap bramstein/webfonttools brew tap homebrew/cask-drivers brew install sfnt2woff brew install sfnt2woff-zopfli brew install woff2 # Install other useful binaries. 
brew install ack #brew install exiv2 brew install git brew install git-lfs brew install gs brew install lua brew install p7zip brew install pigz brew install pv brew install rename brew install rlwrap brew install ssh-copy-id brew install tree brew install vbindiff brew install zopfli brew install ghq brew install emacs brew install nmap brew install figma brew install pulumi # Tooling brew install --cask google-chrome brew install --cask google-drive-file-stream brew install --cask adobe-creative-cloud brew install --cask visual-studio-code brew install --cask iterm2 brew install --cask spotify brew install --cask slack brew install --cask sonos brew install --cask google-cloud-sdk # Github CLI brew install gh # Remove outdated versions from the cellar. brew cleanup
<reponame>nebulatgs/cli package cmd import ( "context" "github.com/railwayapp/cli/entity" ) func (h *Handler) Logs(ctx context.Context, req *entity.CommandRequest) error { numLines, linesErr := req.Cmd.Flags().GetInt32("lines") if linesErr != nil { return linesErr } shouldDownload, shouldDownloadErr := req.Cmd.Flags().GetBool("download") if shouldDownloadErr != nil { return shouldDownloadErr } return h.ctrl.GetActiveDeploymentLogs(ctx, numLines, shouldDownload) }
<gh_stars>1-10 Alchemy::Content.include Alchemy::ContentDec
from base64 import b64encode
from datetime import datetime, timedelta
from uuid import uuid4

from flask import request, send_file
from flask_restx import Api, Resource, fields

from .captcha_generator import CaptchaGenerator
from .db import db
from .models import Captcha
from .speech import text_to_speech

api = Api(
    title="CAPTCHA API",
    description="A simple API for handling CAPTCHA",
    security={"oauth2": ["api"]},
    doc="/swagger-ui",
)

captcha_ns = api.namespace(
    "captcha", description="Utilities for validating and generating CAPTCHA"
)

# Request body schema for POST /captcha/ (the submitted answer and captcha id).
captcha_model = captcha_ns.model(
    "CaptchaAnswer", {"answer": fields.String, "id": fields.String}
)


def get_request_data(request):
    """
    Gets the data from the request, preferring form data over a JSON body.

    Returns an empty dict when the request carries neither.
    """
    # https://stackoverflow.com/questions/10434599/how-to-get-data-received-in-flask-request/25268170
    data = request.form.to_dict() if request.form else request.get_json()
    if not data:
        return {}
    return data


@captcha_ns.route("/")
class CaptchaResource(Resource):
    """
    Handling captchas
    """

    def __init__(self, api=None, *args, **kwargs):
        super().__init__(api=api, *args, **kwargs)
        # One generator per resource instance, using the app-configured font.
        self.generator = CaptchaGenerator(
            fontname=api.app.config["DEFAULT_CAPTCHA_FONT"]
        )

    def get(self):
        """
        Generate a new captcha: persists the answer under a fresh UUID and
        returns the id plus the image as a base64 data URI.
        """
        img_array, answer = self.generator.generate_captcha()
        captcha_id = str(uuid4())
        new_captcha = Captcha(id=captcha_id, answer=answer)
        db.session.add(new_captcha)
        db.session.commit()
        return {
            "id": captcha_id,
            "img": "data:image/jpeg;base64,"
            + b64encode(img_array.getvalue()).decode(),
        }

    @captcha_ns.doc(body=captcha_model)
    def post(self):
        """
        Solve a captcha and match it with the stored answer.

        Captchas are single-use: the row is deleted on every outcome
        (expired, wrong, or correct). Answers expire after one minute and
        are compared case-insensitively (casefold).
        """
        data = get_request_data(request)
        existing = Captcha.query.filter_by(id=data["id"]).first()
        if not existing:
            return {"message": "Not found"}, 404
        time_difference = datetime.utcnow() - existing.creation_time
        if time_difference > timedelta(minutes=1):
            db.session.delete(existing)
            db.session.commit()
            return {"message": "You did not answer fast enough!"}, 400
        if data["answer"].casefold() != existing.answer.casefold():
            db.session.delete(existing)
            db.session.commit()
            return {"message": "Invalid answer"}, 400
        db.session.delete(existing)
        db.session.commit()
        return {"message": "Valid"}


@captcha_ns.route("/audio/<string:captcha_id>")
class CaptchaAudioResource(Resource):
    """
    Sending audio recordings for captchas
    """

    def get(self, captcha_id):
        """
        Return an MP3 that spells out the captcha answer character by
        character (joined with ", " so each is spoken separately).
        """
        existing_captcha = Captcha.query.get_or_404(captcha_id)
        split_answer = ", ".join(existing_captcha.answer)
        mp3_file = text_to_speech(split_answer)
        # NOTE(review): cache_timeout was removed in Flask 2.x (renamed to
        # max_age) — confirm the pinned Flask version still accepts it.
        return send_file(
            mp3_file,
            as_attachment=True,
            cache_timeout=-1,
            attachment_filename="captcha.mp3",
            mimetype="audio/mpeg",
        )
public class MaxOfThree {

    /**
     * Returns the largest of three integers.
     *
     * Extracted from main so the comparison logic is reusable and
     * testable; uses the standard library instead of a manual if-chain.
     *
     * @param a first value
     * @param b second value
     * @param c third value
     * @return the maximum of a, b and c
     */
    static int maxOfThree(int a, int b, int c) {
        return Math.max(a, Math.max(b, c));
    }

    public static void main(String[] args) {
        int int1 = 10;
        int int2 = 20;
        int int3 = 30;
        int max = maxOfThree(int1, int2, int3);
        // Same output text as before, only the computation was refactored.
        System.out.println("The maximum number among " + int1 + ", " + int2
                + " and " + int3 + " is " + max);
    }
}
/**
 *  @file tm_reader_async.c
 *  @brief Mercury API - background reading implementation
 *  @author <NAME>
 *  @date 11/18/2009
 */

/*
 * Copyright (c) 2009 ThingMagic, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
*/ #include "tm_config.h" #include "tm_reader.h" #include "serial_reader_imp.h" #include <stdio.h> TMR_Status restart_reading(struct TMR_Reader *reader); #ifdef TMR_ENABLE_BACKGROUND_READS #include <stdlib.h> #include <pthread.h> #include <semaphore.h> #include <time.h> #include <stdio.h> #include <string.h> #ifndef WIN32 #include <sys/time.h> #endif #ifdef TMR_ENABLE_LLRP_READER #include "llrp_reader_imp.h" #endif #include "osdep.h" #include "tmr_utils.h" static void *do_background_reads(void *arg); static void *parse_tag_reads(void *arg); void process_async_response(TMR_Reader *reader); bool isBufferOverFlow = false; #endif /* TMR_ENABLE_BACKGROUND_READS */ #ifdef TMR_ENABLE_UHF extern bool isMultiSelectEnabled; extern bool isEmbeddedTagopEnabled; uint8_t multiReadAsyncCount; #endif /* TMR_ENABLE_UHF */ TMR_Status TMR_startReading(struct TMR_Reader *reader) { if (NULL == reader) { return TMR_ERROR_INVALID; } { #ifdef SINGLE_THREAD_ASYNC_READ TMR_Status ret; uint32_t ontime; TMR_paramGet(reader, TMR_PARAM_READ_ASYNCONTIME, &ontime); reader->continuousReading = true; if ( #ifdef TMR_ENABLE_UHF (((isM6eFamily(&reader->u.serialReader)) && (reader->readParams.asyncOffTime == 0 || (reader->readParams.asyncOffTime != 0 && (reader->featureFlags & TMR_READER_FEATURES_FLAG_DUTY_CYCLE)))) || (TMR_SR_MODEL_M3E == reader->u.serialReader.versionInfo.hardware[0])) && #endif /* TMR_ENABLE_UHF*/ ((TMR_READ_PLAN_TYPE_SIMPLE == reader->readParams.readPlan->type) || ((TMR_READ_PLAN_TYPE_MULTI == reader->readParams.readPlan->type))) ) { if (reader->readParams.asyncOffTime == 0) { reader->dutyCycle = false; } else { reader->dutyCycle = true; } } else { reader->dutyCycle = false; } ret = TMR_read(reader, ontime, NULL); if(TMR_SUCCESS != ret) return ret; #else #ifdef TMR_ENABLE_BACKGROUND_READS int ret; bool createParser = true; if (TMR_READER_TYPE_SERIAL == reader->readerType) { #ifdef TMR_ENABLE_SERIAL_READER /** * if model is M6e and it's variant * asyncOffTime == 0 * only then use 
streaming */ if ( #ifdef TMR_ENABLE_UHF (((isM6eFamily(&reader->u.serialReader)) && (reader->readParams.asyncOffTime == 0 || (reader->readParams.asyncOffTime != 0 && (reader->featureFlags & TMR_READER_FEATURES_FLAG_DUTY_CYCLE)))) || (TMR_SR_MODEL_M3E == reader->u.serialReader.versionInfo.hardware[0])) && #endif /* TMR_ENABLE_UHF*/ ((TMR_READ_PLAN_TYPE_SIMPLE == reader->readParams.readPlan->type) || ((TMR_READ_PLAN_TYPE_MULTI == reader->readParams.readPlan->type))) ) { if (reader->readParams.asyncOffTime == 0) { reader->dutyCycle = false; } else { reader->dutyCycle = true; } } else { createParser = false; reader->dutyCycle = false; } #ifdef TMR_ENABLE_UHF multiReadAsyncCount++; #endif /* TMR_ENABLE_UHF */ #else return TMR_ERROR_UNSUPPORTED; #endif/* TMR_ENABLE_SERIAL_READER */ } #ifdef TMR_ENABLE_LLRP_READER if (TMR_READER_TYPE_LLRP == reader->readerType) { multiReadAsyncCount++; /** * In case of LLRP reader and continuous reading, disable the * LLRP background receiver. **/ TMR_LLRP_setBackgroundReceiverState(reader, false); /** * Note the keepalive start time * Keepalive monitoring happens only * for async reads. **/ reader->u.llrpReader.ka_start = tmr_gettime(); } #endif /** * Initialize read_started semaphore **/ pthread_mutex_lock(&reader->backgroundLock); reader->readState = TMR_READ_STATE_STARTING; pthread_cond_broadcast(&reader->readCond); pthread_mutex_unlock(&reader->backgroundLock); if (true == createParser) { /** Background parser thread initialization * * Only M6e supports Streaming, and in case of other readers * we still use pseudo-async mechanism for continuous read. 
* To achieve continuous reading, create a parser thread */ pthread_mutex_lock(&reader->parserLock); if (false == reader->parserSetup) { ret = pthread_create(&reader->backgroundParser, NULL, parse_tag_reads, reader); if (0 != ret) { pthread_mutex_unlock(&reader->parserLock); return TMR_ERROR_NO_THREADS; } pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL); pthread_detach(reader->backgroundParser); /** Initialize semaphores only for the first time * These semaphores are used only in case of streaming */ reader->queue_depth = 0; sem_init(&reader->queue_length, 0, 0); sem_init(&reader->queue_slots, 0, TMR_MAX_QUEUE_SLOTS); reader->parserSetup = true; } reader->parserEnabled = true; /* Enable streaming */ reader->continuousReading = true; reader->finishedReading = false; pthread_cond_signal(&reader->parserCond); pthread_mutex_unlock(&reader->parserLock); } /* Background reader thread initialization */ pthread_mutex_lock(&reader->backgroundLock); if (false == reader->backgroundSetup) { ret = pthread_create(&reader->backgroundReader, NULL, do_background_reads, reader); if (0 != ret) { pthread_mutex_unlock(&reader->backgroundLock); return TMR_ERROR_NO_THREADS; } pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL); reader->backgroundSetup = true; } reader->backgroundEnabled = true; reader->searchStatus = true; #ifdef TMR_ENABLE_SERIAL_READER if (TMR_READER_TYPE_SERIAL == reader->readerType) { reader->u.serialReader.tagopFailureCount = 0; reader->u.serialReader.tagopSuccessCount = 0; } #endif/* TMR_ENABLE_SERIAL_READER */ pthread_cond_signal(&reader->backgroundCond); pthread_mutex_unlock(&reader->backgroundLock); /* End of Background reader thread initialization */ /** * Wait for the Background thread to send the read command. * This will prevent of adding extra sleep in the application * after TMR_startReading() call. 
**/ pthread_mutex_lock(&reader->backgroundLock); while (TMR_READ_STATE_STARTING == reader->readState) { pthread_cond_wait(&reader->readCond, &reader->backgroundLock); } pthread_mutex_unlock(&reader->backgroundLock); #endif /* TMR_ENABLE_BACKGROUND_READS */ #endif } return TMR_SUCCESS; } void reset_continuous_reading(struct TMR_Reader *reader) { if (NULL == reader) { return; } if (true == reader->continuousReading) { #ifndef SINGLE_THREAD_ASYNC_READ #ifdef TMR_ENABLE_LLRP_READER if (TMR_READER_TYPE_LLRP == reader->readerType) { /** * In case of LLRP reader, re-enable the * LLRP background receiver as continuous reading is finished **/ TMR_LLRP_setBackgroundReceiverState(reader, true); } #endif #endif /*SINGLE_THREAD_ASYNC_READ*/ /* disable streaming */ reader->continuousReading = false; reader->dutyCycle = false; } } bool TMR_isReadStopped(struct TMR_Reader *reader) { /* This flag will be true only after * receiveing Stop Read(2f with 02) response. */ return reader->finishedReading; } TMR_Status TMR_stopReading(struct TMR_Reader *reader) { if (NULL == reader) { return TMR_ERROR_INVALID; } reader->hasContinuousReadStarted = false; #ifdef SINGLE_THREAD_ASYNC_READ reader->cmdStopReading(reader); #else #ifdef TMR_ENABLE_BACKGROUND_READS /* Check if background setup is active */ pthread_mutex_lock(&reader->backgroundLock); if (false == reader->backgroundSetup) { pthread_mutex_unlock(&reader->backgroundLock); return TMR_SUCCESS; } if (false == reader->searchStatus) { /** * searchStatus is false, i.e., reading is already * stopped. Return success. **/ pthread_mutex_unlock(&reader->backgroundLock); return TMR_SUCCESS; } /** * Else, read is in progress. 
Set * searchStatus to false; **/ reader->searchStatus = false; pthread_mutex_unlock(&reader->backgroundLock); /** * Wait until the reading has started **/ pthread_mutex_lock(&reader->backgroundLock); while (TMR_READ_STATE_STARTING == reader->readState) { pthread_cond_wait(&reader->readCond, &reader->backgroundLock); } pthread_mutex_unlock(&reader->backgroundLock); if ((true == reader->continuousReading) && (true == reader->trueAsyncflag)) { /** * In case of true continuous reading, we need to send * stop reading message immediately. **/ if(!isBufferOverFlow) { reader->cmdStopReading(reader); } /** * Wait logic has been changed in case of continuous reading. * Wait while the background reader is still reading. **/ pthread_mutex_lock(&reader->backgroundLock); while (TMR_READ_STATE_DONE != reader->readState) { pthread_cond_wait(&reader->readCond, &reader->backgroundLock); } pthread_mutex_unlock(&reader->backgroundLock); /** * By this time, reader->backgroundEnabled is * already set to false. i.e., background reader thread * is suspended. **/ } /** * wait until background reader thread finishes. * This is needed for pseudo-async reads and also * worst case of continuous reading, when read isn't success **/ pthread_mutex_lock(&reader->backgroundLock); reader->backgroundEnabled = false; while (true == reader->backgroundRunning) { pthread_cond_wait(&reader->backgroundCond, &reader->backgroundLock); } pthread_mutex_unlock(&reader->backgroundLock); /** * Reset continuous reading settings, so that * the subsequent startReading() call doesn't have * any surprises. 
**/ #else reader->cmdStopReading(reader); #endif reset_continuous_reading(reader); #endif { #ifdef TMR_ENABLE_UHF if (multiReadAsyncCount > 0) { multiReadAsyncCount--; if (multiReadAsyncCount == 0) { isMultiSelectEnabled = false; } } isEmbeddedTagopEnabled = false; if (TMR_READER_TYPE_SERIAL == reader->readerType) { reader->u.serialReader.gen2AllMemoryBankEnabled = false; } #endif /* TMR_ENABLE_UHF */ #ifdef TMR_ENABLE_BACKGROUND_READS reader->isOffTimeAdded = false; reader->fetchTagReads = false; #endif /* TMR_ENABLE_BACKGROUND_READS */ reader->subOffTime = 0; } return TMR_SUCCESS; } void notify_read_listeners(TMR_Reader *reader, TMR_TagReadData *trd) { TMR_ReadListenerBlock *rlb; /* notify tag read to listener */ if (NULL != reader) { #if !defined(SINGLE_THREAD_ASYNC_READ) && defined(TMR_ENABLE_BACKGROUND_READS) pthread_mutex_lock(&reader->listenerLock); #endif rlb = reader->readListeners; while (rlb) { rlb->listener(reader, trd, rlb->cookie); rlb = rlb->next; } #if !defined(SINGLE_THREAD_ASYNC_READ) && defined(TMR_ENABLE_BACKGROUND_READS) pthread_mutex_unlock(&reader->listenerLock); #endif } } void notify_stats_listeners(TMR_Reader *reader, TMR_Reader_StatsValues *stats) { TMR_StatsListenerBlock *slb; if (NULL == reader) { return; } /* notify stats to the listener */ #if !defined(SINGLE_THREAD_ASYNC_READ) && defined(TMR_ENABLE_BACKGROUND_READS) pthread_mutex_lock(&reader->listenerLock); #endif slb = reader->statsListeners; while (slb) { slb->listener(reader, stats, slb->cookie); slb = slb->next; } #if !defined(SINGLE_THREAD_ASYNC_READ) && defined(TMR_ENABLE_BACKGROUND_READS) pthread_mutex_unlock(&reader->listenerLock); #endif } TMR_Status restart_reading(struct TMR_Reader *reader) { TMR_Status ret = TMR_SUCCESS; if (NULL == reader) { return TMR_ERROR_INVALID; } //Stop continuous reading ret = TMR_stopReading(reader); if(ret != TMR_SUCCESS) { return ret; } #ifdef SINGLE_THREAD_ASYNC_READ //Receive all tags from the previous reading { TMR_TagReadData trd; 
TMR_TRD_init(&trd); while(true) { ret = TMR_hasMoreTags(reader); if (TMR_SUCCESS == ret) { TMR_getNextTag(reader, &trd); notify_read_listeners(reader, &trd); } else if(ret == TMR_ERROR_END_OF_READING) break; } } #endif //Restart reading ret = TMR_startReading(reader); return ret; } #ifdef TMR_ENABLE_UHF #ifdef TMR_ENABLE_BACKGROUND_READS /* NOTE: There is only one auth object for all the authreq listeners, so whichever listener touches it last wins. * For now (2012 Jul 20) we only anticipate having a single authreq listener, but there may be future cases which * require multiples. Revise this design if necessary. */ void notify_authreq_listeners(TMR_Reader *reader, TMR_TagReadData *trd, TMR_TagAuthentication *auth) { TMR_AuthReqListenerBlock *arlb; if (NULL == reader) { return; } /* notify tag read to listener */ pthread_mutex_lock(&reader->listenerLock); arlb = reader->authReqListeners; while (arlb) { arlb->listener(reader, trd, arlb->cookie, auth); arlb = arlb->next; } pthread_mutex_unlock(&reader->listenerLock); } #endif /* TMR_ENABLE_BACKGROUND_READS */ #endif /* TMR_ENABLE_UHF */ TMR_Status TMR_addReadExceptionListener(TMR_Reader *reader, TMR_ReadExceptionListenerBlock *b) { if (NULL == reader) { return TMR_ERROR_INVALID; } #if !defined(SINGLE_THREAD_ASYNC_READ) && defined(TMR_ENABLE_BACKGROUND_READS) if (0 != pthread_mutex_lock(&reader->listenerLock)) return TMR_ERROR_TRYAGAIN; #endif b->next = reader->readExceptionListeners; reader->readExceptionListeners = b; #if !defined(SINGLE_THREAD_ASYNC_READ) && defined(TMR_ENABLE_BACKGROUND_READS) pthread_mutex_unlock(&reader->listenerLock); #endif return TMR_SUCCESS; } #ifdef TMR_ENABLE_UHF #ifdef TMR_ENABLE_BACKGROUND_READS TMR_Status TMR_removeReadExceptionListener(TMR_Reader *reader, TMR_ReadExceptionListenerBlock *b) { TMR_ReadExceptionListenerBlock *block, **prev; if (NULL == reader) { return TMR_ERROR_INVALID; } if (0 != pthread_mutex_lock(&reader->listenerLock)) return TMR_ERROR_TRYAGAIN; prev = 
&reader->readExceptionListeners; block = reader->readExceptionListeners; while (NULL != block) { if (block == b) { *prev = block->next; break; } prev = &block->next; block = block->next; } pthread_mutex_unlock(&reader->listenerLock); if (block == NULL) { return TMR_ERROR_INVALID; } return TMR_SUCCESS; } #endif #endif /* TMR_ENABLE_UHF */ void notify_exception_listeners(TMR_Reader *reader, TMR_Status status) { TMR_ReadExceptionListenerBlock *relb; if (NULL != reader) { #if !defined(SINGLE_THREAD_ASYNC_READ) && defined(TMR_ENABLE_BACKGROUND_READS) pthread_mutex_lock(&reader->listenerLock); #endif relb = reader->readExceptionListeners; while (relb) { relb->listener(reader, status, relb->cookie); relb = relb->next; } #if !defined(SINGLE_THREAD_ASYNC_READ) && defined(TMR_ENABLE_BACKGROUND_READS) pthread_mutex_unlock(&reader->listenerLock); #endif } } #ifdef TMR_ENABLE_BACKGROUND_READS TMR_Queue_tagReads * dequeue(TMR_Reader *reader) { TMR_Queue_tagReads *tagRead = NULL; pthread_mutex_lock(&reader->queue_lock); if (NULL != reader->tagQueueHead) { /* Fetch the head always */ tagRead = reader->tagQueueHead; reader->tagQueueHead = reader->tagQueueHead->next; } reader->queue_depth --; pthread_mutex_unlock(&reader->queue_lock); return(tagRead); } void enqueue(TMR_Reader *reader, TMR_Queue_tagReads *tagRead) { if (NULL == reader) { return; } pthread_mutex_lock(&reader->queue_lock); if (NULL == reader->tagQueueHead) { /* first tag */ reader->tagQueueHead = tagRead; reader->tagQueueHead->next = NULL; reader->tagQueueTail = reader->tagQueueHead; } else { reader->tagQueueTail->next = tagRead; reader->tagQueueTail = tagRead; tagRead->next = NULL; } reader->queue_depth ++; pthread_mutex_unlock(&reader->queue_lock); } static void * parse_tag_reads(void *arg) { TMR_Reader *reader; TMR_Queue_tagReads *tagRead; reader = arg; while (1) { pthread_mutex_lock(&reader->parserLock); reader->parserRunning = false; pthread_cond_broadcast(&reader->parserCond); while (false == 
reader->parserEnabled) { pthread_cond_wait(&reader->parserCond, &reader->parserLock); } reader->parserRunning = true; pthread_mutex_unlock(&reader->parserLock); /** * Wait until queue_length is more than zero, * i.e., Queue should have atleast one tagRead to process */ sem_wait(&reader->queue_length); if (NULL != reader->tagQueueHead) { /** * At this point there is a tagEntry in the queue * dequeue it and parse it. */ tagRead = dequeue(reader); if (false == tagRead->isStatusResponse) { /* Tag Buffer stream response */ #ifdef TMR_ENABLE_SERIAL_READER if (TMR_READER_TYPE_SERIAL == reader->readerType) { /** * For serial readers, the tags results are already processed * and placed in the queue. Just notify that to the listener. */ notify_read_listeners(reader, &tagRead->trd); } #endif/* TMR_ENABLE_SERIAL_READER */ #ifdef TMR_ENABLE_LLRP_READER if (TMR_READER_TYPE_LLRP == reader->readerType) { /* Else it is LLRP message, parse it */ LLRP_tSRO_ACCESS_REPORT *pReport; LLRP_tSTagReportData *pTagReportData; LLRP_tSRFSurveyReportData * pRFSurveyReportData; pReport = (LLRP_tSRO_ACCESS_REPORT *)tagRead->tagEntry.lMsg; for(pTagReportData = pReport->listTagReportData; NULL != pTagReportData; pTagReportData = (LLRP_tSTagReportData *)pTagReportData->hdr.pNextSubParameter) { TMR_TagReadData trd; TMR_Status ret; TMR_TRD_init(&trd); ret = TMR_LLRP_parseMetadataFromMessage(reader, &trd, pTagReportData); if (TMR_SUCCESS == ret) { trd.reader = reader; notify_read_listeners(reader, &trd); } } /** * Parse RFSurveyReports if available in ROAccessReport along with tag report data **/ if (reader->u.llrpReader.featureFlags & TMMP_READER_FEATURES_FLAG_STATS_LISTENER) { for(pRFSurveyReportData = pReport->listRFSurveyReportData; NULL != pRFSurveyReportData; pRFSurveyReportData = (LLRP_tSRFSurveyReportData *)pRFSurveyReportData->hdr.pNextSubParameter) { TMR_Reader_StatsValues stats; LLRP_tSParameter *pParameter; TMR_STATS_init(&stats); pParameter = pRFSurveyReportData->listCustom; stats.valid = 
reader->u.llrpReader.statsEnable; TMR_LLRP_parseCustomStatsValues((LLRP_tSCustomStatsValue *)pParameter, &stats); notify_stats_listeners(reader, &stats); } } } #endif } else { /* A status stream response */ if (TMR_READER_TYPE_SERIAL == reader->readerType) { TMR_Reader_StatsValues stats; uint8_t offset; #ifdef TMR_ENABLE_UHF uint8_t i,j; #endif /* TMR_ENABLE_UHF */ uint16_t flags = 0; offset = tagRead->bufPointer; #ifdef TMR_ENABLE_UHF if(isMultiSelectEnabled) { offset++; } TMR_STATS_init(&stats); if (NULL != reader->statusListeners && NULL== reader->statsListeners) { /* A status stream response */ TMR_StatusListenerBlock *slb; uint8_t index = 0, j; TMR_SR_StatusReport report[TMR_SR_STATUS_MAX]; /* Get status content flags */ flags = GETU16(tagRead->tagEntry.sMsg, offset); if (0 != (flags & TMR_SR_STATUS_FREQUENCY)) { report[index].type = TMR_SR_STATUS_FREQUENCY; report[index].u.fsr.freq = (uint32_t)(GETU24(tagRead->tagEntry.sMsg, offset)); index ++; } if (0 != (flags & TMR_SR_STATUS_TEMPERATURE)) { report[index].type = TMR_SR_STATUS_TEMPERATURE; report[index].u.tsr.temp = GETU8(tagRead->tagEntry.sMsg, offset); index ++; } if (0 != (flags & TMR_SR_STATUS_ANTENNA)) { uint8_t tx, rx; report[index].type = TMR_SR_STATUS_ANTENNA; tx = GETU8(tagRead->tagEntry.sMsg, offset); rx = GETU8(tagRead->tagEntry.sMsg, offset); for (j = 0; j < reader->u.serialReader.txRxMap->len; j++) { if ((rx == reader->u.serialReader.txRxMap->list[j].rxPort) && (tx == reader->u.serialReader.txRxMap->list[j].txPort)) { report[index].u.asr.ant = reader->u.serialReader.txRxMap->list[j].antenna; break; } } index ++; } report[index].type = TMR_SR_STATUS_NONE; /* notify status response to listener */ pthread_mutex_lock(&reader->listenerLock); slb = reader->statusListeners; while (slb) { slb->listener(reader, report, slb->cookie); slb = slb->next; } pthread_mutex_unlock(&reader->listenerLock); } else if (NULL != reader->statsListeners && NULL== reader->statusListeners) #else if (NULL != 
reader->statsListeners) #endif /* TMR_ENABLE_UHF */ { /* Get stats content flags */ if ((0x80) > reader->statsFlag) { offset += 1; } else if ((0x4000) > reader->statsFlag) { offset += 2; } else { offset += 3; } #ifdef TMR_ENABLE_UHF /** * preinitialize the rf ontime and the noise floor value to zero * berfore getting the reader stats */ for (i = 0; i < stats.perAntenna.max; i++) { stats.perAntenna.list[i].antenna = 0; stats.perAntenna.list[i].rfOnTime = 0; stats.perAntenna.list[i].noiseFloor = 0; } #endif /* TMR_ENABLE_UHF */ TMR_fillReaderStats(reader, &stats, flags, tagRead->tagEntry.sMsg, offset); #ifdef TMR_ENABLE_UHF /** * iterate through the per antenna values, * If found any 0-antenna rows, copy the * later rows down to compact out the empty space. */ for (i = 0; i < reader->u.serialReader.txRxMap->len; i++) { if (!stats.perAntenna.list[i].antenna) { for (j = i + 1; j < reader->u.serialReader.txRxMap->len; j++) { if (stats.perAntenna.list[j].antenna) { stats.perAntenna.list[i].antenna = stats.perAntenna.list[j].antenna; stats.perAntenna.list[i].rfOnTime = stats.perAntenna.list[j].rfOnTime; stats.perAntenna.list[i].noiseFloor = stats.perAntenna.list[j].noiseFloor; stats.perAntenna.list[j].antenna = 0; stats.perAntenna.list[j].rfOnTime = 0; stats.perAntenna.list[j].noiseFloor = 0; stats.perAntenna.len++; break; } } } else { /* Increment the length */ stats.perAntenna.len++; } } #endif /* TMR_ENABLE_UHF */ /* store the requested flags for future use */ stats.valid = reader->statsFlag; /* notify status response to listener */ TMR_DEBUG("%s", "Calling notify_stats_listeners"); notify_stats_listeners(reader, &stats); } else { /** * Control comes here when, user added both the listeners, * We should pop up error for that **/ TMR_Status ret; ret = TMR_ERROR_UNSUPPORTED; notify_exception_listeners(reader, ret); } } #ifdef TMR_ENABLE_LLRP_READER else { /** * TODO: Handle RFSurveyReports in case of * async read **/ if ((TMR_READER_TYPE_LLRP == reader->readerType) && 
(reader->u.llrpReader.featureFlags & TMMP_READER_FEATURES_FLAG_STATS_LISTENER)) { /* Else it is LLRP message, parse it */ LLRP_tSRO_ACCESS_REPORT *pReport; LLRP_tSRFSurveyReportData * pRFSurveyReportData; pReport = (LLRP_tSRO_ACCESS_REPORT *)tagRead->tagEntry.lMsg; for(pRFSurveyReportData = pReport->listRFSurveyReportData; NULL != pRFSurveyReportData; pRFSurveyReportData = (LLRP_tSRFSurveyReportData *)pRFSurveyReportData->hdr.pNextSubParameter) { TMR_Reader_StatsValues stats; LLRP_tSParameter *pParameter; TMR_STATS_init(&stats); pParameter = pRFSurveyReportData->listCustom; stats.valid = reader->u.llrpReader.statsEnable; TMR_LLRP_parseCustomStatsValues((LLRP_tSCustomStatsValue *)pParameter, &stats); notify_stats_listeners(reader, &stats); } } } #endif } /* Free the memory */ if (TMR_READER_TYPE_SERIAL == reader->readerType) { free(tagRead->tagEntry.sMsg); } #ifdef TMR_ENABLE_LLRP_READER else { TMR_LLRP_freeMessage(tagRead->tagEntry.lMsg); } #endif free(tagRead); /* Now, increment the queue_slots as we have removed one entry */ sem_post(&reader->queue_slots); } } return NULL; } void process_async_response(TMR_Reader *reader) { TMR_Queue_tagReads *tagRead; uint16_t flags = 0; if (NULL == reader) { return; } /* Decrement Queue slots */ sem_wait(&reader->queue_slots); tagRead = (TMR_Queue_tagReads *) malloc(sizeof(TMR_Queue_tagReads)); if (TMR_READER_TYPE_SERIAL == reader->readerType) { tagRead->tagEntry.sMsg = (uint8_t *) malloc(TMR_SR_MAX_PACKET_SIZE); /* size of bufResponse */ memcpy(tagRead->tagEntry.sMsg, reader->u.serialReader.bufResponse, TMR_SR_MAX_PACKET_SIZE); tagRead->bufPointer = reader->u.serialReader.bufPointer; } #ifdef TMR_ENABLE_LLRP_READER else { tagRead->tagEntry.lMsg = reader->u.llrpReader.bufResponse[0]; reader->u.llrpReader.bufResponse[0] = NULL; } #endif tagRead->isStatusResponse = reader->isStatusResponse; /** * Process the tag results here. The stats responses will be extracted * later by the parser thread. 
*/ if (TMR_READER_TYPE_SERIAL == reader->readerType) { if (false == tagRead->isStatusResponse) { TMR_TRD_init(&tagRead->trd); #ifdef TMR_ENABLE_UHF if((isMultiSelectEnabled) || (reader->isReadAfterWrite)) { tagRead->bufPointer++; flags = GETU16AT(tagRead->tagEntry.sMsg, 9); } else #endif /* TMR_ENABLE_UHF */ { flags = GETU16AT(tagRead->tagEntry.sMsg, 8); } TMR_SR_parseMetadataFromMessage(reader, &tagRead->trd, flags, &tagRead->bufPointer, tagRead->tagEntry.sMsg); TMR_SR_postprocessReaderSpecificMetadata(&tagRead->trd, &reader->u.serialReader); tagRead->trd.reader = reader; } } /* Enqueue the tagRead into Queue */ enqueue(reader, tagRead); /* Increment queue_length */ sem_post(&reader->queue_length); if ((false == reader->isStatusResponse) && (TMR_READER_TYPE_SERIAL == reader->readerType)) { reader->u.serialReader.tagsRemainingInBuffer--; } } static void * do_background_reads(void *arg) { TMR_Status ret; TMR_Reader *reader; uint32_t onTime, offTime; int32_t sleepTime; uint64_t end, now, difftime; reader = arg; reader->trueAsyncflag = false; onTime = 0; while (1) { /* Wait for reads to be enabled */ pthread_mutex_lock(&reader->backgroundLock); reader->backgroundRunning = false; pthread_cond_broadcast(&reader->backgroundCond); while (false == reader->backgroundEnabled) { reader->trueAsyncflag = false; pthread_cond_wait(&reader->backgroundCond, &reader->backgroundLock); if (true == reader->backgroundThreadCancel) { /** * thread is no more, required, * hence, making it terminated **/ goto EXIT; } } if ((TMR_READER_TYPE_LLRP != reader->readerType) #ifdef TMR_ENABLE_LLRP_READER || (!(reader->u.llrpReader.featureFlags & TMMP_READER_FEATURES_FLAG_PERANTENNA_ONTIME)) #endif ) { TMR_paramGet(reader, TMR_PARAM_READ_ASYNCONTIME, &onTime); } if (!reader->trueAsyncflag) { reader->fetchTagReads = true; reader->tagFetchTime = 0; ret = TMR_read(reader, onTime, NULL); if (TMR_SUCCESS != ret) { if ((TMR_ERROR_TIMEOUT == ret) || (TMR_ERROR_CRC_ERROR == ret) || 
(TMR_ERROR_SYSTEM_UNKNOWN_ERROR == ret) || (TMR_ERROR_TM_ASSERT_FAILED == ret)) { if (TMR_READER_TYPE_SERIAL == reader->readerType) { reader->u.serialReader.transport.flush(&reader->u.serialReader.transport); } reader->backgroundEnabled = false; } #ifdef TMR_ENABLE_UHF /** * M5e and its variants hardware does not have a real PA protection.So, doing the read with out * antenna may cause the damage to the reader. * * it's okay to let M6e and its variants continue to operate because it has a PA protection mechanism. **/ if (((TMR_ERROR_HIGH_RETURN_LOSS == ret) || (TMR_ERROR_NO_ANTENNA == ret)) && (isNotM6eFamily(&reader->u.serialReader) && (TMR_SR_MODEL_M3E != reader->u.serialReader.versionInfo.hardware[0]))) { reader->backgroundEnabled = false; reader->readState = TMR_READ_STATE_DONE; pthread_mutex_unlock(&reader->backgroundLock); notify_exception_listeners(reader, ret); break; } #endif /* TMR_ENABLE_UHF */ notify_exception_listeners(reader, ret); if(false == reader->searchStatus) { /** * There could be something wrong in initiating a search, continue the * effort to initiate a search. But meanwhile if stopReading() * is called, it will be blocked as the search itself is not started. * sem_post on read_started will unblock it. **/ reader->readState = TMR_READ_STATE_STARTED; pthread_cond_broadcast(&reader->readCond); reader->backgroundEnabled = false; } pthread_mutex_unlock(&reader->backgroundLock); if(TMR_ERROR_CMDLEN_EXCEED_LIMIT == ret) { reader->backgroundEnabled = false; reader->readState = TMR_READ_STATE_DONE; } else { continue; } } if(reader->continuousReading) { /** * Set this flag, In case of true async reading * we have to send the command only once. 
*/ reader->trueAsyncflag = true; } /** * Set an indication that the reading is started **/ reader->readState = TMR_READ_STATE_ACTIVE; pthread_cond_broadcast(&reader->readCond); } reader->backgroundRunning = true; pthread_mutex_unlock(&reader->backgroundLock); if (true == reader->continuousReading) { /** * Streaming is enabled only in case of M6e, * read till the end of stream. */ /* Make the time stamp zero for serial reader */ while (true) { if (TMR_READER_TYPE_SERIAL == reader->readerType) { if (false == reader->u.serialReader.isBasetimeUpdated) { /* Update the base time stamp */ TMR_SR_updateBaseTimeStamp(reader); reader->u.serialReader.isBasetimeUpdated = true; } } ret = TMR_hasMoreTags(reader); if (TMR_SUCCESS == ret) { /* Got a valid message, before posting it to queue * check whether we have slots free in the queue or * not. Validate this only for Serial reader. */ if (TMR_READER_TYPE_SERIAL == reader->readerType) { int slotsFree = 0; int semret; /* Get the semaphore value */ semret = sem_getvalue(&reader->queue_slots, &slotsFree); if (0 == semret) { if (10 > slotsFree) { tmr_sleep(20); } if (0 >= slotsFree) { /* In a normal case we should not come here. * we are here means there is no place to * store the tags. May be the read listener * is not fast enough. * In this case stop the read and exit. 
*/ if (true == reader->searchStatus) { isBufferOverFlow = true; ret = TMR_ERROR_BUFFER_OVERFLOW; notify_exception_listeners(reader, ret); ret = verifySearchStatus(reader); /*isBufferOverFlow = false; pthread_mutex_lock(&reader->backgroundLock); reader->backgroundEnabled = false; reader->readState = TMR_READ_STATE_DONE; pthread_cond_broadcast(&reader->readCond); pthread_mutex_unlock(&reader->backgroundLock); reader->searchStatus = false;*/ /* Waiting till all slots are free */ while(slotsFree < TMR_MAX_QUEUE_SLOTS) { tmr_sleep(20); semret = sem_getvalue(&reader->queue_slots, &slotsFree); } reader->trueAsyncflag = false; break; } } } } /* There is place to store the response. Post it */ process_async_response(reader); } else if (TMR_ERROR_CRC_ERROR == ret) { /* Currently, just drop the corrupted packet, * inform the user about the error and move on. * * TODO: Fix the error by tracing the exact reason of failour */ notify_exception_listeners(reader, ret); } else if (TMR_ERROR_TAG_ID_BUFFER_FULL == ret) { /* In case of buffer full error, notify the exception */ notify_exception_listeners(reader, ret); /** * If stop read is already called, no need to resumbit the seach again. * Just spin in the curent loop and wait for the stop read command response. **/ if (true == reader->searchStatus) { /** * Stop read is not called. Resubmit the search immediately, without user interaction * Resetting the trueAsyncFlag will send the continuous read command again. */ ret = TMR_hasMoreTags(reader); reader->trueAsyncflag = false; reader->hasContinuousReadStarted = false; break; } } else { if ((TMR_ERROR_TIMEOUT == ret) || (TMR_ERROR_SYSTEM_UNKNOWN_ERROR == ret) || (TMR_ERROR_TM_ASSERT_FAILED == ret) || (TMR_ERROR_LLRP_READER_CONNECTION_LOST == ret)) { notify_exception_listeners(reader, ret); /** * In case of timeout error or CRC error, flush the transport buffer. * this avoids receiving of junk response. 
*/ if (TMR_READER_TYPE_SERIAL == reader->readerType) { /* Handling this fix for serial reader now */ reader->u.serialReader.transport.flush(&reader->u.serialReader.transport); } /** * Check if reading is finished. * If not, send stop command. **/ if (!reader->finishedReading) { reader->cmdStopReading(reader); } pthread_mutex_lock(&reader->backgroundLock); reader->backgroundEnabled = false; reader->readState = TMR_READ_STATE_DONE; pthread_cond_broadcast(&reader->readCond); pthread_mutex_unlock(&reader->backgroundLock); /** * Forced stop * Reset continuous reading settings, so that * the subsequent startReading() call doesn't have * any surprises. **/ reader->searchStatus = false; reset_continuous_reading(reader); } else if (TMR_ERROR_END_OF_READING == ret) { while(0 < reader->queue_depth) { /** * reader->queue_depth is greater than zero. i.e., * there are still some tags left in queue. * Give some time for the parser to parse all of them. * 5 ms sleep shouldn't cause much delay. **/ tmr_sleep(5); } /** * Since the reading is finished, disable this * thread. **/ pthread_mutex_lock(&reader->backgroundLock); reader->backgroundEnabled = false; reader->readState = TMR_READ_STATE_DONE; pthread_cond_broadcast(&reader->readCond); pthread_mutex_unlock(&reader->backgroundLock); break; } else if ((TMR_ERROR_NO_TAGS_FOUND != ret) && (TMR_ERROR_NO_TAGS != ret) && (TMR_ERROR_TAG_ID_BUFFER_AUTH_REQUEST != ret) && (TMR_ERROR_TOO_BIG != ret)) { /* Any exception other than 0x400 should be notified */ notify_exception_listeners(reader, ret); } break; } } } else { /** * On M5e and its variants, streaming is not supported * So still, retain the pseudo-async mechanism * Also, when asyncOffTime is non-zero the API should fallback to * pseudo async mode. 
*/ end = tmr_gettime(); while (TMR_SUCCESS == TMR_hasMoreTags(reader)) { TMR_TagReadData trd; TMR_ReadListenerBlock *rlb; TMR_TRD_init(&trd); ret = TMR_getNextTag(reader, &trd); if (TMR_SUCCESS != ret) { pthread_mutex_lock(&reader->backgroundLock); reader->backgroundEnabled = false; pthread_mutex_unlock(&reader->backgroundLock); notify_exception_listeners(reader, ret); break; } pthread_mutex_lock(&reader->listenerLock); rlb = reader->readListeners; while (rlb) { rlb->listener(reader, &trd, rlb->cookie); rlb = rlb->next; } pthread_mutex_unlock(&reader->listenerLock); } /* Calculate and accumulate time spent in fetching tags */ now = tmr_gettime(); difftime = now - end; reader->tagFetchTime += difftime; if(!reader->isOffTimeAdded) { TMR_paramGet(reader, TMR_PARAM_READ_ASYNCOFFTIME, &offTime); /* Credit tag-fetching overhead towards total offTime */ sleepTime = offTime - (uint32_t)reader->tagFetchTime; /* Wait for the asyncOffTime duration to pass */ if(sleepTime > 0) { tmr_sleep(sleepTime); } } else { offTime = 0; } } EXIT: if (reader->backgroundThreadCancel) { /** * oops.. 
   time to exit
   **/
      pthread_exit(NULL);
    }
  }

  return NULL;
}
#endif /* TMR_ENABLE_BACKGROUND_READS */

/**
 * Register a tag-read listener. The block is pushed onto the head of the
 * reader's singly-linked listener list.
 *
 * @param reader  the reader; NULL yields TMR_ERROR_INVALID
 * @param b       caller-owned listener block; its `next` field is overwritten
 * @return TMR_SUCCESS, TMR_ERROR_INVALID, or TMR_ERROR_TRYAGAIN if the
 *         listener lock could not be taken (multi-threaded builds only)
 */
TMR_Status
TMR_addReadListener(TMR_Reader *reader, TMR_ReadListenerBlock *b)
{
  if (NULL == reader)
  {
    return TMR_ERROR_INVALID;
  }

#if !defined(SINGLE_THREAD_ASYNC_READ) && defined(TMR_ENABLE_BACKGROUND_READS)
  /* Serialize against the background thread walking the listener list. */
  if (0 != pthread_mutex_lock(&reader->listenerLock))
    return TMR_ERROR_TRYAGAIN;
#endif

  /* Push onto the head of the list. */
  b->next = reader->readListeners;
  reader->readListeners = b;

#if !defined(SINGLE_THREAD_ASYNC_READ) && defined(TMR_ENABLE_BACKGROUND_READS)
  pthread_mutex_unlock(&reader->listenerLock);
#endif

  return TMR_SUCCESS;
}

#ifdef TMR_ENABLE_BACKGROUND_READS
/**
 * Unregister a previously added tag-read listener.
 *
 * @return TMR_SUCCESS, TMR_ERROR_INVALID if reader is NULL or the block was
 *         not found in the list, or TMR_ERROR_TRYAGAIN on lock failure.
 */
TMR_Status
TMR_removeReadListener(TMR_Reader *reader, TMR_ReadListenerBlock *b)
{
  TMR_ReadListenerBlock *block, **prev;

  if (NULL == reader)
  {
    return TMR_ERROR_INVALID;
  }

  if (0 != pthread_mutex_lock(&reader->listenerLock))
    return TMR_ERROR_TRYAGAIN;

  /* Walk the list keeping a pointer to the link to patch on removal. */
  prev = &reader->readListeners;
  block = reader->readListeners;
  while (NULL != block)
  {
    if (block == b)
    {
      *prev = block->next;
      break;
    }
    prev = &block->next;
    block = block->next;
  }
  pthread_mutex_unlock(&reader->listenerLock);

  /* block is NULL only if we fell off the end without finding b. */
  if (block == NULL)
  {
    return TMR_ERROR_INVALID;
  }

  return TMR_SUCCESS;
}

#ifdef TMR_ENABLE_UHF
/**
 * Register a tag-authentication-request listener (UHF builds only).
 * Same head-insert pattern as TMR_addReadListener.
 */
TMR_Status
TMR_addAuthReqListener(TMR_Reader *reader, TMR_AuthReqListenerBlock *b)
{
  if (NULL == reader)
  {
    return TMR_ERROR_INVALID;
  }

  if (0 != pthread_mutex_lock(&reader->listenerLock))
    return TMR_ERROR_TRYAGAIN;

  b->next = reader->authReqListeners;
  reader->authReqListeners = b;

  pthread_mutex_unlock(&reader->listenerLock);

  return TMR_SUCCESS;
}

/**
 * Unregister an authentication-request listener (UHF builds only).
 * Returns TMR_ERROR_INVALID if the block was never registered.
 */
TMR_Status
TMR_removeAuthReqListener(TMR_Reader *reader, TMR_AuthReqListenerBlock *b)
{
  TMR_AuthReqListenerBlock *block, **prev;

  if (NULL == reader)
  {
    return TMR_ERROR_INVALID;
  }

  if (0 != pthread_mutex_lock(&reader->listenerLock))
    return TMR_ERROR_TRYAGAIN;

  prev = &reader->authReqListeners;
  block = reader->authReqListeners;
  while (NULL != block)
  {
    if (block == b)
    {
      *prev = block->next;
      break;
    }
    prev = &block->next;
    block = block->next;
  }
  pthread_mutex_unlock(&reader->listenerLock);

  if (block == NULL)
  {
    return TMR_ERROR_INVALID;
  }

  return TMR_SUCCESS;
}

/**
 * Register a reader-status listener (UHF builds only).
 */
TMR_Status
TMR_addStatusListener(TMR_Reader *reader, TMR_StatusListenerBlock *b)
{
  if (NULL == reader)
  {
    return TMR_ERROR_INVALID;
  }

  if (0 != pthread_mutex_lock(&reader->listenerLock))
    return TMR_ERROR_TRYAGAIN;

  b->next = reader->statusListeners;
  reader->statusListeners = b;
  /*reader->streamStats |= b->statusFlags & TMR_SR_STATUS_CONTENT_FLAGS_ALL;*/

  pthread_mutex_unlock(&reader->listenerLock);

  return TMR_SUCCESS;
}
#endif /* TMR_ENABLE_UHF */
#endif /* TMR_ENABLE_BACKGROUND_READS */

/**
 * Register a reader-statistics listener. Lock usage is compiled in only for
 * multi-threaded background-read builds, matching TMR_addReadListener.
 */
TMR_Status
TMR_addStatsListener(TMR_Reader *reader, TMR_StatsListenerBlock *b)
{
  if (NULL == reader)
  {
    return TMR_ERROR_INVALID;
  }

#if !defined(SINGLE_THREAD_ASYNC_READ) && defined(TMR_ENABLE_BACKGROUND_READS)
  if (0 != pthread_mutex_lock(&reader->listenerLock))
    return TMR_ERROR_TRYAGAIN;
#endif

  b->next = reader->statsListeners;
  reader->statsListeners = b;
  /*reader->streamStats |= b->statusFlags & TMR_SR_STATUS_CONTENT_FLAGS_ALL; */

#if !defined(SINGLE_THREAD_ASYNC_READ) && defined(TMR_ENABLE_BACKGROUND_READS)
  pthread_mutex_unlock(&reader->listenerLock);
#endif

  return TMR_SUCCESS;
}

#ifdef TMR_ENABLE_BACKGROUND_READS
/**
 * Unregister a statistics listener. Returns TMR_ERROR_INVALID if the block
 * was not found.
 */
TMR_Status
TMR_removeStatsListener(TMR_Reader *reader, TMR_StatsListenerBlock *b)
{
  TMR_StatsListenerBlock *block, **prev;

  if (NULL == reader)
  {
    return TMR_ERROR_INVALID;
  }

  if (0 != pthread_mutex_lock(&reader->listenerLock))
    return TMR_ERROR_TRYAGAIN;

  prev = &reader->statsListeners;
  block = reader->statsListeners;
  while (NULL != block)
  {
    if (block == b)
    {
      *prev = block->next;
      break;
    }
    prev = &block->next;
    block = block->next;
  }

  /* Remove the status flags requested by this listener and reframe */
  /*reader->streamStats = TMR_SR_STATUS_CONTENT_FLAG_NONE;
  {
    TMR_StatusListenerBlock *current;

    current = reader->statusListeners;
    while (NULL != current)
    {
      reader->streamStats |= current->statusFlags;
      current = current->next;
    }
  }*/

  pthread_mutex_unlock(&reader->listenerLock);

  if (block == NULL)
  {
    return TMR_ERROR_INVALID;
  }

  return TMR_SUCCESS;
}

#ifdef TMR_ENABLE_UHF
/**
 * Unregister a reader-status listener (UHF builds only).
 */
TMR_Status
TMR_removeStatusListener(TMR_Reader *reader, TMR_StatusListenerBlock *b)
{
  TMR_StatusListenerBlock *block, **prev;

  if (NULL == reader)
  {
    return TMR_ERROR_INVALID;
  }

  if (0 != pthread_mutex_lock(&reader->listenerLock))
    return TMR_ERROR_TRYAGAIN;

  prev = &reader->statusListeners;
  block = reader->statusListeners;
  while (NULL != block)
  {
    if (block == b)
    {
      *prev = block->next;
      break;
    }
    prev = &block->next;
    block = block->next;
  }

  /* Remove the status flags requested by this listener and reframe */
  /*reader->streamStats = TMR_SR_STATUS_CONTENT_FLAG_NONE;
  {
    TMR_StatusListenerBlock *current;

    current = reader->statusListeners;
    while (NULL != current)
    {
      reader->streamStats |= current->statusFlags;
      current = current->next;
    }
  }*/

  pthread_mutex_unlock(&reader->listenerLock);

  if (block == NULL)
  {
    return TMR_ERROR_INVALID;
  }

  return TMR_SUCCESS;
}
#endif /* TMR_ENABLE_UHF */

/**
 * Shut down the background reader and parser threads and detach all
 * read/exception/stats listeners.
 *
 * Lock order here is backgroundLock -> listenerLock, then parserLock ->
 * listenerLock; NOTE(review): confirm this matches the order used by the
 * background threads themselves to rule out lock-order inversion.
 */
void
cleanup_background_threads(TMR_Reader *reader)
{
  if (NULL != reader)
  {
    pthread_mutex_lock(&reader->backgroundLock);
    pthread_mutex_lock(&reader->listenerLock);
    reader->readExceptionListeners = NULL;
    reader->statsListeners = NULL;
    if (true == reader->backgroundSetup)
    {
      /**
       * Signal for the thread exit by
       * removing all the pthread lock dependency
       **/
      reader->backgroundThreadCancel = true;
      pthread_cond_broadcast(&reader->backgroundCond);
    }
    pthread_mutex_unlock(&reader->listenerLock);
    pthread_mutex_unlock(&reader->backgroundLock);
    if (true == reader->backgroundSetup)
    {
      /**
       * Wait for the back ground thread to exit
       **/
      pthread_join(reader->backgroundReader, NULL);
    }
    pthread_mutex_lock(&reader->parserLock);
    pthread_mutex_lock(&reader->listenerLock);
    reader->readListeners = NULL;
    if (true == reader->parserSetup)
    {
      /* Parser thread is cancelled rather than joined. */
      pthread_cancel(reader->backgroundParser);
    }
    pthread_mutex_unlock(&reader->listenerLock);
    pthread_mutex_unlock(&reader->parserLock);
  }
}

/**
 * Background thread body for autonomous-read mode: loops forever receiving
 * autonomous tag reads from a serial reader and dispatching them to read or
 * stats listeners depending on reader->isStatusResponse.
 *
 * @param arg  the TMR_Reader* (pthread start-routine signature)
 * @return never returns normally; the trailing return satisfies the compiler
 */
void*
do_background_receiveAutonomousReading(void * arg)
{
  TMR_Status ret;
  TMR_TagReadData trd;
  TMR_Reader *reader;
  TMR_Reader_StatsValues stats;

  reader = arg;
  TMR_TRD_init(&trd);

  while (1)
  {
    if (false == reader->u.serialReader.isBasetimeUpdated)
    {
      /* Update the base time stamp to current host time */
      TMR_SR_updateBaseTimeStamp(reader);
      reader->u.serialReader.isBasetimeUpdated = true;
    }

    ret = TMR_SR_receiveAutonomousReading(reader, &trd, &stats);
    if (TMR_SUCCESS == ret)
    {
      if (false == reader->isStatusResponse)
      {
        /* Notify the read listener */
        notify_read_listeners(reader, &trd);
      }
      else
      {
        TMR_DEBUG("%s", "Calling notify_stats_listeners");
        notify_stats_listeners(reader, &stats);
      }
    }
  }
  return NULL;
}
#endif /* TMR_ENABLE_BACKGROUND_READS */
#!/usr/bin/env bash
# Deploy an ssh bastion host into the test namespace by fetching and
# running the upstream deploy script.
set -euxo pipefail

echo "Setting up ssh bastion host..."

# Namespace consumed by the downstream deploy.sh.
export SSH_BASTION_NAMESPACE=test-ssh-bastion

# FIX: without -f, curl pipes an HTTP error body (e.g. a 404 page) straight
# into bash and executes it. -f fails on HTTP errors (pipefail then aborts
# the script), -L follows redirects, -sS is quiet except for real errors.
curl -fsSL https://raw.githubusercontent.com/eparis/ssh-bastion/master/deploy/deploy.sh | bash -x
<gh_stars>10-100 module.exports = ({ chainWebpack, log }, options = []) => { chainWebpack((config) => { // List of expensive loaders to cache if user does not specify any const list = [ 'url-loader', 'babel-loader', 'ts-loader', 'mini-css-extract-plugin', ]; let loadersList; if (Object.keys(options).length) { loadersList = options; } else { loadersList = list; } const rules = config.toConfig().module.rules; log.info("Setting up cache-loader"); rules.forEach(rule => { // Checks if current loaders are in the list of expensive loaders const match = rule.use.some(item => loadersList.some(loader => item.loader.includes(loader))); if (match) { // eslint-disable-next-line no-underscore-dangle const ruleName = rule.__ruleNames[0]; // Relevant issue: https://github.com/webpack-contrib/cache-loader/issues/40 if (ruleName.includes("css") || ruleName.includes("scss") || ruleName.includes("less") ) { // Add cache-loader after MiniCssExtractPlugin loader entry config .module .rule(ruleName) .use('cache-loader') .loader(require.resolve('cache-loader')) // Reference the MiniCssExtractPlugin loader entry .after("MiniCssExtractPlugin.loader"); } else { // Add cache-loader as first entry config .module .rule(ruleName) .use('cache-loader') .loader(require.resolve('cache-loader')) // Reference the current loader entry .before(ruleName); } } }); }); };
using System;

namespace PrintNTimes
{
    class Program
    {
        /// <summary>
        /// Entry point: writes a fixed greeting to stdout a fixed number of times.
        /// </summary>
        static void Main(string[] args)
        {
            const string message = "Hello World!";
            const int repetitions = 3;

            int printed = 0;
            while (printed < repetitions)
            {
                Console.WriteLine(message);
                printed++;
            }
        }
    }
}
# models.py
from django.db import models


class Article(models.Model):
    """A simple article record with title, author, body text and publish time."""
    title = models.CharField(max_length=30)
    author = models.CharField(max_length=30)
    text = models.TextField()
    published_date = models.DateTimeField()


# serializers.py
from rest_framework import serializers
from .models import Article


class ArticleSerializer(serializers.ModelSerializer):
    """Serializes Article instances for the REST API."""
    class Meta:
        model = Article
        fields = ('title', 'author', 'text', 'published_date')


# views.py
from rest_framework import viewsets
from .serializers import ArticleSerializer
from .models import Article


class ArticleViewSet(viewsets.ModelViewSet):
    """Full CRUD endpoints for Article."""
    serializer_class = ArticleSerializer
    queryset = Article.objects.all()


# urls.py
from django.urls import path
from .views import ArticleViewSet

# BUG FIX: a DRF ViewSet cannot be passed to as_view() without an actions
# mapping -- the original `ArticleViewSet.as_view()` raises TypeError when
# the route is hit. Bind HTTP verbs to viewset actions explicitly (or,
# equivalently, register the viewset with a DefaultRouter).
article_list = ArticleViewSet.as_view({'get': 'list', 'post': 'create'})
article_detail = ArticleViewSet.as_view({
    'get': 'retrieve',
    'put': 'update',
    'patch': 'partial_update',
    'delete': 'destroy',
})

urlpatterns = [
    path('article/', article_list),
    path('article/<int:pk>/', article_detail),
]
import { User } from './User';

/** Discriminator for the kinds of chat messages the app handles. */
export enum MessageType {
  Question = 'question',
  Answer = 'answer',
  General = 'general',
  Welcome = 'welcome',
}

/** Fields shared by every message variant. */
interface BaseMessage {
  id: string;
  text: string;
  user: User;
  type: MessageType;
}

/** Shape used when creating a message; the id is assigned later. */
export interface MessageInput extends Omit<BaseMessage, 'id'> {
  // Present only when this input answers an existing question message.
  answeredMessageId?: string;
}

/** A stored message, including its creation timestamp. */
export interface Message extends BaseMessage {
  timestamp: number;
  // The question this message answers, if any. The Omit keeps the
  // nesting to exactly one level (an answer cannot embed another answer).
  answeredQuestion?: Omit<Message, 'answeredQuestion'>;
}
<filename>film-finder/app/src/main/java/com/abubusoft/filmfinder/service/model/AppSharedPreferences.java<gh_stars>1-10 package com.abubusoft.filmfinder.service.model; import com.abubusoft.kripton.android.annotation.BindSharedPreferences; @BindSharedPreferences(liveData = true) public class AppSharedPreferences { public String displayName="<NAME>"; public FriendListType friendList=FriendListType.ALWAYS; }
#!/bin/bash
# Integration test for blkar: encodes a dummy file at several container
# versions, misaligns the container by a random zero-byte prefix, and checks
# that decode works both with the default burst-guess position and with an
# explicit --guess-burst-from.

exit_code=0

VERSIONS=(1 2 3 17 18 19)

# NOTE(review): `dummy` must already exist at this point — its size is read
# *before* the file is regenerated below. TODO: confirm that a previous test
# step in the suite creates it.
file_size=$(ls -l dummy | awk '{ print $5 }')

# generate test data
dd if=/dev/urandom of=dummy bs=$file_size count=1 &>/dev/null

for ver in ${VERSIONS[*]}; do
  for (( i=0; i < 3; i++ )); do
    # FIX: the original if/elif ladder keyed on $ver assigned the exact same
    # values in every branch; collapsed to a single assignment. Reintroduce
    # per-version branches only if the ranges ever need to differ.
    data_shards=$((1 + RANDOM % 128))
    parity_shards=$((1 + RANDOM % 128))

    burst=0

    # check that blkar defaults to guessing from start if --guess-burst-from is not specified
    offset=$[1 + RANDOM % 100]

    container_name=decode_$data_shards\_$parity_shards\_$ver.sbx
    output_name=decode_$data_shards\_$parity_shards\_$ver

    echo -n "Encoding in version $ver, data = $data_shards, parity = $parity_shards"
    output=$(./../blkar encode --json --sbx-version $ver -f dummy $container_name \
                    --hash sha1 \
                    --rs-data $data_shards --rs-parity $parity_shards \
                    --burst $burst)
    if [[ $(echo $output | jq -r ".error") != null ]]; then
      echo " ==> Invalid JSON"
      exit_code=1
    fi
    if [[ $(echo $output | jq -r ".stats.sbxVersion") == "$ver" ]]; then
      echo -n " ==> Okay"
    else
      echo -n " ==> NOT okay"
      exit_code=1
    fi
    if [[ $(echo $output | jq -r ".stats.hash" | awk '{ print $1 }') == "SHA1" ]]; then
      echo " ==> Okay"
    else
      echo " ==> NOT okay"
      exit_code=1
    fi

    # Prepend $offset zero bytes to simulate a misaligned container.
    mv $container_name $container_name.tmp
    touch $container_name
    truncate -s $offset $container_name
    cat $container_name.tmp >> $container_name
    rm $container_name.tmp

    echo -n "Decoding container"
    output=$(./../blkar decode --json --force-misalign --from $offset $container_name - 2>&1 >$output_name)
    if [[ $(echo $output | jq -r ".error") != null ]]; then
      echo " ==> Invalid JSON"
      exit_code=1
    fi
    cmp dummy $output_name
    if [[ $? == 0 ]]; then
      echo " ==> Okay"
    else
      echo " ==> NOT okay"
      exit_code=1
    fi

    # check that blkar moves to the specified location if --guess-burst-from is specified
    offset=$[500 + RANDOM % 1000]

    echo -n "Encoding in version $ver, data = $data_shards, parity = $parity_shards"
    output=$(./../blkar encode --json --sbx-version $ver -f dummy $container_name \
                    --hash sha1 \
                    --rs-data $data_shards --rs-parity $parity_shards \
                    --burst $burst)
    if [[ $(echo $output | jq -r ".error") != null ]]; then
      echo " ==> Invalid JSON"
      exit_code=1
    fi
    if [[ $(echo $output | jq -r ".stats.sbxVersion") == "$ver" ]]; then
      echo -n " ==> Okay"
    else
      echo -n " ==> NOT okay"
      exit_code=1
    fi
    if [[ $(echo $output | jq -r ".stats.hash" | awk '{ print $1 }') == "SHA1" ]]; then
      echo " ==> Okay"
    else
      echo " ==> NOT okay"
      exit_code=1
    fi

    # Misalign again with the larger offset.
    mv $container_name $container_name.tmp
    touch $container_name
    truncate -s $offset $container_name
    cat $container_name.tmp >> $container_name
    rm $container_name.tmp

    echo -n "Decoding container"
    output=$(./../blkar decode --json --force-misalign --guess-burst-from $offset --from $offset $container_name - 2>&1 >$output_name)
    if [[ $(echo $output | jq -r ".error") != null ]]; then
      echo " ==> Invalid JSON"
      exit_code=1
    fi
    cmp dummy $output_name
    if [[ $? == 0 ]]; then
      echo " ==> Okay"
    else
      echo " ==> NOT okay"
      exit_code=1
    fi
  done
done

echo $exit_code > exit_code
import itertools
import requests
from typing import Iterator, Tuple


class WordPronPair(Tuple[str, str]):
    """Typed marker for a (word, pronunciation) pair."""
    pass


def yield_cmn_pron(request: requests.Response, config: str) -> Iterator[str]:
    """Yield each pronunciation from the response body.

    On any request/JSON/lookup failure, yields a single error sentinel
    string instead of raising.
    """
    try:
        payload = request.json()
        yield from payload.get("pronunciations", [])
    except (requests.RequestException, ValueError, KeyError):
        yield "Error: Failed to retrieve pronunciation"


def generate_word_pronunciations(
    word: str, request: requests.Response, config: str
) -> Iterator[WordPronPair]:
    """Pair `word` with each pronunciation extracted from `request`."""
    yield from zip(itertools.repeat(word), yield_cmn_pron(request, config))
-- Titles of books whose author was born between 2000 and 2010 (inclusive).
-- For an INNER JOIN, filtering in the ON clause is equivalent to WHERE.
SELECT b.title
FROM books AS b
INNER JOIN authors AS a
        ON b.author_id = a.id
       AND a.birth_year BETWEEN 2000 AND 2010;
$(function () {

  /*
   * Slideshow
   */

  // Process every element with the "slideshow" class
  $('.slideshow').each(function () {

    var $slides = $(this).find('img'), // all slides
        slideCount = $slides.length,   // number of slides
        currentIndex = 0;              // index of the current slide

    // Fade in the first slide
    $slides.eq(currentIndex).fadeIn();

    // Run showNextSlide every 7500 milliseconds
    setInterval(showNextSlide, 7500);

    // Show the next slide
    function showNextSlide () {

      // Index of the next slide
      // (wraps back to the first slide after the last one)
      var nextIndex = (currentIndex + 1) % slideCount;

      // Fade out the current slide
      $slides.eq(currentIndex).fadeOut();

      // Fade in the next slide
      $slides.eq(nextIndex).fadeIn();

      // Update the current index
      currentIndex = nextIndex;
    }
  });
});
#include <iostream>
// FIX: the code uses std::string, which is declared in <string>; the
// original included <string.h> (C string functions) instead.
#include <string>

using namespace std;

// Print `str` repeated `n` times (n <= 0 prints an empty line).
// Takes the string by const reference to avoid an unnecessary copy, and
// reserves the final size up front to avoid repeated reallocation.
void replicateString(const string& str, int n)
{
    string s;
    if (n > 0)
        s.reserve(str.size() * static_cast<size_t>(n));
    for (int i = 0; i < n; i++)
        s += str;
    cout << s << endl;
}

int main()
{
    string str = "bird";
    int n = 3;
    replicateString(str, n);
    return 0;
}