text
stringlengths
1
1.05M
#!/bin/bash udify_config="config/ud/ro/udify_finetune_ro_rrt.json" udify_original_config="../config/udify_finetune_ro_rrt.json" save_path="logs/ro_rrt" function nice_print { title=$1 printf '\n%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' '#' printf "\n%*s" $((($(tput cols))/2 - 1 - (${#title})/2 + `if [ $(( $(tput cols) % 2 )) -eq 1 ]; then echo 1; else echo 0; fi`)) | tr ' ' '#' printf " $title " printf "%*s\n" $((($(tput cols))/2 - (${#title})/2 - 1)) | tr ' ' '#' printf '\n%*s\n\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' '#' } if [ -z "$1" ]; then echo "Please specify a model path or a model name from HuggingFace's repositoy." exit 1 else model=$1 fi device="cuda" if [ -n "$2" ]; then if [[ "$2" =~ ^[0-9]+$ ]] then iterations=$2 else device="$2" iterations=1 fi else iterations=1 fi if [ -n "$3" ]; then device="$3" fi model_basename=$(basename "$model") vocab="$model" nice_print "Training Udify model on UD Romanian RRT..." cd udify if curl --output /dev/null --silent --head --fail "https://s3.amazonaws.com/models.huggingface.co/bert/$model.tar.gz"; then printf "Model '$model' exists at the URL: 'https://s3.amazonaws.com/models.huggingface.co/bert/%s.tar.gz'. No local download is required.\n" "$model" else printf "Model '$model' does not exists at the URL: 'https://s3.amazonaws.com/models.huggingface.co/bert/%s.tar.gz'. Downloading in 'pretrained_models'...\n" "$model" [ ! -d "pretrained_models" ] && mkdir "pretrained_models" cd pretrained_models if [ ! 
-e "$model_basename.tar.gz" ] then printf "\nDownloading 'bert_config.json'...\n" curl -o bert_config.json "https://s3.amazonaws.com/models.huggingface.co/bert/$model/config.json" printf "\nDownloading 'vocab.txt'...\n" curl -o vocab.txt "https://s3.amazonaws.com/models.huggingface.co/bert/$model/vocab.txt" printf "\nDownloading 'pytorch_model.bin'...\n" curl -o pytorch_model.bin "https://s3.amazonaws.com/models.huggingface.co/bert/$model/pytorch_model.bin" printf "\nCompressing the following files in '%s.tar.gz':\n" "$model_basename" tar -czvf "$model_basename.tar.gz" pytorch_model.bin bert_config.json vocab.txt rm pytorch_model.bin bert_config.json vocab.txt else printf "\nModel '%s' already exists in directory 'pretrained_models'\n\n" "$model_basename.tar.gz" fi vocab="https://s3.amazonaws.com/models.huggingface.co/bert/$model/vocab.txt" model="pretrained_models/$model_basename.tar.gz" cd .. fi [ -d "$udify_config" ] && rm "$udify_config" cp "$udify_original_config" "$udify_config" sed -i '37s\.*\ "pretrained_model": "'"$model"'",\' "$udify_config" if [[ $model == *"uncased"* ]]; then sed -i '11s/.*/ "do_lowercase": true,/' "$udify_config" fi if [ "$device" == "cpu" ] then sed -i '86i\ "cuda_device": -1,' "$udify_config" fi sed -i '23i\ "bert_vocab": "'"$vocab"'",' "$udify_config" if [[ $model == *"uncased"* ]]; then sed -i '24i\ "do_lowercase": true,' "$udify_config" fi sed -i '11i\ "pretrained_model": "'"$vocab"'",' "$udify_config" [ ! -d "../models/$model_basename" ] && mkdir -p "../models/$model_basename" [ ! -d "../outputs/$model_basename" ] && mkdir -p "../outputs/$model_basename" [ ! -d "../results/$model_basename" ] && mkdir -p "../results/$model_basename" for (( iteration=1; iteration<="$iterations"; iteration++ )) do [ -d "$save_path" ] && rm -r "$save_path" python3 train.py --config "$udify_config" --name ro_rrt --replace_vocab nice_print "Evaluating Udify model on UD Romanian RRT..." 
model_path="$(find $save_path -name model.tar.gz)" cp "$model_path" "../models/$model_basename/udify_model_$iteration.tar.gz" python3 predict.py "$model_path" ../dataset-rrt/test.conllu "../outputs/$model_basename/predict_rrt_udify_$iteration.conllu" --device -1 results_path="$(find $save_path -name test_results.json)" cp "$results_path" "../results/$model_basename/udify_test_results_$iteration.json" done cd ..
<gh_stars>0 import { Factory } from 'miragejs'; const REQS = ['^0.1.0', '^2.1.3', '0.3.7', '~5.2.12']; export default Factory.extend({ default_features: i => i % 4 === 3, features: () => [], kind: i => (i % 3 === 0 ? 'dev' : 'normal'), optional: i => i % 4 !== 3, req: i => REQS[i % REQS.length], target: null, afterCreate(self) { if (!self.crateId) { throw new Error(`Missing \`crate\` relationship on \`dependency:${self.id}\``); } if (!self.versionId) { throw new Error(`Missing \`version\` relationship on \`dependency:${self.id}\``); } }, });
function squareArrayElements(my_arr) { return my_arr.map(num => num ** 2); } const result = squareArrayElements([2, 3, 4]); console.log(result);
def find_largest_sum(arr): max_ending_here = 0 max_so_far = 0 for i in arr: max_ending_here += i max_ending_here = max(0, max_ending_here) max_so_far = max(max_so_far, max_ending_here) return max_so_far arr = [-2,1,-3,4,-1,2,1,-5,4] print(find_largest_sum(arr))
import _ from 'lodash'; import Station from '../models/station'; import { workerlog, workerError } from './logger'; /* eslint-disable no-await-in-loop */ export const scanStations = async () => { const stations = await Station.find( { is_delete: false }, { station_name: 1, created_date: 1, playlist: 1, station_id: 1, _id: 0 }, ); if (!stations) return null; const removedStations = []; // Need to loop through every station one by one, not all // eslint-disable-next-line no-restricted-syntax for (const mStation of stations) { const result = await resolveStation(mStation.toObject()); removedStations.push(result); } // Remove all null result return removedStations.filter(id => id !== null); }; async function resolveStation(station) { if (!station.playlist) { // Delete the station if this station doesn't have a playlist workerlog("station that doesn't have playlist", station.station_id); return deleteStation(station.station_id); } const today = new Date().getTime(); let timestamp; if (station.playlist.length === 0) { timestamp = station.created_date + 7 * 24 * 60 * 60 * 1000; // day hour min sec msec workerlog('station inactive for 7 days', station.station_id); } else { const latestSong = _.maxBy(station.playlist, song => song.created_date); if (!latestSong) { return deleteStation(station.station_id); } timestamp = latestSong.created_date + 30 * 24 * 60 * 60 * 1000; // day hour min sec msec workerlog('station inactive for 30 days', station.station_id); } // If the station has no song and is created 7 or 30 days ago (base on above condition) // Kill the station if (timestamp < today) { return deleteStation(station.station_id); } return null; } async function deleteStation(stationId) { workerlog('start removing station with id: ', stationId); const stations = await Station.find( { is_delete: false }, { station_id: 1, _id: 1 }, ); // Remove if there is more than 10 stations if (stations.length > 10) return Station.remove({ station_id: stationId, }) .then(() => stationId) 
.catch(() => { workerError('remove station with id: ' + stationId); }); workerError('There are less than 10 stations, stop killing ' + stationId); return null; } export default { scanStations };
<filename>app/vocab/vocab.js 'use strict'; var vocabModule = angular.module('vocabModule', [ 'ngRoute', ]); vocabModule.config([ '$routeProvider', function ($routeProvider) { $routeProvider .when('/vocab', { templateUrl: 'app/vocab/vocab.html', controller: [ '$scope', function($scope){ subHeaderTitle.value = "Vocab"; subHeaderTimer.isShow = false; loadVocabList(); $scope.vocab = {}; var vocabIndex = 0; $scope.vocab = getVocab(vocabIndex); $scope.nextWord = function() { if(vocabIndex < 123) $scope.vocab = getVocab(++vocabIndex); }; $scope.prevWord = function() { if(vocabIndex > 0) $scope.vocab = getVocab(--vocabIndex); }; }], }); } ]);
""" Generate a Python script to scrub a list of emails for invalid entries """ import re def validate_email(email): # Create the regex email_regex = re.compile(r'[\w\.-]+@[\w\.-]+\.\w+') # Check whether the email is valid is_valid = email_regex.match(email) # Return the result return is_valid def scrub_emails(emails): # Initialize a list for valid emails valid_emails = [] # Loop over the emails and validate them for email in emails: # Check if the email is valid is_valid = validate_email(email) # If valid, append to the list if is_valid: valid_emails.append(email) # Return the list of valid emails return valid_emails
from typing import Any, Type def check_exception_chain(err: Exception, object_type: Type) -> bool: if isinstance(err, object_type): return True elif err.__cause__ is not None: return check_exception_chain(err.__cause__, object_type) elif err.__context__ is not None: return check_exception_chain(err.__context__, object_type) else: return False
//Function for caltulate the normalized values function CoverNormalize(c, dec) { //Check the number of decimals if(typeof dec === 'undefined') { //Set the number of decimals as 2 var dec = 2; } //Count the number of covers var covers = c[0].length - 2; //Check the number if(covers <= 1){ return c; } //Mean coverage var mean = []; //Size for each cover var size = []; //Initialize all the vars for(var j = 0; j < covers; j++) { //Initialize the mean cover mean[j] = 0; //Initialize the size size[j] = c.length; } //Read all the values for(var i = 0; i < c.length; i++) { //Read all the covers for(var j = 0; j < covers; j++) { //Check for zero if(c[i][j + 2] == 0) { //Remove this value size[j] = size[j] - 1; } //Increment the mean mean[j] = mean[j] + c[i][j + 2]; } } //For get the min mean value var min = mean[0]/size[0]; //Calculate the mean for(var j = 0; j < covers; j++) { //Calculate the mean mean[j] = mean[j]/size[j]; //Find the min value min = (mean[j] < min)? mean[j] : min; } //Calculate the increment values for(var j = 0; j < covers; j++) { mean[j] = min/mean[j]; } //Update all the values for(var i = 0; i < c.length; i++) { //Read all the covers for this line for(var j = 0; j < covers; j++) { //Update the value c[i][j + 2] = c[i][j + 2]*mean[j]; //Round c[i][j + 2] = c[i][j + 2].toFixed(dec); } } //Return the new matrix return c; } //Exports to node module.exports = CoverNormalize;
<reponame>ZiminGrigory/trik-studio /* Copyright 2007-2015 QReal Research Group * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "ev3Kit/blocks/ev3BlocksFactory.h" #include <kitBase/blocksBase/common/enginesStopBlock.h> #include <kitBase/blocksBase/common/clearEncoderBlock.h> #include <kitBase/blocksBase/common/waitForColorBlock.h> #include <kitBase/blocksBase/common/waitForColorIntensityBlock.h> #include <kitBase/blocksBase/common/waitForEncoderBlock.h> #include <kitBase/blocksBase/common/waitForLightSensorBlock.h> #include <kitBase/blocksBase/common/waitForSonarDistanceBlock.h> #include <kitBase/blocksBase/common/waitForSoundSensorBlock.h> #include <kitBase/blocksBase/common/waitForTouchSensorBlock.h> #include <kitBase/blocksBase/common/waitForGyroscopeSensorBlock.h> #include <kitBase/blocksBase/common/waitForButtonBlock.h> #include <kitBase/robotModel/robotParts/rangeSensor.h> #include "details/beepBlock.h" #include "details/playToneBlock.h" #include "details/ledBlock.h" #include "details/ev3EnginesForwardBlock.h" #include "details/ev3EnginesBackwardBlock.h" #include "details/drawPixelBlock.h" #include "details/drawLineBlock.h" #include "details/drawRectBlock.h" #include "details/drawCircleBlock.h" using namespace ev3::blocks; using namespace details; using namespace kitBase::blocksBase::common; Ev3BlocksFactory::Ev3BlocksFactory(const QStringList &interpretedModels) : mInterpretedModels(interpretedModels) { } qReal::interpretation::Block 
*Ev3BlocksFactory::produceBlock(const qReal::Id &element) { if (elementMetatypeIs(element, "Ev3Beep")) { return new BeepBlock(mRobotModelManager->model()); } else if (elementMetatypeIs(element, "Ev3PlayTone")) { return new PlayToneBlock(mRobotModelManager->model()); } else if (elementMetatypeIs(element, "Ev3EnginesForward")) { return new details::Ev3EnginesForwardBlock(mRobotModelManager->model()); } else if (elementMetatypeIs(element, "Ev3EnginesBackward")) { return new details::Ev3EnginesBackwardBlock(mRobotModelManager->model()); } else if (elementMetatypeIs(element, "Ev3EnginesStop")) { return new EnginesStopBlock(mRobotModelManager->model()); } else if (elementMetatypeIs(element, "Ev3ClearEncoder")) { return new ClearEncoderBlock(mRobotModelManager->model()); } else if (elementMetatypeIs(element, "Ev3Led")) { return new LedBlock(mRobotModelManager->model()); } else if (elementMetatypeIs(element, "Ev3WaitForTouchSensor")) { return new WaitForTouchSensorBlock(mRobotModelManager->model()); } else if (elementMetatypeIs(element, "Ev3WaitForSonarDistance")) { return new WaitForSonarDistanceBlock(mRobotModelManager->model() , kitBase::robotModel::DeviceInfo::create< kitBase::robotModel::robotParts::RangeSensor>()); } else if (elementMetatypeIs(element, "Ev3WaitForColor")) { return new WaitForColorBlock(mRobotModelManager->model()); } else if (elementMetatypeIs(element, "Ev3WaitForEncoder")) { return new WaitForEncoderBlock(mRobotModelManager->model()); } else if (elementMetatypeIs(element, "Ev3WaitForColorIntensity")) { return new WaitForColorIntensityBlock(mRobotModelManager->model()); } else if (elementMetatypeIs(element, "Ev3WaitForLight")) { return new WaitForLightSensorBlock(mRobotModelManager->model()); } else if (elementMetatypeIs(element, "Ev3WaitForSound")) { return new WaitForSoundSensorBlock(mRobotModelManager->model()); } else if (elementMetatypeIs(element, "Ev3WaitForGyroscope")) { return new WaitForGyroscopeSensorBlock(mRobotModelManager->model()); } 
else if (elementMetatypeIs(element, "Ev3WaitForButton")) { return new WaitForButtonBlock(mRobotModelManager->model()); } else if (elementMetatypeIs(element, "Ev3DrawPixel")) { return new DrawPixelBlock(mRobotModelManager->model()); } else if (elementMetatypeIs(element, "Ev3DrawLine")) { return new DrawLineBlock(mRobotModelManager->model()); } else if (elementMetatypeIs(element, "Ev3DrawCircle")) { return new DrawCircleBlock(mRobotModelManager->model()); } else if (elementMetatypeIs(element, "Ev3DrawRect")) { return new DrawRectBlock(mRobotModelManager->model()); } return nullptr; } qReal::IdList Ev3BlocksFactory::providedBlocks() const { return { id("Ev3Beep") , id("Ev3PlayTone") , id("Ev3EnginesForward") , id("Ev3EnginesBackward") , id("Ev3EnginesStop") , id("Ev3ClearEncoder") , id("Ev3CalibrateGyroscope") , id("Ev3Led") , id("Ev3SendMail") , id("Ev3StartCompassCalibration") , id("Ev3StopCompassCalibration") , id("Ev3ReadRGB") , id("Ev3WaitForTouchSensor") , id("Ev3WaitForSonarDistance") , id("Ev3WaitForLight") , id("Ev3WaitForColor") , id("Ev3WaitForColorIntensity") , id("Ev3WaitForEncoder") , id("Ev3WaitForButton") , id("Ev3WaitForSound") , id("Ev3WaitForGyroscope") , id("Ev3WaitForReceivingMail") , id("Ev3CalibrateWhiteLL") , id("Ev3CalibrateBlackLL") , id("Ev3CalibratePIDLL") , id("Ev3SleepLL") , id("Ev3WakeUpLL") , id("Ev3ReadAvrLL") , id("Ev3ReadAllLL") , id("Ev3ReadSteeringLL") , id("Ev3DrawPixel") , id("Ev3DrawLine") , id("Ev3DrawCircle") , id("Ev3DrawRect") }; } qReal::IdList Ev3BlocksFactory::blocksToDisable() const { qReal::IdList result; if (mRobotModelManager->model().name().contains("TwoD")) { result << id("Ev3WaitForSound") << id("Ev3SendMail") << id("Ev3WaitForReceivingMail") << id("Ev3WaitForGyroscope") << id("Ev3CalibrateGyroscope") << id("Ev3StartCompassCalibration") << id("Ev3StopCompassCalibration") << id("Ev3ReadRGB") << id("Ev3CalibrateWhiteLL") << id("Ev3CalibrateBlackLL") << id("Ev3CalibratePIDLL") << id("Ev3SleepLL") << id("Ev3WakeUpLL") 
<< id("Ev3ReadAvrLL") << id("Ev3ReadAllLL") << id("Ev3ReadSteeringLL") << id("Ev3WaitForGyroscope") << id("Ev3WaitForReceivingMail") ; } else { if (!mInterpretedModels.contains(mRobotModelManager->model().robotId())) { result << id("Join") << id("SendMessageThreads") << id("ReceiveMessageThreads") << id("KillThread"); } } return result; }
#!/usr/bin/env bash function print_usage { echo '' echo 'CoreCLR test runner script.' echo '' echo 'Typical command line:' echo '' echo 'src/tests/run.sh <arch> <configurations>' echo '' echo 'Optional arguments:' echo ' --testRootDir=<path> : Root directory of the test build (e.g. runtime/artifacts/tests/Windows_NT.x64.Debug).' echo ' --testNativeBinDir=<path> : Directory of the native CoreCLR test build (e.g. runtime/artifacts/obj/Linux.x64.Debug/tests).' echo ' --coreOverlayDir=<path> : Directory containing core binaries and test dependencies.' echo ' --coreClrBinDir=<path> : Directory of the CoreCLR build (e.g. runtime/artifacts/bin/coreclr/Linux.x64.Debug).' echo ' --build-overlay-only : Build coreoverlay only, and skip running tests.' echo ' --disableEventLogging : Disable the events logged by both VM and Managed Code' echo ' --sequential : Run tests sequentially (default is to run in parallel).' echo ' -v, --verbose : Show output from each test.' echo ' -h|--help : Show usage information.' 
echo ' --useServerGC : Enable server GC for this test run' echo ' --test-env : Script to set environment variables for tests' echo ' --crossgen : Precompiles the framework managed assemblies' echo ' --runcrossgentests : Runs the ready to run tests' echo ' --runcrossgen2tests : Runs the ready to run tests compiled with Crossgen2' echo ' --jitstress=<n> : Runs the tests with COMPlus_JitStress=n' echo ' --jitstressregs=<n> : Runs the tests with COMPlus_JitStressRegs=n' echo ' --jitminopts : Runs the tests with COMPlus_JITMinOpts=1' echo ' --jitforcerelocs : Runs the tests with COMPlus_ForceRelocs=1' echo ' --jitdisasm : Runs jit-dasm on the tests' echo ' --gcstresslevel=<n> : Runs the tests with COMPlus_GCStress=n' echo ' 0: None 1: GC on all allocs and '"'easy'"' places' echo ' 2: GC on transitions to preemptive GC 4: GC on every allowable JITed instr' echo ' 8: GC on every allowable NGEN instr 16: GC only on a unique stack trace' echo ' --gcname=<n> : Runs the tests with COMPlus_GCName=n' echo ' --long-gc : Runs the long GC tests' echo ' --ilasmroundtrip : Runs ilasm round trip on the tests' echo ' --gcsimulator : Runs the GCSimulator tests' echo ' --tieredcompilation : Runs the tests with COMPlus_TieredCompilation=1' echo ' --link <ILlink> : Runs the tests after linking via ILlink' echo ' --xunitOutputPath=<path> : Create xUnit XML report at the specifed path (default: <test root>/coreclrtests.xml)' echo ' --printLastResultsOnly : Print the results of the last run' echo ' --runincontext : Run each tests in an unloadable AssemblyLoadContext' } function set_up_core_dump_generation { # We will only enable dump generation here if we're on Mac or Linux if [[ ! ( "$(uname -s)" == "Darwin" || "$(uname -s)" == "Linux" ) ]]; then return fi # We won't enable dump generation on OS X/macOS if the machine hasn't been # configured with the kern.corefile pattern we expect. 
if [[ ( "$(uname -s)" == "Darwin" && "$(sysctl -n kern.corefile)" != "core.%P" ) ]]; then echo "WARNING: Core dump generation not being enabled due to unexpected kern.corefile value." return fi # Allow dump generation ulimit -c unlimited if [ "$(uname -s)" == "Linux" ]; then if [ -e /proc/self/coredump_filter ]; then # Include memory in private and shared file-backed mappings in the dump. # This ensures that we can see disassembly from our shared libraries when # inspecting the contents of the dump. See 'man core' for details. echo 0x3F > /proc/self/coredump_filter fi fi } function check_cpu_architecture { local CPUName=$(uname -m) local __arch= if [[ "$(uname -s)" == "SunOS" ]]; then CPUName=$(isainfo -n) fi case $CPUName in i686) __arch=x86 ;; amd64|x86_64) __arch=x64 ;; armv7l) __arch=arm ;; aarch64) __arch=arm64 ;; *) echo "Unknown CPU $CPUName detected, configuring as if for x64" __arch=x64 ;; esac echo "$__arch" } ################################################################################ # Handle Arguments ################################################################################ ARCH=$(check_cpu_architecture) echo "Running on CPU- $ARCH" # Exit code constants readonly EXIT_CODE_SUCCESS=0 # Script ran normally. readonly EXIT_CODE_EXCEPTION=1 # Script exited because something exceptional happened (e.g. bad arguments, Ctrl-C interrupt). readonly EXIT_CODE_TEST_FAILURE=2 # Script completed successfully, but one or more tests failed. 
# Argument variables buildArch=$ARCH buildConfiguration="Debug" testRootDir= testNativeBinDir= coreOverlayDir= coreClrBinDir= mscorlibDir= coreClrObjs= coverageOutputDir= testEnv= playlistFile= showTime= noLFConversion= gcsimulator= longgc= limitedCoreDumps= illinker= ((disableEventLogging = 0)) ((serverGC = 0)) # Handle arguments verbose=0 doCrossgen=0 jitdisasm=0 ilasmroundtrip= printLastResultsOnly= runSequential=0 runincontext=0 for i in "$@" do case $i in -h|--help) print_usage exit $EXIT_CODE_SUCCESS ;; -v|--verbose) verbose=1 ;; x64) buildArch="x64" ;; x86) buildArch="x86" ;; arm) buildArch="arm" ;; arm64) buildArch="arm64" ;; wasm) buildArch="wasm" ;; debug|Debug) buildConfiguration="Debug" ;; checked|Checked) buildConfiguration="Checked" ;; release|Release) buildConfiguration="Release" ;; --printLastResultsOnly) printLastResultsOnly=1 ;; --crossgen) doCrossgen=1 ;; --jitstress=*) export COMPlus_JitStress=${i#*=} ;; --jitstressregs=*) export COMPlus_JitStressRegs=${i#*=} ;; --jitminopts) export COMPlus_JITMinOpts=1 ;; --copyNativeTestBin) export copyNativeTestBin=1 ;; --jitforcerelocs) export COMPlus_ForceRelocs=1 ;; --link=*) export ILLINK=${i#*=} export DoLink=true ;; --tieredcompilation) export COMPlus_TieredCompilation=1 ;; --jitdisasm) jitdisasm=1 ;; --ilasmroundtrip) ((ilasmroundtrip = 1)) ;; --testRootDir=*) testRootDir=${i#*=} ;; --testNativeBinDir=*) testNativeBinDir=${i#*=} ;; --coreOverlayDir=*) coreOverlayDir=${i#*=} ;; --coreClrBinDir=*) coreClrBinDir=${i#*=} ;; --mscorlibDir=*) mscorlibDir=${i#*=} ;; --testDir=*) testDirectories[${#testDirectories[@]}]=${i#*=} ;; --testDirFile=*) set_test_directories "${i#*=}" ;; --runFailingTestsOnly) ((runFailingTestsOnly = 1)) ;; --disableEventLogging) ((disableEventLogging = 1)) ;; --runcrossgentests) export RunCrossGen=1 ;; --runcrossgen2tests) export RunCrossGen2=1 ;; --sequential) runSequential=1 ;; --useServerGC) ((serverGC = 1)) ;; --long-gc) ((longgc = 1)) ;; --gcsimulator) ((gcsimulator = 1)) ;; 
--playlist=*) playlistFile=${i#*=} ;; --coreclr-coverage) CoreClrCoverage=ON ;; --coreclr-objs=*) coreClrObjs=${i#*=} ;; --coverage-output-dir=*) coverageOutputDir=${i#*=} ;; --test-env=*) testEnv=${i#*=} ;; --gcstresslevel=*) export COMPlus_GCStress=${i#*=} ;; --gcname=*) export COMPlus_GCName=${i#*=} ;; --show-time) showTime=ON ;; --no-lf-conversion) noLFConversion=ON ;; --limitedDumpGeneration) limitedCoreDumps=ON ;; --xunitOutputPath=*) xunitOutputPath=${i#*=} ;; --runincontext) runincontext=1 ;; *) echo "Unknown switch: $i" print_usage exit $EXIT_CODE_SUCCESS ;; esac done ################################################################################ # Runtests ################################################################################ if ((disableEventLogging == 0)); then export COMPlus_EnableEventLog=1 fi export COMPlus_gcServer="$serverGC" ################################################################################ # Runtest.py ################################################################################ runtestPyArguments=("-arch" "${buildArch}" "-build_type" "${buildConfiguration}") scriptPath="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )" repoRootDir=$scriptPath/../.. if [ -z "$testRootDir" ]; then echo "testRootDir and other existing arguments is no longer required. If the " echo "default location is incorrect or does not exist, please use " echo "--testRootDir to explicitly override the defaults." echo "" fi echo "Build Architecture : ${buildArch}" echo "Build Configuration : ${buildConfiguration}" if [ $buildArch = "wasm" ]; then runtestPyArguments+=("-os" "Browser") fi if [ ! -z "$testRootDir" ]; then runtestPyArguments+=("-test_location" "$testRootDir") echo "Test Location : ${testRootDir}" fi if [ ! -z "$coreClrBinDir" ]; then runtestPyArguments+=("-product_location" "$coreClrBinDir") echo "Product Location : ${coreClrBinDir}" fi if [ ! 
-z "$testNativeBinDir" ]; then runtestPyArguments+=("-test_native_bin_location" "$testNativeBinDir") echo "Test Native Bin Location : ${testNativeBinDir}" fi if [ ! -z "${testEnv}" ]; then runtestPyArguments+=("-test_env" "${testEnv}") echo "Test Env : ${testEnv}" fi echo "" if [ ! -z "$longgc" ]; then echo "Running Long GC tests" runtestPyArguments+=("--long_gc") fi if [ ! -z "$gcsimulator" ]; then echo "Running GC simulator tests" runtestPyArguments+=("--gcsimulator") fi if [[ ! "$jitdisasm" -eq 0 ]]; then echo "Running jit disasm" runtestPyArguments+=("--jitdisasm") fi if [ ! -z "$ilasmroundtrip" ]; then echo "Running Ilasm round trip" runtestPyArguments+=("--ilasmroundtrip") fi if (($verbose!=0)); then runtestPyArguments+=("--verbose") fi if [ ! "$runSequential" -eq 0 ]; then echo "Run tests sequentially." runtestPyArguments+=("--sequential") fi if [ ! -z "$printLastResultsOnly" ]; then runtestPyArguments+=("--analyze_results_only") fi if [ ! -z "$RunCrossGen" ]; then runtestPyArguments+=("--run_crossgen_tests") fi if [ ! -z "$RunCrossGen2" ]; then runtestPyArguments+=("--run_crossgen2_tests") fi if (($doCrossgen!=0)); then runtestPyArguments+=("--precompile_core_root") fi if [ "$limitedCoreDumps" == "ON" ]; then runtestPyArguments+=("--limited_core_dumps") fi if [[ ! "$runincontext" -eq 0 ]]; then echo "Running in an unloadable AssemblyLoadContext" runtestPyArguments+=("--run_in_context") fi # Default to python3 if it is installed __Python=python if command -v python3 &>/dev/null; then __Python=python3 fi # Run the tests using cross platform runtest.py echo "python $repoRootDir/src/coreclr/tests/runtest.py ${runtestPyArguments[@]}" $__Python "$repoRootDir/src/coreclr/tests/runtest.py" "${runtestPyArguments[@]}" exit "$?"
#!/bin/bash set -eou pipefail SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" # Build go code cd ${SCRIPT_DIR}/../ go build -o functions/azure-playground-generator chmod +x functions/azure-playground-generator echo "Build complete!"
#!/bin/bash set -euxo pipefail cd "$( dirname "${BASH_SOURCE[0]}" )/../.." make .bin/hydra make .bin/yq export PATH=.bin:$PATH export KRATOS_PUBLIC_URL=http://127.0.0.1:4433/ export KRATOS_BROWSER_URL=http://127.0.0.1:4433/ export KRATOS_ADMIN_URL=http://127.0.0.1:4434/ export KRATOS_UI_URL=http://127.0.0.1:4456/ export LOG_LEAK_SENSITIVE_VALUES=true export DEV_DISABLE_API_FLOW_ENFORCEMENT=true if [ -z ${TEST_DATABASE_POSTGRESQL+x} ]; then docker rm -f kratos_test_database_mysql kratos_test_database_postgres kratos_test_database_cockroach || true docker run --name kratos_test_database_mysql -p 3444:3306 -e MYSQL_ROOT_PASSWORD=secret -d mysql:5.7 docker run --name kratos_test_database_postgres -p 3445:5432 -e POSTGRES_PASSWORD=secret -e POSTGRES_DB=postgres -d postgres:9.6 postgres -c log_statement=all docker run --name kratos_test_database_cockroach -p 3446:26257 -d cockroachdb/cockroach:v20.2.4 start-single-node --insecure export TEST_DATABASE_MYSQL="mysql://root:secret@(127.0.0.1:3444)/mysql?parseTime=true&multiStatements=true" export TEST_DATABASE_POSTGRESQL="postgres://postgres:secret@127.0.0.1:3445/postgres?sslmode=disable" export TEST_DATABASE_COCKROACHDB="cockroach://root@127.0.0.1:3446/defaultdb?sslmode=disable" fi base=$(pwd) if [ -z ${NODE_UI_PATH+x} ]; then node_ui_dir="$(mktemp -d -t ci-XXXXXXXXXX)/kratos-selfservice-ui-node" git clone https://github.com/ory/kratos-selfservice-ui-node.git "$node_ui_dir" (cd "$node_ui_dir" && npm i && npm run build) else node_ui_dir="${NODE_UI_PATH}" fi if [ -z ${RN_UI_PATH+x} ]; then rn_ui_dir="$(mktemp -d -t ci-XXXXXXXXXX)/kratos-selfservice-ui-react-native" git clone https://github.com/ory/kratos-selfservice-ui-react-native.git "$rn_ui_dir" (cd "$rn_ui_dir" && npm i) else rn_ui_dir="${RN_UI_PATH}" fi (cd test/e2e/proxy; npm i) kratos=./test/e2e/.bin/kratos go build -tags sqlite -o $kratos . 
if [ -z ${CI+x} ]; then docker rm mailslurper hydra hydra-ui -f || true docker run --name mailslurper -p 4436:4436 -p 4437:4437 -p 1025:1025 oryd/mailslurper:latest-smtps > "${base}/test/e2e/mailslurper.e2e.log" 2>&1 & fi dev=no for i in "$@" do case $i in --dev) dev=yes shift # past argument=value ;; esac done run() { killall kratos || true killall node || true killall hydra || true killall hydra-login-consent || true # Check if any ports that we need are open already ! nc -zv 127.0.0.1 4434 ! nc -zv 127.0.0.1 4433 ! nc -zv 127.0.0.1 4446 ! nc -zv 127.0.0.1 4455 ! nc -zv 127.0.0.1 4456 ! nc -zv 127.0.0.1 4457 (cd "$rn_ui_dir"; WEB_PORT=4457 KRATOS_URL=http://127.0.0.1:4433 npm run web -- --non-interactive \ > "${base}/test/e2e/rn-profile-app.e2e.log" 2>&1 &) DSN=memory URLS_SELF_ISSUER=http://127.0.0.1:4444 \ LOG_LEVEL=trace \ URLS_LOGIN=http://127.0.0.1:4446/login \ URLS_CONSENT=http://127.0.0.1:4446/consent \ hydra serve all --dangerous-force-http > "${base}/test/e2e/hydra.e2e.log" 2>&1 & npm run wait-on -- -l -t 300000 http-get://127.0.0.1:4445/health/alive hydra clients delete \ --endpoint http://127.0.0.1:4445 \ kratos-client google-client github-client || true hydra clients create \ --endpoint http://127.0.0.1:4445 \ --id kratos-client \ --secret kratos-secret \ --grant-types authorization_code,refresh_token \ --response-types code,id_token \ --scope openid,offline \ --callbacks http://127.0.0.1:4455/self-service/methods/oidc/callback/hydra hydra clients create \ --endpoint http://127.0.0.1:4445 \ --id google-client \ --secret kratos-secret \ --grant-types authorization_code,refresh_token \ --response-types code,id_token \ --scope openid,offline \ --callbacks http://127.0.0.1:4455/self-service/methods/oidc/callback/google hydra clients create \ --endpoint http://127.0.0.1:4445 \ --id github-client \ --secret kratos-secret \ --grant-types authorization_code,refresh_token \ --response-types code,id_token \ --scope openid,offline \ --callbacks 
http://127.0.0.1:4455/self-service/methods/oidc/callback/github if [ -z ${NODE_UI_PATH+x} ]; then (cd "$node_ui_dir"; PORT=4456 SECURITY_MODE=cookie npm run serve \ > "${base}/test/e2e/secureapp.e2e.log" 2>&1 &) else (cd "$node_ui_dir"; PORT=4456 SECURITY_MODE=cookie npm run start \ > "${base}/test/e2e/secureapp.e2e.log" 2>&1 &) fi (cd test/e2e/proxy; PORT=4455 npm run start \ > "${base}/test/e2e/proxy.e2e.log" 2>&1 &) (cd test/e2e/hydra-login-consent; \ go build . && \ PORT=4446 HYDRA_ADMIN_URL=http://127.0.0.1:4445 ./hydra-login-consent > "${base}/test/e2e/hydra-ui.e2e.log" 2>&1 &) export DSN=${1} if [ "$DSN" != "memory" ]; then $kratos migrate sql -e --yes fi for profile in email mobile oidc recovery verification; do yq merge test/e2e/profiles/kratos.base.yml "test/e2e/profiles/${profile}/.kratos.yml" > test/e2e/kratos.${profile}.yml cp test/e2e/kratos.email.yml test/e2e/kratos.generated.yml done ($kratos serve --watch-courier --dev -c test/e2e/kratos.generated.yml > "${base}/test/e2e/kratos.e2e.log" 2>&1 &) npm run wait-on -- -l -t 300000 http-get://127.0.0.1:4434/health/ready \ http-get://127.0.0.1:4455/health \ http-get://127.0.0.1:4445/health/ready \ http-get://127.0.0.1:4446/ \ http-get://127.0.0.1:4456/health \ http-get://127.0.0.1:4457/ \ http-get://127.0.0.1:4437/mail if [[ $dev = "yes" ]]; then npm run test:watch -- --config integrationFolder="test/e2e/cypress/integration" else if [ -z ${CYPRESS_RECORD_KEY+x} ]; then npm run test -- --config integrationFolder="test/e2e/cypress/integration" else npm run test -- --record --config integrationFolder="test/e2e/cypress/integration" fi fi } usage() { echo $"This script runs the e2e tests. To run the tests just pick a database name: $0 <database> Supported databases are 'sqlite', 'mysql', 'postgres', 'cockroach': $0 sqlite $0 mysql $0 postgres $0 cockroach ... If you are using a database other than SQLite, you need to set an environment variable that points to it: export TEST_DATABASE_MYSQL=... 
export TEST_DATABASE_POSTGRESQL=... export TEST_DATABASE_COCKROACHDB=... $0 <database> The Makefile has a helper for that which uses Docker to start the databases: make test-resetdb source script/test-envs.sh $0 <database> To run e2e tests in dev mode (useful for writing them), run: $0 --dev <database> If you are making changes to the kratos-selfservice-ui-node project as well, point the 'NODE_UI_PATH' environment variable to the path where the kratos-selfservice-ui-node project is checked out: export NODE_UI_PATH=$HOME/workspace/kratos-selfservice-ui-node export RN_UI_PATH=$HOME/workspace/kratos-selfservice-ui-react-native $0 ..." } export TEST_DATABASE_SQLITE="sqlite:///$(mktemp -d -t ci-XXXXXXXXXX)/db.sqlite?_fk=true" export TEST_DATABASE_MEMOry="memory" case "$1" in sqlite) db="${TEST_DATABASE_SQLITE}" ;; mysql) db="${TEST_DATABASE_MYSQL}" ;; postgres) db="${TEST_DATABASE_POSTGRESQL}" ;; cockroach) db="${TEST_DATABASE_COCKROACHDB}" ;; *) usage exit 1 esac run "${db}"
require 'rhc/commands/base'

module RHC::Commands
  # Commands for exporting ("save") and importing ("restore") a snapshot
  # archive of an OpenShift application.
  class Snapshot < Base
    summary "Save the current state of your application locally"
    syntax "<action>"
    description <<-DESC
      Snapshots allow you to export the current state of your OpenShift
      application into an archive on your local system, and then to restore
      it later.

      The snapshot archive contains the Git repository, dumps of any attached
      databases, and any other information that the cartridges decide to
      export.

      WARNING: Both 'save' and 'restore' will stop the application and then
      restart after the operation completes.
      DESC
    alias_action :"app snapshot", :root_command => true
    default_action :help

    summary "Save a snapshot of your app to disk"
    syntax "<application> [--filepath FILE] [--ssh path_to_ssh_executable]"
    takes_application :argument => true
    option ["-f", "--filepath FILE"], "Local path to save tarball (default: ./$APPNAME.tar.gz)"
    option ["--deployment"], "Snapshot as a deployable file which can be deployed with 'rhc deploy'"
    option ["--ssh PATH"], "Full path to your SSH executable with additional options"
    alias_action :"app snapshot save", :root_command => true, :deprecated => true
    # Save a snapshot of the application to a local tarball. Returns 0 on
    # success; raises when --deployment is requested but unsupported.
    def save(app)
      rest_app = find_app

      # Deployment-style snapshots require server-side DEPLOY support.
      raise RHC::DeploymentsNotSupportedException.new if options.deployment && !rest_app.supports?("DEPLOY")

      filename = options.filepath ? options.filepath : "#{rest_app.name}.tar.gz"
      save_snapshot(rest_app, filename, options.deployment, options.ssh)
      0
    end

    summary "Restores a previously saved snapshot"
    syntax "<application> [--filepath FILE] [--ssh path_to_ssh_executable]"
    takes_application :argument => true
    option ["-f", "--filepath FILE"], "Local path to restore tarball"
    option ["--ssh PATH"], "Full path to your SSH executable with additional options"
    alias_action :"app snapshot restore", :root_command => true, :deprecated => true
    # Restore the application from a local tarball. Raises
    # RHC::SnapshotRestoreException when the archive does not exist.
    def restore(app)
      rest_app = find_app
      filename = options.filepath ? options.filepath : "#{rest_app.name}.tar.gz"

      # File.exist? replaces the deprecated File.exists? alias.
      if File.exist? filename
        restore_snapshot(rest_app, filename, options.ssh)
      else
        # FIX: the message previously rendered a broken placeholder instead of
        # interpolating the missing archive's name.
        raise RHC::SnapshotRestoreException.new "Archive not found: #{filename}"
      end
      0
    end

    protected
      include RHC::SSHHelpers
  end
end
<filename>test/test_one_worker.js //Testing to see if I can get data from 1 worker... var path = require('path'), net = require('net'), assert = require('assert'); var fugue = require(path.join(__dirname, '..', 'lib', 'fugue.js')); var expected_data = 'here is some data'; server = net.createServer(function(conn) { conn.end(expected_data, 'ascii'); }); var port = 4001; exports.setup = function() { fugue.start(server, port, null, 1, {verbose: false} ); } exports.run = function(next) { var client = net.createConnection(port); var got_some_data = false; client.on('data', function(what) { got_some_data = true; assert.equal(what.toString('ascii'), expected_data); process.exit(); }); setTimeout(function() { assert.ok(got_some_data, "Couldn't get data from server"); if(next) next(); }, 3000); } exports.teardown = function() { fugue.stop(); }
import os
import os.path as op
import shutil


def merge(src: str, dst: str) -> None:
    """Recursively copy the contents of ``src`` into ``dst``, merging trees.

    Regular files are copied into ``dst`` (overwriting same-named files);
    subdirectories are created in ``dst`` when missing and merged
    recursively. Hidden directories (names starting with ``.``) are skipped.

    :param src: existing source directory.
    :param dst: existing destination directory.
    """
    for name in os.listdir(src):
        src_path = op.join(src, name)
        if op.isfile(src_path):
            shutil.copy(src_path, dst)
        elif op.isdir(src_path):
            if name.startswith('.'):
                continue
            dst_path = op.join(dst, name)
            # exist_ok replaces the original `name not in os.listdir(dst)`
            # membership scan, which was O(n) per entry and raced between the
            # check and the makedirs call.
            os.makedirs(dst_path, exist_ok=True)
            merge(src_path, dst_path)
import React from "react" import { useStaticQuery, graphql } from "gatsby" import Img from "gatsby-image" const Image = (props) => { const data = useStaticQuery(graphql` query { allFile { edges { node { base childImageSharp { fluid(maxWidth: 600) { ...GatsbyImageSharpFluid } } } } } } `) return ( <div> {data.allFile.edges.map(image => ( <Img fluid={image.node.childImageSharp.fluid} alt={image.node.base.split(".")[0]} // only use section of the file extension with the filename /> ))} </div> ) } export default Image
import React from 'react'; import { View,TouchableOpacity } from 'react-native' import Icon from 'react-native-vector-icons/FontAwesome'; export const BackButton = ({onPress}) => { return ( <TouchableOpacity style={{ alignItems:'center', justifyContent:'center', width:50, height:50, backgroundColor:'hsla(187, 100%, 52%, 0.5)', borderRadius:50, position: 'absolute', top:'4%', left:'4%' }} onPress={onPress} > <Icon name="arrow-left" size={15} color="#fff" /> </TouchableOpacity> ) }
The DataFrame contains the following data types: the 'Name' column is a string, the 'Age' column is an integer, and the 'Salary' column is a floating-point number.
<filename>c2d-core/src/test/java/info/u250/c2d/engine/C2dCamera.java package info.u250.c2d.engine; import com.badlogic.gdx.graphics.OrthographicCamera; /** * @author xjjdog */ public class C2dCamera extends OrthographicCamera { private float rotate; public float getRotate() { return rotate; } public void setRotate(float rotate) { this.rotate = rotate; } public C2dCamera(float width, float height) { super(width, height); this.position.set(width / 2, height / 2, 0); } public void resize(float width, float height) { this.viewportWidth = width; this.viewportHeight = height; this.position.set(width / 2, height / 2, 0); this.update(); } }
<gh_stars>0 const redis = require('redis') const redisClient = redis.createClient() const TOKEN_EXPIRE_TIME = 3600 * 10 function saveToRedis(token) { redisClient.set(token, 1) redisClient.expire(token, TOKEN_EXPIRE_TIME) } function getToken(headers) { if (headers && headers.authorization) { const auth = headers.authorization if (auth) { return auth } } return null } async function veryfiToken(ctx, next) { const token = getToken(ctx.headers) if (token) { redisClient.get(token, (err, reply) => { if (err) { console.log(err); ctx.status = 404 } if (!reply) { ctx.status = 404 } }) } await next() } function expireToken(headers) { const token = getToken(headers) if (token) { return new Promise((resolve, reject) => { redisClient.del(token, (err) => { if (err) { reject(404) } else { resolve(200) } }) }) } return null } module.exports = { saveToRedis, getToken, veryfiToken, expireToken, }
// Skeleton domain classes for a hotel-management exercise; bodies are stubs.

public class RoomInfo {
    // Define the properties of room information
}

public class ResultMessage {
    // Define the properties of the result message
    // NOTE(review): isSuccess() is called below but not declared here.
}

public class Room {
    // NOTE(review): this stub has no return statement, so it will not
    // compile until the update logic is implemented.
    public ResultMessage updateRoomInfo(List<RoomInfo> roomInfoList) {
        // Implement the logic to update room information
        // Return a ResultMessage indicating the success or failure of the update operation
    }
}

// Facade that delegates room updates to the injected Room component.
public class HotelManagementSystem {
    private Room room;

    public HotelManagementSystem(Room room) {
        this.room = room;
    }

    // Forwards the update to Room and inspects the outcome before
    // propagating it to the caller.
    public ResultMessage updateRoomInfo(List<RoomInfo> roomInfoList) {
        // Delegate the update operation to the "room" component
        ResultMessage resultMessage = room.updateRoomInfo(roomInfoList);

        // Handle the "ResultMessage" returned by the "room" component
        if (resultMessage.isSuccess()) {
            // Perform any additional actions upon successful update
        } else {
            // Handle the failure scenario
        }

        // Return the "ResultMessage" from this function
        return resultMessage;
    }
}
def resolve_json_id(json_id, default_value):
    """Resolve a well-formed JSON identifier to its value.

    A well-formed identifier is one or more alphanumeric segments joined by
    single hyphens, with no leading or trailing hyphen (e.g. ``valid-id-1``).
    Malformed or unknown identifiers yield ``default_value``.
    """
    json_data = {
        "valid-id-1": "Value1",
        "valid-id-2": "Value2",
        "valid-id-3": "Value3"
    }

    # FIX: the original test `json_id.isalnum() and '-' in json_id` could
    # never be true (isalnum() is False for any string containing '-'), so
    # every lookup fell through to the default. Validate segment-by-segment:
    # empty segments (from leading/trailing/double hyphens) fail isalnum().
    segments = json_id.split('-')
    if len(segments) > 1 and all(seg.isalnum() for seg in segments):
        return json_data.get(json_id, default_value)
    return default_value


def test_resolve_json_id():
    assert resolve_json_id("valid-id-1", "Default") == "Value1"  # Valid identifier
    assert resolve_json_id("invalid_id", "Default") == "Default"  # Invalid identifier
    assert resolve_json_id("valid-id-3", "Default") == "Value3"  # Valid identifier
    assert resolve_json_id("this-is-invalid", "Default") == "Default"  # Well-formed but unknown
package mezz.jei.config;

import net.minecraftforge.fml.common.eventhandler.Event;

/**
 * Forge event fired when the overlay is toggled; carries the new state.
 * Listeners query {@link #isOverlayEnabled()} for whether the overlay is
 * now enabled.
 */
public class OverlayToggleEvent extends Event {
	/** The overlay state after the toggle. */
	private final boolean overlayEnabled;

	public OverlayToggleEvent(boolean overlayEnabled) {
		this.overlayEnabled = overlayEnabled;
	}

	public boolean isOverlayEnabled() {
		return overlayEnabled;
	}
}
from enum import Enum


class FontHeight(Enum):
    """Named font sizes, each with an associated pixel height."""

    SMALL = 1
    MEDIUM = 2
    LARGE = 3

    def get_pixels(self):
        """Return the pixel height for this font size."""
        pixels_by_height = {
            FontHeight.SMALL: 10,
            FontHeight.MEDIUM: 15,
            FontHeight.LARGE: 20,
        }
        if self not in pixels_by_height:
            raise ValueError("Invalid font height")
        return pixels_by_height[self]


# Test the implementation
print(FontHeight.SMALL.get_pixels())  # Output: 10
print(FontHeight.MEDIUM.get_pixels())  # Output: 15
print(FontHeight.LARGE.get_pixels())  # Output: 20
# Adds five string metadata columns to the organizations table.
class AddExtraColumnsToOrganizations < ActiveRecord::Migration
  def change
    # All new columns share the same type, so declare them in one list.
    %i[avatar_url location email company homepage].each do |column|
      add_column :organizations, column, :string
    end
  end
end
import React, { Component } from 'react' import PropTypes from 'prop-types'; import {Alert, Row, Col, Container,Table} from 'reactstrap' import { Line} from 'react-chartjs-2'; const mainChartOpts = { maintainAspectRatio: false, legend: { display: false, labels:{ fontSize: 0 } }, scales: { xAxes: [{ gridLines: { drawOnChartArea: true, } }], yAxes: [{ ticks: { beginAtZero: true, maxTicksLimit: 11, stepSize:0.5, max: 5.5 } }] }, elements: { point: { radius: 1, hitRadius: 10, hoverRadius: 4, hoverBorderWidth: 3, }, line: { tension: 0, // disables bezier curves } } } class Survey extends Component { render() { const {data} = this.props var title = []; var MeanDataSet = []; {data !==null && data.map(function(e,i){ title.push('ข้อ.'+(i+1)) MeanDataSet.push(e.Mean) })} const Point = { labels: title, datasets: [ { label: 'ค่าเฉลี่ย', pointStyle:'rect', hitRadius:3, backgroundColor: 'rgba(255, 0, 0, 0.3)', borderColor: '#ee0a0a', pointHoverBackgroundColor: '#ee0a0a', borderWidth: 2, data: MeanDataSet }] } return ( <div className="container"> <Row style={{paddingTop:'1rem'}}> <Col> <span>จำนวนผู้ร่วมตอบแบบประเมิณ {data!==null &&data.lenght>0 &&data[0].COUNT}</span> </Col> </Row> <div className="chart-wrapper" style={{height: 300 + 'px', marginTop: 40 + 'px'}}> <Line data={Point} options={mainChartOpts} height={300}/> </div> <Row style={{paddingTop:'1rem'}}> <Col> <Table striped> <thead> <tr> <th className="text-algn-center">ข้อ</th> <th className="text-algn-left">เรื่อง</th> <th>ค่าเฉลี่ย</th> <th>S.D.</th> </tr> </thead> <tbody> {data!==null && data.map((e,i)=>{ return ( <tr key={"row_"+i}> <td className="text-algn-center">ข้อ.{i+1}</td> <td>{e.title}</td> <td>{(e.Mean)}</td> <td>{(Math.sqrt(e.Error))}</td> </tr> ) })} </tbody> </Table> </Col> </Row> </div> ) } } export default Survey;
package com.ideator; import android.content.Context; import android.content.Intent; import android.os.Bundle; import android.support.annotation.Nullable; import android.support.v4.text.TextUtilsCompat; import android.support.v7.app.AppCompatActivity; import android.text.TextUtils; import android.view.View; import android.widget.Button; import android.widget.EditText; /** * @author <NAME> (<EMAIL>) * @since {25/05/2016} */ public class SignInActivity extends AppCompatActivity { public static Intent newIntent(Context context) { return new Intent(context, SignInActivity.class); } private EditText mEditEmail; private EditText mEditPassword; private Button mButtonSignIn; @Override protected void onCreate(@Nullable Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_sign_in); mEditEmail = (EditText)findViewById(R.id.edit_email); mEditPassword = (EditText)findViewById(R.id.edit_password); mButtonSignIn = (Button)findViewById(R.id.button_sign_in); mButtonSignIn.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { attemptSignIn(); } }); } private void attemptSignIn() { boolean signInFailed = false; if(TextUtils.isEmpty(mEditEmail.getText())) { mEditEmail.setError(getString(R.string.error_need_to_fill_in)); signInFailed = true; } if(TextUtils.isEmpty(mEditPassword.getText())) { mEditEmail.setError(getString(R.string.error_need_to_fill_in)); signInFailed = true; } if(!signInFailed) { signIn(mEditEmail.getText().toString(), mEditPassword.getText().toString()); } } private void signIn(String username, String password) { // TODO: Do actual login Intent intentToMain = MainActivity.newIntent(this); startActivity(intentToMain); finishAffinity(); } }
<reponame>Hconk/AutoKernel #include <stdio.h> #include <math.h> extern "C" { #include "sys_port.h" #include "tengine_errno.h" #include "tengine_log.h" #include "vector.h" #include "tengine_ir.h" #include "tengine_op.h" #include "../../dev/cpu/cpu_node_ops.h" // include op param header file here, locate in src/op/ #include "fc_param.h" } #include "HalideBuffer.h" // include the c_header file here #include "halide_fc.h" void RegisterAutoKernelFc();
<filename>molicode-common/src/main/java/com/shareyi/molicode/common/constants/CacheKeyConstant.java<gh_stars>10-100 package com.shareyi.molicode.common.constants; import com.shareyi.molicode.common.vo.git.GitRepoVo; import org.apache.commons.lang3.StringUtils; /** * 缓存key常量 * * @author david * @date 2019/7/5 */ public class CacheKeyConstant { /** * 锁定时间 */ public static final long LOCK_TIME_MSEC = 10 * 60 * 1000; /** * 获取缓存的用户信息 * * @param userName * @return */ public static String getAcUserCacheKey(String userName) { return "ac_user_" + StringUtils.lowerCase(userName.trim()); } /** * 失败次数key * * @param userName * @return */ public static String getLoginFailureKey(String userName) { return "login_fail_" + StringUtils.lowerCase(userName.trim()); } /** * 获取git 锁定key * * @param gitRepoVo * @return */ public static String buildGitRepoLock(GitRepoVo gitRepoVo) { return "lock_git_" + gitRepoVo.getGitUrl() + "_" + gitRepoVo.getBranchName(); } }
package internal

import (
	"errors"
	"net/http"

	"github.com/DisgoOrg/log"

	"github.com/DisgoOrg/disgo/api"
)

// NewBuilder returns a new api.DisgoBuilder instance
func NewBuilder(token string) api.DisgoBuilder {
	return &DisgoBuilderImpl{
		token:      token,
		cacheFlags: api.CacheFlagsDefault,
	}
}

// DisgoBuilderImpl implementation of the api.DisgoBuilder interface
type DisgoBuilderImpl struct {
	logger                   log.Logger
	token                    string
	httpClient               *http.Client
	gateway                  api.Gateway
	restClient               api.RestClient
	audioController          api.AudioController
	cache                    api.Cache
	memberCachePolicy        api.MemberCachePolicy
	messageCachePolicy       api.MessageCachePolicy
	cacheFlags               api.CacheFlags
	gatewayIntents           api.GatewayIntents
	rawGatewayEventsEnabled  bool
	entityBuilder            api.EntityBuilder
	eventManager             api.EventManager
	voiceDispatchInterceptor api.VoiceDispatchInterceptor
	webhookServer            api.WebhookServer
	// Webhook-server settings; the server is only built in Build() when all
	// three are non-nil and no explicit webhookServer was injected.
	listenURL      *string
	listenPort     *int
	publicKey      *string
	eventListeners []api.EventListener
}

// SetLogger sets logger implementation disgo should use as an example logrus
func (b *DisgoBuilderImpl) SetLogger(logger log.Logger) api.DisgoBuilder {
	b.logger = logger
	return b
}

// SetToken sets the BotToken to connect to discord
func (b *DisgoBuilderImpl) SetToken(token string) api.DisgoBuilder {
	b.token = token
	return b
}

// SetHTTPClient sets the http.Client the api.RestClient uses
func (b *DisgoBuilderImpl) SetHTTPClient(httpClient *http.Client) api.DisgoBuilder {
	b.httpClient = httpClient
	return b
}

// SetGatewayIntents sets the api.GatewayIntents to connect to discord
func (b *DisgoBuilderImpl) SetGatewayIntents(gatewayIntents ...api.GatewayIntents) api.DisgoBuilder {
	// Fold all provided intents into a single bitset starting from none.
	b.gatewayIntents = api.GatewayIntentsNone.Add(gatewayIntents...)
	return b
}

// SetRawGatewayEventsEnabled enables/disables the events.RawGatewayEvent
func (b *DisgoBuilderImpl) SetRawGatewayEventsEnabled(enabled bool) api.DisgoBuilder {
	b.rawGatewayEventsEnabled = enabled
	return b
}

// SetEntityBuilder lets you inject your own api.EntityBuilder
func (b *DisgoBuilderImpl) SetEntityBuilder(entityBuilder api.EntityBuilder) api.DisgoBuilder {
	b.entityBuilder = entityBuilder
	return b
}

// SetEventManager lets you inject your own api.EventManager
func (b *DisgoBuilderImpl) SetEventManager(eventManager api.EventManager) api.DisgoBuilder {
	b.eventManager = eventManager
	return b
}

// AddEventListeners lets you add an api.EventListener to your api.EventManager
func (b *DisgoBuilderImpl) AddEventListeners(eventListeners ...api.EventListener) api.DisgoBuilder {
	// NOTE(review): appends one at a time; append(b.eventListeners,
	// eventListeners...) would be an equivalent single call.
	for _, eventListener := range eventListeners {
		b.eventListeners = append(b.eventListeners, eventListener)
	}
	return b
}

// SetVoiceDispatchInterceptor sets the api.VoiceDispatchInterceptor
func (b *DisgoBuilderImpl) SetVoiceDispatchInterceptor(voiceDispatchInterceptor api.VoiceDispatchInterceptor) api.DisgoBuilder {
	b.voiceDispatchInterceptor = voiceDispatchInterceptor
	return b
}

// SetWebhookServer lets you inject your own api.EventManager
func (b *DisgoBuilderImpl) SetWebhookServer(webhookServer api.WebhookServer) api.DisgoBuilder {
	b.webhookServer = webhookServer
	return b
}

// SetWebhookServerProperties sets the default api.WebhookServer properties
func (b *DisgoBuilderImpl) SetWebhookServerProperties(listenURL string, listenPort int, publicKey string) api.DisgoBuilder {
	b.listenURL = &listenURL
	b.listenPort = &listenPort
	b.publicKey = &publicKey
	return b
}

// SetRestClient lets you inject your own api.RestClient
func (b *DisgoBuilderImpl) SetRestClient(restClient api.RestClient) api.DisgoBuilder {
	b.restClient = restClient
	return b
}

// SetAudioController lets you inject your own api.AudioController
func (b *DisgoBuilderImpl) SetAudioController(audioController api.AudioController) api.DisgoBuilder {
	b.audioController = audioController
	return b
}

// SetCache lets you inject your own api.Cache
func (b *DisgoBuilderImpl) SetCache(cache api.Cache) api.DisgoBuilder {
	b.cache = cache
	return b
}

// SetMemberCachePolicy lets you set your own api.MemberCachePolicy
func (b *DisgoBuilderImpl) SetMemberCachePolicy(memberCachePolicy api.MemberCachePolicy) api.DisgoBuilder {
	b.memberCachePolicy = memberCachePolicy
	return b
}

// SetMessageCachePolicy lets you set your own api.MessageCachePolicy
func (b *DisgoBuilderImpl) SetMessageCachePolicy(messageCachePolicy api.MessageCachePolicy) api.DisgoBuilder {
	b.messageCachePolicy = messageCachePolicy
	return b
}

// SetCacheFlags lets you set the api.CacheFlags
func (b *DisgoBuilderImpl) SetCacheFlags(cacheFlags api.CacheFlags) api.DisgoBuilder {
	b.cacheFlags = cacheFlags
	return b
}

// EnableCacheFlags lets you enable certain api.CacheFlags
func (b *DisgoBuilderImpl) EnableCacheFlags(cacheFlags api.CacheFlags) api.DisgoBuilder {
	b.cacheFlags.Add(cacheFlags)
	return b
}

// DisableCacheFlags lets you disable certain api.CacheFlags
func (b *DisgoBuilderImpl) DisableCacheFlags(cacheFlags api.CacheFlags) api.DisgoBuilder {
	b.cacheFlags.Remove(cacheFlags)
	return b
}

// SetGateway lets you inject your own api.Gateway
func (b *DisgoBuilderImpl) SetGateway(gateway api.Gateway) api.DisgoBuilder {
	b.gateway = gateway
	return b
}

// Build builds your api.Disgo instance
func (b *DisgoBuilderImpl) Build() (api.Disgo, error) {
	disgo := &DisgoImpl{
		logger:                  b.logger,
		rawGatewayEventsEnabled: b.rawGatewayEventsEnabled,
	}
	// A token is mandatory; fail fast before wiring any components.
	if b.token == "" {
		return nil, errors.New("please specify the BotToken")
	}
	disgo.botToken = b.token

	// The application/self-user id is derived from the token itself.
	id, err := IDFromToken(disgo.botToken)
	if err != nil {
		disgo.Logger().Errorf("error while getting application id from BotToken: %s", err)
		return nil, err
	}
	disgo.selfUserID = *id

	// For every optional component: fall back to the default implementation
	// when the caller injected nothing. Order matters — e.g. the HTTP client
	// must exist before the rest client is constructed from it.
	if b.gateway == nil {
		b.gateway = newGatewayImpl(disgo)
	}
	disgo.gateway = b.gateway

	if b.httpClient == nil {
		b.httpClient = http.DefaultClient
	}

	if b.restClient == nil {
		b.restClient = newRestClientImpl(disgo, b.httpClient)
	}
	disgo.restClient = b.restClient

	if b.audioController == nil {
		b.audioController = newAudioControllerImpl(disgo)
	}
	disgo.audioController = b.audioController

	disgo.gatewayIntents = b.gatewayIntents

	if b.entityBuilder == nil {
		b.entityBuilder = newEntityBuilderImpl(disgo)
	}
	disgo.entityBuilder = b.entityBuilder

	if b.eventManager == nil {
		b.eventManager = newEventManagerImpl(disgo, b.eventListeners)
	}
	disgo.eventManager = b.eventManager

	disgo.voiceDispatchInterceptor = b.voiceDispatchInterceptor

	// Only build a webhook server implicitly when all three properties were
	// provided via SetWebhookServerProperties.
	if b.webhookServer == nil && b.listenURL != nil && b.listenPort != nil && b.publicKey != nil {
		b.webhookServer = newWebhookServerImpl(disgo, *b.listenURL, *b.listenPort, *b.publicKey)
	}
	disgo.webhookServer = b.webhookServer

	if b.cache == nil {
		if b.memberCachePolicy == nil {
			b.memberCachePolicy = api.MemberCachePolicyDefault
		}
		if b.messageCachePolicy == nil {
			b.messageCachePolicy = api.MessageCachePolicyDefault
		}
		b.cache = newCacheImpl(disgo, b.memberCachePolicy, b.messageCachePolicy, b.cacheFlags)
	}
	disgo.cache = b.cache

	return disgo, nil
}
from typing import List


def power_off(button_presses: List[int]) -> bool:
    """Simulate presses of the power button and report the final power state.

    Rules (mirroring the original implementation):
      * a single press toggles the power state;
      * a second consecutive press changes nothing;
      * a third consecutive press forces the device ON and resets the streak;
      * any value other than 1 aborts immediately with ``False``.

    Returns True when the device ends up powered off.
    """
    is_on = True          # device starts ON
    streak = 0            # consecutive presses of button 1 so far

    for press in button_presses:
        if press != 1:
            # Any input other than a press of button 1 is invalid.
            return False
        streak += 1
        if streak == 3:
            is_on = True  # three in a row resets the device to ON
            streak = 0
        elif streak == 2:
            pass          # second consecutive press: no state change
        else:
            is_on = not is_on  # single press toggles power

    # Success means the device is OFF at the end of the sequence.
    return not is_on
#!/bin/sh echo "Change directory to MNN_SOURCE_ROOT/project/ios before running this script" echo "Current PWD: ${PWD}" rm -rf ios_64 mkdir ios_64 cd ios_64 cmake -G Xcode ../../../ \ -DCMAKE_BUILD_TYPE=Release \ -DCMAKE_TOOLCHAIN_FILE=../../../cmake/ios.toolchain.cmake \ -DMNN_METAL=ON \ -DARCHS="arm64" \ -DENABLE_BITCODE=0 \ -DENABLE_ARC=1 \ -DMNN_AAPL_FMWK=1 \ -DMNN_SEP_BUILD=0 echo "Building AArch64" xcodebuild -project MNN.xcodeproj \ -configuration Release \ -target MNN \ -sdk iphoneos \ -quiet \ ONLY_ACTIVE_ARCH=NO echo "End Building AArch64" cd ../ rm -rf ios_32 mkdir ios_32 cd ios_32 cmake -G Xcode ../../../ \ -DCMAKE_BUILD_TYPE=Release \ -DCMAKE_TOOLCHAIN_FILE=../../../cmake/ios.toolchain.cmake \ -DMNN_METAL=ON \ -DARCHS="armv7;armv7s" \ -DENABLE_BITCODE=0 \ -DMNN_AAPL_FMWK=1 \ -DMNN_SEP_BUILD=0 echo "Building AArch32" xcodebuild -project MNN.xcodeproj \ -configuration Release \ -target MNN \ -sdk iphoneos \ -quiet \ ONLY_ACTIVE_ARCH=NO echo "End Building AArch32" cd ../ find ios_32 -name "MNN*framework" find ios_64 -name "MNN*framework" mv ios_32/Release-iphoneos/MNN.framework/MNN ios_32/Release-iphoneos/MNN.framework/MNN_32 echo "Creating Fat Binary" lipo -create \ ios_32/Release-iphoneos/MNN.framework/MNN_32 \ ios_64/Release-iphoneos/MNN.framework/MNN \ -output ios_32/Release-iphoneos/MNN.framework/MNN rm ios_32/Release-iphoneos/MNN.framework/MNN_32 echo "Patching Framework Headers" rm -rf ./MNN.framework cp -R ios_32/Release-iphoneos/MNN.framework ./MNN.framework cp -R ../../include/MNN/expr ./MNN.framework/Headers/expr
package netty.guide;

import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;
import java.nio.channels.ServerSocketChannel;
import java.nio.channels.SocketChannel;
import java.util.Date;
import java.util.Iterator;

/**
 * A single-threaded NIO time server: accepts connections, and when a client
 * sends "time" replies with the current date string, otherwise replies
 * "unknow".
 */
public class TimerServer implements Runnable {

    private Selector selector;
    private ServerSocketChannel serverChannel;

    /**
     * Opens a non-blocking server socket on the given port and registers it
     * for accept events. Exits the process on any setup failure.
     */
    public TimerServer(int port) {
        try {
            selector = Selector.open();
            serverChannel = ServerSocketChannel.open();
            serverChannel.configureBlocking(false); // non-blocking
            //serverChannel.socket().bind(new InetSocketAddress(InetAddress.getLocalHost().getHostAddress(), port), 1024);
            // Backlog of 1024 pending connections.
            serverChannel.socket().bind(new InetSocketAddress(port), 1024);
            serverChannel.register(selector, SelectionKey.OP_ACCEPT);
        } catch (IOException e) {
            e.printStackTrace();
            System.exit(1);
        }
    }

    /**
     * Event loop: polls the selector with a 1-second timeout and dispatches
     * every ready key. Select/handle errors are logged and the loop
     * continues; any other exception closes the selector.
     */
    @Override
    public void run() {
        try {
            while (true) {
                try {
                    selector.select(1000); // 1 seconds timeout
                    // iter.remove() in the for-update clears each handled key
                    // from the selected-key set.
                    for (Iterator<SelectionKey> iter = selector.selectedKeys().iterator(); iter.hasNext(); iter.remove()) {
                        handleInput(iter.next());
                    }
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
        } catch (Exception e1) {
            e1.printStackTrace();
            if (selector != null)
                try {
                    selector.close();
                } catch (IOException e) {
                    e.printStackTrace();
                }
        }
    }

    /**
     * Handles one ready key: accepts new connections and registers them for
     * reads; for readable channels, reads the request and writes back the
     * response. On IO failure the key is cancelled and its channel closed.
     */
    private void handleInput(SelectionKey key) throws IOException {
        if (!key.isValid())
            return;
        try {
            if (key.isAcceptable()) {
                // New inbound connection: switch it to non-blocking reads.
                ServerSocketChannel ssc = (ServerSocketChannel) key.channel();
                SocketChannel sc = ssc.accept();
                sc.configureBlocking(false);
                sc.register(selector, SelectionKey.OP_READ);
            }
            if (key.isReadable()) {
                SocketChannel sc = (SocketChannel) key.channel();
                ByteBuffer buff = ByteBuffer.allocate(1024);
                int count = sc.read(buff);
                if (count > 0) {
                    buff.flip();
                    byte[] bytes = new byte[buff.remaining()];
                    buff.get(bytes);
                    String receive = new String(bytes, "UTF-8");
                    System.out.println("time server receive data: " + receive);
                    // NOTE(review): "unknow" is part of the wire protocol as
                    // shipped (typo for "unknown") — left unchanged here.
                    String resp = "time".equalsIgnoreCase(receive) ? new Date().toString() : "unknow";
                    // response
                    byte[] respBytes = resp.getBytes();
                    ByteBuffer write = ByteBuffer.allocate(respBytes.length);
                    write.put(respBytes);
                    write.flip();
                    sc.write(write);
                } else if (count < 0) {
                    // Peer closed the connection.
                    key.cancel();
                    sc.close();
                } else {
                    // ignored
                }
            }
        } catch (IOException e) {
            //e.printStackTrace();
            if (key != null) {
                key.cancel();
                if (key.channel() != null) {
                    key.channel().close();
                }
            }
        }
    }

    public static void main(String[] args) {
        new Thread(new TimerServer(1234), "time-server-01").start();
    }
}
# Copyright (C) 2010 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import unittest

from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.system.workspace import Workspace
from webkitpy.common.system.executive_mock import MockExecutive


class WorkspaceTest(unittest.TestCase):
    """Unit tests for Workspace using mocked filesystem/executive backends."""

    def test_find_unused_filename(self):
        """find_unused_filename should skip existing foo[-N] names and honor
        search_limit (returning None when the limit is exhausted)."""
        filesystem = MockFileSystem({
            "dir/foo.jpg": "",
            "dir/foo-1.jpg": "",
            "dir/foo-2.jpg": "",
        })
        workspace = Workspace(filesystem, None)
        # No collision: the plain name is returned unchanged.
        self.assertEqual(workspace.find_unused_filename("bar", "bar", "bar"), "bar/bar.bar")
        # Limits of 1 and 2 are exhausted by foo.jpg/foo-1.jpg/foo-2.jpg.
        self.assertEqual(workspace.find_unused_filename("dir", "foo", "jpg", search_limit=1), None)
        self.assertEqual(workspace.find_unused_filename("dir", "foo", "jpg", search_limit=2), None)
        # Default limit is large enough to find the next free suffix.
        self.assertEqual(workspace.find_unused_filename("dir", "foo", "jpg"), "dir/foo-3.jpg")

    def test_create_zip(self):
        """create_zip shells out to `zip -9 -r` in the source dir and wraps
        the archive path in the supplied zip-file factory."""
        workspace = Workspace(None, MockExecutive(should_log=True))
        expected_logs = "MOCK run_command: ['zip', '-9', '-r', '/zip/path', '.'], cwd=/source/path\n"

        # Minimal stand-in for zipfile.ZipFile that records the path.
        class MockZipFile(object):
            def __init__(self, path):
                self.filename = path

        archive = OutputCapture().assert_outputs(self, workspace.create_zip,
            ["/zip/path", "/source/path", MockZipFile], expected_logs=expected_logs)
        self.assertEqual(archive.filename, "/zip/path")

    def test_create_zip_exception(self):
        """When the zip command fails, create_zip logs the error and returns
        None rather than raising."""
        workspace = Workspace(None, MockExecutive(should_log=True, should_throw=True))
        expected_logs = """MOCK run_command: ['zip', '-9', '-r', '/zip/path', '.'], cwd=/source/path
Workspace.create_zip failed in /source/path:
MOCK ScriptError

output: MOCK output of child process
"""

        class MockZipFile(object):
            def __init__(self, path):
                self.filename = path

        archive = OutputCapture().assert_outputs(self, workspace.create_zip,
            ["/zip/path", "/source/path", MockZipFile], expected_logs=expected_logs)
        self.assertIsNone(archive)
#!/usr/bin/env bash python setup.py sdist bdist_wheel twine upload --repository-url https://test.pypi.org/legacy/ dist/* pip install --index-url https://test.pypi.org/simple/ drz rm -rf dist
#!/bin/bash

# VectorMap output directory
VECTOR_MAP_DIR=/tmp/test_lanelet_aisan_converter/aisan_vector_map

# Wait until CSV files are generated: the converter drops a completion-notice
# marker file when it is finished.
while :
do
    if [ -f "$VECTOR_MAP_DIR/.completion_notice" ]; then
        # Quoted path: the original unquoted rm broke on paths with spaces.
        rm "$VECTOR_MAP_DIR/.completion_notice"
        break
    fi
    # FIX: the original loop polled with no delay, pegging a CPU core while
    # waiting. Sleep briefly between checks.
    sleep 1
done

# Collect CSV file names (top-level files only)
ARGS=""
for file in `find $VECTOR_MAP_DIR -maxdepth 1 -type f -name "*.csv"`; do
    ARGS="$ARGS $file"
done

# Load CSV files and publish VectorMap messages
rosrun map_file vector_map_loader $ARGS
#pragma once

/// \file TimeStepping.h
/// \brief Algorithms for temporal evolution of the physical model.
/// \author Pavel Sevecek (sevecek at sirrah.troja.mff.cuni.cz)
/// \date 2016-2021

#include "common/ForwardDecl.h"
#include "objects/containers/Array.h"
#include "objects/geometry/Vector.h"
#include "objects/wrappers/SharedPtr.h"

NAMESPACE_SPH_BEGIN

class IScheduler;

/// \brief Base object providing integration in time for all quantities.
///
/// The integration is done by iterating with discrete time step, using \ref step method. All derived
/// objects must implement \ref stepParticles function, which shall iterate over all independant quantities
/// and advance their values using temporal derivatives computed by \ref ISolver object passed in argument of
/// the method. The \ref step function then calls the user-defined \ref stepParticles while also doing more
/// legwork, such as saving statistics and computing new value of time step. Function \ref stepParticles can
/// also save statistics specific to the implementation, using provided \ref Statistics object, but it shall
/// not compute the time step value. To control time step, see \ref TimeStepCriterion and derived classes.
///
/// The time-stepping object must take care of clearing derivatives, as there can be values from previous
/// timestep, or some garbage memory when the method is called for the first time. It is also necessary to
/// clamp all quantities by their minimal/maximal allowed values. These values can be different for
/// different materials; the range of quantity for given \ref IMaterial object can be obtained by
/// \ref IMaterial::range() const function.
///
/// Temporal derivatives are computed by calling \ref ISolver::integrate function. The solver
/// assumes the storage already has zeroed highest-order derivatives of all quantities. The implementation of
/// \ref ITimeStepping inteface must also assume that \ref ISolver::integrate changes the number of particles
/// in the storage. If the timestepping uses derivatives from different times, stored in separate Storage
/// objects, the variable number of particles can be handled automatically by the Storage object, using the
/// concept of dependenet storages - if you create an auxiliary storage, for example to store the prediction
/// derivatives in predictor-corrector algorithm, simply add this storage as a dependent storage to the main
/// one, and it will be updated every time the number of particles in the main storage changes.
///
/// Timestepping is bound to a single Storage object, passed in constructor, and this object cannot be changed
/// during the lifetime of the timestepping object. Timestepping implementation can create any number of
/// auxiliary storages and link then to the main one, this hierarchy established in the constructor will not
/// change during the run.
class ITimeStepping : public Polymorphic {
protected:
    /// Main storage holding all the particles in the run
    SharedPtr<Storage> storage;

    /// Current time step
    Float timeStep;

    /// Maximal allowed time step
    Float maxTimeStep;

    /// Criterion used to compute the time step
    AutoPtr<ITimeStepCriterion> criterion;

public:
    /// \brief Constructs the timestepping, using timestep criteria from parameters in settings.
    ///
    /// \param storage Storage used during the run
    /// \param settings Settings containing initial and maximal timestep and aslo timestep criteria
    ITimeStepping(const SharedPtr<Storage>& storage, const RunSettings& settings);

    /// \brief Constructs the timestepping, explicitly specifying the timestep criterion used in the run.
    ///
    /// \note Use MultiCriterion if more than one criterion is used.
    ITimeStepping(const SharedPtr<Storage>& storage,
        const RunSettings& settings,
        AutoPtr<ITimeStepCriterion>&& criterion);

    ~ITimeStepping() override;

    /// Returns the current time step value.
    INLINE Float getTimeStep() const {
        return timeStep;
    }

    /// Advances the simulation by one time step and updates the step size.
    void step(IScheduler& scheduler, ISolver& solver, Statistics& stats);

protected:
    /// Implementation-specific advancement of particle quantities; must not
    /// compute the time step value itself (see class documentation).
    virtual void stepParticles(IScheduler& scheduler, ISolver& solver, Statistics& stats) = 0;
};

/// \brief Simple Euler first-order timestepping.
class EulerExplicit : public ITimeStepping {
public:
    explicit EulerExplicit(const SharedPtr<Storage>& storage, const RunSettings& settings)
        : ITimeStepping(storage, settings) {}

    virtual void stepParticles(IScheduler& scheduler, ISolver& solver, Statistics& stats) override;
};

/// \brief Predictor-corrector second-order timestepping
class PredictorCorrector : public ITimeStepping {
private:
    /// Separate storage holding prediction derivatives. Holds only highest-order derivatives, other buffers
    /// are empty. Must be kept synchronized with the main storage.
    SharedPtr<Storage> predictions;

public:
    PredictorCorrector(const SharedPtr<Storage>& storage, const RunSettings& settings);

    ~PredictorCorrector() override;

protected:
    virtual void stepParticles(IScheduler& scheduler, ISolver& solver, Statistics& stats) override;

    /// Advances quantities using the current derivatives (prediction phase).
    void makePredictions(IScheduler& scheduler);

    /// Corrects the predicted values using the freshly computed derivatives.
    void makeCorrections(IScheduler& scheduler);
};

/// \brief Leapfrog timestepping
///
/// Uses the drift-kick-drift version of the algorithm for second-order quantities. First-order quantities are
/// integrated using ordinary Euler timestepping.
class LeapFrog : public ITimeStepping {
public:
    LeapFrog(const SharedPtr<Storage>& storage, const RunSettings& settings)
        : ITimeStepping(storage, settings) {}

protected:
    virtual void stepParticles(IScheduler& scheduler, ISolver& solver, Statistics& stats) override;
};

/// \brief Classic fourth-order Runge-Kutta integrator, using four auxiliary storages for the stages.
class RungeKutta : public ITimeStepping {
private:
    /// Auxiliary storages for the four Runge-Kutta stages.
    SharedPtr<Storage> k1, k2, k3, k4;

public:
    RungeKutta(const SharedPtr<Storage>& storage, const RunSettings& settings);

    ~RungeKutta() override;

protected:
    virtual void stepParticles(IScheduler& scheduler, ISolver& solver, Statistics& stats) override;

    /// Computes derivatives into stage storage k and accumulates the weighted contribution.
    void integrateAndAdvance(ISolver& solver, Statistics& stats, Storage& k, const Float m, const Float n);
};

/// \brief Modified midpoint method, subdividing each step into n substeps.
class ModifiedMidpointMethod : public ITimeStepping {
private:
    /// Storage holding midpoint values.
    SharedPtr<Storage> mid;

    /// Number of substeps per time step.
    Size n;

public:
    ModifiedMidpointMethod(const SharedPtr<Storage>& storage, const RunSettings& settings);

protected:
    virtual void stepParticles(IScheduler& scheduler, ISolver& solver, Statistics& stats) override;
};

/// \brief Bulirsch-Stoer integrator with accuracy parameter eps.
class BulirschStoer : public ITimeStepping {
private:
    /// Target accuracy of the integrator.
    Float eps;

public:
    BulirschStoer(const SharedPtr<Storage>& storage, const RunSettings& settings);

protected:
    virtual void stepParticles(IScheduler& scheduler, ISolver& solver, Statistics& stats) override;
};

NAMESPACE_SPH_END
'use strict';

/**
 * Dashboard for Labour
 *
 * @file auth.js
 * @description Passport (Twitter/Facebook) authentication wired to Buttress:
 *              resolves an app-level user, assigns a role from the cached team
 *              sheet, and attaches editors to their constituency topic.
 * @module System
 * @author Lighten
 *
 */

const Config = require('node-env-obj')('../../');

const passport = require('passport');
const TwitterStrategy = require('passport-twitter').Strategy;
const FacebookStrategy = require('passport-facebook').Strategy;
const Buttress = require('buttress-js-api');
const Humanname = require('humanname');

const Cache = require('./cache');
const Logging = require('./logging');
const AppRoles = require('../schema/appRoles');

/* ************************************************************
 *
 * BUTTRESS AUTHENTICATION
 *
 **************************************************************/

/**
 * Resolves (and, when needed, provisions) a Buttress user for a social login.
 *
 * Pipeline: load cached constituencies and the team sheet, derive an
 * authorisation level from the team sheet match, lazily create the
 * constituency topic, find-or-create the Buttress user, register the user as
 * a topic editor, and ensure a 'people' profile exists.
 *
 * @param {Object} appAuth - normalised profile from the passport strategy
 *                           (app, id, token, name, username, profileImgUrl, ...)
 * @param {Object} existingUser - currently unused; kept for interface
 *                                compatibility with the callers below.
 * @return {Promise<Object|null>} resolves with the Buttress user, or
 *                                null/undefined when authentication failed.
 */
const __authenticateUser = (appAuth, existingUser) => {
  // Role presets keyed by authorisation level; one of these is passed to
  // Buttress when the user is created.
  const authentication = {
    unauthorised: {
      authLevel: 1,
      domains: [`${Config.app.protocol}://${Config.app.host}`],
      role: AppRoles.default,
      permissions: [
        {'route': 'app/schema', 'permission': 'read'},
        {'route': 'topic', 'permission': 'list'},
        {'route': 'issue', 'permission': 'list'},
        {'route': 'thunderclap', 'permission': 'list'},
        {'route': 'resource', 'permission': 'list'},
      ],
    },
    editor: {
      authLevel: 1,
      domains: [`${Config.app.protocol}://${Config.app.host}`],
      role: 'admin.editor',
      permissions: [
        {'route': '*', 'permission': '*'},
      ],
    },
    super: {
      authLevel: 1,
      domains: [`${Config.app.protocol}://${Config.app.host}`],
      role: 'admin.super',
      permissions: [
        {'route': '*', 'permission': '*'},
      ],
    },
  };

  let constituencies = null;
  let constituency = null;
  let authorisation = 'unauthorised';
  let user = null;
  let topic = null;

  Logging.logDebug(`AUTH: Pending ${appAuth.name} using ${appAuth.username}`);

  const tCache = Cache.Manager.getCache(Cache.Constants.Type.TEAM);
  const cCache = Cache.Manager.getCache(Cache.Constants.Type.CONSTITUENCY);

  return cCache.getData()
    .then((data) => {
      // Flatten the keyed constituency cache into an array, keeping the key
      // as the constituency name.
      constituencies = [];
      for (const name in data) {
        if (data.hasOwnProperty(name) === false) continue;
        const c = data[name];
        c.name = name;
        constituencies.push(c);
      }
    })
    .then(() => tCache.getData())
    .then((users) => {
      // Only twitter logins can match the team sheet; matching is
      // case-insensitive on the twitter handle.
      const authorised = users.find((u) => appAuth.app === 'twitter' && u.twitter.toUpperCase() === appAuth.username.toUpperCase());
      if (!authorised) {
        Logging.logDebug(`AUTH: Pending ${appAuth.name} Not found in team sheet`);
        return null;
      }

      // 'National' team members get the super role, everyone else on the
      // sheet is an editor.
      if (authorised.teamName === 'National') {
        authorisation = 'super';
        Logging.logDebug(`AUTH: Matched ${appAuth.name} as team ${authorised.teamName}`);
      } else {
        authorisation = 'editor';
        Logging.logDebug(`AUTH: Matched ${appAuth.name} as team ${authorised.teamName}`);
      }

      return authorised;
    })
    .then((authorised) => {
      if (!authorised) {
        return;
      }
      // For editors the team name is the constituency PANO code.
      constituency = constituencies.find((c) => c.pano == authorised.teamName);
      if (!constituency) {
        return;
      }
      return Buttress.getCollection('topic').getAll();
    })
    .then((topics) => {
      if (!topics) {
        return null;
      }
      return topics.find((t) => t.constituencyPano == constituency.pano);
    })
    .then((_topic) => {
      // _topic is: the found topic (reuse), null (no constituency matched —
      // skip creation), or undefined (constituency matched but no topic yet —
      // create one below).
      if (_topic) return _topic;
      if (_topic === null) return;

      const r17 = constituency['2017'].results;
      const mp = r17[0];
      const labourIdx = r17.findIndex((mp) => mp.party === 'Labour');
      const labour = r17[labourIdx];
      // Sum of the 'ahead' margins of every party placed above Labour.
      const labourBehind = r17.reduce((behind, mp, idx) => {
        if (idx >= labourIdx) return behind;
        behind += mp.ahead;
        return behind;
      }, 0);

      let description = '';
      if (labour !== mp) {
        description = `This seat is currently held by ${mp.party} with a majority of ${mp.ahead}. The MP is ${mp.name}. The Labour MP in the 2017 election was ${labour.name}. We need ${labourBehind} votes to win this seat.`;
      } else {
        description = `This seat is currently held by Labour with a majority of ${mp.ahead}. Your MP is ${mp.name}.`;
      }

      return Buttress.getCollection('topic').save({
        name: constituency.name,
        description: description,
        constituencyPano: constituency.pano,
        banner: `/images/cards/photo${Math.floor((Math.random() * 32) + 1)}.jpg`,
        parentId: Config.topics.constituenciesTopicId,
        editorIds: [],
        viewCount: 0,
        published: true,
      });
    })
    .then((_topic) => topic = _topic)
    .then(() => Buttress.Auth.findOrCreateUser(appAuth, authentication[authorisation]))
    .then((_user) => {
      if (!_user) {
        Logging.logError(`AUTH: User ${appAuth.name} profile doesn\'t exist using ${appAuth.username}`);
        // BUGFIX: the original called `cb(null, null)` here, but no `cb`
        // exists in this scope — that branch threw a ReferenceError instead
        // of failing gracefully. Resolve with null so callers see "no user".
        return null;
      }
      user = _user;
      Logging.logDebug(`AUTH: Success ${appAuth.name} using ${user.id}`);
      if (!user.tokens || user.tokens.length < 1) {
        Logging.logDebug(`AUTH: Missing token for ${user.id}:${appAuth.name}`);
        // NOTE(review): this passes the whole `authentication` map rather
        // than `authentication[authorisation]` as findOrCreateUser does —
        // confirm against the buttress-js-api createToken signature.
        return Buttress.Auth.createToken(user.id, authentication)
          .then((token) => {
            user.tokens.push(token);
            return user;
          });
      }
    })
    .then(() => {
      // Register the user as an editor on their constituency topic (once).
      if (!user || authorisation === 'unauthorised' || !topic) {
        return;
      }
      const editorIdx = topic.editorIds.indexOf(user.id);
      if (editorIdx != -1) {
        return;
      }
      return Promise.all([
        Buttress.getCollection('topic').update(topic.id, {
          path: 'editorIds',
          value: user.id,
        }),
        Buttress.getCollection('topic').update(topic.id, {
          path: 'hasEditors',
          value: true,
        }),
      ]);
    })
    .then(() => Buttress.getCollection('people').getAll())
    .then((people) => {
      // Ensure a 'people' profile exists for this auth user.
      const person = people.find((p) => p.authId === user.id);
      if (!person) {
        const name = Humanname.parse(appAuth.name);
        const title = name.salutation ? name.salutation + ' ' : '';
        const initials = name.initials ? name.initials + ' ' : '';
        return Buttress.getCollection('people').save({
          authId: user.id,
          title: name.salutation,
          formalName: `${title}${name.firstName} ${initials}${name.lastName}`.trim(),
          name: `${name.firstName} ${name.lastName}`.trim(),
          forename: name.firstName,
          initials: name.initials,
          surname: name.lastName,
          suffix: name.suffix,
          avatar: appAuth.profileImgUrl,
          type: 'CLIENT',
          role: 'CLIENT',
        });
      }
      return false;
    })
    .then(() => user)
    .catch((err) => {
      Logging.logError(err);
    });
};

/**
 * Wires the passport strategies, serialisation and auth routes onto the app.
 *
 * @param {Object} app - express application instance.
 */
module.exports.init = (app) => {
  /* ************************************************************
   *
   * TWITTER
   *
   **************************************************************/
  passport.use(new TwitterStrategy({
    consumerKey: Config.auth.twitter.consumerKey,
    consumerSecret: Config.auth.twitter.consumerSecret,
    callbackURL: `/auth/twitter/callback`,
  }, (token, tokenSecret, profile, cb) => {
    Logging.logSilly('AUTHENTICATE: Strategy');
    // Normalise the twitter profile into the shape __authenticateUser expects.
    const user = {
      app: 'twitter',
      id: profile.id,
      token: token,
      tokenSecret: tokenSecret,
      name: profile.displayName,
      username: profile.username,
      profileUrl: `https://twitter.com/${profile.username}`,
      profileImgUrl: profile.photos[0].value,
      bannerImgUrl: profile._json.profile_background_image_url ? profile._json.profile_background_image_url : '',
    };
    cb(null, user);
  }));

  /* ************************************************************
   *
   * FACEBOOK
   *
   **************************************************************/
  passport.use(new FacebookStrategy({
    clientID: Config.auth.facebook.appId,
    clientSecret: Config.auth.facebook.appSecret,
    callbackURL: '/auth/facebook/callback',
    profileFields: ['id', 'displayName', 'name', 'cover', 'picture', 'email'],
  }, (token, refreshToken, profile, cb) => {
    const p = profile._json;
    // Normalise the facebook profile into the shape __authenticateUser expects.
    const user = {
      app: 'facebook',
      id: p.id,
      token: token,
      name: p.name,
      email: p.email,
      profileImgUrl: p.picture.data.url,
      bannerImgUrl: p.cover ? p.cover.source : '',
    };
    Logging.logSilly(user);
    cb(null, user);
  }));

  /* ************************************************************
   *
   * SERIALISE / DESERIALISE
   *
   **************************************************************/
  // The whole user object is stored in the session as-is.
  passport.serializeUser((user, done) => {
    Logging.logVerbose('Auth Serialise User');
    Logging.logSilly(user);
    done(null, user);
  });
  passport.deserializeUser((user, done) => {
    Logging.logVerbose('Auth Deserialise User');
    Logging.logSilly(user);
    done(null, user);
  });

  /* ************************************************************
   *
   * AUTHENTICATED
   *
   **************************************************************/
  // Returns the current Buttress user (with their 'people' profile attached)
  // or null when the request has no session user.
  app.get('/authenticated', (req, res) => {
    if (!req.user) {
      res.json(null);
      return;
    }

    let _user = null;
    return Buttress.App.getSchema()
      .then(() => Buttress.User.get(req.user.id))
      .then((user) => {
        if (!user) {
          throw new Error(`Unable to find user ${req.user.id}`);
        }
        _user = user;
      })
      .then(() => Buttress.getCollection('people').getAll())
      .then((people) => {
        const person = people.find((p) => p.authId === _user.id);
        if (!person) {
          throw new Error(`AUTH: No profile found for user id: ${_user.id}`);
        }
        _user.person = person;
        return res.json({
          user: _user,
        });
      })
      .catch((err) => {
        if (err instanceof Error) {
          Logging.logError(err);
          res.status(503).end();
        } else {
          res.json(null).end();
          return null;
        }
      });
    // NOTE: an unreachable legacy block (a second response built via
    // Buttress.User.load after the return above) has been removed.
  });

  app.get('/logout', (req, res) => {
    req.logout();
    res.redirect('/');
  });

  /* ************************************************************
   *
   * ROUTES
   *
   **************************************************************/
  const TW_AUTH_SCOPE = [
  ];
  app.get('/auth/twitter', (req, res, next) => {
    Logging.logSilly('AUTHENTICATE: /auth/twitter');
    // Remember where the user came from so the callback can send them back.
    req.session.returnPath = req.get('Referer');
    Logging.logSilly(req.session.returnPath);
    passport.authenticate(
      'twitter', {
        scope: TW_AUTH_SCOPE.join(' '),
      }
    )(req, res, next);
  });
  app.get('/auth/twitter/callback', (req, res, next) => {
    const rp = req.session.returnPath;
    Logging.logSilly('AUTHENTICATE: /auth/twitter/callback');

    // Check to see if user is already auth'd, if they are
    // the return them to the app.
    if (req.user) {
      Logging.logInfo('AUTHENTICATE: Redirecting already authed user from /auth/twitter/callback');
      req.session.returnPath = '';
      res.redirect(rp ? rp : '/');
      return next();
    }

    passport.authenticate('twitter', (err, appAuth, info) => {
      Logging.logSilly('AUTHENTICATE: Authenticated');
      if (err) throw err;

      return __authenticateUser(appAuth, req.user)
        .then((user) => {
          Logging.logDebug(user);
          req.login(user, (err) => {
            if (err) throw err;
            req.session.returnPath = '';
            res.redirect(rp ? rp : '/');
          });
        })
        .catch(Logging.Promise.logError());
    })(req, res, next);
  });

  const FB_AUTH_SCOPE = [
    'public_profile',
    'email',
    'publish_actions',
  ];
  app.get('/auth/facebook', (req, res, next) => {
    req.session.returnPath = req.get('Referer');
    Logging.logSilly(req.session.returnPath);
    passport.authenticate(
      'facebook', {
        scope: FB_AUTH_SCOPE,
      }
    )(req, res, next);
  });
  app.get('/auth/facebook/callback', (req, res, next) => {
    passport.authenticate('facebook', (err, appAuth, info) => {
      if (err) throw err;
      __authenticateUser(appAuth, req.user)
        .then((user) => {
          Logging.logSilly(user);
          req.login(user, (err) => {
            if (err) throw err;
            const rp = req.session.returnPath;
            req.session.returnPath = '';
            res.redirect(rp ? rp : '/');
          });
        })
        .catch(Logging.Promise.logError());
    })(req, res, next);
  });
};
#!/usr/bin/env bash export JAVA_OPTS='-server -Xms2048m -Xmx2048m -XX:PermSize=1024m -XX:MaxPermSize=1024m -XX:+UseParallelOldGC -XX:+UseAdaptiveSizePolicy -XX:+UseBiasedLocking' export _JAVA_OPTIONS='-Dsun.java2d.opengl=true -Dsun.java2d.xrender=true'
def histogram(values): """This function will take a list of numbers and generate a histogram.""" # Get the min and max values of the list min_val = min(values) max_val = max(values) # Iterate over each value in the list for num in values: # Get the percentage of the number relative to the min and max percent = (num - min_val) / (max_val - min_val) # Scale the percentage to get the appropriate number of * num_asterisks = int(percent * 10) # Build the histogram hist = "*" * num_asterisks # Print the histogram print(num, ": ", hist) # Test values = [3, 10, 5, 8, 9, 1] histogram(values)
<gh_stars>0 package storage //Redirect entry declaration type Redirect struct { Hostname string //hostname of the redirector URL string //URL on the hostname Target string //forwarding address } // Redirector interface type Redirector interface { GetAllRedirects() []Redirect // Get all redirects known to redirects GetRedirectsForHost(hostname string) []Redirect // Get all redirects for a specific hostname GetRedirect(hostname string, url string) []Redirect // Get redirect for a specific hostname & url (should be only one) AddRedirect(redirect Redirect) error // Add a new redirect for a hostname & url RemoveRedirect(redirect Redirect) // Remove a redirect specific to hostname & url RemoveAllRedirectsForHost(redirect Redirect) // Remove all redirects for a hostname GetTarget(hostname string, url string) (target string, err error) // Return the redirect target for the hostname & url }
<reponame>minuk8932/Algorithm_BaekJoon package math; import java.io.BufferedReader; import java.io.InputStreamReader; /** * * @author minchoba * 백준 1977번 : 완전 제곱수 * * @see https://www.acmicpc.net/problem/1977 * */ public class Boj1977 { private static final int MAX = 102; private static final int NONE = -1; private static final double ZERO = 0.0; public static void main(String[] args) throws Exception{ // 버퍼를 통한 값 입력 BufferedReader br = new BufferedReader(new InputStreamReader(System.in)); int M = Integer.parseInt(br.readLine()); int N = Integer.parseInt(br.readLine()); int[] arr = new int[MAX]; double sqrtM = Math.sqrt(M); double sqrtN = Math.sqrt(N); int start = (int) Math.sqrt(M); int end = (int) Math.sqrt(N); double spare = sqrtM - start; // M의 소숫점 자리의 값을 구함 if(spare != ZERO){ // 소수점 자리가 0 일 경우 : M은 원래 제곱수 (N일 경우엔 무시해도 됨) start++; // 0이 아니라면, a.xxxx 인데, a보다는 크고 a+1보다 작은 제곱수 이므로 정수형 M값에 1을 더해줌 } if(sqrtM != sqrtN && start <= end){ // if(두 수가 같은 수의 제곱수가 아니면서, 동시에 start <= end) int total = 0; int tmp = start; for(int i = 1; i <= end - start + 1; i++){ // start <= x <= end 에 해당하는 값들을 제곱해 arr 배열에 넣으며 총합을 구함 arr[i] = (int)Math.pow(tmp, 2); tmp++; total += arr[i]; } System.out.println(total); // 총합 System.out.println(arr[1]); // 가장 작은 결과 값 } else { if(spare == ZERO){ // N, M이 모두 제곱수이면서 같은 값을 갖는 경우 System.out.println(M); System.out.println(M); } else{ System.out.println(NONE); // 사이에 제곱수가 없거나 구할 수 없는 경우 : -1 } } } }
class Animal: def __init__(self, species, color, age): self.species = species self.color = color self.age = age
<reponame>naga-project/webfx package dev.webfx.kit.mapper.peers.javafxgraphics.base; import javafx.scene.image.Image; import javafx.scene.image.ImageView; /** * @author <NAME> */ public interface ImageViewPeerMixin <N extends ImageView, NB extends ImageViewPeerBase<N, NB, NM>, NM extends ImageViewPeerMixin<N, NB, NM>> extends NodePeerMixin<N, NB, NM> { void updateImage(Image image); void updateFitWidth(Double fitWidth); void updateFitHeight(Double fitHeight); void updateX(Double x); void updateY(Double y); }
/*
 * This JavaScript file is strictly for implemen-
 * tations on the view file: resources/views/chat.blade.php
 * NOTE(review): despite the reference to chat.blade.php above, this agent
 * manages task updates — confirm which view actually includes it.
 */
(function ($) {
    // the chat agent — a namespace object hung off jQuery
    $.TaskAgent = {};

    // domain object model references (jQuery handles, filled in on ready)
    $.TaskAgent.Dom = {};
    $.TaskAgent.Dom.task_name = null;
    $.TaskAgent.Dom.task_update_btn = null;
    $.TaskAgent.Dom.status_panel = null;

    // websocket (socket.io connection, created in launch())
    $.TaskAgent.Socket = null;

    // cross site request forgery token
    $.TaskAgent.csrf_token = null;

    // websocket host url and port
    $.TaskAgent.socket_url = null;
    $.TaskAgent.socket_port = null;

    // user and task uuid
    $.TaskAgent.user_uuid = null;
    $.TaskAgent.task_uuid = null;

    $.TaskAgent.launched = false;   // guard so launch() runs only once
    $.TaskAgent.disrupted = false;  // true while the socket is disconnected

    // socket server end point, e.g. "http://host:port"
    $.TaskAgent.get_socket_end_point = function() {
        return this.socket_url + ':' + this.socket_port;
    }

    // Disable the name field and update button while disconnected.
    $.TaskAgent.disable_task_updater_components = function() {
        this.Dom.task_name.attr('disabled', 'disabled');
        this.Dom.task_update_btn.attr('disabled', 'disabled');
    }

    // Re-enable the name field and update button.
    $.TaskAgent.enable_task_updater_components = function() {
        this.Dom.task_name.removeAttr('disabled');
        this.Dom.task_update_btn.removeAttr('disabled');
    }

    // Enter the "disrupted" state: lock the UI and show a connecting message.
    $.TaskAgent.start_act_disrupted = function() {
        this.disable_task_updater_components();
        this.disrupted = true;
        var status_str = 'Attempting to connect to websocket';
        this.Dom.status_panel.text(status_str);
    }

    // While disrupted, append a dot to the status text as reconnect progress.
    $.TaskAgent.continue_act_disrupted = function() {
        if (!this.disrupted) {
            return;
        }
        var status_str = this.Dom.status_panel.text();
        status_str += '.';
        this.Dom.status_panel.text(status_str);
    }

    // Leave the "disrupted" state: clear the status and unlock the UI.
    $.TaskAgent.stop_act_disrupted = function() {
        this.Dom.status_panel.text('');
        this.disrupted = false;
        this.enable_task_updater_components();
    }

    $.TaskAgent.get_task_name = function() {
        return this.Dom.task_name.val();
    }

    // update task function: POSTs the new name, then broadcasts the change
    // to every user the task is shared with, then returns to the task list.
    $.TaskAgent.update_task = function() {
        var task_uuid = this.task_uuid;
        var task_name = this.get_task_name();

        // abort on empty task name
        if (task_name === '') {
            return false;
        }

        // update task
        $.ajax({
            url: '/task/update',
            type: 'POST',
            headers: {
                'X-CSRF-TOKEN': $.TaskAgent.csrf_token
            },
            data: {
                'task_uuid': task_uuid,
                'task_name': task_name
            },
            success: function(data, textStatus, jqXHR) {
                // abort send on failure (server reports stat !== 0)
                if (data.stat !== 0) {
                    return false;
                }

                // send update notification
                var task_owner = data.task_owner;
                var shared_list = data.shared_list;

                shared_list.forEach(userObject => {
                    var recipient = userObject.uuid;

                    // send (broadcast) task update
                    $.TaskAgent.Socket.emit('task_updated', {
                        recipient: recipient,
                        task_uuid: task_uuid,
                        task_owner: task_owner,
                        task_name: task_name
                    });
                });

                // go back to task list
                location.href = '/task';
            }
        });
    }

    // task agent launcher: binds UI events, connects the socket and joins
    // this user's room. Must run after the DOM fields are populated.
    $.TaskAgent.launch = function() {
        // launch once
        if (this.launched) {
            return false;
        }

        // trigger update task on click
        this.Dom.task_update_btn.click(function() {
            $.TaskAgent.update_task();
        });

        // trigger update task on enter
        this.Dom.task_name.keypress(function(e) {
            var keyCode = e.keyCode;
            var enterKeyCode = 13;

            if (keyCode === enterKeyCode) {
                $.TaskAgent.update_task();
            }
        });

        this.start_act_disrupted();

        var socket_end_point = this.get_socket_end_point();

        // establish websocket connection
        this.Socket = io.connect(socket_end_point);

        // custom 'joined' event handler
        this.Socket.on('joined', (object) => {
            // enable task list components on successful join
            this.stop_act_disrupted();
        });

        // 'disconnect' event handler
        this.Socket.on('disconnect', () => {
            this.start_act_disrupted();
        });

        // 'reconnecting' event handler
        this.Socket.on('reconnecting', (attemptNumber) => {
            this.continue_act_disrupted();
        });

        // 'reconnect' event handler
        this.Socket.on('reconnect', (attemptNumber) => {
            this.stop_act_disrupted();
        });

        // join room
        this.Socket.emit('join', {
            client_uuid: this.user_uuid
        });

        this.launched = true;
    }

    // entry point
    $(document).ready(function() {
        /**
         * Initializing the Task Agent: pull configuration out of hidden
         * form fields rendered by the server, then launch.
         */
        $.TaskAgent.Dom.task_name = $('#task_name');
        $.TaskAgent.Dom.task_update_btn = $('#task_update_btn');
        $.TaskAgent.Dom.status_panel = $('#status_panel');

        $.TaskAgent.csrf_token = $('#csrf-token').val();
        $.TaskAgent.socket_url = $('#socket_url').val();
        $.TaskAgent.socket_port = $('#socket_port').val();
        $.TaskAgent.user_uuid = $('#user_uuid').val();
        $.TaskAgent.task_uuid = $('#task_uuid').val();

        // launch the task agent
        $.TaskAgent.launch();
    });
}(jQuery));
#include <iostream> struct Node { int data; struct Node * next; }; class LinkedList { private: struct Node *head; public: LinkedList() { head = NULL; } // Prints the current list void printList() { struct Node *temp = head; while (temp != NULL) { std::cout << temp->data << " "; temp = temp->next; } std::cout << std::endl; } // Adds a new element to the list void addNode(int data) { // Create a new node struct Node* new_node = new Node(); // Set its data new_node->data = data; new_node->next = NULL; // Check if list is empty if (head == NULL) { // If list is empty, make new node as head head = new_node; return; } // If list is not empty, traverse to the end struct Node *temp = head; while (temp->next != NULL) temp = temp->next; // Add node to the next of last node temp->next = new_node; } // Delete a node from the list void deleteNode(int data) { // Store head node struct Node *temp = head, *prev; // Search for the key to be deleted while (temp != NULL && temp->data != data) { prev = temp; temp = temp->next; } // If key was not present in linked list if (temp == NULL) return; // Unlink the node from linked list prev->next = temp->next; // Free memory delete temp; } };
package greedy; import java.io.BufferedReader; import java.io.InputStreamReader; import java.util.Arrays; import java.util.StringTokenizer; /** * * @author exponential-e * 백준 18234번: 당근 훔쳐 먹기 * * @see https://www.acmicpc.net/problem/18234/ * */ public class Boj18234 { private static final long CIPHER = 1_000; public static void main(String[] args) throws Exception{ BufferedReader br = new BufferedReader(new InputStreamReader(System.in)); StringTokenizer st = new StringTokenizer(br.readLine()); int N = Integer.parseInt(st.nextToken()); long T = Long.parseLong(st.nextToken()); long[] carrot = new long[N]; for(int i = 0; i < N; i++) { st = new StringTokenizer(br.readLine()); long w = Long.parseLong(st.nextToken()); long p = Long.parseLong(st.nextToken()); carrot[i] = p * CIPHER + w; } System.out.println(steal(T - N, carrot)); } private static long steal(long days, long[] carrot) { Arrays.sort(carrot); long result = 0; int index = 0; for(long c: carrot) { result += (days + index++) * (c / CIPHER) + c % CIPHER; // put off as much as possible, opt } return result; } }
#!/bin/bash # Use jq to slurp and sort the keys of the JSON data from person.json # Use Y2J to convert the YAML data from person.yaml to JSON format and pass it as input to jq # Compare the JSON data from person.json with the JSON data from person.yaml to check if they are identical # Use grep to check if the result of the comparison is true # If the comparison is true, exit with a status code of 0 jq --sort-keys --slurp --raw-output \ '.[0] == .[1]' \ data/person.json \ <(Y2J data/person.yaml) \ | grep -q true exit
<filename>server/config/roles.js const allRoles = { user: ['manageBasket','getBaskets'], admin: ['getUsers', 'manageUsers', 'getLocations', 'manageLocations'], business: ['getLocations', 'manageLocations', 'manageProducts','manageBasket','getBaskets'] }; const roles = Object.keys(allRoles); const roleRights = new Map(Object.entries(allRoles)); module.exports = { roles, roleRights, };
export interface Output { amount: number address: string message?: any } export interface WalletTransaction { txid: string action: string amount: number fees: number time: number confirmations: number feePerKb: number outputs: Output[] message?: any creatorName: string hasUnconfirmedInputs: boolean }
'use strict';

const crypto = require('crypto');
const fs = require('fs');
const http = require('http');
const config = require('../config/config');
const utils = require('../helpers/utils');

// NOTE(review): HTTP_OK, HTTP_BAD_REQUEST, HTTP_NO_CONTENT and
// HTTP_UNAUTHORIZED are used below but never defined or required in this
// file — presumably injected as globals by the server bootstrap; confirm,
// otherwise those paths throw ReferenceError at runtime.

module.exports = {
  doLogin,
  doLogout,
  sessionValid
};

// Handles a login request: parses the JSON body and validates credentials.
function doLogin(oRequest, oResponse) {
  var responseCb = utils.getResponseCb(oResponse, utils.returnJsonResponse);
  var loginCb = function(oData) {
    checkLoginCreds(oData, responseCb);
  };

  // Parse the JSON data in the request, and call the Login CB function
  // with the resulting data
  return utils.getJsonPostData(oRequest, loginCb);
}

// Handles a logout request: parses the JSON body and removes the sessions.
function doLogout(oRequest, oResponse) {
  var responseCb = utils.getResponseCb(oResponse, utils.returnJsonResponse);
  var logoutCb = function(oData) {
    removeUserSession(oData, responseCb);
  };

  // Parse the JSON data in the request, and call the Logout CB function
  // with the resulting data
  return utils.getJsonPostData(oRequest, logoutCb);
}

// Checks whether "session:<id>" in authStr matches a stored session.
// NOTE(review): callback arity is inconsistent — callback(false) on a
// missing authStr, but callback(err, valid) otherwise; confirm the callers.
function sessionValid(authStr, callback) {
  if (!authStr) {
    return callback(false);
  }
  var sessionId = authStr.replace('session:', '');

  // Look up session ID in session file
  fs.readFile(config.session_file, 'utf8', function(err, data) {
    if (err) {
      return callback(err, false);
    }

    var goodSession = false;

    // Strip out \r and split on newline
    data = data.replace(/\r/g,'');
    var aSessions = data.split('\n');

    // Loop on sessions to see if the specified
    // session exists. (Despite the original comment, no password is
    // checked here — only the session id.)
    for (var i=0; i < aSessions.length; i++) {
      let sess = aSessions[i];

      // Skip comment lines
      if (sess.indexOf('//') == 0) continue;

      // Each record is "<userid> <sessionKey>"
      let aTmp = sess.split(' ');
      if (aTmp.length > 1) {
        var cur_sess = aTmp[1];
        if (cur_sess == sessionId) {
          goodSession = true;
          break;
        }
      }
    }

    return callback(null, goodSession);
  });
}

//----------------------------------------------------------------------
// Compares a clear-text password against the stored salted SHA-256 digest.
function passwordsMatch(clearPwd, encPwd) {
  // The passwords stored in the users.txt file should be
  // encrypted with the pwd_salt key.

  // Hash encrypt the input clear-text password
  // using the 'pwd_salt' key from the config file.
  const hash = crypto.createHash('sha256');
  hash.update(clearPwd + config.pwd_salt);
  var hashPwd = hash.digest('hex');

  var pwdMatch = (hashPwd == encPwd)
  return pwdMatch;
}

// Creates a random session key, appends "<userid> <key>" to the session
// file, and answers with the new key.
function generateSessionKey(userid, responseCallback) {
  var sessionKey = crypto.randomBytes(16).toString('hex');
  var sessionRecord = userid + ' ' + sessionKey + '\r\n';

  // Open session key file for append, and add sessionRecord
  fs.appendFile(config.session_file, sessionRecord, function(err, data) {
    if (err) {
      var respData = {'http_code' : 500, 'error' : 'Error writing session data'};
      return responseCallback(null, respData);
    }

    var oRespData = {};
    oRespData['session'] = sessionKey;
    var respData = { 'http_code': HTTP_OK, 'data': oRespData };
    return responseCallback(null, respData);
  });
}

// Removes every session belonging to oData.userid from the session file.
function removeUserSession(oData, responseCallback) {
  if (!oData) {
    // Generate an invalid request response
    var respData = { 'http_code': HTTP_BAD_REQUEST, 'error' : 'Invalid post data' };
    return responseCallback(null, respData);
  }

  if (!oData.hasOwnProperty('userid')) {
    var respData = { 'http_code': HTTP_BAD_REQUEST, 'error': 'Logout requires userid attribute' };
    return responseCallback(null, respData);
  }

  // Remove all instances of this user from session file
  fs.readFile(config.session_file, 'utf8', function(err, data) {
    if (err) {
      var respData = {'http_code' : 500, 'error' : 'Error removing session.'};
      return responseCallback(null, respData);
    }

    var newData = extractSessions(data, oData.userid);

    fs.writeFile(config.session_file, newData, function(err, data) {
      if (err) {
        var respData = {'http_code' : 500, 'error' : 'Error removing session.'};
        return responseCallback(null, respData);
      }
      var respData = { 'http_code': HTTP_NO_CONTENT };
      return responseCallback(null, respData);
    });
  });
}

// Validates a userid/password pair against the user file; on success a new
// session key is generated and returned.
function checkLoginCreds(oData, responseCallback) {
  if (!oData) {
    // Generate an invalid request response
    var respData = { 'http_code': HTTP_BAD_REQUEST, 'error' : 'Invalid post data' };
    return responseCallback(null, respData);
  }

  if (!oData.hasOwnProperty('userid') || !oData.hasOwnProperty('password')) {
    var respData = { 'http_code': HTTP_BAD_REQUEST, 'error': 'Login requires userid and password attributes' };
    return responseCallback(null, respData);
  }

  // Look up user ID in user ID file
  fs.readFile(config.user_file, 'utf8', function(err, data) {
    if (err) {
      var respData = {'http_code' : 500, 'error' : 'Error validating user.'};
      return responseCallback(null, respData);
    }

    var goodLogin = false;

    // Strip out \r and split on newline
    data = data.replace(/\r/g,'');
    var aUsers = data.split('\n');

    // Loop on users in user ID file to see if the specified
    // user exists, and the password matches.
    // Design Note: Use 'for' instead of 'forEach', so I can break
    // out if the userid is found.
    for (var i=0; i < aUsers.length; i++) {
      let user = aUsers[i];

      // Skip comment lines
      if (user.indexOf('//') == 0) continue;

      // Each record is "<userid> <hashedPassword>"
      let aTmp = user.split(' ');
      if (aTmp.length > 1) {
        var userid = aTmp[0];
        var password = aTmp[1];
        if (userid == oData.userid) {
          if (passwordsMatch(oData.password, password)) {
            goodLogin = true;
          }
          break;
        }
      }
    }

    if (goodLogin) {
      return generateSessionKey(oData.userid, responseCallback);
    } else {
      var respData = { 'http_code': HTTP_UNAUTHORIZED, 'error': 'Invalid Username or Password' };
      return responseCallback(null, respData);
    }
  });
}

// Remove session lines from input file data that match the input userId.
function extractSessions(data, userId) {
  var aSessions = data.split('\n');
  var newData = '';
  aSessions.forEach( (session) => {
    if (session.length) {
      let aTmp = session.split(' ');
      if (aTmp[0] != userId) newData += session + '\n';
    }
  });
  return newData;
}
// https://codeforces.com/contest/1300/problem/C #include <bits/stdc++.h> using namespace std; int bits[32]; int main() { cin.tie(0), ios::sync_with_stdio(0); int n, m; cin >> n; vector<int> a(n); for (int i = 0; i < n; i++) { cin >> a[i]; m = 1; for (int j = 0; j < 32; j++, m *= 2) if (a[i] & m) { if (!bits[j]) bits[j] = i + 1; else bits[j] = -1; } } vector<int> r, s(n); for (int j = 31; j >= 0; j--) { if (bits[j] > 0 && !s[bits[j]-1]) { r.push_back(bits[j]-1); s[bits[j]-1] = 1; } } for (int i = 0; i < n; i++) if (!s[i]) r.push_back(i); for (int i = 0; i < n; i++) cout << a[r[i]] << " \n"[i == n - 1]; }
class Stack: def __init__(self): self.stack = [] def push(self, item): self.stack.append(item) def pop(self): item = self.stack.pop() return item def is_empty(self): return self.stack == [] def peek(self): if not self.is_empty(): return self.stack[-1] def get_stack(self): return self.stack
print('Conversor de Unidades') m = float(input('Tamanho em metros: ')) cm = m * 100 mm = m * 1000 print(f'Quantidade em metros: {m}. \n Convertendo... \n Quantidade em centímetros: {cm} \n Quantidade em milímetros: {mm}.')
<html> <head> <title>Sign Up Form</title> <script> function validateForm() { if (document.getElementById("password").value.length < 8) { alert("Password must be at least 8 characters long!"); return false; } return true; } </script> </head> <body> <form onsubmit="return validateForm()"> <input type="email" name="email" placeholder="Email"/> <input type="password" id="password" name="password" placeholder="Password"/> <button type="submit">Submit</button> </form> </body> </html>
#!/bin/bash
#
# Resets and bootstraps a local Open Integration Hub (OIH) development cluster
# on minikube: (re)starts minikube, maps the exposed service hostnames into
# /etc/hosts, deploys the platform base / IAM / framework services, creates
# demo tenants, users, a service account, components, and example flows.
#
# Flags:
#   -c                clear (delete) the existing minikube VM first
#   -s svc1,svc2,...  deploy the listed services only temporarily and remove
#                     them again at the end
#   -i image          use a custom docker image for the development component
#   -p                port-forward mongodb/rabbitmq/redis to localhost
#
# NOTE(review): requires sudo (for /etc/hosts) and assumes the numbered
# resource directories (./1-Platform .. ./4-Services) exist next to the script.
set -e

# constants
DEV_CONTAINER_IMAGE="openintegrationhub/dev-connector:latest"
TENANT_1_NAME="Tenant 1"
TENANT_1_ADMIN="ta1@example.com"
TENANT_1_ADMIN_PASSWORD="1234"
TENANT_1_USER="tu1@example.com"
TENANT_1_USER_PASSWORD="1234"
TENANT_2_NAME="Tenant 2"
TENANT_2_ADMIN="ta2@example.com"
TENANT_2_ADMIN_PASSWORD="1234"
TENANT_2_USER="tu2@example.com"
TENANT_2_USER_PASSWORD="1234"
SERVICE_ACCOUNT_USERNAME=test@test.de
SERVICE_ACCOUNT_PASSWORD=testtest1234
######
# hostnames that get an /etc/hosts entry pointing at the minikube IP
EXPOSED_SERVICES=( \
    app-directory.example.com \
    iam.example.com \
    skm.example.com \
    flow-repository.example.com \
    auditlog.example.com \
    metadata.example.com \
    component-repository.example.com \
    snapshots-service.example.com \
    dispatcher-service.example.com \
    webhooks.example.com \
    attachment-storage-service.example.com \
    data-hub.example.com \
    ils.example.com \
    web-ui.example.com \
)
# binaries that must be on PATH before we start
REQUIRED_TOOLS=( \
    curl \
    kubectl \
    minikube \
    base64 \
    python3 \
)

# minikube settings
MK_MEMORY=8192
MK_CPUS=4

# Absolute path this script is in, thus /home/user/bin
SCRIPTPATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )

# argument cache
skip_services=()
start_proxy="false"
clear_minikube="false"

# script cache and settings
os=""
cluster_ip=""
admin_token=""
tenant_1_id=""
tenant_1_admin_id=""
tenant_1_user_id=""
tenant_2_id=""
tenant_2_admin_id=""
tenant_2_user_id=""
service_account_id=""
service_account_token=""
service_account_token_encoded=""
custom_secret_id=""
development_component_id=""
development_private_component_id=""
development_global_component_id=""
result=""

# escape
esc="\e"

# Drop the cached sudo credentials on exit (installed via trap below).
function cleanup {
    sudo -k
}

# Detect the host OS; only Linux and macOS (Darwin) are supported.
function checkOS {
    unameOut="$(uname -s)"
    case "${unameOut}" in
        Linux*) os=Linux;;
        Darwin*) os=Darwin;;
        *) echo "Unsupported operating system" && exit 1
    esac
    echo "Operating System: $os"
}

function colorEcho {
    # $1 bash color code
    # $2 text
    echo -e "${esc}[$1m$2${esc}[0m"
}

# Abort early when any of REQUIRED_TOOLS is missing.
function checkTools {
    for i in "${REQUIRED_TOOLS[@]}"
    do
        if ! type "${i}" > /dev/null; then
            echo "Please install '${i}' and run this script again"
            exit 1
        fi
    done
}

# Point every EXPOSED_SERVICES hostname at the current minikube IP in
# /etc/hosts (updating stale entries in place, appending new ones).
function updateHostsFile {
    cluster_ip=$(minikube ip)
    for host_name in "${EXPOSED_SERVICES[@]}"
    do
        match_in_hosts="$(grep "$host_name" /etc/hosts | cut -f1)"
        host_entry="${cluster_ip} ${host_name}"
        if [ -n "$match_in_hosts" ]
        then
            echo "Updating existing hosts entry: $host_entry"
            # python3 does the literal string replacement to avoid sed-escaping issues
            updated_hosts=$(python3 -c "import sys;lines=sys.stdin.read();print(lines.replace('$match_in_hosts','$host_entry'))" < /etc/hosts)
            echo "$updated_hosts" | sudo tee /etc/hosts > /dev/null
        else
            echo "Adding new hosts entry: $host_entry"
            echo "$host_entry" | sudo tee -a /etc/hosts > /dev/null
        fi
    done
}

# Poll $1 every 2s until it answers with HTTP status $2.
function waitForServiceStatus {
    # $1 - serviceUrl
    # $2 - serviceStatus
    status="000"
    while [ $status != "$2" ]; do
        colorEcho 36 "Waiting for $1"
        sleep 2
        status=$(curl -w "%{http_code}" --silent --output /dev/null "$1")
    done
}

# Poll `kubectl get pods` every 2s until a line matches regex $1.
function waitForPodStatus {
    # $1 - pod regular expression
    pod_status=$(kubectl get pods --all-namespaces || true);
    while [ -z "$(grep "$1" <<< "$pod_status")" ]; do
        colorEcho 36 "Waiting for $1"
        sleep 2
        pod_status=$(kubectl get pods --all-namespaces || true);
    done
}

# POST a JSON payload; the raw response body is left in the global `result`.
function postJSON {
    # $1 - serviceUrl
    # $2 - jsonPayload
    # $3 - Bearer Token
    res=$(curl \
        -H "Content-Type: application/json" \
        -H "Authorization: Bearer ${3}" \
        --silent \
        --show-error \
        --fail \
        --request POST \
        --data "$2" \
        "$1"
    )
    status=$?
    result=$res
}

# Log in with the default IAM admin and cache the bearer token in admin_token.
function setAdminToken {
    read -r -d '' JSON << EOM || true
{
    "username": "admin@openintegrationhub.com",
    "password": "somestring"
}
EOM
    postJSON http://iam.example.com/login "$JSON"
    admin_token=$(echo "$result"| python3 -c "import sys, json; print(json.load(sys.stdin)['token'])")
}

# Create the SERVICE_ACCOUNT user and remember its id.
function createServiceAccount {
    read -r -d '' JSON << EOM || true
{
    "username":"$SERVICE_ACCOUNT_USERNAME",
    "firstname":"a",
    "lastname":"b",
    "role":"SERVICE_ACCOUNT",
    "status":"ACTIVE",
    "password":"$SERVICE_ACCOUNT_PASSWORD",
    "permissions":[
        "all"
    ]
}
EOM
    postJSON http://iam.example.com/api/v1/users "$JSON" "$admin_token"
    service_account_id=$(echo "$result" | python3 -c "import sys, json; print(json.load(sys.stdin)['id'])")
}

# Create tenant 1 plus its admin and a regular user; cache the returned ids.
function createTenantAndUsers_1 {
    # create tenant
    read -r -d '' JSON << EOM || true
{
    "name": "$TENANT_1_NAME",
    "confirmed": true,
    "status": "ACTIVE"
}
EOM
    postJSON http://iam.example.com/api/v1/tenants "$JSON" "$admin_token"
    tenant_1_id=$(echo "$result" | python3 -c "import sys, json; print(json.load(sys.stdin)['id'])")
    # create tenant admin
    read -r -d '' JSON << EOM || true
{
    "status" : "ACTIVE",
    "confirmed": true,
    "role": "TENANT_ADMIN",
    "permissions": ["all"],
    "username": "$TENANT_1_ADMIN",
    "password": "$TENANT_1_ADMIN_PASSWORD",
    "tenant": "$tenant_1_id"
}
EOM
    postJSON http://iam.example.com/api/v1/users "$JSON" "$admin_token"
    tenant_1_admin_id=$(echo "$result" | python3 -c "import sys, json; print(json.load(sys.stdin)['id'])")
    # create user
    read -r -d '' JSON << EOM || true
{
    "status" : "ACTIVE",
    "confirmed": true,
    "role": "USER",
    "username": "$TENANT_1_USER",
    "password": "$TENANT_1_USER_PASSWORD",
    "tenant": "$tenant_1_id"
}
EOM
    postJSON http://iam.example.com/api/v1/users "$JSON" "$admin_token"
    tenant_1_user_id=$(echo "$result" | python3 -c "import sys, json; print(json.load(sys.stdin)['id'])")
}

# Create tenant 2 plus its admin and a regular user; cache the returned ids.
function createTenantAndUsers_2 {
    # create tenant
    read -r -d '' JSON << EOM || true
{
    "name": "$TENANT_2_NAME",
    "confirmed": true,
    "status": "ACTIVE"
}
EOM
    postJSON http://iam.example.com/api/v1/tenants "$JSON" "$admin_token"
    tenant_2_id=$(echo "$result" | python3 -c "import sys, json; print(json.load(sys.stdin)['id'])")
    # create tenant admin
    read -r -d '' JSON << EOM || true
{
    "status" : "ACTIVE",
    "confirmed": true,
    "role": "TENANT_ADMIN",
    "permissions": ["all"],
    "username": "$TENANT_2_ADMIN",
    "password": "$TENANT_2_ADMIN_PASSWORD",
    "tenant": "$tenant_2_id"
}
EOM
    postJSON http://iam.example.com/api/v1/users "$JSON" "$admin_token"
    tenant_2_admin_id=$(echo "$result" | python3 -c "import sys, json; print(json.load(sys.stdin)['id'])")
    # create user
    read -r -d '' JSON << EOM || true
{
    "status" : "ACTIVE",
    "confirmed": true,
    "role": "USER",
    "username": "$TENANT_2_USER",
    "password": "$TENANT_2_USER_PASSWORD",
    "tenant": "$tenant_2_id"
}
EOM
    postJSON http://iam.example.com/api/v1/users "$JSON" "$admin_token"
    tenant_2_user_id=$(echo "$result" | python3 -c "import sys, json; print(json.load(sys.stdin)['id'])")
}

# Issue a never-expiring token for the service account.
function setServiceAccountToken {
    read -r -d '' JSON << EOM || true
{
    "accountId": "$service_account_id",
    "expiresIn": -1,
    "initiator": "$service_account_id",
    "inquirer": "$service_account_id"
}
EOM
    postJSON http://iam.example.com/api/v1/tokens "$JSON" "$admin_token"
    service_account_token=$(echo "$result" | python3 -c "import sys, json; print(json.load(sys.stdin)['token'])")
}

# Base64-encode the service account token and splice it into the shared
# secret manifest ('REPLACE ME' placeholder).
function addTokenToSecret {
    # GNU base64 wraps lines by default; -w0 disables that (Linux only)
    if [ "$os" == "Linux" ]; then
        service_account_token_encoded=$(echo -n "$service_account_token" | base64 -w0)
    else
        service_account_token_encoded=$(echo -n "$service_account_token" | base64)
    fi
    new_secret=$(python3 -c "import sys;lines=sys.stdin.read();print(lines.replace('REPLACE ME','$service_account_token_encoded'))" < ./3-Secret/SharedSecret.yaml)
    echo "$new_secret" > ./3-Secret/SharedSecret.yaml
}

# Restore the 'REPLACE ME' placeholder so the token is not left on disk.
function removeTokenFromSecret {
    new_secret=$(python3 -c "import sys;lines=sys.stdin.read();print(lines.replace('$service_account_token_encoded','REPLACE ME'))" < ./3-Secret/SharedSecret.yaml)
    echo "$new_secret" > ./3-Secret/SharedSecret.yaml
}

# Apply every service manifest under ./4-Services (skipped ones are still
# deployed here but flagged as temporary and removed later).
function deployServices {
    for dir in ./4-Services/*
    do
        IFS=' '
        service_name=$(echo "$dir" | sed "s/.\/4-Services\///")
        if [[ " ${skip_services[*]} " == *" $service_name "* ]]
        then
            colorEcho 33 "Deploy $service_name (temporary)"
        else
            colorEcho 32 "Deploy $service_name"
        fi
        kubectl apply -Rf "$dir"
    done
}

# Delete the deployments/services that were marked temporary via -s.
function removeTemporaryServices {
    for dir in ./4-Services/*
    do
        IFS=' '
        service_name=$(echo "$dir" | sed "s/.\/4-Services\///")
        if [[ " ${skip_services[*]} " == *" $service_name "* ]]
        then
            colorEcho 33 "Removing $service_name"
            kubectl -n oih-dev-ns delete services "$service_name" || true
            kubectl -n oih-dev-ns delete deployment "$service_name" || true
        fi
    done
}

# Store an example secret in the secret manager; cache its id for the flows.
function createCustomSecret {
    read -r -d '' JSON << EOM || true
{
    "data": {
        "name": "custom_secret",
        "type": "MIXED",
        "value": {
            "payload": "secret"
        }
    }
}
EOM
    postJSON http://skm.example.com/api/v1/secrets "$JSON" "$admin_token"
    custom_secret_id=$(echo "$result" | python3 -c "import sys, json; print(json.load(sys.stdin)['data']['_id'])")
    echo "$custom_secret_id"
}

# Register the public development component (DEV_CONTAINER_IMAGE).
function createDevComponent {
    read -r -d '' JSON << EOM || true
{
    "data": {
        "distribution": {
            "type": "docker",
            "image": "$DEV_CONTAINER_IMAGE"
        },
        "access": "public",
        "name": "Development Component",
        "description": "A component just for testing"
    }
}
EOM
    postJSON http://component-repository.example.com/components "$JSON" "$admin_token"
    development_component_id=$(echo "$result" | python3 -c "import sys, json; print(json.load(sys.stdin)['data']['id'])")
}

# Register a private development component owned by tenant 2's user.
function createDevPrivateComponent {
    read -r -d '' JSON << EOM || true
{
    "data": {
        "distribution": {
            "type": "docker",
            "image": "$DEV_CONTAINER_IMAGE"
        },
        "access": "private",
        "name": "Development Component (Private)",
        "description": "A component just for testing",
        "owners": [
            {
                "id": "$tenant_2_user_id",
                "type": "user"
            }
        ]
    }
}
EOM
    postJSON http://component-repository.example.com/components "$JSON" "$admin_token"
    development_private_component_id=$(echo "$result" | python3 -c "import sys, json; print(json.load(sys.stdin)['data']['id'])")
}

# Register a global development component.
function createDevGlobalComponent {
    read -r -d '' JSON << EOM || true
{
    "data": {
        "distribution": {
            "type": "docker",
            "image": "$DEV_CONTAINER_IMAGE"
        },
        "access": "public",
        "isGlobal": true,
        "name": "Global Development Component",
        "description": "A component just for testing"
    }
}
EOM
    postJSON http://component-repository.example.com/components "$JSON" "$admin_token"
    development_global_component_id=$(echo "$result" | python3 -c "import sys, json; print(json.load(sys.stdin)['data']['id'])")
}

# Example flow: a single cron-triggered component.
function createDevSimpleFlow {
    read -r -d '' JSON << EOM || true
{
    "name":"Simplest flow (single component)",
    "description:": "just one component",
    "graph":{
        "nodes":[
            {
                "id": "step_1",
                "componentId": "$development_component_id",
                "function": "testTrigger"
            }
        ],
        "edges":[
        ]
    },
    "cron":"*/1 * * * *"
}
EOM
    postJSON http://flow-repository.example.com/flows "$JSON" "$admin_token"
}

# Example flow: webhook-triggered, mixing local and global components.
function createDevWebhookFlow {
    read -r -d '' JSON << EOM || true
{
    "name":"Simple flow with local and global components (webhook)",
    "description:": "just one component",
    "graph":{
        "nodes":[
            {
                "id": "step_1",
                "componentId": "$development_component_id",
                "function": "testTrigger",
                "credentials_id": "$custom_secret_id",
                "fields":{
                    "code":"async function run() { console.log('running async function1');}"
                }
            },
            {
                "id": "step_2",
                "componentId": "$development_global_component_id",
                "function": "testAction",
                "credentials_id": "$custom_secret_id",
                "fields":{
                    "code":"async function run() { console.log('running async function2');}"
                }
            },
            {
                "id": "step_3",
                "componentId": "$development_component_id",
                "function": "testAction",
                "credentials_id": "$custom_secret_id",
                "fields":{
                    "code":"async function run() { console.log('running async function3');}"
                }
            },
            {
                "id": "step_4",
                "componentId": "$development_global_component_id",
                "function": "testAction",
                "credentials_id": "$custom_secret_id",
                "fields":{
                    "code":"async function run() { console.log('running async function4');}"
                }
            }
        ],
        "edges":[
            {
                "source": "step_1",
                "target": "step_2"
            },
            {
                "source": "step_1",
                "target": "step_3"
            },
            {
                "source": "step_3",
                "target": "step_4"
            }
        ]
    }
}
EOM
    postJSON http://flow-repository.example.com/flows "$JSON" "$admin_token"
}

# Example flow: three local components chained one after another (cron).
function createDevConsecutiveFlow {
    read -r -d '' JSON << EOM || true
{
    "name": "LocalDevFlow (Consecutive)",
    "graph": {
        "nodes": [
            {
                "id": "step_1",
                "componentId": "$development_component_id",
                "function": "testTrigger"
            },
            {
                "id": "step_2",
                "componentId": "$development_component_id",
                "function": "testAction"
            },
            {
                "id": "step_3",
                "componentId": "$development_component_id",
                "function": "testAction"
            }
        ],
        "edges": [
            {
                "source": "step_1",
                "target": "step_2"
            },
            {
                "source": "step_2",
                "target": "step_3"
            }
        ]
    },
    "cron": "*/1 * * * *"
}
EOM
    postJSON http://flow-repository.example.com/flows "$JSON" "$admin_token"
}

# Example flow: three global components chained one after another.
function createDevGlobalConsecutiveFlow {
    read -r -d '' JSON << EOM || true
{
    "name": "GlobalDevFlow (Consecutive)",
    "graph": {
        "nodes": [
            {
                "id": "step_1",
                "componentId": "$development_global_component_id",
                "function": "testTrigger"
            },
            {
                "id": "step_2",
                "componentId": "$development_global_component_id",
                "function": "testAction"
            },
            {
                "id": "step_3",
                "componentId": "$development_global_component_id",
                "function": "testAction"
            }
        ],
        "edges": [
            {
                "source": "step_1",
                "target": "step_2"
            },
            {
                "source": "step_2",
                "target": "step_3"
            }
        ]
    }
}
EOM
    postJSON http://flow-repository.example.com/flows "$JSON" "$admin_token"
}

# Example flow: one trigger fanning out to two parallel actions (cron).
function createDevConcurrentFlow {
    read -r -d '' JSON << EOM || true
{
    "name": "LocalDevFlow (Concurrent)",
    "graph": {
        "nodes": [
            {
                "id": "step_1",
                "componentId": "$development_component_id",
                "function": "testTrigger"
            },
            {
                "id": "step_2",
                "componentId": "$development_component_id",
                "function": "testAction"
            },
            {
                "id": "step_3",
                "componentId": "$development_component_id",
                "function": "testAction"
            }
        ],
        "edges": [
            {
                "source": "step_1",
                "target": "step_2"
            },
            {
                "source": "step_1",
                "target": "step_3"
            }
        ]
    },
    "cron": "*/1 * * * *"
}
EOM
    postJSON http://flow-repository.example.com/flows "$JSON" "$admin_token"
}

# Example flow: a 10-node tree mixing local and global components.
function createDevGlobalFlow {
    read -r -d '' JSON << EOM || true
{
    "name": "LocalDevFlow with global component (Concurrent)",
    "graph": {
        "nodes": [
            {
                "id": "step_1",
                "componentId": "$development_component_id",
                "function": "testTrigger"
            },
            {
                "id": "step_2",
                "componentId": "$development_global_component_id",
                "function": "testAction"
            },
            {
                "id": "step_3",
                "componentId": "$development_component_id",
                "function": "testAction"
            },
            {
                "id": "step_4",
                "componentId": "$development_component_id",
                "function": "testAction"
            },
            {
                "id": "step_5",
                "componentId": "$development_component_id",
                "function": "testAction"
            },
            {
                "id": "step_6",
                "componentId": "$development_global_component_id",
                "function": "testAction"
            },
            {
                "id": "step_7",
                "componentId": "$development_component_id",
                "function": "testAction"
            },
            {
                "id": "step_8",
                "componentId": "$development_global_component_id",
                "function": "testAction"
            },
            {
                "id": "step_9",
                "componentId": "$development_component_id",
                "function": "testAction"
            },
            {
                "id": "step_10",
                "componentId": "$development_component_id",
                "function": "testAction"
            }
        ],
        "edges": [
            {
                "source": "step_1",
                "target": "step_2"
            },
            {
                "source": "step_1",
                "target": "step_3"
            },
            {
                "source": "step_2",
                "target": "step_4"
            },
            {
                "source": "step_2",
                "target": "step_5"
            },
            {
                "source": "step_3",
                "target": "step_6"
            },
            {
                "source": "step_3",
                "target": "step_7"
            },
            {
                "source": "step_7",
                "target": "step_8"
            },
            {
                "source": "step_7",
                "target": "step_9"
            },
            {
                "source": "step_7",
                "target": "step_10"
            }
        ]
    }
}
EOM
    postJSON http://flow-repository.example.com/flows "$JSON" "$admin_token"
}

# Persist the service account token for other tooling.
function writeDotEnvFile {
    echo "export IAM_TOKEN=$service_account_token" > "$SCRIPTPATH"/.env
}

# Background port-forwards for db/queue access from the host (-p flag).
function startProxy {
    if [ "$start_proxy" == "true" ]; then
        kubectl -n oih-dev-ns port-forward service/mongodb-service 27017:27017 &
        kubectl -n oih-dev-ns port-forward service/rabbitmq-service 15672:15672 &
        kubectl -n oih-dev-ns port-forward service/rabbitmq-service 5672:5672 &
        kubectl -n oih-dev-ns port-forward service/redis-service 6379:6379 &
    fi
}

# Delete the minikube VM entirely (-c flag).
function clearMinikube {
    if [ "$clear_minikube" == "true" ]; then
        minikube delete
    fi
}

trap cleanup EXIT

checkOS

# macOS's /bin/echo -e does not understand \e; use the literal escape byte
if [ "$os" == "Darwin" ]; then
    esc="\x1B"
fi

# check arguments
while getopts "cs:i:p" option
do
    case "${option}" in
        # -c clear minikuke
        c) clear_minikube="true"
            colorEcho 32 "- clear minikube";;
        # -s [serviceName,..] remove service deployments after setup is done
        s) IFS=', ' read -r -a skip_services <<< "${OPTARG}"
            colorEcho 32 "- skip deployments: ${skip_services[*]}";;
        # -i imageName use custom image for development component
        i) IFS='' read -r DEV_CONTAINER_IMAGE <<< "${OPTARG}"
            colorEcho 32 "- use custom image '$DEV_CONTAINER_IMAGE' for dev component";;
        # -p proxy dbs and message queue
        p) start_proxy="true"
            colorEcho 32 "- start proxy";;
        *) ;;
    esac
done

# preserve newlines in substitutions
IFS=

echo "WARNING: OIH kubernetes setup will be restored."
sudo -v

###
### 1. check for required tools
###
checkTools

###
### 2. setup minikube
###
clearMinikube
if [ "$os" == "Darwin" ]; then
    minikube start --vm=true --driver=hyperkit --memory $MK_MEMORY --cpus $MK_CPUS
else
    minikube start --memory $MK_MEMORY --cpus $MK_CPUS
fi
minikube addons enable ingress
minikube addons enable dashboard
minikube addons enable metrics-server

# remove oih resources
kubectl -n oih-dev-ns delete pods,services,deployments --all
kubectl -n oih-dev-ns delete pvc --all
kubectl delete pv local-volume || true
kubectl delete ns oih-dev-ns || true
kubectl -n flows delete pods,services,deployments --all
kubectl delete ns flows || true

###
### 3. insert/update hosts entries
###
updateHostsFile
waitForPodStatus ingress-nginx-controller.*1/1

###
### 4. deploy platform base
###
kubectl apply -f ./1-Platform
waitForPodStatus mongodb.*1/1
waitForPodStatus rabbitmq.*1/1
waitForPodStatus redis.*1/1

###
### 5. deploy IAM
###
kubectl apply -f ./2-IAM
waitForServiceStatus http://iam.example.com 200

###
### 6. set admin token
###
setAdminToken

###
### 7a. create accounts
###
createTenantAndUsers_1
createTenantAndUsers_2

###
### 7b. setup service account
###
createServiceAccount
setServiceAccountToken

###
### 8. replace token in secret and apply settings
###
addTokenToSecret
kubectl apply -f ./3-Secret
removeTokenFromSecret

###
### 9. deploy framework services
###
deployServices
# kubectl apply -Rf ./4-Services

###
### 10. create custom secret
###
waitForServiceStatus http://skm.example.com/api/v1/secrets 401
createCustomSecret

###
### 11. add example components and flow
###
waitForServiceStatus http://component-repository.example.com/components 401
createDevComponent
# create multiple global components
createDevGlobalComponent
createDevGlobalComponent
# create for tenant_2_user_id
createDevPrivateComponent
waitForServiceStatus http://flow-repository.example.com/flows 401
createDevSimpleFlow
createDevWebhookFlow
createDevGlobalConsecutiveFlow
createDevConsecutiveFlow
createDevConcurrentFlow
createDevGlobalFlow

###
### 12. Point to web ui if ready
###
waitForServiceStatus http://web-ui.example.com 200

###
### 13. Remove temporary deployments
###
removeTemporaryServices

###
### 14. Write .env file
###
writeDotEnvFile

###
### 15. Print pod status
###
kubectl -n oih-dev-ns get pods

###
### 16. Proxy db and queue connections
###
startProxy

###
### 17. Open dashboard
###
# end sudo session
sudo -k
minikube dashboard
def is_done(self):
    """Report whether command data has been received.

    True when ``self._command_data`` already holds a non-empty dict, or
    when ``self._data`` contains raw text that decodes to a non-empty
    JSON object; False otherwise (including undecodable text).
    """
    parsed_cmd = self._command_data
    if isinstance(parsed_cmd, dict) and parsed_cmd:
        return True
    raw = self._data
    if raw:
        try:
            decoded = json.loads(raw)
        except ValueError:
            return False
        if isinstance(decoded, dict) and decoded:
            return True
    return False
/*--------------------------------------------------------------------------------------------- * Copyright (c) Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See License.md in the project root for license information. *--------------------------------------------------------------------------------------------*/ import { ApiManagementModels } from "@azure/arm-apimanagement"; import { ProgressLocation, window } from "vscode"; import { AzureParentTreeItem, AzureTreeItem, DialogResponses, ISubscriptionContext, UserCancelledError } from "vscode-azureextensionui"; import { localize } from "../localize"; import { OperationConsole } from "../operationConsole/OperationConsole"; import { nonNullProp } from "../utils/nonNull"; import { treeUtils } from "../utils/treeUtils"; import { IApiTreeRoot } from "./IApiTreeRoot"; import { IOperationTreeRoot } from "./IOperationTreeRoot"; import { OperationPolicyTreeItem } from "./OperationPolicyTreeItem"; export class ApiOperationTreeItem extends AzureParentTreeItem<IOperationTreeRoot> { public static contextValue: string = 'azureApiManagementApiOperation'; public contextValue: string = ApiOperationTreeItem.contextValue; public readonly commandId: string = 'azureApiManagement.showArmApiOperation'; public readonly policyTreeItem: OperationPolicyTreeItem; private _name: string; private _label: string; public get root(): IOperationTreeRoot { return this._root; } public get iconPath(): { light: string, dark: string } { return treeUtils.getThemedIconPath('op'); } public get label() : string { return this._label; } public get id(): string { return this._name; } private _root: IOperationTreeRoot; constructor( parent: AzureParentTreeItem, public readonly operationContract: ApiManagementModels.OperationContract) { super(parent); this._root = this.createRoot(parent.root); this.policyTreeItem = new OperationPolicyTreeItem(this); this._label = `[${nonNullProp(this.operationContract, 'method')}] 
${nonNullProp(this.operationContract, 'displayName')}`; this._name = nonNullProp(this.operationContract, 'name'); } public async loadMoreChildrenImpl(): Promise<AzureTreeItem<IOperationTreeRoot>[]> { return [this.policyTreeItem]; } public hasMoreChildrenImpl(): boolean { return false; } public async deleteTreeItemImpl() : Promise<void> { const message: string = localize("confirmDeleteOperation", `Are you sure you want to delete operation '${this.root.opName}'?`); const result = await window.showWarningMessage(message, { modal: true }, DialogResponses.deleteResponse, DialogResponses.cancel); if (result === DialogResponses.deleteResponse) { const deletingMessage: string = localize("", `Deleting operation "${this.root.opName}"...`); await window.withProgress({ location: ProgressLocation.Notification, title: deletingMessage }, async () => { await this.root.client.apiOperation.deleteMethod(this.root.resourceGroupName, this.root.serviceName, this.root.apiName, this.root.opName, '*'); }); // don't wait window.showInformationMessage(localize("deletedOperation", `Successfully deleted API "${this.root.apiName}".`)); } else { throw new UserCancelledError(); } } public async getOperationTestInfo(): Promise<string> { return await new OperationConsole().buildRequestInfo(this.root); } public async getOperationDebugInfo(): Promise<string> { return await new OperationConsole().buildDebugRequestInfo(this.root); } private createRoot(subRoot: ISubscriptionContext): IOperationTreeRoot { return Object.assign({}, <IApiTreeRoot>subRoot, { opName : nonNullProp(this.operationContract, 'name') }); } }
/* * Copyright 2017-2018 IBM Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package create import ( "context" "encoding/base64" "fmt" "io/ioutil" "os" "reflect" "strings" "github.com/spf13/cobra" "github.com/IBM/newrelic-cli/cmd/add" "github.com/IBM/newrelic-cli/newrelic" "github.com/IBM/newrelic-cli/tracker" "github.com/IBM/newrelic-cli/utils" ) var monitorCmd = &cobra.Command{ Use: "monitor", Short: "Create monitor from a file.", Example: "nr create monitor -f <example.yaml>", Run: func(cmd *cobra.Command, args []string) { file, err := utils.GetArg(cmd, "file") if err != nil { fmt.Printf("Unable to get argument 'file': %v\n", err) os.Exit(1) return } f, err := os.Open(file) defer f.Close() if err != nil { fmt.Printf("Unable to open file '%v': %v\n", file, err) os.Exit(1) return } // validation decorder := utils.NewYAMLOrJSONDecoder(f, 4096) var p = new(newrelic.Monitor) err = decorder.Decode(p) if err != nil { fmt.Printf("Unable to decode %q: %v\n", file, err) os.Exit(1) return } if reflect.DeepEqual(new(newrelic.Monitor), p) { fmt.Printf("Error validating %q.\n", file) os.Exit(1) return } // start to create var scriptTextEncoded *newrelic.Script scriptTextEncoded = &newrelic.Script{} if *p.Type == "SCRIPT_BROWSER" || *p.Type == "SCRIPT_API" { flags := cmd.Flags() if flags.Lookup("script-file") != nil { scriptFileName, err := cmd.Flags().GetString("script-file") if err != nil { fmt.Printf("error accessing flag %s for command %s: %v\n", "script-file", cmd.Name(), 
err) os.Exit(1) return } if scriptFileName != "" { sf, err := os.Open(scriptFileName) defer sf.Close() if err != nil { fmt.Printf("Unable to open monitor script file '%v': %v\n", file, err) os.Exit(1) return } byteArr, err := ioutil.ReadAll(sf) sfContentEncoded := base64.StdEncoding.EncodeToString(byteArr) scriptTextEncoded.ScriptText = &sfContentEncoded } else { scriptTextEncoded = p.Script } } else { scriptTextEncoded = p.Script } } _, err, returnValue := CreateMonitor(p, scriptTextEncoded) if err != nil { fmt.Println(err) os.Exit(1) return } if returnValue.IsContinue == false { fmt.Println(returnValue.OriginalError) fmt.Println(returnValue.TypicalError) os.Exit(1) return } tracker.PrintStatisticsInfo(tracker.GlobalRESTCallResultList) fmt.Println() os.Exit(0) }, } func CreateMonitor(p *newrelic.Monitor, scriptTextEncoded *newrelic.Script) (string, error, tracker.ReturnValue) { client, err := utils.GetNewRelicClient("synthetics") if err != nil { fmt.Println(err) ret := tracker.ToReturnValue(false, tracker.OPERATION_NAME_CREATE_MONITOR, err, tracker.ERR_CREATE_NR_CLINET, "") return "", err, ret } p.ID = nil createdMonitor, resp, err := client.SyntheticsMonitors.Create(context.Background(), p) if err != nil { fmt.Println(err) ret := tracker.ToReturnValue(false, tracker.OPERATION_NAME_CREATE_MONITOR, err, tracker.ERR_REST_CALL, "") return "", err, ret } else { tracker.AppendRESTCallResult(client.SyntheticsMonitors, tracker.OPERATION_NAME_CREATE_MONITOR, resp.StatusCode, "monitor name :"+(*p.Name)) if resp.StatusCode >= 400 { var statusCode = resp.StatusCode fmt.Printf("Response status code: %d. 
Creating monitor '%s'\n", statusCode, *createdMonitor.Name) var ret tracker.ReturnValue if resp.StatusCode == 400 { ret = tracker.ToReturnValue(true, tracker.OPERATION_NAME_CREATE_MONITOR, tracker.ERR_REST_CALL_NOT_2XX, tracker.ERR_REST_CALL_400, "") } else { ret = tracker.ToReturnValue(false, tracker.OPERATION_NAME_CREATE_MONITOR, tracker.ERR_REST_CALL_NOT_2XX, tracker.ERR_REST_CALL_NOT_2XX, "") } return "", err, ret } } if *p.Type == "SCRIPT_BROWSER" || *p.Type == "SCRIPT_API" { if scriptTextEncoded != nil && scriptTextEncoded.ScriptText != nil { id := *createdMonitor.ID resp, err := client.SyntheticsScript.UpdateByID(context.Background(), scriptTextEncoded, id) if err != nil { fmt.Println(err) ret := tracker.ToReturnValue(false, tracker.OPERATION_NAME_UPDATE_MONITOR_SCRIPT, err, tracker.ERR_REST_CALL, "") return id, err, ret } else { tracker.AppendRESTCallResult(client.SyntheticsMonitors, tracker.OPERATION_NAME_UPDATE_MONITOR_SCRIPT, resp.StatusCode, "monitor name :"+(*p.Name)) if resp.StatusCode >= 400 { var statusCode = resp.StatusCode fmt.Printf("Response status code: %d. 
Update script to monitor '%s', monitor id: '%s'\n", statusCode, *createdMonitor.Name, id) ret := tracker.ToReturnValue(false, tracker.OPERATION_NAME_UPDATE_MONITOR_SCRIPT, tracker.ERR_REST_CALL_NOT_2XX, tracker.ERR_REST_CALL_NOT_2XX, "") return "", err, ret } } } } //add labels var labelsLen = len(p.Labels) if labelsLen > 0 { for _, label := range p.Labels { id := createdMonitor.ID var monitorLabel *newrelic.MonitorLabel monitorLabel = &newrelic.MonitorLabel{} arr := strings.Split(*label, ":") monitorLabel.Category = &arr[0] monitorLabel.Label = &arr[1] err, returnValue := add.AddLabelToMonitor(*id, monitorLabel) // if err != nil { // fmt.Println(err) // return *id, "failed to add labels to monitor", err // } if returnValue.IsContinue == false { return "", err, returnValue } } } ret := tracker.ToReturnValue(true, tracker.OPERATION_NAME_CREATE_MONITOR, nil, nil, "") return *createdMonitor.ID, err, ret } func init() { CreateCmd.AddCommand(monitorCmd) // Here you will define your flags and configuration settings. // Cobra supports Persistent Flags which will work for this command // and all subcommands, e.g.: // alertspoliciesCmd.PersistentFlags().String("foo", "", "A help for foo") // Cobra supports local flags which will only run when this command // is called directly, e.g.: // alertspoliciesCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle") }
package org.jms.eureka.demo.client.consumer; import cn.hutool.core.util.URLUtil; import lombok.extern.slf4j.Slf4j; import org.apache.http.protocol.RequestUserAgent; import org.apache.tomcat.util.http.RequestUtil; import org.jms.eureka.demo.client.consumer.rpac.foeign.ProducerService; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.cloud.client.ServiceInstance; import org.springframework.cloud.client.discovery.DiscoveryClient; import org.springframework.web.bind.annotation.GetMapping; import org.springframework.web.bind.annotation.RequestMapping; import org.springframework.web.bind.annotation.RequestParam; import org.springframework.web.bind.annotation.RestController; import org.springframework.web.client.RestTemplate; import org.springframework.web.servlet.support.RequestContextUtils; import java.nio.charset.Charset; import java.util.List; import java.util.Map; /** * 消费者Controller */ @RestController @Slf4j @RequestMapping("/consumer") public class ConsumerController { @Autowired private RestTemplate restTemplate; @Autowired private DiscoveryClient discoveryClient; @Autowired private ProducerService producerService; @GetMapping("/restTemplate") public String restTemplateMethod(@RequestParam Map<String, String> param) { List<ServiceInstance> serviceInstances = this.common(); // 因为现在是单体服务,并不是集群模式,所以只可能有一个服务,直接调用第一个即可 ServiceInstance serviceInstance = serviceInstances.get(0); String paramStr = URLUtil.buildQuery(param, Charset.defaultCharset()); String url = "%s/producer?%s"; url = url.formatted(serviceInstance.getUri().toString(), paramStr); return restTemplate.getForObject(url, String.class); } @GetMapping("/feign") public String feignMethod(@RequestParam String param) { this.common(); return producerService.producer(param); } /** * 公共部分 */ private List<ServiceInstance> common() { // 获取所有远程服务的调用地址 List<String> services = discoveryClient.getServices(); log.info("注册中心上服务列表有: " + services); // 通过服务名获取调用的服务信息 
List<ServiceInstance> serviceInstances = discoveryClient.getInstances("client-producer"); for (ServiceInstance serviceInstance : serviceInstances) { log.info("服务实例:{}\t{}\t{}\t{}", serviceInstance.getServiceId(), serviceInstance.getHost(), serviceInstance.getPort(), serviceInstance.getUri()); } return serviceInstances; } }
"""Bar plot of percent coverage enrichment of chromatin marks in peaks vs
background, for GM12878 and K562, with significance stars (t-test p < 0.01)."""
from matplotlib import pyplot as plt
from scipy import stats as st
import numpy as np

marks = ("H3K27ac", "H3K4me1", "H3K4me3", "H3K9ac", "H3K36me3", "H2AZ",
         "H3K79me2", "H3K27me3", "EZH2", "enhancer", "transcription", "polycomb")

# One dataset name per (cell type, mark) pair, e.g. "GM12878_H3K27ac".
datasets = ["{}_{}".format(celltype, mark)
            for celltype in ("GM12878", "K562")
            for mark in marks]

overrepresentation = np.zeros(len(datasets), dtype=float)
ps = np.zeros(len(datasets), dtype=float)

for i, dataset in enumerate(datasets):
    # Column 8 of each BED file holds the coverage value.
    peaks_coverage = np.loadtxt(
        "peaks_b_comp_filtered_independent_{}_coverage.bed".format(dataset), usecols=8)
    background_coverage = np.loadtxt(
        "B_background_independent_filtered_{}_coverage.bed".format(dataset), usecols=8)
    # Two-sample t-test: peaks vs background coverage.
    t, p = st.ttest_ind(peaks_coverage, background_coverage)
    ps[i] = p
    # Enrichment as a percentage over the background mean.
    overrepresentation[i] = (np.mean(peaks_coverage) / np.mean(background_coverage) - 1) * 100

ys = overrepresentation

# Start with a frameless plot (extra room on the left).
plt.subplot2grid((10, 10), (0, 0), 7, 10, frameon=False)

# Label axes.
plt.ylabel("Percent enrichment", fontsize=12)

# Compute axis offsets around the data extent.
xs = range(len(ys))
xmin, xmax = min(xs), max(xs)
x_range = xmax - xmin
x_start = xmin - x_range / 15.  # bigger offset for bar plot
x_end = xmax + x_range / 15.

ymin, ymax = min(ys), max(ys)
y_range = ymax - ymin
y_start = ymin - y_range / 50.
y_end = ymax + y_range / 30.

# Plot the data.
plt.bar(xs, ys, width=0.4, bottom=0)

# Define axes with offsets.
plt.axis([x_start, x_end, y_start, y_end], frameon=False)

# Draw the axis lines ourselves (black, thick).
plt.axvline(x=x_start, color="k", lw=4)
plt.axhline(y=0, color="k", lw=3)

# Mark significant datasets with a star just above the tallest bar.
for x, p in zip(xs, ps):
    if p < 0.01:
        plt.scatter([x], [ymax + 0.5], marker="*", s=6, color="k")

# Ticks: dataset names rotated below the bars.
plt.xticks(xs, datasets, rotation=90)
plt.tick_params(direction="out", top=False, right=False, bottom=False,
                length=12, width=3, labelsize=9)

plt.savefig("sup18_a_comp")
plt.show()
/*
 * Copyright 2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.webauthn4j.converter.jackson.deserializer;

import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.databind.DeserializationContext;
import com.fasterxml.jackson.databind.deser.std.StdDeserializer;
import com.fasterxml.jackson.databind.exc.InvalidFormatException;
import com.webauthn4j.data.attestation.statement.*;
import com.webauthn4j.util.UnsignedNumberUtil;
import com.webauthn4j.util.exception.NotImplementedException;
import org.checkerframework.checker.nullness.qual.NonNull;

import java.io.IOException;
import java.nio.ByteBuffer;

/**
 * Jackson Deserializer for {@link TPMTPublic}
 */
public class TPMTPublicDeserializer extends StdDeserializer<TPMTPublic> {

    public TPMTPublicDeserializer() {
        super(TPMTPublic.class);
    }

    /**
     * Jackson entry point: reads the binary payload from the parser and
     * delegates to the byte-array overload. A surplus-data failure is
     * rethrown as Jackson's {@link InvalidFormatException} so callers get a
     * consistent databind error.
     */
    @Override
    public @NonNull TPMTPublic deserialize(@NonNull JsonParser p, @NonNull DeserializationContext ctxt) throws IOException {
        byte[] value = p.getBinaryValue();
        try {
            return deserialize(value);
        } catch (IllegalArgumentException e) {
            throw new InvalidFormatException(p, "input byte array contains surplus data", value, TPMTPublic.class);
        }
    }

    /**
     * Parses the byte array field-by-field in order: type (uint16),
     * nameAlg (uint16), objectAttributes (4 bytes), authPolicy
     * (uint16 length-prefixed), then type-dependent parameters and unique
     * fields. The whole buffer must be consumed exactly.
     * NOTE(review): field order presumably follows the TPMT_PUBLIC layout of
     * the TPM 2.0 spec — confirm against the spec before changing anything.
     *
     * @throws IllegalArgumentException if bytes remain after parsing
     */
    @NonNull TPMTPublic deserialize(@NonNull byte[] value) {
        ByteBuffer buffer = ByteBuffer.wrap(value);
        int typeValue = UnsignedNumberUtil.getUnsignedShort(buffer);
        TPMIAlgPublic type = TPMIAlgPublic.create(typeValue);
        TPMIAlgHash nameAlgValue = TPMIAlgHash.create(UnsignedNumberUtil.getUnsignedShort(buffer));
        TPMAObject objectAttributes = extractTPMAObject(buffer);
        int authPolicySize = UnsignedNumberUtil.getUnsignedShort(buffer);
        byte[] authPolicy = new byte[authPolicySize];
        buffer.get(authPolicy);
        TPMUPublicParms parameters = extractTPMUPublicParms(type, buffer);
        TPMUPublicId unique = extractTPMUPublicId(type, buffer);
        // Reject trailing bytes: the structure must account for the entire input.
        if (buffer.remaining() > 0) {
            throw new IllegalArgumentException("input byte array contains surplus data");
        }
        return new TPMTPublic(type, nameAlgValue, objectAttributes, authPolicy, parameters, unique);
    }

    /** Reads the 4-byte objectAttributes bit field. */
    private @NonNull TPMAObject extractTPMAObject(@NonNull ByteBuffer buffer) {
        int value = buffer.getInt();
        return new TPMAObject(value);
    }

    /** Dispatches on the key algorithm; only RSA and ECDSA are supported. */
    private @NonNull TPMUPublicParms extractTPMUPublicParms(@NonNull TPMIAlgPublic type, @NonNull ByteBuffer buffer) {
        switch (type) {
            case TPM_ALG_RSA:
                return extractTPMSRSAParms(buffer);
            case TPM_ALG_ECDSA:
                return extractTPMSECDSAParms(buffer);
            default:
                throw new NotImplementedException();
        }
    }

    /** RSA parameters: symmetric(2) + scheme(2) + keyBits(2) + exponent(4), raw bytes. */
    private @NonNull TPMSRSAParms extractTPMSRSAParms(@NonNull ByteBuffer buffer) {
        byte[] symmetric = new byte[2];
        buffer.get(symmetric);
        byte[] scheme = new byte[2];
        buffer.get(scheme);
        byte[] keyBits = new byte[2];
        buffer.get(keyBits);
        byte[] exponent = new byte[4];
        buffer.get(exponent);
        return new TPMSRSAParms(symmetric, scheme, keyBits, exponent);
    }

    /** ECC parameters: symmetric(2) + scheme(2) + curveId(2, decoded to enum) + kdf(2). */
    private @NonNull TPMSECCParms extractTPMSECDSAParms(@NonNull ByteBuffer buffer) {
        byte[] symmetric = new byte[2];
        buffer.get(symmetric);
        byte[] scheme = new byte[2];
        buffer.get(scheme);
        byte[] curveId = new byte[2];
        buffer.get(curveId);
        byte[] kdf = new byte[2];
        buffer.get(kdf);
        return new TPMSECCParms(symmetric, scheme, TPMEccCurve.create(UnsignedNumberUtil.getUnsignedShort(curveId)), kdf);
    }

    /**
     * Unique field: for RSA a length-prefixed modulus n; for ECC two
     * length-prefixed coordinates x and y.
     */
    private @NonNull TPMUPublicId extractTPMUPublicId(@NonNull TPMIAlgPublic type, @NonNull ByteBuffer buffer) {
        if (type == TPMIAlgPublic.TPM_ALG_RSA) {
            int nSize = UnsignedNumberUtil.getUnsignedShort(buffer);
            byte[] n = new byte[nSize];
            buffer.get(n);
            return new RSAUnique(n);
        }
        else if (type == TPMIAlgPublic.TPM_ALG_ECDSA) {
            int xSize = UnsignedNumberUtil.getUnsignedShort(buffer);
            byte[] x = new byte[xSize];
            buffer.get(x);
            int ySize = UnsignedNumberUtil.getUnsignedShort(buffer);
            byte[] y = new byte[ySize];
            buffer.get(y);
            return new ECCUnique(x, y);
        }
        else {
            throw new NotImplementedException();
        }
    }
}
"""Fit an ordinary least-squares model predicting house price from area and
room count, then produce in-sample predictions."""
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression

# Load the data
data = pd.read_csv('house_data.csv')

# Separate the predictors and target
X, y = data[['area', 'rooms']], data['price']

# Fit the model on the full table
model = LinearRegression().fit(X, y)

# Make (in-sample) predictions
predictions = model.predict(X)
#!/usr/bin/env bats

# Smoke tests for a PostgreSQL 10.11 image.
# test_helper.sh provides shared fixtures (e.g. initialize_and_start_pg).
source "${BATS_TEST_DIRNAME}/test_helper.sh"

@test "It should install PostgreSQL 10.11" {
  # grep exits non-zero if the version string is absent, failing the test.
  /usr/lib/postgresql/10/bin/postgres --version | grep "10.11"
}

@test "It should support pg_cron" {
  # Needs a running cluster before the extension can be created.
  initialize_and_start_pg
  sudo -u postgres psql --command "CREATE EXTENSION pg_cron;"
}
#!/bin/bash
# SLURM batch job: run the PICO experiments single-task with 16 cores.
#SBATCH -t 24:00:00
#SBATCH -J pico
#SBATCH --mail-user=simpson@ukp.informatik.tu-darmstadt.de
#SBATCH --mail-type=FAIL
#SBATCH -e ./pico.err.%j
#SBATCH -o ./pico.out.%j
#SBATCH -n 1
#SBATCH -c 16
#SBATCH --mem-per-cpu=8182
#SBATCH --exclusive
#SBATCH -C avx
# ----------------------------------

module load intel python/3.6.8

# -u: unbuffered stdout so progress shows up in pico.out.%j immediately.
python -u ./src/run_pico_experiments.py
#!/bin/bash
set -eo pipefail
shopt -s nullglob

# logging functions
mysql_log() {
	local type="$1"; shift
	printf '%s [%s] [Entrypoint]: %s\n' "$(date --rfc-3339=seconds)" "$type" "$*"
}
mysql_note() {
	mysql_log Note "$@"
}
mysql_warn() {
	mysql_log Warn "$@" >&2
}
mysql_error() {
	mysql_log ERROR "$@" >&2
	exit 1
}

# usage: file_env VAR [DEFAULT]
#    ie: file_env 'XYZ_DB_PASSWORD' 'example'
# (will allow for "$XYZ_DB_PASSWORD_FILE" to fill in the value of
# "$XYZ_DB_PASSWORD" from a file, especially for Docker's secrets feature)
file_env() {
	local var="$1"
	local fileVar="${var}_FILE"
	local def="${2:-}"
	if [ "${!var:-}" ] && [ "${!fileVar:-}" ]; then
		mysql_error "Both $var and $fileVar are set (but are exclusive)"
	fi
	local val="$def"
	if [ "${!var:-}" ]; then
		val="${!var}"
	elif [ "${!fileVar:-}" ]; then
		val="$(< "${!fileVar}")"
	fi
	export "$var"="$val"
	unset "$fileVar"
}

# check to see if this file is being run or sourced from another script
_is_sourced() {
	# https://unix.stackexchange.com/a/215279
	[ "${#FUNCNAME[@]}" -ge 2 ] \
		&& [ "${FUNCNAME[0]}" = '_is_sourced' ] \
		&& [ "${FUNCNAME[1]}" = 'source' ]
}

# usage: docker_process_init_files [file [file [...]]]
#    ie: docker_process_init_files /always-initdb.d/*
# process initializer files, based on file extensions
docker_process_init_files() {
	# mysql here for backwards compatibility "${mysql[@]}"
	mysql=( docker_process_sql )

	echo
	local f
	for f; do
		case "$f" in
			*.sh)
				# https://github.com/docker-library/postgres/issues/450#issuecomment-393167936
				# https://github.com/docker-library/postgres/pull/452
				if [ -x "$f" ]; then
					mysql_note "$0: running $f"
					"$f"
				else
					mysql_note "$0: sourcing $f"
					. "$f"
				fi
				;;
			*.sql)    mysql_note "$0: running $f"; docker_process_sql < "$f"; echo ;;
			*.sql.gz) mysql_note "$0: running $f"; gunzip -c "$f" | docker_process_sql; echo ;;
			*.sql.xz) mysql_note "$0: running $f"; xzcat "$f" | docker_process_sql; echo ;;
			*)        mysql_warn "$0: ignoring $f" ;;
		esac
		echo
	done
}

# arguments necessary to run "mysqld --verbose --help" successfully (used for testing configuration validity and for extracting default/configured values)
_verboseHelpArgs=(
	--verbose --help
	--log-bin-index="$(mktemp -u)" # https://github.com/docker-library/mysql/issues/136
)

mysql_check_config() {
	local toRun=( "$@" "${_verboseHelpArgs[@]}" ) errors
	if ! errors="$("${toRun[@]}" 2>&1 >/dev/null)"; then
		mysql_error $'mysqld failed while attempting to check config\n\tcommand was: '"${toRun[*]}"$'\n\t'"$errors"
	fi
}

# Fetch value from server config
# We use mysqld --verbose --help instead of my_print_defaults because the
# latter only show values present in config files, and not server defaults
mysql_get_config() {
	local conf="$1"; shift
	"$@" "${_verboseHelpArgs[@]}" 2>/dev/null \
		| awk -v conf="$conf" '$1 == conf && /^[^ \t]/ { sub(/^[^ \t]+[ \t]+/, ""); print; exit }'
	# match "datadir /some/path with/spaces in/it here" but not "--xyz=abc\n datadir (xyz)"
}

# Do a temporary startup of the MySQL server, for init purposes
docker_temp_server_start() {
	if [ "${MYSQL_MAJOR}" = '5.6' ] || [ "${MYSQL_MAJOR}" = '5.7' ]; then
		"$@" --skip-networking --socket="${SOCKET}" &
		mysql_note "Waiting for server startup"
		local i
		for i in {30..0}; do
			# only use the root password if the database has already been initialized
			# so that it won't try to fill in a password file when it hasn't been set yet
			extraArgs=()
			if [ -z "$DATABASE_ALREADY_EXISTS" ]; then
				extraArgs+=( '--dont-use-mysql-root-password' )
			fi
			if docker_process_sql "${extraArgs[@]}" --database=mysql <<<'SELECT 1' &> /dev/null; then
				break
			fi
			sleep 1
		done
		if [ "$i" = 0 ]; then
			mysql_error "Unable to start server."
		fi
	else
		# For 5.7+ the server is ready for use as soon as startup command unblocks
		if ! "$@" --daemonize --skip-networking --socket="${SOCKET}"; then
			mysql_error "Unable to start server."
		fi
	fi
}

# Stop the server. When using a local socket file mysqladmin will block until
# the shutdown is complete.
docker_temp_server_stop() {
	if ! mysqladmin --defaults-extra-file=<( _mysql_passfile ) shutdown -uroot --socket="${SOCKET}"; then
		mysql_error "Unable to shut down server."
	fi
}

# Verify that the minimally required password settings are set for new databases.
docker_verify_minimum_env() {
	if [ -z "$MYSQL_ROOT_PASSWORD" -a -z "$MYSQL_ALLOW_EMPTY_PASSWORD" -a -z "$MYSQL_RANDOM_ROOT_PASSWORD" ]; then
		mysql_error $'Database is uninitialized and password option is not specified\n\tYou need to specify one of MYSQL_ROOT_PASSWORD, MYSQL_ALLOW_EMPTY_PASSWORD and MYSQL_RANDOM_ROOT_PASSWORD'
	fi
}

# creates folders for the database
# also ensures permission for user mysql of run as root
docker_create_db_directories() {
	local user; user="$(id -u)"

	# TODO other directories that are used by default? like /var/lib/mysql-files
	# see https://github.com/docker-library/mysql/issues/562
	mkdir -p "$DATADIR"

	if [ "$user" = "0" ]; then
		# this will cause less disk access than `chown -R`
		find "$DATADIR" \! -user mysql -exec chown mysql '{}' +
	fi
}

# initializes the database directory
docker_init_database_dir() {
	mysql_note "Initializing database files"
	if [ "$MYSQL_MAJOR" = '5.6' ]; then
		mysql_install_db --datadir="$DATADIR" --rpm --keep-my-cnf "${@:2}"
	else
		"$@" --initialize-insecure
	fi
	mysql_note "Database files initialized"

	if command -v mysql_ssl_rsa_setup > /dev/null && [ ! -e "$DATADIR/server-key.pem" ]; then
		# https://github.com/mysql/mysql-server/blob/23032807537d8dd8ee4ec1c4d40f0633cd4e12f9/packaging/deb-in/extra/mysql-systemd-start#L81-L84
		mysql_note "Initializing certificates"
		mysql_ssl_rsa_setup --datadir="$DATADIR"
		mysql_note "Certificates initialized"
	fi
}

# Loads various settings that are used elsewhere in the script
# This should be called after mysql_check_config, but before any other functions
docker_setup_env() {
	# Get config
	declare -g DATADIR SOCKET
	DATADIR="$(mysql_get_config 'datadir' "$@")"
	SOCKET="$(mysql_get_config 'socket' "$@")"

	# Initialize values that might be stored in a file
	file_env 'MYSQL_ROOT_HOST' '%'
	file_env 'MYSQL_DATABASE'
	file_env 'MYSQL_USER'
	file_env 'MYSQL_PASSWORD'
	file_env 'MYSQL_ROOT_PASSWORD'

	declare -g DATABASE_ALREADY_EXISTS
	if [ -d "$DATADIR/mysql" ]; then
		DATABASE_ALREADY_EXISTS='true'
	fi
}

# Execute sql script, passed via stdin
# usage: docker_process_sql [--dont-use-mysql-root-password] [mysql-cli-args]
#    ie: docker_process_sql --database=mydb <<<'INSERT ...'
#    ie: docker_process_sql --dont-use-mysql-root-password --database=mydb <my-file.sql
docker_process_sql() {
	passfileArgs=()
	if [ '--dont-use-mysql-root-password' = "$1" ]; then
		passfileArgs+=( "$1" )
		shift
	fi
	# args sent in can override this db, since they will be later in the command
	if [ -n "$MYSQL_DATABASE" ]; then
		set -- --database="$MYSQL_DATABASE" "$@"
	fi

	mysql --defaults-extra-file=<( _mysql_passfile "${passfileArgs[@]}") --protocol=socket -uroot -hlocalhost --socket="${SOCKET}" --comments "$@"
}

# Initializes database with timezone info and root password, plus optional extra db/user
docker_setup_db() {
	# Load timezone info into database
	if [ -z "$MYSQL_INITDB_SKIP_TZINFO" ]; then
		# sed is for https://bugs.mysql.com/bug.php?id=20545
		mysql_tzinfo_to_sql /usr/share/zoneinfo \
			| sed 's/Local time zone must be set--see zic manual page/FCTY/' \
			| docker_process_sql --dont-use-mysql-root-password --database=mysql
			# tell docker_process_sql to not use MYSQL_ROOT_PASSWORD since it is not set yet
	fi
	# Generate random root password
	if [ -n "$MYSQL_RANDOM_ROOT_PASSWORD" ]; then
		export MYSQL_ROOT_PASSWORD="$(pwgen -1 32)"
		mysql_note "GENERATED ROOT PASSWORD: $MYSQL_ROOT_PASSWORD"
	fi
	# Sets root password and creates root users for non-localhost hosts
	local rootCreate=
	# default root to listen for connections from anywhere
	if [ -n "$MYSQL_ROOT_HOST" ] && [ "$MYSQL_ROOT_HOST" != 'localhost' ]; then
		# no, we don't care if read finds a terminating character in this heredoc
		# https://unix.stackexchange.com/questions/265149/why-is-set-o-errexit-breaking-this-read-heredoc-expression/265151#265151
		read -r -d '' rootCreate <<-EOSQL || true
			CREATE USER 'root'@'${MYSQL_ROOT_HOST}' IDENTIFIED WITH mysql_native_password BY '${MYSQL_ROOT_PASSWORD}' ;
			GRANT ALL ON *.* TO 'root'@'${MYSQL_ROOT_HOST}' WITH GRANT OPTION ;
		EOSQL
	fi

	#mysql -u root -pPASSWORD -e "ALTER USER root IDENTIFIED WITH mysql_native_password BY 'PASSWORD';"

	local passwordSet=
	if [ "$MYSQL_MAJOR" = '5.6' ]; then
		# no, we don't care if read finds a terminating character in this heredoc (see above)
		read -r -d '' passwordSet <<-EOSQL || true
			DELETE FROM mysql.user WHERE user NOT IN ('mysql.sys', 'mysqlxsys', 'root') OR host NOT IN ('localhost') ;
			SET PASSWORD FOR 'root'@'localhost'=PASSWORD('${MYSQL_ROOT_PASSWORD}') ;

			-- 5.5: https://github.com/mysql/mysql-server/blob/e48d775c6f066add457fa8cfb2ebc4d5ff0c7613/scripts/mysql_secure_installation.sh#L192-L210
			-- 5.6: https://github.com/mysql/mysql-server/blob/06bc670db0c0e45b3ea11409382a5c315961f682/scripts/mysql_secure_installation.sh#L218-L236
			-- 5.7: https://github.com/mysql/mysql-server/blob/913071c0b16cc03e703308250d795bc381627e37/client/mysql_secure_installation.cc#L792-L818
			-- 8.0: https://github.com/mysql/mysql-server/blob/b93c1661d689c8b7decc7563ba15f6ed140a4eb6/client/mysql_secure_installation.cc#L726-L749
			DELETE FROM mysql.db WHERE Db='test' OR Db='test\_%' ;
			-- https://github.com/docker-library/mysql/pull/479#issuecomment-414561272 ("This is only needed for 5.5 and 5.6")
		EOSQL
	else
		# no, we don't care if read finds a terminating character in this heredoc (see above)
		read -r -d '' passwordSet <<-EOSQL || true
			ALTER USER 'root'@'localhost' IDENTIFIED WITH mysql_native_password BY '${MYSQL_ROOT_PASSWORD}' ;
		EOSQL
	fi

	# tell docker_process_sql to not use MYSQL_ROOT_PASSWORD since it is just now being set
	docker_process_sql --dont-use-mysql-root-password --database=mysql <<-EOSQL
		-- What's done in this file shouldn't be replicated
		-- or products like mysql-fabric won't work
		SET @@SESSION.SQL_LOG_BIN=0;

		${passwordSet}
		GRANT ALL ON *.* TO 'root'@'localhost' WITH GRANT OPTION ;
		FLUSH PRIVILEGES ;
		${rootCreate}
		DROP DATABASE IF EXISTS test ;
	EOSQL

	# Creates a custom database and user if specified
	if [ -n "$MYSQL_DATABASE" ]; then
		mysql_note "Creating database ${MYSQL_DATABASE}"
		docker_process_sql --database=mysql <<<"CREATE DATABASE IF NOT EXISTS \`$MYSQL_DATABASE\` ;"
	fi

	if [ -n "$MYSQL_USER" ] && [ -n "$MYSQL_PASSWORD" ]; then
		mysql_note "Creating user ${MYSQL_USER}"
		docker_process_sql --database=mysql <<<"CREATE USER '$MYSQL_USER'@'%' IDENTIFIED BY '$MYSQL_PASSWORD' ;"

		if [ -n "$MYSQL_DATABASE" ]; then
			mysql_note "Giving user ${MYSQL_USER} access to schema ${MYSQL_DATABASE}"
			docker_process_sql --database=mysql <<<"GRANT ALL ON \`${MYSQL_DATABASE//_/\\_}\`.* TO '$MYSQL_USER'@'%' ;"
		fi
	fi
}

_mysql_passfile() {
	# echo the password to the "file" the client uses
	# the client command will use process substitution to create a file on the fly
	# ie: --defaults-extra-file=<( _mysql_passfile )
	if [ '--dont-use-mysql-root-password' != "$1" ] && [ -n "$MYSQL_ROOT_PASSWORD" ]; then
		cat <<-EOF
			[client]
			password="${MYSQL_ROOT_PASSWORD}"
		EOF
	fi
}

# Mark root user as expired so the password must be changed before anything
# else can be done (only supported for 5.6+)
mysql_expire_root_user() {
	if [ -n "$MYSQL_ONETIME_PASSWORD" ]; then
		docker_process_sql --database=mysql <<-EOSQL
			ALTER USER 'root'@'%' PASSWORD EXPIRE;
		EOSQL
	fi
}

# check arguments for an option that would cause mysqld to stop
# return true if there is one
_mysql_want_help() {
	local arg
	for arg; do
		case "$arg" in
			-'?'|--help|--print-defaults|-V|--version)
				return 0
				;;
		esac
	done
	return 1
}

_main() {
	# NOTE(review): custom additions to the stock entrypoint — restart PHP-FPM
	# and start nginx in the background before handling mysqld. Failures here
	# abort the container because of `set -e`; confirm that is intended.
	/etc/init.d/php7.3-fpm restart
	nginx -g "daemon off;" &

	# if command starts with an option, prepend mysqld
	if [ "${1:0:1}" = '-' ]; then
		set -- mysqld "$@"
	fi

	# skip setup if they aren't running mysqld or want an option that stops mysqld
	if [ "$1" = 'mysqld' ] && ! _mysql_want_help "$@"; then
		mysql_note "Entrypoint script for MySQL Server ${MYSQL_VERSION} started."

		mysql_check_config "$@"
		# Load various environment variables
		docker_setup_env "$@"
		docker_create_db_directories

		# If container is started as root user, restart as dedicated mysql user
		if [ "$(id -u)" = "0" ]; then
			mysql_note "Switching to dedicated user 'mysql'"
			exec gosu mysql "$BASH_SOURCE" "$@"
		fi

		# there's no database, so it needs to be initialized
		if [ -z "$DATABASE_ALREADY_EXISTS" ]; then
			docker_verify_minimum_env

			# check dir permissions to reduce likelihood of half-initialized database
			ls /docker-entrypoint-initdb.d/ > /dev/null

			docker_init_database_dir "$@"

			mysql_note "Starting temporary server"
			docker_temp_server_start "$@"
			mysql_note "Temporary server started."

			docker_setup_db
			docker_process_init_files /docker-entrypoint-initdb.d/*

			mysql_expire_root_user

			mysql_note "Stopping temporary server"
			docker_temp_server_stop
			mysql_note "Temporary server stopped"

			echo
			mysql_note "MySQL init process done. Ready for start up."
			echo
		fi
	fi
	exec "$@"
}

# If we are sourced from elsewhere, don't perform any further actions
if ! _is_sourced; then
	_main "$@"
fi
#ifndef __MACH_SUNXI_CLK_PERIPH_H
#define __MACH_SUNXI_CLK_PERIPH_H

#include <clk/clk_plat.h>
#include <clk/clk.h>

/**
 * struct sunxi_clk_periph_gate - peripheral gate clock
 *
 * @flags:     hardware-specific flags
 * @enable:    enable register
 * @reset:     reset register
 * @bus:       bus gating register
 * @dram:      dram gating register
 * @enb_shift: enable gate bit shift
 * @rst_shift: reset gate bit shift
 * @bus_shift: bus gate bit shift
 * @ddr_shift: dram gate bit shift
 *
 * Flags:
 * SUNXI_PERIPH_NO_GATE - this flag indicates that module gate is not allowed for this module.
 * SUNXI_PERIPH_NO_RESET - This flag indicates that reset is not allowed for this module.
 * SUNXI_PERIPH_NO_BUS_GATE - This flag indicates that bus gate is not allowed for this module.
 * SUNXI_PERIPH_NO_DDR_GATE - This flag indicates that dram gate is not allowed for this module.
 */
struct sunxi_clk_periph_gate {
	u32 flags;
	void *enable;
	void *reset;
	void *bus;
	void *dram;
	u8 enb_shift;
	u8 rst_shift;
	u8 bus_shift;
	u8 ddr_shift;
};

/**
 * struct sunxi_clk_periph_div - periph divider clock
 *
 * @reg:    register containing divider
 * @mshift: shift to the divider-m bit field, div = (m+1)
 * @mwidth: width of the divider-m bit field
 * @nshift: shift to the divider-n bit field, div = (1<<n)
 * @nwidth: width of the divider-n bit field
 */
struct sunxi_clk_periph_div {
	void *reg;
	u8 mshift;
	u8 mwidth;
	u8 nshift;
	u8 nwidth;
};

/**
 * struct sunxi_clk_periph_mux - multiplexer clock
 *
 * @reg:   register controlling multiplexer
 * @shift: shift to multiplexer bit field
 * @width: width of multiplexer bit field
 *
 * Clock with multiple selectable parents. Implements .get_parent, .set_parent
 * and .recalc_rate
 */
struct sunxi_clk_periph_mux {
	void *reg;
	u8 shift;
	u8 width;
};

/*
 * Gate state shared between several peripheral clocks; @share is a bitmask of
 * the *_GATE_SHARE values below, @val/@mask track which sharers are enabled.
 */
struct sunxi_clk_comgate {
	const char *name;
	u8 val;
	u8 mask;
	u8 share;
	u8 res;
};

/* Bits for sunxi_clk_comgate.share */
#define BUS_GATE_SHARE  0x01
#define RST_GATE_SHARE  0x02
#define MBUS_GATE_SHARE 0x04
#define MOD_GATE_SHARE  0x08

/* Test whether a given gate of periph x is shared through its com_gate (0 when no com_gate). */
#define IS_SHARE_BUS_GATE(x)  (x->com_gate ? ((x->com_gate->share & BUS_GATE_SHARE) ? 1 : 0) : 0)
#define IS_SHARE_RST_GATE(x)  (x->com_gate ? ((x->com_gate->share & RST_GATE_SHARE) ? 1 : 0) : 0)
#define IS_SHARE_MBUS_GATE(x) (x->com_gate ? ((x->com_gate->share & MBUS_GATE_SHARE) ? 1 : 0) : 0)
#define IS_SHARE_MOD_GATE(x)  (x->com_gate ? ((x->com_gate->share & MOD_GATE_SHARE) ? 1 : 0) : 0)

/**
 * struct sunxi_clk_periph - peripheral clock
 *
 * @hw:      handle between common and hardware-specific interfaces
 * @mux:     mux clock
 * @divider: divider clock
 * @gate:    gate clock
 */
struct sunxi_clk_periph {
	struct clk_hw hw;
	unsigned long flags;
	void *lock;
	struct sunxi_clk_periph_mux mux;
	struct sunxi_clk_periph_gate gate;
	struct sunxi_clk_periph_div divider;
	struct sunxi_clk_comgate *com_gate;
	u8 com_gate_off;
	struct clk_ops *priv_clkops;
	void *priv_regops;
};

/* Registration descriptor consumed by sunxi_clk_register_periph(). */
struct periph_init_data {
	const char *name;
	unsigned long flags;
	const char **parent_names;
	int num_parents;
	struct sunxi_clk_periph *periph;
};

/* Register read helper (priv_regops indirection currently disabled). */
static inline u32 periph_readl(struct sunxi_clk_periph *periph, void *reg)
{
	//return (((unsigned int)periph->priv_regops)?periph->priv_regops->reg_readl(reg):readl(reg));
	return (readl(reg));
}

/* Register write helper (priv_regops indirection currently disabled). */
static inline void periph_writel(struct sunxi_clk_periph *periph, unsigned int val, void *reg)
{
	//(((unsigned int)periph->priv_regops)?periph->priv_regops->reg_writel(val,reg):writel(val,reg));
	writel(val, reg);
}

int sunxi_clk_register_periph(struct periph_init_data *pd, void *base);
extern void sunxi_clk_get_periph_ops(struct clk_ops *ops);

#define to_clk_periph(_hw) container_of(_hw, struct sunxi_clk_periph, hw)

/* Declare a static struct sunxi_clk_periph named sunxi_clk_periph_<name>. */
#define SUNXI_CLK_PERIPH(name, _mux_reg, _mux_shift, _mux_width, \
		_div_reg, _div_mshift, _div_mwidth, _div_nshift, _div_nwidth, \
		_gate_flags, _enable_reg, _reset_reg, _bus_gate_reg, _drm_gate_reg, \
		_enable_shift, _reset_shift, _bus_gate_shift, _dram_gate_shift, _lock, _com_gate, _com_gate_off) \
static struct sunxi_clk_periph sunxi_clk_periph_##name = { \
	.lock = _lock, \
	.mux = { \
		.reg = (void *)_mux_reg, \
		.shift = _mux_shift, \
		.width = _mux_width, \
	}, \
	.divider = { \
		.reg = (void *)_div_reg, \
		.mshift = _div_mshift, \
		.mwidth = _div_mwidth, \
		.nshift = _div_nshift, \
		.nwidth = _div_nwidth, \
	}, \
	.gate = { \
		.flags = _gate_flags, \
		.enable = (void *)_enable_reg, \
		.reset = (void *)_reset_reg, \
		.bus = (void *)_bus_gate_reg, \
		.dram = (void *)_drm_gate_reg, \
		.enb_shift = _enable_shift, \
		.rst_shift = _reset_shift, \
		.bus_shift = _bus_gate_shift, \
		.ddr_shift = _dram_gate_shift, \
	}, \
	.com_gate = _com_gate, \
	.com_gate_off = _com_gate_off, \
}

#endif
import { getMenu } from '@/api/sys';
import { RouteLocationNormalized, Router } from 'vue-router';
import { MenuVO } from '@/api/model/sys-model';
import { MenuOptions } from '@/constant/StoreOption';
import AdminLayout from '@/layout/admin/index.vue';
import store from '@/store';
import { ElMessage } from 'element-plus';

// Eagerly collect every view module so dynamic routes can resolve components.
const modules = import.meta.glob('../views/**/**.vue');

/**
 * Fetches the user's menu, converts it to routes, and registers them:
 * routes with children become top-level routes, leaves are nested under
 * the 'Admin' layout route. The formatted menu is stored for the sidebar.
 */
export function asyncRouters (router: Router) {
  getMenu().then(res => {
    const fmtRouter = formatRoutes(res);
    fmtRouter.forEach(item => {
      // Fix: `children` is undefined for leaf menus (formatRoutes copies it
      // as-is), so `item.children.length` used to throw a TypeError here.
      if (item.children && item.children.length !== 0) {
        router.addRoute(item);
      } else {
        router.addRoute('Admin', item);
      }
    });
    store.commit('setPermissionMenu', fmtRouter);
  });
}

/**
 * Recursively maps backend menu entries to route records.
 * A `component` of 'layout' maps to the admin layout; anything else is
 * looked up in the view modules by path.
 */
function formatRoutes(menuRouter: Array<MenuVO>): Array<MenuOptions>{
  const resRouters: Array<MenuOptions> = [];
  menuRouter.forEach(route => {
    if (route.children){
      route.children = formatRoutes(route.children);
    }
    const fmtRoute = {
      path: route.path,
      component: route.component === 'layout'
        ? AdminLayout
        : modules[`../views/admin${route.component}/index.vue`],
      nameZh: route.nameZh,
      icon: route.icon,
      children: route.children,
      meta: {
        requireAuth: true,
        title: route.nameZh,
        tag: route.component === 'layout' ? 'dashboard' : route.nameZh,
      }
    };
    resRouters.push(fmtRoute);
  });
  return resRouters;
}

/**
 * Sets document.title after navigation: admin pages get the admin title,
 * other pages use their route meta title (warning when a non-home page
 * forgot to declare one).
 */
export function weChartTitle(to: RouteLocationNormalized, defTitle: string, adminTitle: string) {
  if(to.path.includes('admin')){
    document.title = adminTitle;
  } else {
    if(to.meta.title === undefined){
      if(!to.path.includes('home')){
        ElMessage.error({
          type: 'error',
          message: 'error: this page didn`t set title'
        });
        document.title = defTitle;
      }
      return;
    }
    document.title = to.meta.title as string;
  }
}
"""Infobip transport.""" from vumi.transports.infobip.infobip import InfobipTransport, InfobipError __all__ = ['InfobipTransport', 'InfobipError']
package com.example.mypc.esports2.config; /** * Created by MyPC on 2016/8/2. */ public class UrlConfig { public static class Path { public static final String BASE_URL = "http://139.196.106.200/"; } public static class ThumbnailPath{ public static final String GAMES="match"; public static final String INFO="info"; } public static class KEY { public static final String GAME_ID = "game_id"; public static final String UID = "uid"; public static final String PAGE = "p"; public static final String MATCH_ID = "match_id"; public static final String ROW = "row"; public static final String ORDERTYPE = "ordertype"; public static final String ISHOT = "ishot"; } public static class MiddlePath{ public static final String MIDDLE="Circle/lists"; } public static class PostPath{ public static final String POST="Circle/postlists"; } public static class TopicPath{ public static final String TOPIC="topic/lists"; } public static class CircleHotPath{ public static final String CIRCLEHOT="Circle/getalllists"; } public static class PostHotPath{ public static final String POSTHOT="Circle/postlists"; } }
#
# SPDX-License-Identifier: Apache-2.0
#

# Generate anchor-peer channel-update transactions for Org1MSP on the
# 'mychannel' and 'discovery' channels using configtxgen.
#
# FABRIC_BIN may be exported to point at the fabric-samples bin directory;
# it defaults to the previously hard-coded location so existing usage
# keeps working unchanged.

echo 'Generating new channel update tx....'

# configtxgen reads configtx.yaml from FABRIC_CFG_PATH (the current directory).
export FABRIC_CFG_PATH=$PWD

FABRIC_BIN="${FABRIC_BIN:-/Users/nkl/fabric/fabric-samples/bin}"

"$FABRIC_BIN/configtxgen" -profile TwoOrgsChannel -outputAnchorPeersUpdate ../channel-config/mychannel-org1anchor.tx -channelID mychannel -asOrg Org1MSP
"$FABRIC_BIN/configtxgen" -profile TwoOrgsChannel -outputAnchorPeersUpdate ../channel-config/discovery_anchor.tx -channelID discovery -asOrg Org1MSP
#! /bin/bash
# Regenerate the ninja files and build, choosing the debug or optimized
# build file: OPT=0 selects the optimized build, anything else (or an
# unset OPT) selects debug.
set -e

# Fix: the original `[ "$OPT" -ne "0" ]` used an integer comparison, which
# errors out (and aborts the script under `set -e`) when OPT holds a
# non-numeric value. A string comparison handles unset/empty/non-numeric
# values the same way the old `-z || -ne` pair intended.
if [ "${OPT:-}" != "0" ]; then
    echo "Debug build"
    FILE=ninja_dbg
else
    FILE=ninja_opt
fi

rm -f src/ninja_build && ./generate_ninja.py
ninja -C src -f "$FILE"
/******************************************************************************* * histogram.h - histogram algorithms for voting ******************************************************************************* * Add license here... *******************************/ #ifndef HISTOGRAM_H #define HISTOGRAM_H #include <patch.h> #include <vector> namespace pm { /** * Channel range */ struct Range { float from, to; Range(float a, float b) : from(a), to(b) {} Range() : from(0.0f), to(0.0f) {} inline float size() const { return to - from; } }; namespace voting { template <typename Patch, typename Scalar = float> class Histogram { public: Histogram() {} Histogram(const Image *image, int channel, int numBins, const Range &r) : bins(numBins, 0), range(r), binCount(numBins), binSize(r.size() / numBins){ // populate the histogram from an image channel for(int y = 0; y < image->rows; ++y) { for(int x = 0; x < image->cols; ++x) { const Scalar *ptr = image->ptr<Scalar>(y, x); Scalar v = ptr[channel]; int bi = binOf(v); bins[bi] += 1; } } } /// Return the bin index corresponding to a given value inline int binOf(float value) const { int idx = std::floor(float(value - range.from) / binSize); return std::max(0, std::min(idx, binCount - 1)); } /// Access operators inline int count(Scalar v) const { return bins[binOf(v)]; } inline int &count(Scalar v) { return bins[binOf(v)]; } private: std::vector<int> bins; Range range; int binCount; float binSize; }; } } #endif /* HISTOGRAM_H */
/// <summary>
/// Lock-free LIFO stack (Treiber stack). Push and Pop are safe to call
/// from multiple threads concurrently; both retry with compare-and-swap
/// until they win the race for the head pointer.
/// </summary>
public class ConcurrentStack<T>
{
    // Top of the stack; volatile so every thread reads the latest head.
    private volatile Node<T> _head;

    /// <summary>Pushes a value using a CAS retry loop.</summary>
    public void Push(T obj)
    {
        var node = new Node<T>(obj);
        Node<T> snapshot;
        do
        {
            // Link the new node in front of the current head, then try to
            // publish it; retry if another thread moved the head meanwhile.
            snapshot = _head;
            node.Next = snapshot;
        }
        while (Interlocked.CompareExchange(ref _head, node, snapshot) != snapshot);
    }

    /// <summary>Removes and returns the most recently pushed value.</summary>
    /// <exception cref="InvalidOperationException">The stack is empty.</exception>
    public T Pop()
    {
        while (true)
        {
            Node<T> snapshot = _head;
            if (snapshot == null)
            {
                throw new InvalidOperationException("Stack is empty");
            }
            // Swing the head to the next node; only the thread whose snapshot
            // still matches wins and may return the detached value.
            if (Interlocked.CompareExchange(ref _head, snapshot.Next, snapshot) == snapshot)
            {
                return snapshot.Value;
            }
        }
    }

    // Immutable-by-convention singly linked node.
    private class Node<U>
    {
        public U Value;
        public Node<U> Next;

        public Node(U value)
        {
            Value = value;
            Next = null;
        }
    }
}
###############################################################################
[[ $ZSH_VERBOSE ]] && echo "Mail Defaults"
###############################################################################
# Apple Mail.app preference tweaks applied via `defaults write`.
# Changes take effect the next time Mail.app is launched.

# Disable send and reply animations in Mail.app
defaults write com.apple.mail DisableReplyAnimations -bool true
defaults write com.apple.mail DisableSendAnimations -bool true

# Copy email addresses as `foo@example.com` instead of `Foo Bar <foo@example.com>` in Mail.app
defaults write com.apple.mail AddressesIncludeNameOnPasteboard -bool false

# Add the keyboard shortcut ⌘ + Enter to send an email in Mail.app
# (\U21a9 is the Unicode escape for the Return key symbol)
defaults write com.apple.mail NSUserKeyEquivalents -dict-add "Send" -string "@\\U21a9"

# Display emails in threaded mode, sorted by date (oldest on the bottom)
defaults write com.apple.mail DraftsViewerAttributes -dict-add "DisplayInThreadedMode" -string "yes"
defaults write com.apple.mail DraftsViewerAttributes -dict-add "SortedDescending" -string "no"
defaults write com.apple.mail DraftsViewerAttributes -dict-add "SortOrder" -string "received-date"

# Disable inline attachments (just show the icons)
defaults write com.apple.mail DisableInlineAttachmentViewing -bool true

# Disable automatic spell checking
defaults write com.apple.mail SpellCheckingBehavior -string "NoSpellCheckingEnabled"
#!/bin/bash
# Measure wall-clock time of an HTTP existence check ("spider") against a URL.
# Prints the elapsed seconds. Requires wget and bc.

url="https://www.example.org"

start=$(date +%s.%N)
# --spider only checks that the page exists; nothing is downloaded.
# Fixed: $url is now quoted, and a failed request is reported instead of
# being silently timed as if it had succeeded.
if ! wget --spider --quiet "$url"; then
    echo "Warning: request to $url failed; reported time covers the failed attempt." >&2
fi
end=$(date +%s.%N)

runtime=$(echo "$end - $start" | bc)
echo "The loading time for $url is $runtime seconds"
#!/bin/bash ######################################################################################### # License information ######################################################################################### # Copyright 2018 Jamf Professional Services # # Permission is hereby granted, free of charge, to any person obtaining a copy of this # software and associated documentation files (the "Software"), to deal in the Software # without restriction, including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons # to whom the Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all copies or # substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, # INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE # FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. ######################################################################################### # General Information ######################################################################################### # This script is designed to make implementation of DEPNotify very easy with limited # scripting knowledge. The section below has variables that may be modified to customize # the end user experience. DO NOT modify things in or below the CORE LOGIC area unless # major testing and validation is performed. 
# More information at: https://github.com/jamfprofessionalservices/DEP-Notify
#########################################################################################
# Variables to Modify
#########################################################################################
# Testing flag will enable the following things to change:
#   - Auto removal of BOM files to reduce errors
#   - Sleep commands instead of policies being called
#   - Quit Key set to command + control + x
TESTING_MODE=false # Set variable to true or false

# Flag the app to open fullscreen or as a window
FULLSCREEN=true # Set variable to true or false

# Banner image can be 600px wide by 100px high. Images will be scaled to fit
# If this variable is left blank, the generic image will appear
BANNER_IMAGE_PATH="/Applications/Self Service.app/Contents/Resources/AppIcon.icns"

# Flag for using the custom branding icon from Self Service and Jamf Pro
# This will override the banner image specified above
SELF_SERVICE_CUSTOM_BRANDING=true # Set variable to true or false

# Main heading that will be displayed under the image
# If this variable is left blank, the generic banner will appear
BANNER_TITLE="Welcome to Tenenz, Inc."

# Paragraph text that will display under the main heading. For a new line, use \n
# If this variable is left blank, the generic message will appear. Leave single
# quotes below as double quotes will break the new line.
MAIN_TEXT='Thanks for joining us at Tenenz! We want you to have a few applications and settings configured before you get started with your new Mac. This process should take 10 to 20 minutes to complete. \n \n If you need addtional software or help, please visit the Self Service app in your Applications folder or on your Dock.'

# URL for support or help that will open when the ? is clicked
# If this variable is left blank, the ? will not appear
# If using fullscreen mode, Safari will be launched behind the DEP Notify window
SUPPORT_URL=

# Initial Start Status text that shows as things are firing up
INITAL_START_STATUS="Initial Configuration Starting..."

# EULA configuration
# CURRENTLY BROKEN - seeing issues with the EULA and continue buttons
EULA_ENABLED=false # Set variable to true or false

# The policy array must be formatted "Progress Bar text,customTrigger". These will be
# run in order as they appear below.
POLICY_ARRAY=(
  "Setting Timezone...,set_timezone"
  "Naming Mac...,name_mac"
  "Installing Utilities...,dockutil"
  "Installing Utilities...,desktoppr"
  "Installing Java for Mac...,java"
  "Installing FileMaker Pro Advanced 18...,filemaker18"
  "Installing Slack...,slack"
  "Installing Microsoft Teams...,teams"
  "Adding Printers,install_printers"
  "Swabbing Deck...,set_dock_items_whs"
  "Polishing Apple...,set_tenenz_wallpaper"
  "Updating Inventory...,dep_update_inventory"
)

# Text that will display in the progress bar
INSTALL_COMPLETE_TEXT="Setup Complete!"

# Script designed to automatically logout user to start FileVault process if
# deferred enablement is detected. Text displayed if deferred status is on.
FV_LOGOUT_TEXT="Your Mac must logout to start the encryption process. You will be asked to enter your password and click OK or Contiune a few times. Your Mac will be usable while encryption takes place."

# Text that will display inside the alert once policies have finished
COMPLETE_ALERT_TEXT="Your Mac is now finished with initial setup and configuration. Press Quit to get started!"
#########################################################################################
# Core Script Logic - Don't Change Without Major Testing
#########################################################################################
# Variables for File Paths
JAMF_BINARY="/usr/local/bin/jamf"
FDE_SETUP_BINARY="/usr/bin/fdesetup"
DEP_NOTIFY_APP="/Applications/Utilities/DEPNotify.app"
DEP_NOTIFY_CONFIG="/var/tmp/depnotify.log"
DEP_NOTIFY_DONE="/var/tmp/com.depnotify.provisioning.done"
DEP_NOTIFY_EULA="/var/tmp/com.depnotify.agreement.done"
TMP_DEBUG_LOG="/var/tmp/depNotifyDebug.log"

# Validating true/false flags
if [ "$TESTING_MODE" != true ] && [ "$TESTING_MODE" != false ]; then
  echo "$(date "+%a %h %d %H:%M:%S"): Testing configuration not set properly. Currently set to '$TESTING_MODE'. Please update to true or false." >> "$TMP_DEBUG_LOG"
  exit 1
fi
if [ "$FULLSCREEN" != true ] && [ "$FULLSCREEN" != false ]; then
  echo "$(date "+%a %h %d %H:%M:%S"): Fullscreen configuration not set properly. Currently set to '$FULLSCREEN'. Please update to true or false." >> "$TMP_DEBUG_LOG"
  exit 1
fi
if [ "$EULA_ENABLED" != true ] && [ "$EULA_ENABLED" != false ]; then
  echo "$(date "+%a %h %d %H:%M:%S"): EULA configuration not set properly. Currently set to '$EULA_ENABLED'. Please update to true or false." >> "$TMP_DEBUG_LOG"
  exit 1
fi

# Run DEP Notify will run after Apple Setup Assistant and must be run as the end user.
SETUP_ASSISTANT_PROCESS=$(pgrep -l "Setup Assistant")
until [ "$SETUP_ASSISTANT_PROCESS" = "" ]; do
  echo "$(date "+%a %h %d %H:%M:%S"): Setup Assistant Still Running. PID $SETUP_ASSISTANT_PROCESS." >> "$TMP_DEBUG_LOG"
  sleep 1
  SETUP_ASSISTANT_PROCESS=$(pgrep -l "Setup Assistant")
done

# Checking to see if the Finder is running now before continuing. This can help
# in scenarios where an end user is not configuring the device.
FINDER_PROCESS=$(pgrep -l "Finder")
until [ "$FINDER_PROCESS" != "" ]; do
  echo "$(date "+%a %h %d %H:%M:%S"): Finder process not found. Assuming device is at login screen." >> "$TMP_DEBUG_LOG"
  sleep 1
  FINDER_PROCESS=$(pgrep -l "Finder")
done

# After the Apple Setup completed. Now safe to grab the current user.
CURRENT_USER=$(stat -f "%Su" "/dev/console")
echo "$(date "+%a %h %d %H:%M:%S"): Current user set to $CURRENT_USER." >> "$TMP_DEBUG_LOG"

# If SELF_SERVICE_CUSTOM_BRANDING is set to true. Loading the updated icon
if [ "$SELF_SERVICE_CUSTOM_BRANDING" = true ]; then
  open -a "/Applications/Self Service.app" --hide

  # Loop waiting on the branding image to properly show in the users library
  CUSTOM_BRANDING_PNG="/Users/$CURRENT_USER/Library/Application Support/com.jamfsoftware.selfservice.mac/Documents/Images/brandingimage.png"
  until [ -f "$CUSTOM_BRANDING_PNG" ]; do
    echo "$(date "+%a %h %d %H:%M:%S"): Waiting for branding image from Jamf Pro." >> "$TMP_DEBUG_LOG"
    sleep 1
  done

  # Setting Banner Image for DEP Notify to Self Service Custom Branding
  BANNER_IMAGE_PATH="$CUSTOM_BRANDING_PNG"
fi

# Testing Mode Enhancements
if [ "$TESTING_MODE" = true ]; then
  # Removing old config/status files if present (Testing Mode Only).
  # Fixed ordering bug: the QuitKey command used to be appended to
  # $DEP_NOTIFY_CONFIG *before* that file was removed below, which deleted
  # the command immediately. Clean up first, then write the quit key.
  if [ -f "$DEP_NOTIFY_CONFIG" ]; then rm "$DEP_NOTIFY_CONFIG"; fi
  if [ -f "$DEP_NOTIFY_DONE" ]; then rm "$DEP_NOTIFY_DONE"; fi
  if [ -f "$DEP_NOTIFY_EULA" ]; then rm "$DEP_NOTIFY_EULA"; fi
  # Setting Quit Key set to command + control + x (Testing Mode Only)
  echo "Command: QuitKey: x" >> "$DEP_NOTIFY_CONFIG"
fi

# Setting custom image if specified
if [ "$BANNER_IMAGE_PATH" != "" ]; then
  echo "Command: Image: $BANNER_IMAGE_PATH" >> "$DEP_NOTIFY_CONFIG"
fi

# Setting custom title if specified
if [ "$BANNER_TITLE" != "" ]; then
  echo "Command: MainTitle: $BANNER_TITLE" >> "$DEP_NOTIFY_CONFIG"
fi

# Setting custom main text if specified
if [ "$MAIN_TEXT" != "" ]; then
  echo "Command: MainText: $MAIN_TEXT" >> "$DEP_NOTIFY_CONFIG"
fi

# Adding help url and button if specified
if [ "$SUPPORT_URL" != "" ]; then
  echo "Command: Help: $SUPPORT_URL" >> "$DEP_NOTIFY_CONFIG"
fi

# Opening the app after initial configuration
# Launch DEPNotify as the logged-in user, in the window mode selected above.
if [ "$FULLSCREEN" = true ]; then
  sudo -u "$CURRENT_USER" "$DEP_NOTIFY_APP"/Contents/MacOS/DEPNotify -path "$DEP_NOTIFY_CONFIG" -fullScreen&
elif [ "$FULLSCREEN" = false ]; then
  sudo -u "$CURRENT_USER" "$DEP_NOTIFY_APP"/Contents/MacOS/DEPNotify -path "$DEP_NOTIFY_CONFIG"&
fi

# Adding nice text and a brief pause for prettiness
echo "Status: $INITAL_START_STATUS" >> "$DEP_NOTIFY_CONFIG"
sleep 5

# Setting the status bar
# Counter is for making the determinate look nice. Starts at one and adds
# more based on EULA or register options.
ADDITIONAL_OPTIONS_COUNTER=1
if [ "$EULA_ENABLED" = true ]; then ((ADDITIONAL_OPTIONS_COUNTER++)); fi

# Checking policy array and adding the count from the additional options above.
ARRAY_LENGTH="$((${#POLICY_ARRAY[@]}+ADDITIONAL_OPTIONS_COUNTER))"
echo "Command: Determinate: $ARRAY_LENGTH" >> "$DEP_NOTIFY_CONFIG"

# EULA prompt prior to configuration
if [ "$EULA_ENABLED" = true ]; then
  echo "Status: Waiting on EULA Acceptance" >> "$DEP_NOTIFY_CONFIG"
  echo "Command: ContinueButtonEULA: EULA" >> "$DEP_NOTIFY_CONFIG"
  # Block until DEPNotify drops the agreement marker file.
  while [ ! -f "$DEP_NOTIFY_EULA" ]; do
    sleep 1
  done
fi

# Loop to run policies. Each entry is "Progress Bar text,customTrigger":
# field 1 is shown in the UI, field 2 is the Jamf custom trigger to run.
for POLICY in "${POLICY_ARRAY[@]}"; do
  echo "Status: $(echo "$POLICY" | cut -d ',' -f1)" >> "$DEP_NOTIFY_CONFIG"
  if [ "$TESTING_MODE" = true ]; then
    sleep 10
  elif [ "$TESTING_MODE" = false ]; then
    "$JAMF_BINARY" policy -event "$(echo "$POLICY" | cut -d ',' -f2)"
  fi
done

# Check to see if FileVault Deferred enablement is active
# NOTE(review): `cut -d ' ' -f6` depends on the exact wording of fdesetup's
# "Deferred" line — confirm against the macOS versions in the fleet.
FV_DEFERRED_STATUS=$($FDE_SETUP_BINARY status | grep "Deferred" | cut -d ' ' -f6)

# Exit gracefully after things are finished
echo "Status: $INSTALL_COMPLETE_TEXT" >> "$DEP_NOTIFY_CONFIG"
if [ "$FV_DEFERRED_STATUS" = "active" ]; then
  # Force a logout so the deferred FileVault enablement can start.
  echo "Command: Logout: $FV_LOGOUT_TEXT" >> "$DEP_NOTIFY_CONFIG"
else
  echo "Command: Quit: $COMPLETE_ALERT_TEXT" >> "$DEP_NOTIFY_CONFIG"
fi

exit 0
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Packaging install script: stages an Apache Giraph build tree into a
# package root ($PREFIX) with Bigtop-style layout.
set -ex

usage() {
  echo "
usage: $0 <options>
  Required not-so-options:
     --build-dir=DIR             path to dist dir
     --prefix=PREFIX             path to install into

  Optional options:
     --doc-dir=DIR               path to install docs into [/usr/share/doc/giraph]
     --lib-dir=DIR               path to install giraph home [/usr/lib/giraph]
     --installed-lib-dir=DIR     path where lib-dir will end up on target system
     --bin-dir=DIR               path to install bins [/usr/bin]
     --conf-dir=DIR              path to configuration files provided by the package [/etc/giraph/conf.dist]
     --examples-dir=DIR          path to install examples [doc-dir/examples]
     ... [ see source for more similar options ]
  "
  exit 1
}

# Parse long options with getopt; getopt's own exit status tells us whether
# the command line was well-formed.
OPTS=$(getopt \
  -n $0 \
  -o '' \
  -l 'prefix:' \
  -l 'doc-dir:' \
  -l 'lib-dir:' \
  -l 'conf-dir:' \
  -l 'installed-lib-dir:' \
  -l 'bin-dir:' \
  -l 'examples-dir:' \
  -l 'build-dir:' -- "$@")

if [ $? != 0 ] ; then
  usage
fi

eval set -- "$OPTS"
set -ex
while true ; do
  case "$1" in
    --prefix) PREFIX=$2 ; shift 2 ;;
    --build-dir) BUILD_DIR=$2 ; shift 2 ;;
    --doc-dir) DOC_DIR=$2 ; shift 2 ;;
    --lib-dir) LIB_DIR=$2 ; shift 2 ;;
    --conf-dir) CONF_DIR=$2 ; shift 2 ;;
    --installed-lib-dir) INSTALLED_LIB_DIR=$2 ; shift 2 ;;
    --bin-dir) BIN_DIR=$2 ; shift 2 ;;
    --examples-dir) EXAMPLES_DIR=$2 ; shift 2 ;;
    --) shift ; break ;;
    *)
      echo "Unknown option: $1"
      usage
      exit 1 ;;
  esac
done

# PREFIX and BUILD_DIR are mandatory; bail out with usage if either is unset.
for var in PREFIX BUILD_DIR ; do
  if [ -z "$(eval "echo \$$var")" ]; then
    echo Missing param: $var
    usage
  fi
done

# Defaults for everything the caller did not override.
DOC_DIR=${DOC_DIR:-/usr/share/doc/giraph}
LIB_DIR=${LIB_DIR:-/usr/lib/giraph}
BIN_DIR=${BIN_DIR:-/usr/lib/giraph/bin}
ETC_DIR=${ETC_DIR:-/etc/giraph}
MAN_DIR=${MAN_DIR:-/usr/share/man/man1}
CONF_DIR=${CONF_DIR:-${ETC_DIR}/conf.dist}

install -d -m 0755 ${PREFIX}/${LIB_DIR}

# Installing Giraph core
install -d -m 0755 ${PREFIX}/${LIB_DIR}
cp -r $BUILD_DIR/giraph-dist/target/giraph*-bin/*/* ${PREFIX}/${LIB_DIR}

# Installing docs and examples
install -d -m 0755 $PREFIX/${DOC_DIR}
cp -r $BUILD_DIR/target/staging/* $PREFIX/${DOC_DIR}
mv ${PREFIX}/${LIB_DIR}/giraph-examples*.jar $PREFIX/${DOC_DIR}

# Install executable wrappers: a small launcher script per tool that sets up
# JAVA_HOME/Hadoop environment before exec'ing the real binary.
install -d -m 0755 $PREFIX/usr/bin
for i in giraph ; do
  #echo "Copying manpage $i"
  #cp ${BUILD_DIR}/docs/man/$i* $PREFIX/$MAN_DIR
  echo "Creating wrapper for $i"
  wrapper=$PREFIX/usr/bin/$i
  mkdir -p `dirname $wrapper`
  cat > $wrapper <<EOF
#!/bin/bash
# Autodetect JAVA_HOME if not defined
. /usr/lib/bigtop-utils/bigtop-detect-javahome
# Workaround for GIRAPH-199
export HADOOP_HOME=\${HADOOP_HOME:-/usr/lib/hadoop}
export HADOOP_CONF_DIR=\${HADOOP_CONF_DIR:-/etc/hadoop/conf}
export GIRAPH_HOME=$LIB_DIR
exec $BIN_DIR/$i "\$@"
EOF
  chmod 0755 $wrapper
done

# Stage config files and point LIB_DIR/conf at the /etc location.
install -d -m 0755 $PREFIX/$CONF_DIR
(cd ${BUILD_DIR}/conf && tar cf - .) | (cd $PREFIX/$CONF_DIR && tar xf -)
unlink $PREFIX/$LIB_DIR/conf || /bin/true
ln -s $ETC_DIR/conf $PREFIX/$LIB_DIR/conf

# Create version independent symlinks
for i in accumulo core gora hbase hcatalog hive kibble rexster ; do
  (cd $PREFIX/$LIB_DIR ; ln -s `ls giraph-$i-*jar` giraph-$i.jar)
done

# Enforcing dependency on the Bigtop's version of Zookeeper
# NOTE(review): the glob on the link *target name* expands at run time —
# confirm exactly one zookeeper*.jar exists in lib/ or this misbehaves.
ln -fs /usr/lib/zookeeper/zookeeper.jar $PREFIX/$LIB_DIR/lib/zookeeper*.jar
// <gh_stars>1-10  (dataset artifact from the source dump, preserved as a comment)
package io.casperlabs.benchmarks

import java.io.File
import java.nio.charset.StandardCharsets
import java.nio.file.StandardOpenOption
import cats._
import cats.effect.{Sync, Timer}
import cats.implicits._
import io.casperlabs.client.{DeployRuntime, DeployService}
import io.casperlabs.client.configuration.DeployConfig
import io.casperlabs.crypto.Keys
import io.casperlabs.crypto.Keys.{PrivateKey, PublicKey}
import io.casperlabs.crypto.codec.Base64
import io.casperlabs.crypto.signatures.SignatureAlgorithm
import io.casperlabs.shared.{FilesAPI, Log}
import scala.concurrent.duration._

object Benchmarks {

  /** Each round consists of many token transfer deploys from different accounts to single recipient
    * TODO: Remove Sync
    *
    * Writes per-round timing stats (deploy time, propose time, total,
    * deploys/sec) as CSV rows into `outputStats`.
    */
  def run[F[_]: Log: DeployService: Timer: FilesAPI: Monad: Sync](
      outputStats: File,
      initialFundsPrivateKeyFile: File,
      initialFundsPublicKeyFile: File,
      accountsNum: Int = 250,
      roundsNum: Int = 100,
      approximateTransferCost: Long = 10000000
  ): F[Unit] = {
    // TODO: Probably can cause overflow problems, for the time being it can stay as is.
    val initialFundsPerAccount = roundsNum * approximateTransferCost

    // Read and parse the Ed25519 key pair that funds the benchmark accounts.
    // NOTE(review): `.get` throws on a malformed key file.
    def readPrivateKey =
      FilesAPI[F].readString(initialFundsPrivateKeyFile.toPath, StandardCharsets.UTF_8).map {
        rawKey =>
          SignatureAlgorithm.Ed25519.tryParsePrivateKey(rawKey).get
      }

    def readPublicKey =
      FilesAPI[F].readString(initialFundsPublicKeyFile.toPath, StandardCharsets.UTF_8).map {
        rawKey =>
          SignatureAlgorithm.Ed25519.tryParsePublicKey(rawKey).get
      }

    // Truncate/create the stats file and write the CSV header row.
    def writeStatsFileHeader: F[Unit] =
      FilesAPI[F]
        .writeString(
          outputStats.toPath,
          "Deploy time, Propose time, Total time, Deploys/sec in propose\n",
          StandardCharsets.UTF_8,
          StandardOpenOption.CREATE ::
            StandardOpenOption.TRUNCATE_EXISTING ::
            StandardOpenOption.WRITE :: Nil
        )

    // Issue a single token-transfer deploy.
    // NOTE(review): the last two positional arguments (false, 3.minutes) —
    // confirm against DeployRuntime.transfer's signature.
    def send(
        recipientPublicKey: PublicKey,
        senderPrivateKey: PrivateKey,
        senderPublicKey: PublicKey,
        amount: Long
    ): F[Unit] =
      DeployRuntime.transfer[F](
        deployConfig = DeployConfig.empty,
        senderPublicKey = senderPublicKey,
        senderPrivateKey = senderPrivateKey,
        recipientPublicKey = recipientPublicKey,
        amount = amount,
        exit = false,
        ignoreOutput = true,
        false,
        3.minutes
      )

    def createAccountKeyPair(): (Keys.PrivateKey, Keys.PublicKey) =
      SignatureAlgorithm.Ed25519.newKeyPair

    // accountsNum sender accounts plus one shared recipient account.
    val senders: List[(Keys.PrivateKey, Keys.PublicKey)] =
      List.fill(accountsNum)(createAccountKeyPair())
    val recipient @ (_, recipientPublicKey): (Keys.PrivateKey, Keys.PublicKey) =
      createAccountKeyPair()

    // Fund the recipient and every sender from the initial-funds account,
    // proposing one block per transfer and checking each block holds exactly
    // one successful deploy.
    def initializeAccounts(
        initialFundsPrivateKey: PrivateKey,
        initialFundsPublicKey: PublicKey
    ): F[Unit] =
      for {
        _ <- Log[F].info("Initializing accounts...")
        _ <- (recipient :: senders).traverse {
              case (_, pk) =>
                for {
                  _ <- send(
                        recipientPublicKey = pk,
                        senderPrivateKey = initialFundsPrivateKey,
                        senderPublicKey = initialFundsPublicKey,
                        amount = initialFundsPerAccount
                      )
                  blockHash <- propose(print = false)
                  _         <- checkSuccess(blockHash, 1)
                } yield ()
            }
      } yield ()

    // One benchmark round: every sender transfers 1 token to the recipient.
    def oneRoundTransfer(): F[Unit] =
      for {
        _ <- Log[F].info("Sending deploys...")
        _ <- senders.traverse {
              case (sk, pk) =>
                send(
                  recipientPublicKey = recipientPublicKey,
                  senderPrivateKey = sk,
                  senderPublicKey = pk,
                  amount = 1
                )
            }
      } yield ()

    def propose(print: Boolean): F[String] =
      for {
        _         <- Log[F].info("Proposing...").whenA(print)
        blockHash <- DeployService[F].propose().rethrow
      } yield blockHash

    // Fail unless the proposed block contains exactly expectedDeployNum
    // deploys and none of them errored.
    def checkSuccess(blockHash: String, expectedDeployNum: Int): F[Unit] =
      for {
        blockInfo   <- DeployService[F].showBlock(blockHash).rethrow
        deployCount = blockInfo.getSummary.getHeader.deployCount
        _ <- Sync[F]
              .raiseError(
                new IllegalStateException(
                  s"Proposed block $blockInfo contains $deployCount!=$expectedDeployNum"
                )
              )
              .whenA(deployCount != expectedDeployNum)
        deployErrorCount = blockInfo.getStatus.getStats.deployErrorCount
        _ <- Sync[F]
              .raiseError(
                new IllegalStateException(
                  s"Proposed block $blockInfo contains $deployErrorCount!=0 failed deploys"
                )
              )
              .whenA(deployErrorCount != 0)
      } yield ()

    // Wall-clock timing of an effect via the monotonic clock (ms resolution).
    def measure[A](task: F[A]): F[(FiniteDuration, A)] =
      for {
        start <- Timer[F].clock.monotonic(MILLISECONDS)
        a     <- task
        end   <- Timer[F].clock.monotonic(MILLISECONDS)
      } yield (FiniteDuration(end - start, MILLISECONDS), a)

    // Append one CSV row and log it; deploys/sec = accounts / propose seconds.
    def writeResults(
        deployTime: FiniteDuration,
        proposeTime: FiniteDuration,
        total: FiniteDuration,
        round: Long
    ): F[Unit] = {
      def format(fd: FiniteDuration): String = fd.toCoarsest.toString()
      val message =
        s"${format(deployTime)}, ${format(proposeTime)}, ${format(total)}, ${((accountsNum * 1000.0) / proposeTime.toMillis.toDouble)
          .formatted("%1.2f")}"
      FilesAPI[F].writeString(
        outputStats.toPath,
        message ++ "\n",
        StandardCharsets.UTF_8,
        StandardOpenOption.WRITE ::
          StandardOpenOption.APPEND :: Nil
      ) >> Log[F].info(s"${round - 1 -> "round"}: $message")
    }

    def round(round: Long): F[Unit] =
      for {
        _                        <- Log[F].info(s"Starting new round: $round")
        (deployTime, _)          <- measure(oneRoundTransfer())
        (proposeTime, blockHash) <- measure(propose(print = true))
        _                        <- checkSuccess(blockHash, accountsNum)
        totalTime                = deployTime + proposeTime
        _                        <- writeResults(deployTime, proposeTime, totalTime, round)
      } yield ()

    // NOTE(review): loop(1) terminates when roundNum == n, so only rounds
    // 1..n-1 run — roundsNum == 100 executes 99 rounds. Possible off-by-one;
    // confirm intent before changing.
    def rounds(n: Int): F[Unit] = {
      def loop(roundNum: Long): F[Unit] =
        if (roundNum == n) {
          Monad[F].unit
        } else {
          round(roundNum).flatMap(_ => loop(roundNum + 1))
        }

      for {
        _          <- Log[F].info("Running...")
        _          <- writeStatsFileHeader
        privateKey <- readPrivateKey
        publicKey  <- readPublicKey
        _          <- initializeAccounts(privateKey, publicKey)
        _          <- loop(1)
        _          <- Log[F].info("Done")
      } yield ()
    }

    rounds(roundsNum)
  }
}
# app/controllers/api/v1/leaderboards_controller.rb
#
# JSON API for leaderboard entries.
class Api::V1::LeaderboardsController < ApplicationController
  before_action :find_leaderboard, only: [:update, :show]

  # GET /api/v1/leaderboards — every entry as JSON.
  def index
    @leaderboards = Leaderboard.all
    render json: @leaderboards
  end

  def new
    @leaderboard = Leaderboard.new
  end

  # POST — create an entry; answers both HTML and JSON.
  def create
    @leaderboard = Leaderboard.new(leaderboard_params)
    respond_to do |format|
      if @leaderboard.save
        format.html { redirect_to @leaderboard, notice: "Leaderboard successfully created" }
        format.json { render :show, status: :created, location: @leaderboard }
      else
        format.html { render :new }
        # Fixed: :unprocessible_entity is a misspelling — it is not a valid
        # Rack status symbol and raises when rendering. The correct symbol
        # for HTTP 422 is :unprocessable_entity.
        format.json { render json: @leaderboard.errors, status: :unprocessable_entity }
      end
    end
  end

  # GET /api/v1/leaderboards/:id
  def show
    render json: @leaderboard
  end

  # PATCH/PUT — update an entry; 422 with messages on validation failure.
  # (Collapsed the redundant update-then-save pair into a single update call.)
  def update
    if @leaderboard.update(leaderboard_params)
      render json: @leaderboard, status: :accepted
    else
      render json: { errors: @leaderboard.errors.full_messages }, status: :unprocessable_entity
    end
  end

  private

  # Strong parameters: only user_name and score may be mass-assigned.
  def leaderboard_params
    params.permit(:user_name, :score)
  end

  # Raises ActiveRecord::RecordNotFound (404) when the id is unknown.
  def find_leaderboard
    @leaderboard = Leaderboard.find(params[:id])
  end
end
#!/bin/bash
# LinuxGSM command_test_alert.sh function
# Author: Daniel Gibbs
# Website: https://linuxgsm.com
# Description: Sends a test alert.
# NOTE(review): `local` is only legal inside a function, so this file is
# presumably sourced from within a LinuxGSM function rather than executed
# directly — confirm against the LinuxGSM module loader.

local commandname="ALERT"
local commandaction="Alert"
# Resolve this module's own filename (following symlinks) for logging.
local function_selfname="$(basename "$(readlink -f "${BASH_SOURCE[0]}")")"

fn_print_dots "${servername}"
sleep 1
# Gather server state/config, mark the alert as a test, then dispatch it.
check.sh
info_config.sh
alert="test"
alert.sh
core_exit.sh
/*- * Copyright (c) 2019 <NAME> <<EMAIL>> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include <sys/cdefs.h> #include <sys/of.h> #include <libfdt/libfdt.h> static bool fdt_is_enabled(void *dtb, int offset) { const char *status; status = fdt_getprop(dtb, offset, "status", NULL); if (status == NULL) return (true); if (strcmp(status, "disabled")) return (false); return (true); } static bool fdt_is_compatible(void *dtb, int offset, const char *check) { const char *compat; int len; compat = fdt_getprop(dtb, offset, "compatible", &len); if (compat == NULL) return (false); while (len > 0) { if (strcasecmp(compat, check) == 0) return (true); compat += (strlen(compat) + 1); len -= (strlen(compat) + 1); } return (false); } int fdt_find_first_compatible(void *dtb, const char *compat) { int offset; int depth; offset = 0; depth = 1; do { offset = fdt_next_node(dtb, offset, &depth); if (!fdt_is_enabled(dtb, offset)) continue; if (fdt_is_compatible(dtb, offset, compat)) return (offset); } while (offset > 0); return (MDX_OK); }
import os
import utils


def _process_genomic_data(inputs, backgrounds, work_dir):
    # Builds per-sample CNVkit-style output descriptors (cnr/cns/background cnn
    # paths) for each input, then — when the first sample's copy-number segments
    # are missing — resolves the target/access BED files needed to (re)run.
    # NOTE(review): `dd` and `_sv_workdir` are referenced below but neither is
    # imported nor defined in this file fragment — presumably provided by the
    # surrounding project (bcbio-style helpers); confirm before running.

    def _bam_to_outbase(align_bam, raw_work_dir, cur_input):
        # Implementation of _bam_to_outbase function
        # This function processes the alignment BAM file and returns the output base name
        # NOTE(review): stub — callers below unpack TWO return values
        # (out_base, out_base_old); the real implementation must return a pair.
        pass

    def _get_target_access_files(cov_interval, cur_input, work_dir):
        # Implementation of _get_target_access_files function
        # This function retrieves the raw_target_bed and access_bed files based on the coverage interval and input sample
        # NOTE(review): stub — caller unpacks (raw_target_bed, access_bed).
        pass

    # Background copy-number reference: named after the first background sample,
    # or "flat" when no backgrounds were provided.
    background_name = dd.get_sample_name(backgrounds[0]) if backgrounds else "flat"
    # NOTE(review): `raw_work_dir` is undefined at this point — the loop below
    # uses `cur_raw_work_dir`; this line will raise NameError as written.
    background_cnn = os.path.join(raw_work_dir, "%s_background.cnn" % (background_name))
    ckouts = []
    for cur_input in inputs:
        cur_raw_work_dir = utils.safe_makedir(os.path.join(_sv_workdir(cur_input), "raw"))
        out_base, out_base_old = _bam_to_outbase(dd.get_align_bam(cur_input), cur_raw_work_dir, cur_input)
        # Prefer the legacy output base when its segment file already exists.
        if utils.file_exists(out_base_old + ".cns"):
            out_base = out_base_old
        ckouts.append({"cnr": "%s.cnr" % out_base,
                       "cns": "%s.cns" % out_base,
                       "back_cnn": background_cnn})
    # Only the first sample's .cns is checked: all samples are assumed to be
    # produced by the same run — TODO confirm.
    if not utils.file_exists(ckouts[0]["cns"]):
        cov_interval = dd.get_coverage_interval(inputs[0])
        raw_target_bed, access_bed = _get_target_access_files(cov_interval, inputs[0], work_dir)
        # bail out if we ended up with no regions
#!/usr/bin/env bash
# Tags: long, no-unbundled, no-fasttest
# Round-trips many ClickHouse types through the Parquet input/output formats
# (client SELECT ... FORMAT Parquet piped into client INSERT ... FORMAT Parquet)
# and checks values survive, including type narrowing/widening, NULLs, arrays.

set -e

CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh

${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS contributors"
${CLICKHOUSE_CLIENT} --query="CREATE TABLE contributors (name String) ENGINE = Memory"
${CLICKHOUSE_CLIENT} --query="SELECT * FROM system.contributors ORDER BY name DESC FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO contributors FORMAT Parquet"
# random results
${CLICKHOUSE_CLIENT} --query="SELECT * FROM contributors LIMIT 10" > /dev/null
${CLICKHOUSE_CLIENT} --query="DROP TABLE contributors"

${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS parquet_numbers"
${CLICKHOUSE_CLIENT} --query="CREATE TABLE parquet_numbers (number UInt64) ENGINE = Memory"
# less than default block size (65k)
${CLICKHOUSE_CLIENT} --query="SELECT * FROM system.numbers LIMIT 10000 FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_numbers FORMAT Parquet"
${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_numbers ORDER BY number DESC LIMIT 10"
${CLICKHOUSE_CLIENT} --query="TRUNCATE TABLE parquet_numbers"

# More than default block size
${CLICKHOUSE_CLIENT} --query="SELECT * FROM system.numbers LIMIT 100000 FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_numbers FORMAT Parquet"
${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_numbers ORDER BY number DESC LIMIT 10"
${CLICKHOUSE_CLIENT} --query="TRUNCATE TABLE parquet_numbers"

# Tiny block sizes exercise multi-row-group Parquet files.
${CLICKHOUSE_CLIENT} --max_block_size=2 --query="SELECT * FROM system.numbers LIMIT 3 FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_numbers FORMAT Parquet"
${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_numbers ORDER BY number DESC LIMIT 10"
${CLICKHOUSE_CLIENT} --query="TRUNCATE TABLE parquet_numbers"

${CLICKHOUSE_CLIENT} --max_block_size=1 --query="SELECT * FROM system.numbers LIMIT 1000 FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_numbers FORMAT Parquet"
${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_numbers ORDER BY number DESC LIMIT 10"
${CLICKHOUSE_CLIENT} --query="DROP TABLE parquet_numbers"

${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS parquet_events"
${CLICKHOUSE_CLIENT} --query="CREATE TABLE parquet_events (event String, value UInt64, description String) ENGINE = Memory"
${CLICKHOUSE_CLIENT} --query="SELECT * FROM system.events FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_events FORMAT Parquet"
${CLICKHOUSE_CLIENT} --query="SELECT event, description FROM parquet_events WHERE event IN ('ContextLock', 'Query') ORDER BY event"
${CLICKHOUSE_CLIENT} --query="DROP TABLE parquet_events"

# All scalar types: identity round-trip (types1 -> types2), narrowing
# conversion (-> types3, everything Int8/FixedString/Date) and widening
# conversion (-> types4, everything Int64/String/DateTime).
${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS parquet_types1"
${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS parquet_types2"
${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS parquet_types3"
${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS parquet_types4"
${CLICKHOUSE_CLIENT} --query="CREATE TABLE parquet_types1 (int8 Int8, uint8 UInt8, int16 Int16, uint16 UInt16, int32 Int32, uint32 UInt32, int64 Int64, uint64 UInt64, float32 Float32, float64 Float64, string String, fixedstring FixedString(15), date Date, datetime DateTime) ENGINE = Memory"
${CLICKHOUSE_CLIENT} --query="CREATE TABLE parquet_types2 (int8 Int8, uint8 UInt8, int16 Int16, uint16 UInt16, int32 Int32, uint32 UInt32, int64 Int64, uint64 UInt64, float32 Float32, float64 Float64, string String, fixedstring FixedString(15), date Date, datetime DateTime) ENGINE = Memory"
# convert min type
${CLICKHOUSE_CLIENT} --query="CREATE TABLE parquet_types3 (int8 Int8, uint8 Int8, int16 Int8, uint16 Int8, int32 Int8, uint32 Int8, int64 Int8, uint64 Int8, float32 Int8, float64 Int8, string FixedString(15), fixedstring FixedString(15), date Date, datetime Date) ENGINE = Memory"
# convert max type
${CLICKHOUSE_CLIENT} --query="CREATE TABLE parquet_types4 (int8 Int64, uint8 Int64, int16 Int64, uint16 Int64, int32 Int64, uint32 Int64, int64 Int64, uint64 Int64, float32 Int64, float64 Int64, string String, fixedstring String, date DateTime, datetime DateTime) ENGINE = Memory"

${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_types1 values ( -108, 108, -1016, 1116, -1032, 1132, -1064, 1164, -1.032, -1.064, 'string-0', 'fixedstring', '2001-02-03', '2002-02-03 04:05:06')"
# min
${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_types1 values ( -128, 0, -32768, 0, -2147483648, 0, -9223372036854775808, 0, -1.032, -1.064, 'string-1', 'fixedstring-1', '2003-04-05', '2003-02-03 04:05:06')"
# max
${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_types1 values ( 127, 255, 32767, 65535, 2147483647, 4294967295, 9223372036854775807, 9223372036854775807, -1.032, -1.064, 'string-2', 'fixedstring-2', '2004-06-07', '2004-02-03 04:05:06')"

${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_types1 FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_types2 FORMAT Parquet"

echo original:
${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_types1 ORDER BY int8" | tee "${CLICKHOUSE_TMP}"/parquet_all_types_1.dump
echo converted:
${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_types2 ORDER BY int8" | tee "${CLICKHOUSE_TMP}"/parquet_all_types_2.dump
${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_types1 ORDER BY int8 FORMAT Parquet" > "${CLICKHOUSE_TMP}"/parquet_all_types_1.parquet
${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_types2 ORDER BY int8 FORMAT Parquet" > "${CLICKHOUSE_TMP}"/parquet_all_types_2.parquet
echo diff:
diff "${CLICKHOUSE_TMP}"/parquet_all_types_1.dump "${CLICKHOUSE_TMP}"/parquet_all_types_2.dump

${CLICKHOUSE_CLIENT} --query="TRUNCATE TABLE parquet_types2"
${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_types3 values ( 79, 81, 82, 83, 84, 85, 86, 87, 88, 89, 'str01', 'fstr1', '2003-03-04', '2004-05-06')"
${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_types3 ORDER BY int8 FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_types2 FORMAT Parquet"
${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_types1 ORDER BY int8 FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_types3 FORMAT Parquet"

${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_types4 values ( 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 'str02', 'fstr2', '2005-03-04 05:06:07', '2006-08-09 10:11:12')"
${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_types4 ORDER BY int8 FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_types2 FORMAT Parquet"
${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_types1 ORDER BY int8 FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_types4 FORMAT Parquet"

echo dest:
${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_types2 ORDER BY int8"
echo min:
${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_types3 ORDER BY int8"
echo max:
${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_types4 ORDER BY int8"

# Nullable variants of every scalar column, seeded with an all-NULL row.
${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS parquet_types5"
${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS parquet_types6"
${CLICKHOUSE_CLIENT} --query="TRUNCATE TABLE parquet_types2"
${CLICKHOUSE_CLIENT} --query="CREATE TABLE parquet_types5 (int8 Nullable(Int8), uint8 Nullable(UInt8), int16 Nullable(Int16), uint16 Nullable(UInt16), int32 Nullable(Int32), uint32 Nullable(UInt32), int64 Nullable(Int64), uint64 Nullable(UInt64), float32 Nullable(Float32), float64 Nullable(Float64), string Nullable(String), fixedstring Nullable(FixedString(15)), date Nullable(Date), datetime Nullable(DateTime)) ENGINE = Memory"
${CLICKHOUSE_CLIENT} --query="CREATE TABLE parquet_types6 (int8 Nullable(Int8), uint8 Nullable(UInt8), int16 Nullable(Int16), uint16 Nullable(UInt16), int32 Nullable(Int32), uint32 Nullable(UInt32), int64 Nullable(Int64), uint64 Nullable(UInt64), float32 Nullable(Float32), float64 Nullable(Float64), string Nullable(String), fixedstring Nullable(FixedString(15)), date Nullable(Date), datetime Nullable(DateTime)) ENGINE = Memory"
${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_types5 values ( NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL)"
${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_types5 ORDER BY int8 FORMAT Parquet" > "${CLICKHOUSE_TMP}"/parquet_all_types_5.parquet
${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_types5 ORDER BY int8 FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_types6 FORMAT Parquet"
${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_types1 ORDER BY int8 FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_types6 FORMAT Parquet"
echo dest from null:
${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_types6 ORDER BY int8"

${CLICKHOUSE_CLIENT} --query="DROP TABLE parquet_types5"
${CLICKHOUSE_CLIENT} --query="DROP TABLE parquet_types6"
${CLICKHOUSE_CLIENT} --query="DROP TABLE parquet_types1"
${CLICKHOUSE_CLIENT} --query="DROP TABLE parquet_types2"
${CLICKHOUSE_CLIENT} --query="DROP TABLE parquet_types3"
${CLICKHOUSE_CLIENT} --query="DROP TABLE parquet_types4"

${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS parquet_arrays"
${CLICKHOUSE_CLIENT} --query="CREATE TABLE parquet_arrays (id UInt32, a1 Array(Int8), a2 Array(UInt8), a3 Array(Int16), a4 Array(UInt16), a5 Array(Int32), a6 Array(UInt32), a7 Array(Int64), a8 Array(UInt64), a9 Array(String), a10 Array(FixedString(4)), a11 Array(Float32), a12 Array(Float64), a13 Array(Date), a14 Array(DateTime), a15 Array(Decimal(4, 2)), a16 Array(Decimal(10, 2)), a17 Array(Decimal(25, 2))) engine=Memory()"
${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_arrays VALUES (1, [1,-2,3], [1,2,3], [100, -200, 300], [100, 200, 300], [10000000, -20000000, 30000000], [10000000, 2000000, 3000000], [100000000000000, -200000000000, 3000000000000], [100000000000000, 20000000000000, 3000000000000], ['Some string', 'Some string', 'Some string'], ['0000', '1111', '2222'], [42.42, 424.2, 0.4242], [424242.424242, 4242042420.242424, 42], ['2000-01-01', '2001-01-01', '2002-01-01'], ['2000-01-01', '2001-01-01', '2002-01-01'], [0.2, 10.003, 4.002], [4.000000001, 10000.10000, 10000.100001], [1000000000.000000001123, 90.0000000010010101, 0101001.0112341001])"
${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_arrays VALUES (2, [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [])"
${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_arrays FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_arrays FORMAT Parquet"
${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_arrays ORDER BY id"
${CLICKHOUSE_CLIENT} --query="DROP TABLE parquet_arrays"

${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS parquet_nullable_arrays"
${CLICKHOUSE_CLIENT} --query="CREATE TABLE parquet_nullable_arrays (id UInt32, a1 Array(Nullable(UInt32)), a2 Array(Nullable(String)), a3 Array(Nullable(Decimal(4, 2)))) engine=Memory()"
${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_nullable_arrays VALUES (1, [1, Null, 2], [Null, 'Some string', Null], [0.001, Null, 42.42]), (2, [Null], [Null], [Null]), (3, [], [], [])"
${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_nullable_arrays FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_nullable_arrays FORMAT Parquet"
${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_nullable_arrays ORDER BY id"
${CLICKHOUSE_CLIENT} --query="DROP TABLE parquet_nullable_arrays"

${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS parquet_nested_arrays"
${CLICKHOUSE_CLIENT} --query="CREATE TABLE parquet_nested_arrays (a1 Array(Array(Array(UInt32))), a2 Array(Array(Array(String))), a3 Array(Array(Nullable(UInt32))), a4 Array(Array(Nullable(String)))) engine=Memory() "
${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_nested_arrays VALUES ([[[1,2,3], [1,2,3]], [[1,2,3]], [[], [1,2,3]]], [[['Some string', 'Some string'], []], [['Some string']], [[]]], [[Null, 1, 2], [Null], [1, 2], []], [['Some string', Null, 'Some string'], [Null], []])"
${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_nested_arrays FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_nested_arrays FORMAT Parquet"
${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_nested_arrays"
${CLICKHOUSE_CLIENT} --query="DROP TABLE parquet_nested_arrays"

${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS parquet_decimal"
${CLICKHOUSE_CLIENT} --query="CREATE TABLE parquet_decimal (d1 Decimal32(4), d2 Decimal64(8), d3 Decimal128(16), d4 Decimal256(32)) ENGINE = Memory"
${CLICKHOUSE_CLIENT} --query="INSERT INTO TABLE parquet_decimal VALUES (0.123, 0.123123123, 0.123123123123, 0.123123123123123123)"
# Fix: this is a Parquet round-trip test, but the decimal section piped the
# data through FORMAT Arrow (apparent copy-paste from the Arrow test), leaving
# Parquet Decimal support untested.
${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_decimal FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_decimal FORMAT Parquet"
${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_decimal"
${CLICKHOUSE_CLIENT} --query="DROP TABLE parquet_decimal"
# Termux build recipe for the BIND client utilities (dig, delv, nsupdate).
TERMUX_PKG_HOMEPAGE=https://www.isc.org/downloads/bind/
TERMUX_PKG_DESCRIPTION="Clients provided with BIND"
TERMUX_PKG_LICENSE="MPL-2.0"
TERMUX_PKG_VERSION=9.14.1
TERMUX_PKG_SHA256=c3c7485d900a03271a9918a071c123e8951871a219f4c1c4383e37717f11db48
TERMUX_PKG_SRCURL="ftp://ftp.isc.org/isc/bind9/${TERMUX_PKG_VERSION}/bind-${TERMUX_PKG_VERSION}.tar.gz"
TERMUX_PKG_DEPENDS="openssl, readline, resolv-conf, zlib"
# Most optional crypto/GSSAPI/XML features are disabled; OpenSSL comes from
# the Termux prefix.
TERMUX_PKG_EXTRA_CONFIGURE_ARGS="
--disable-linux-caps
--without-python
--with-ecdsa=no
--with-gost=no
--with-gssapi=no
--with-libjson=no
--with-libtool
--with-libxml2=no
--with-openssl=$TERMUX_PREFIX
--with-randomdev=/dev/random
--with-readline=-lreadline
--with-eddsa=no
"

termux_step_pre_configure() {
	# BUILD_* describe the HOST compiler used for build-time helper tools
	# (as opposed to the cross toolchain for the target device).
	export BUILD_AR=ar
	export BUILD_CC=gcc
	export BUILD_CFLAGS=
	export BUILD_CPPFLAGS=
	export BUILD_LDFLAGS=
	export BUILD_RANLIB=

	# Point BIND at the Termux-prefixed resolv.conf instead of /etc/resolv.conf
	# (Android has no world-readable /etc/resolv.conf).
	_RESOLV_CONF=$TERMUX_PREFIX/etc/resolv.conf
	CFLAGS+=" $CPPFLAGS -DRESOLV_CONF=\\\"$_RESOLV_CONF\\\""
	# Android logging library, pulled in by some BIND code paths.
	LDFLAGS+=" -llog"
}

# Build only the libraries and the three client binaries, not the full server.
termux_step_make() {
	make -C lib/isc
	make -C lib/dns
	make -C lib/ns
	make -C lib/isccc
	make -C lib/isccfg
	make -C lib/bind9
	make -C lib/irs
	make -C bin/dig
	make -C bin/delv
	make -C bin/nsupdate
}

termux_step_make_install() {
	make -C lib/isc install
	make -C lib/dns install
	make -C lib/ns install
	make -C lib/isccc install
	make -C lib/isccfg install
	make -C lib/bind9 install
	make -C lib/irs install
	make -C bin/dig install
	make -C bin/delv install
	make -C bin/nsupdate install
}
<filename>src/vuejsclient/ts/components/dashboard_builder/widgets/page_switch_widget/options/PageSwitchWidgetOptions.ts<gh_stars>0 import DefaultTranslation from "../../../../../../../shared/modules/Translation/vos/DefaultTranslation"; export default class PageSwitchWidgetOptions { public static TITLE_CODE_PREFIX: string = "PageSwitchWidgetOptions.title."; public constructor( public page_id: number ) { } public get_title_name_code_text(page_widget_id: number): string { if ((!page_widget_id) || (!this.page_id)) { return null; } return PageSwitchWidgetOptions.TITLE_CODE_PREFIX + this.page_id + '.' + page_widget_id + DefaultTranslation.DEFAULT_LABEL_EXTENSION; } }
#!/usr/bin/env bats load helpers BATS_TESTS_DIR=test/bats/tests/gcp WAIT_TIME=60 SLEEP_TIME=1 NAMESPACE=default PROVIDER_NAMESPACE=kube-system PROVIDER_YAML=https://raw.githubusercontent.com/GoogleCloudPlatform/secrets-store-csi-driver-provider-gcp/main/deploy/provider-gcp-plugin.yaml BASE64_FLAGS="-w 0" export RESOURCE_NAME=${RESOURCE_NAME:-"projects/735463103342/secrets/test-secret-a/versions/latest"} export FILE_NAME=${FILE_NAME:-"secret"} export SECRET_VALUE=${SECRET_VALUE:-"aHVudGVyMg=="} @test "install gcp provider" { run kubectl apply -f $PROVIDER_YAML --namespace $PROVIDER_NAMESPACE assert_success kubectl wait --for=condition=Ready --timeout=120s pod -l app=csi-secrets-store-provider-gcp --namespace $PROVIDER_NAMESPACE GCP_PROVIDER_POD=$(kubectl get pod --namespace $PROVIDER_NAMESPACE -l app=csi-secrets-store-provider-gcp -o jsonpath="{.items[0].metadata.name}") run kubectl get pod/$GCP_PROVIDER_POD --namespace $PROVIDER_NAMESPACE assert_success } @test "secretproviderclasses crd is established" { kubectl wait --for condition=established --timeout=60s crd/secretproviderclasses.secrets-store.csi.x-k8s.io run kubectl get crd/secretproviderclasses.secrets-store.csi.x-k8s.io assert_success } @test "Test rbac roles and role bindings exist" { run kubectl get clusterrole/secretproviderclasses-role assert_success run kubectl get clusterrole/secretproviderrotation-role assert_success run kubectl get clusterrole/secretprovidersyncing-role assert_success run kubectl get clusterrolebinding/secretproviderclasses-rolebinding assert_success run kubectl get clusterrolebinding/secretproviderrotation-rolebinding assert_success run kubectl get clusterrolebinding/secretprovidersyncing-rolebinding assert_success } @test "deploy gcp secretproviderclass crd" { envsubst < $BATS_TESTS_DIR/gcp_v1alpha1_secretproviderclass.yaml | kubectl apply --namespace=$NAMESPACE -f - cmd="kubectl get secretproviderclasses.secrets-store.csi.x-k8s.io/gcp --namespace=$NAMESPACE -o yaml | grep gcp" 
wait_for_process $WAIT_TIME $SLEEP_TIME "$cmd" } @test "CSI inline volume test with pod portability" { envsubst < $BATS_TESTS_DIR/pod-secrets-store-inline-volume-crd.yaml | kubectl apply --namespace=$NAMESPACE -f - kubectl wait --for=condition=Ready --timeout=60s --namespace=$NAMESPACE pod/secrets-store-inline-crd run kubectl get pod/secrets-store-inline-crd --namespace=$NAMESPACE assert_success } @test "CSI inline volume test with pod portability - read gcp kv secret from pod" { result=$(kubectl exec secrets-store-inline-crd --namespace=$NAMESPACE -- cat /mnt/secrets-store/$FILE_NAME) [[ "${result//$'\r'}" == "${SECRET_VALUE}" ]] } @test "CSI inline volume test with pod portability - unmount succeeds" { # https://github.com/kubernetes/kubernetes/pull/96702 # kubectl wait --for=delete does not work on already deleted pods. # Instead we will start the wait before initiating the delete. kubectl wait --for=delete --timeout=${WAIT_TIME}s --namespace=$NAMESPACE pod/secrets-store-inline-crd & WAIT_PID=$! sleep 1 run kubectl delete pod secrets-store-inline-crd --namespace=$NAMESPACE # On Linux a failure to unmount the tmpfs will block the pod from being # deleted. run wait $WAIT_PID assert_success # Sleep to allow time for logs to propagate. sleep 10 # save debug information to archive in case of failure archive_info # On Windows, the failed unmount calls from: https://github.com/kubernetes-sigs/secrets-store-csi-driver/pull/545 # do not prevent the pod from being deleted. Search through the driver logs # for the error. run bash -c "kubectl logs -l app=secrets-store-csi-driver --tail -1 -c secrets-store -n kube-system | grep '^E.*failed to clean and unmount target path.*$'" assert_failure } teardown_file() { archive_provider "app=csi-secrets-store-provider-gcp" || true archive_info || true }
#!/bin/bash
# ==============================================================================
# Copyright (C) 2021 Intel Corporation
#
# SPDX-License-Identifier: MIT
# ==============================================================================
# Runs a GStreamer pipeline with human-pose estimation on a video source.
# Args: $1 input (file/URI/v4l2 device), $2 inference device, $3 sink mode.
set -e

INPUT=${1:-https://github.com/intel-iot-devkit/sample-videos/raw/master/face-demographics-walking.mp4}
DEVICE=${2:-CPU}

# Select the sink: "display" (default) renders with overlay, "fps" prints FPS.
if [[ $3 == "display" ]] || [[ -z $3 ]]; then
    SINK_ELEMENT="gvawatermark ! videoconvert ! fpsdisplaysink video-sink=xvimagesink sync=false"
elif [[ $3 == "fps" ]]; then
    SINK_ELEMENT="gvafpscounter ! fakesink async=false "
else
    echo Error wrong value for SINK_ELEMENT parameter
    echo Possible values: display - render, fps - show FPS only
    # Fix: exit non-zero on invalid argument (bare 'exit' reported success).
    exit 1
fi

HPE_MODEL=human-pose-estimation-0001

# Pick the source element matching the input kind: camera device, URI, or file.
if [[ $INPUT == "/dev/video"* ]]; then
    SOURCE_ELEMENT="v4l2src device=${INPUT}"
elif [[ $INPUT == *"://"* ]]; then
    SOURCE_ELEMENT="urisourcebin buffer-size=4096 uri=${INPUT}"
else
    SOURCE_ELEMENT="filesrc location=${INPUT}"
fi

# Resolve a model-proc JSON path next to this script.
PROC_PATH() {
    echo $(dirname "$0")/model_proc/$1.json
}

HPE_MODEL_PATH=${MODELS_PATH}/intel/${HPE_MODEL}/FP32/${HPE_MODEL}.xml
HPE_MODEL_PROC=$(PROC_PATH $HPE_MODEL)

PIPELINE="gst-launch-1.0 $SOURCE_ELEMENT ! decodebin ! \
gvaclassify model=$HPE_MODEL_PATH model-proc=$HPE_MODEL_PROC device=$DEVICE inference-region=full-frame ! queue ! \
$SINK_ELEMENT"

echo ${PIPELINE}
${PIPELINE}
#!/bin/bash
#
# 12/10/2010: link and unlink files.
#
# Creates $3 files named "$1-<i>.$2", prints each file's inode id in hex,
# then unlinks it.  Fails if any file already exists.

if [ $# -ne 3 ]
then
    echo "Usage: ./unlink.sh basename suffix count"
    # Fix: usage error must exit non-zero (was 'exit 0', masking the error).
    exit 1
fi

for ((i=0; i < $3; i++))
do
    filename="$1-$i.$2"

    # Refuse to clobber an existing file.
    if [ -e "$filename" ]
    then
        echo "$filename exist"
        exit 1
    fi

    # Fix: failure paths exited 0; callers could not detect errors.
    if ! touch "$filename"
    then
        echo "fail to create file $filename"
        exit 1
    fi

    # Inode number, converted to hexadecimal via bc.
    id=$(stat -c "%i" "$filename")
    id=$(echo "obase=16; $id" | bc)
    echo "The ID of $filename is $id"

    unlink "$filename"
done
#!/usr/bin/env bash
set -ev

# Start the demo container in the background.
docker run --rm -d -p 28080:8080 --name delphix xebialabsunsupported/xl-docker-demo-delphix:latest

# Fix: with 'set -e', a failing gradle build previously aborted the script
# before 'docker stop', leaking the running container.  The trap guarantees
# cleanup on any exit path.
trap 'docker stop delphix' EXIT

./gradlew compileDocker
#!/bin/bash # Download latest TEI files, prepare, upload to kiln & reload kiln and django # Optional argument: jetty port pushd preprocess && python3 download/download.py dl && cd prepare && bash prepare_and_publish.sh $1 && popd
import random


def mutate_individual(individual, mutation_rate):
    """Return a mutated copy of ``individual`` as a tuple.

    Each gene is independently replaced, with probability ``mutation_rate``,
    by a random binary value (0 or 1).  The input sequence is never modified.
    For numerical genes a different strategy (e.g. adding a random offset)
    could be substituted for the binary choice.
    """
    genes = list(individual)  # work on a copy; leave the original untouched
    for idx in range(len(genes)):
        # Flip a biased coin per gene: mutate only below the mutation rate.
        if random.random() < mutation_rate:
            genes[idx] = random.choice([0, 1])
    # Always hand back an immutable tuple, matching the original contract.
    return tuple(genes)
package com.yan.demo.bean;

import java.util.ArrayList;
import java.util.List;

/**
 * MyBatis-generator style "Example" class for the employee table: collects
 * OR-joined groups of SQL criteria plus ordering/distinct flags, consumed by
 * a generated mapper.  Columns covered: emp_id, emp_name, sex, email, dept_id.
 */
public class EmployeeExample {
    // Raw ORDER BY fragment appended to the generated query (no validation).
    protected String orderByClause;
    // Whether SELECT DISTINCT is emitted.
    protected boolean distinct;
    // Criteria groups; groups are joined with OR, criteria inside with AND.
    protected List<Criteria> oredCriteria;

    public EmployeeExample() { oredCriteria = new ArrayList<Criteria>(); }

    public void setOrderByClause(String orderByClause) { this.orderByClause = orderByClause; }
    public String getOrderByClause() { return orderByClause; }
    public void setDistinct(boolean distinct) { this.distinct = distinct; }
    public boolean isDistinct() { return distinct; }
    public List<Criteria> getOredCriteria() { return oredCriteria; }

    // Adds an externally-built group as an OR branch.
    public void or(Criteria criteria) { oredCriteria.add(criteria); }

    // Creates, registers and returns a new OR branch.
    public Criteria or() { Criteria criteria = createCriteriaInternal(); oredCriteria.add(criteria); return criteria; }

    // Creates a group; only auto-registers it when it is the first one.
    public Criteria createCriteria() { Criteria criteria = createCriteriaInternal(); if (oredCriteria.size() == 0) { oredCriteria.add(criteria); } return criteria; }

    protected Criteria createCriteriaInternal() { Criteria criteria = new Criteria(); return criteria; }

    // Resets the example for reuse.
    public void clear() { oredCriteria.clear(); orderByClause = null; distinct = false; }

    /**
     * Base class holding one AND-joined criterion list plus the generated
     * fluent andXxx... builders (one family per column).
     */
    protected abstract static class GeneratedCriteria {
        protected List<Criterion> criteria;

        protected GeneratedCriteria() { super(); criteria = new ArrayList<Criterion>(); }

        // A group takes part in the query only when it has at least one criterion.
        public boolean isValid() { return criteria.size() > 0; }
        public List<Criterion> getAllCriteria() { return criteria; }
        public List<Criterion> getCriteria() { return criteria; }

        // No-value condition, e.g. "col is null".
        protected void addCriterion(String condition) { if (condition == null) { throw new RuntimeException("Value for condition cannot be null"); } criteria.add(new Criterion(condition)); }
        // Single-value or list-value condition; 'property' only names the bean field in the error message.
        protected void addCriterion(String condition, Object value, String property) { if (value == null) { throw new RuntimeException("Value for " + property + " cannot be null"); } criteria.add(new Criterion(condition, value)); }
        // Two-value (BETWEEN) condition.
        protected void addCriterion(String condition, Object value1, Object value2, String property) { if (value1 == null || value2 == null) { throw new RuntimeException("Between values for " + property + " cannot be null"); } criteria.add(new Criterion(condition, value1, value2)); }

        // ---- emp_id ----
        public Criteria andEmpIdIsNull() { addCriterion("emp_id is null"); return (Criteria) this; }
        public Criteria andEmpIdIsNotNull() { addCriterion("emp_id is not null"); return (Criteria) this; }
        public Criteria andEmpIdEqualTo(Integer value) { addCriterion("emp_id =", value, "empId"); return (Criteria) this; }
        public Criteria andEmpIdNotEqualTo(Integer value) { addCriterion("emp_id <>", value, "empId"); return (Criteria) this; }
        public Criteria andEmpIdGreaterThan(Integer value) { addCriterion("emp_id >", value, "empId"); return (Criteria) this; }
        public Criteria andEmpIdGreaterThanOrEqualTo(Integer value) { addCriterion("emp_id >=", value, "empId"); return (Criteria) this; }
        public Criteria andEmpIdLessThan(Integer value) { addCriterion("emp_id <", value, "empId"); return (Criteria) this; }
        public Criteria andEmpIdLessThanOrEqualTo(Integer value) { addCriterion("emp_id <=", value, "empId"); return (Criteria) this; }
        public Criteria andEmpIdIn(List<Integer> values) { addCriterion("emp_id in", values, "empId"); return (Criteria) this; }
        public Criteria andEmpIdNotIn(List<Integer> values) { addCriterion("emp_id not in", values, "empId"); return (Criteria) this; }
        public Criteria andEmpIdBetween(Integer value1, Integer value2) { addCriterion("emp_id between", value1, value2, "empId"); return (Criteria) this; }
        public Criteria andEmpIdNotBetween(Integer value1, Integer value2) { addCriterion("emp_id not between", value1, value2, "empId"); return (Criteria) this; }

        // ---- emp_name ----
        public Criteria andEmpNameIsNull() { addCriterion("emp_name is null"); return (Criteria) this; }
        public Criteria andEmpNameIsNotNull() { addCriterion("emp_name is not null"); return (Criteria) this; }
        public Criteria andEmpNameEqualTo(String value) { addCriterion("emp_name =", value, "empName"); return (Criteria) this; }
        public Criteria andEmpNameNotEqualTo(String value) { addCriterion("emp_name <>", value, "empName"); return (Criteria) this; }
        public Criteria andEmpNameGreaterThan(String value) { addCriterion("emp_name >", value, "empName"); return (Criteria) this; }
        public Criteria andEmpNameGreaterThanOrEqualTo(String value) { addCriterion("emp_name >=", value, "empName"); return (Criteria) this; }
        public Criteria andEmpNameLessThan(String value) { addCriterion("emp_name <", value, "empName"); return (Criteria) this; }
        public Criteria andEmpNameLessThanOrEqualTo(String value) { addCriterion("emp_name <=", value, "empName"); return (Criteria) this; }
        public Criteria andEmpNameLike(String value) { addCriterion("emp_name like", value, "empName"); return (Criteria) this; }
        public Criteria andEmpNameNotLike(String value) { addCriterion("emp_name not like", value, "empName"); return (Criteria) this; }
        public Criteria andEmpNameIn(List<String> values) { addCriterion("emp_name in", values, "empName"); return (Criteria) this; }
        public Criteria andEmpNameNotIn(List<String> values) { addCriterion("emp_name not in", values, "empName"); return (Criteria) this; }
        public Criteria andEmpNameBetween(String value1, String value2) { addCriterion("emp_name between", value1, value2, "empName"); return (Criteria) this; }
        public Criteria andEmpNameNotBetween(String value1, String value2) { addCriterion("emp_name not between", value1, value2, "empName"); return (Criteria) this; }

        // ---- sex ----
        public Criteria andSexIsNull() { addCriterion("sex is null"); return (Criteria) this; }
        public Criteria andSexIsNotNull() { addCriterion("sex is not null"); return (Criteria) this; }
        public Criteria andSexEqualTo(String value) { addCriterion("sex =", value, "sex"); return (Criteria) this; }
        public Criteria andSexNotEqualTo(String value) { addCriterion("sex <>", value, "sex"); return (Criteria) this; }
        public Criteria andSexGreaterThan(String value) { addCriterion("sex >", value, "sex"); return (Criteria) this; }
        public Criteria andSexGreaterThanOrEqualTo(String value) { addCriterion("sex >=", value, "sex"); return (Criteria) this; }
        public Criteria andSexLessThan(String value) { addCriterion("sex <", value, "sex"); return (Criteria) this; }
        public Criteria andSexLessThanOrEqualTo(String value) { addCriterion("sex <=", value, "sex"); return (Criteria) this; }
        public Criteria andSexLike(String value) { addCriterion("sex like", value, "sex"); return (Criteria) this; }
        public Criteria andSexNotLike(String value) { addCriterion("sex not like", value, "sex"); return (Criteria) this; }
        public Criteria andSexIn(List<String> values) { addCriterion("sex in", values, "sex"); return (Criteria) this; }
        public Criteria andSexNotIn(List<String> values) { addCriterion("sex not in", values, "sex"); return (Criteria) this; }
        public Criteria andSexBetween(String value1, String value2) { addCriterion("sex between", value1, value2, "sex"); return (Criteria) this; }
        public Criteria andSexNotBetween(String value1, String value2) { addCriterion("sex not between", value1, value2, "sex"); return (Criteria) this; }

        // ---- email ----
        public Criteria andEmailIsNull() { addCriterion("email is null"); return (Criteria) this; }
        public Criteria andEmailIsNotNull() { addCriterion("email is not null"); return (Criteria) this; }
        public Criteria andEmailEqualTo(String value) { addCriterion("email =", value, "email"); return (Criteria) this; }
        public Criteria andEmailNotEqualTo(String value) { addCriterion("email <>", value, "email"); return (Criteria) this; }
        public Criteria andEmailGreaterThan(String value) { addCriterion("email >", value, "email"); return (Criteria) this; }
        public Criteria andEmailGreaterThanOrEqualTo(String value) { addCriterion("email >=", value, "email"); return (Criteria) this; }
        public Criteria andEmailLessThan(String value) { addCriterion("email <", value, "email"); return (Criteria) this; }
        public Criteria andEmailLessThanOrEqualTo(String value) { addCriterion("email <=", value, "email"); return (Criteria) this; }
        public Criteria andEmailLike(String value) { addCriterion("email like", value, "email"); return (Criteria) this; }
        public Criteria andEmailNotLike(String value) { addCriterion("email not like", value, "email"); return (Criteria) this; }
        public Criteria andEmailIn(List<String> values) { addCriterion("email in", values, "email"); return (Criteria) this; }
        public Criteria andEmailNotIn(List<String> values) { addCriterion("email not in", values, "email"); return (Criteria) this; }
        public Criteria andEmailBetween(String value1, String value2) { addCriterion("email between", value1, value2, "email"); return (Criteria) this; }
        public Criteria andEmailNotBetween(String value1, String value2) { addCriterion("email not between", value1, value2, "email"); return (Criteria) this; }

        // ---- dept_id ----
        public Criteria andDeptIdIsNull() { addCriterion("dept_id is null"); return (Criteria) this; }
        public Criteria andDeptIdIsNotNull() { addCriterion("dept_id is not null"); return (Criteria) this; }
        public Criteria andDeptIdEqualTo(Integer value) { addCriterion("dept_id =", value, "deptId"); return (Criteria) this; }
        public Criteria andDeptIdNotEqualTo(Integer value) { addCriterion("dept_id <>", value, "deptId"); return (Criteria) this; }
        public Criteria andDeptIdGreaterThan(Integer value) { addCriterion("dept_id >", value, "deptId"); return (Criteria) this; }
        public Criteria andDeptIdGreaterThanOrEqualTo(Integer value) { addCriterion("dept_id >=", value, "deptId"); return (Criteria) this; }
        public Criteria andDeptIdLessThan(Integer value) { addCriterion("dept_id <", value, "deptId"); return (Criteria) this; }
        public Criteria andDeptIdLessThanOrEqualTo(Integer value) { addCriterion("dept_id <=", value, "deptId"); return (Criteria) this; }
        public Criteria andDeptIdIn(List<Integer> values) { addCriterion("dept_id in", values, "deptId"); return (Criteria) this; }
        public Criteria andDeptIdNotIn(List<Integer> values) { addCriterion("dept_id not in", values, "deptId"); return (Criteria) this; }
        public Criteria andDeptIdBetween(Integer value1, Integer value2) { addCriterion("dept_id between", value1, value2, "deptId"); return (Criteria) this; }
        public Criteria andDeptIdNotBetween(Integer value1, Integer value2) { addCriterion("dept_id not between", value1, value2, "deptId"); return (Criteria) this; }
    }

    // Concrete group type returned by the fluent builders.
    public static class Criteria extends GeneratedCriteria {
        protected Criteria() { super(); }
    }

    /**
     * One SQL condition: the operator fragment plus zero, one or two values;
     * the boolean flags tell the mapper XML which rendering to use.
     */
    public static class Criterion {
        private String condition;
        private Object value;
        private Object secondValue;
        private boolean noValue;
        private boolean singleValue;
        private boolean betweenValue;
        private boolean listValue;
        private String typeHandler;

        public String getCondition() { return condition; }
        public Object getValue() { return value; }
        public Object getSecondValue() { return secondValue; }
        public boolean isNoValue() { return noValue; }
        public boolean isSingleValue() { return singleValue; }
        public boolean isBetweenValue() { return betweenValue; }
        public boolean isListValue() { return listValue; }
        public String getTypeHandler() { return typeHandler; }

        // Condition with no operand, e.g. "col is null".
        protected Criterion(String condition) { super(); this.condition = condition; this.typeHandler = null; this.noValue = true; }
        // Single operand or IN-list; the flag is chosen by the runtime type.
        protected Criterion(String condition, Object value, String typeHandler) { super(); this.condition = condition; this.value = value; this.typeHandler = typeHandler; if (value instanceof List<?>) { this.listValue = true; } else { this.singleValue = true; } }
        protected Criterion(String condition, Object value) { this(condition, value, null); }
        // BETWEEN: two operands.
        protected Criterion(String condition, Object value, Object secondValue, String typeHandler) { super(); this.condition = condition; this.value = value; this.secondValue = secondValue; this.typeHandler = typeHandler; this.betweenValue = true; }
        protected Criterion(String condition, Object value, Object secondValue) { this(condition, value, secondValue, null); }
    }
}
/**
 * Licensed to Jasig under one or more contributor license
 * agreements. See the NOTICE file distributed with this work
 * for additional information regarding copyright ownership.
 * Jasig licenses this file to you under the Apache License,
 * Version 2.0 (the "License"); you may not use this file
 * except in compliance with the License. You may obtain a
 * copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.jasig.ssp.util.importer.job.validation.map.metadata.validation.rules;

import java.math.BigDecimal;

import org.jarbframework.constraint.metadata.database.ColumnMetadata;
import org.jasig.ssp.util.importer.job.validation.map.metadata.utils.MapReference;
import org.jasig.ssp.util.importer.job.validation.map.metadata.validation.DatabaseConstraintMapValidationContext;
import org.jasig.ssp.util.importer.job.validation.map.metadata.validation.MapViolation;
import org.jasig.ssp.util.importer.job.validation.map.metadata.validation.violation.LengthConstraintMapViolation;

/**
 * Validation rule that reports a violation when a map value is longer than
 * the maximum length reported by the database column metadata. Strings are
 * measured by character count; numbers by significant-digit count.
 */
class MapLengthValidationRule implements MapValueValidationRule {

    /**
     * Adds a {@link LengthConstraintMapViolation} to the context when the
     * given value exceeds the column's maximum length.
     *
     * @param propertyValue  the raw value being imported; only String and
     *                       Number values are length-checked here
     * @param mapReference   reference to the offending map entry, used to
     *                       locate the violation (renamed from the original
     *                       parameter "MapReference", which shadowed its type)
     * @param columnMetadata database metadata for the target column
     * @param context        collector for validation violations
     */
    @Override
    public void validate(Object propertyValue, MapReference mapReference,
            ColumnMetadata columnMetadata, DatabaseConstraintMapValidationContext context) {
        if (lengthExceeded(propertyValue, columnMetadata)) {
            context.addViolation(new LengthConstraintMapViolation(mapReference, propertyValue));
        }
    }

    /**
     * Returns true when the column declares a maximum length and the value
     * exceeds it. Values that are neither String nor Number are never
     * considered too long.
     */
    private boolean lengthExceeded(Object propertyValue, ColumnMetadata columnMetadata) {
        boolean lengthExceeded = false;
        if (columnMetadata.hasMaximumLength()) {
            if (propertyValue instanceof String) {
                lengthExceeded = ((String) propertyValue).length() > columnMetadata.getMaximumLength();
            } else if (propertyValue instanceof Number) {
                lengthExceeded = numberOfDigits((Number) propertyValue) > columnMetadata.getMaximumLength();
            }
        }
        return lengthExceeded;
    }

    /**
     * Number of significant digits in the value, via BigDecimal precision
     * on its string form (so "0.50" counts both the 5 and the trailing 0).
     */
    private int numberOfDigits(Number number) {
        return new BigDecimal(number.toString()).precision();
    }
}
pip freeze coverage run --source='django_excel' manage.py test && flake8 . --exclude=.moban.d --builtins=unicode
#!/usr/bin/env bash
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# This script will check out llvm and clang into third_party/llvm and build it.

# Do NOT CHANGE this if you don't know what you're doing -- see
# https://code.google.com/p/chromium/wiki/UpdatingClang
# Reverting problematic clang rolls is safe, though.
CLANG_REVISION=209387

# ---- Path layout (all relative to this script's location) ----
THIS_DIR="$(dirname "${0}")"
LLVM_DIR="${THIS_DIR}/../../../third_party/llvm"
LLVM_BUILD_DIR="${LLVM_DIR}/../llvm-build"
LLVM_BOOTSTRAP_DIR="${LLVM_DIR}/../llvm-bootstrap"
LLVM_BOOTSTRAP_INSTALL_DIR="${LLVM_DIR}/../llvm-bootstrap-install"
CLANG_DIR="${LLVM_DIR}/tools/clang"
CLANG_TOOLS_EXTRA_DIR="${CLANG_DIR}/tools/extra"
COMPILER_RT_DIR="${LLVM_DIR}/projects/compiler-rt"
LIBCXX_DIR="${LLVM_DIR}/projects/libcxx"
LIBCXXABI_DIR="${LLVM_DIR}/projects/libcxxabi"
ANDROID_NDK_DIR="${LLVM_DIR}/../android_tools/ndk"
# Stamp file recording the last successfully built revision.
STAMP_FILE="${LLVM_BUILD_DIR}/cr_build_revision"

ABS_LIBCXX_DIR="${PWD}/${LIBCXX_DIR}"
ABS_LIBCXXABI_DIR="${PWD}/${LIBCXXABI_DIR}"

# Use both the clang revision and the plugin revisions to test for updates.
BLINKGCPLUGIN_REVISION=\
$(grep LIBRARYNAME "$THIS_DIR"/../blink_gc_plugin/Makefile \
    | cut -d '_' -f 2)
CLANG_AND_PLUGINS_REVISION="${CLANG_REVISION}-${BLINKGCPLUGIN_REVISION}"

# ${A:-a} returns $A if it's set, a else.
LLVM_REPO_URL=${LLVM_URL:-https://llvm.org/svn/llvm-project}

if [[ -z "$GYP_DEFINES" ]]; then
  GYP_DEFINES=
fi
if [[ -z "$GYP_GENERATORS" ]]; then
  GYP_GENERATORS=
fi

# Die if any command dies, error on undefined variable expansions.
set -eu

OS="$(uname -s)"

# Parse command line options.
if_needed=
force_local_build=
run_tests=
bootstrap=
with_android=yes
chrome_tools="plugins blink_gc_plugin"
gcc_toolchain=

if [[ "${OS}" = "Darwin" ]]; then
  with_android=
fi
if [ "${OS}" = "FreeBSD" ]; then
  MAKE=gmake
else
  MAKE=make
fi

# NOTE(review): inside [[ ]], `>` is a lexical comparison; it happens to work
# for $# because any non-zero count starts with a non-'0' digit.
while [[ $# > 0 ]]; do
  case $1 in
    --bootstrap)
      bootstrap=yes
      ;;
    --if-needed)
      if_needed=yes
      ;;
    --force-local-build)
      force_local_build=yes
      ;;
    --print-revision)
      echo $CLANG_REVISION
      exit 0
      ;;
    --run-tests)
      run_tests=yes
      ;;
    --without-android)
      with_android=
      ;;
    --with-chrome-tools)
      shift
      if [[ $# == 0 ]]; then
        echo "--with-chrome-tools requires an argument."
        exit 1
      fi
      chrome_tools=$1
      ;;
    --gcc-toolchain)
      shift
      if [[ $# == 0 ]]; then
        echo "--gcc-toolchain requires an argument."
        exit 1
      fi
      if [[ -x "$1/bin/gcc" ]]; then
        gcc_toolchain=$1
      else
        echo "Invalid --gcc-toolchain: '$1'."
        echo "'$1/bin/gcc' does not appear to be valid."
        exit 1
      fi
      ;;
    --help)
      echo "usage: $0 [--force-local-build] [--if-needed] [--run-tests] "
      echo "--bootstrap: First build clang with CC, then with itself."
      echo "--force-local-build: Don't try to download prebuilt binaries."
      echo "--if-needed: Download clang only if the script thinks it is needed."
      echo "--run-tests: Run tests after building. Only for local builds."
      echo "--print-revision: Print current clang revision and exit."
      echo "--without-android: Don't build ASan Android runtime library."
      echo "--with-chrome-tools: Select which chrome tools to build." \
           "Defaults to plugins."
      echo "    Example: --with-chrome-tools 'plugins empty-string'"
      echo "--gcc-toolchain: Set the prefix for which GCC version should"
      echo "    be used for building. For example, to use gcc in"
      echo "    /opt/foo/bin/gcc, use '--gcc-toolchain '/opt/foo"
      echo
      exit 1
      ;;
    *)
      echo "Unknown argument: '$1'."
      echo "Use --help for help."
      exit 1
      ;;
  esac
  shift
done

# Decide whether clang is needed at all when --if-needed was given.
if [[ -n "$if_needed" ]]; then
  if [[ "${OS}" == "Darwin" ]]; then
    # clang is used on Mac.
    true
  elif [[ "$GYP_DEFINES" =~ .*(clang|tsan|asan|lsan|msan)=1.* ]]; then
    # clang requested via $GYP_DEFINES.
    true
  elif [[ -d "${LLVM_BUILD_DIR}" ]]; then
    # clang previously downloaded, remove third_party/llvm-build to prevent
    # updating.
    true
  elif [[ "${OS}" == "Linux" ]]; then
    # Temporarily use clang on linux. Leave a stamp file behind, so that
    # this script can remove clang again on machines where it was autoinstalled.
    mkdir -p "${LLVM_BUILD_DIR}"
    touch "${LLVM_BUILD_DIR}/autoinstall_stamp"
    true
  else
    # clang wasn't needed, not doing anything.
    exit 0
  fi
fi

# Check if there's anything to be done, exit early if not.
if [[ -f "${STAMP_FILE}" ]]; then
  PREVIOUSLY_BUILT_REVISON=$(cat "${STAMP_FILE}")
  if [[ -z "$force_local_build" ]] && \
       [[ "${PREVIOUSLY_BUILT_REVISON}" = \
          "${CLANG_AND_PLUGINS_REVISION}" ]]; then
    echo "Clang already at ${CLANG_AND_PLUGINS_REVISION}"
    exit 0
  fi
fi
# To always force a new build if someone interrupts their build half way.
rm -f "${STAMP_FILE}"

if [[ -z "$force_local_build" ]]; then
  # Check if there's a prebuilt binary and if so just fetch that. That's faster,
  # and goma relies on having matching binary hashes on client and server too.
  CDS_URL=https://commondatastorage.googleapis.com/chromium-browser-clang
  CDS_FILE="clang-${CLANG_REVISION}.tgz"
  CDS_OUT_DIR=$(mktemp -d -t clang_download.XXXXXX)
  CDS_OUTPUT="${CDS_OUT_DIR}/${CDS_FILE}"
  if [ "${OS}" = "Linux" ]; then
    CDS_FULL_URL="${CDS_URL}/Linux_x64/${CDS_FILE}"
  elif [ "${OS}" = "Darwin" ]; then
    CDS_FULL_URL="${CDS_URL}/Mac/${CDS_FILE}"
  fi
  echo Trying to download prebuilt clang
  # On download failure the temp dir is removed, so the -f check below fails
  # and we fall through to a local build.
  if which curl > /dev/null; then
    curl -L --fail "${CDS_FULL_URL}" -o "${CDS_OUTPUT}" || \
        rm -rf "${CDS_OUT_DIR}"
  elif which wget > /dev/null; then
    wget "${CDS_FULL_URL}" -O "${CDS_OUTPUT}" || rm -rf "${CDS_OUT_DIR}"
  else
    echo "Neither curl nor wget found. Please install one of these."
    exit 1
  fi
  if [ -f "${CDS_OUTPUT}" ]; then
    rm -rf "${LLVM_BUILD_DIR}/Release+Asserts"
    mkdir -p "${LLVM_BUILD_DIR}/Release+Asserts"
    tar -xzf "${CDS_OUTPUT}" -C "${LLVM_BUILD_DIR}/Release+Asserts"
    echo clang "${CLANG_REVISION}" unpacked
    echo "${CLANG_AND_PLUGINS_REVISION}" > "${STAMP_FILE}"
    rm -rf "${CDS_OUT_DIR}"
    exit 0
  else
    echo Did not find prebuilt clang at r"${CLANG_REVISION}", building
  fi
fi

if [[ -n "${with_android}" ]] && ! [[ -d "${ANDROID_NDK_DIR}" ]]; then
  echo "Android NDK not found at ${ANDROID_NDK_DIR}"
  echo "The Android NDK is needed to build a Clang whose -fsanitize=address"
  echo "works on Android. See "
  echo "http://code.google.com/p/chromium/wiki/AndroidBuildInstructions for how"
  echo "to install the NDK, or pass --without-android."
  exit 1
fi

# ---- Source checkouts (retry LLVM once on a broken checkout) ----
echo Getting LLVM r"${CLANG_REVISION}" in "${LLVM_DIR}"
if ! svn co --force "${LLVM_REPO_URL}/llvm/trunk@${CLANG_REVISION}" \
                    "${LLVM_DIR}"; then
  echo Checkout failed, retrying
  rm -rf "${LLVM_DIR}"
  svn co --force "${LLVM_REPO_URL}/llvm/trunk@${CLANG_REVISION}" "${LLVM_DIR}"
fi

echo Getting clang r"${CLANG_REVISION}" in "${CLANG_DIR}"
svn co --force "${LLVM_REPO_URL}/cfe/trunk@${CLANG_REVISION}" "${CLANG_DIR}"

echo Getting compiler-rt r"${CLANG_REVISION}" in "${COMPILER_RT_DIR}"
svn co --force "${LLVM_REPO_URL}/compiler-rt/trunk@${CLANG_REVISION}" \
               "${COMPILER_RT_DIR}"

# clang needs a libc++ checkout, else -stdlib=libc++ won't find includes
# (i.e. this is needed for bootstrap builds).
if [ "${OS}" = "Darwin" ]; then
  echo Getting libc++ r"${CLANG_REVISION}" in "${LIBCXX_DIR}"
  svn co --force "${LLVM_REPO_URL}/libcxx/trunk@${CLANG_REVISION}" \
                 "${LIBCXX_DIR}"
fi

# While we're bundling our own libc++ on OS X, we need to compile libc++abi
# into it too (since OS X 10.6 doesn't have libc++abi.dylib either).
if [ "${OS}" = "Darwin" ]; then
  echo Getting libc++abi r"${CLANG_REVISION}" in "${LIBCXXABI_DIR}"
  svn co --force "${LLVM_REPO_URL}/libcxxabi/trunk@${CLANG_REVISION}" \
                 "${LIBCXXABI_DIR}"
fi

# Apply patch for test failing with --disable-pthreads (llvm.org/PR11974)
# The trailing '|' continues the pipeline past the heredoc into `patch -p4`.
pushd "${CLANG_DIR}"
svn revert test/Index/crash-recovery-modules.m
cat << 'EOF' |
--- third_party/llvm/tools/clang/test/Index/crash-recovery-modules.m	(revision 202554)
+++ third_party/llvm/tools/clang/test/Index/crash-recovery-modules.m	(working copy)
@@ -12,6 +12,8 @@
 // REQUIRES: crash-recovery
 // REQUIRES: shell
+// XFAIL: *
+//    (PR11974)
 
 @import Crash;
EOF
patch -p4
popd

# Echo all commands.
set -x

# Parallelism: default 3, or the detected core count where available.
NUM_JOBS=3
if [[ "${OS}" = "Linux" ]]; then
  NUM_JOBS="$(grep -c "^processor" /proc/cpuinfo)"
elif [ "${OS}" = "Darwin" -o "${OS}" = "FreeBSD" ]; then
  NUM_JOBS="$(sysctl -n hw.ncpu)"
fi

if [[ -n "${gcc_toolchain}" ]]; then
  # Use the specified gcc installation for building.
  export CC="$gcc_toolchain/bin/gcc"
  export CXX="$gcc_toolchain/bin/g++"
  # Set LD_LIBRARY_PATH to make auxiliary targets (tablegen, bootstrap compiler,
  # etc.) find the .so.
  export LD_LIBRARY_PATH="$(dirname $(${CXX} -print-file-name=libstdc++.so.6))"
fi

export CFLAGS=""
export CXXFLAGS=""
# LLVM uses C++11 starting in llvm 3.5. On Linux, this means libstdc++4.7+ is
# needed, on OS X it requires libc++. clang only automatically links to libc++
# when targeting OS X 10.9+, so add stdlib=libc++ explicitly so clang can run on
# OS X versions as old as 10.7.
# TODO(thakis): Some bots are still on 10.6, so for now bundle libc++.dylib.
# Remove this once all bots are on 10.7+, then use --enable-libcpp=yes and
# change all MACOSX_DEPLOYMENT_TARGET values to 10.7.
if [ "${OS}" = "Darwin" ]; then
  # When building on 10.9, /usr/include usually doesn't exist, and while
  # Xcode's clang automatically sets a sysroot, self-built clangs don't.
  export CFLAGS="-isysroot $(xcrun --show-sdk-path)"
  export CPPFLAGS="${CFLAGS}"
  export CXXFLAGS="-stdlib=libc++ -nostdinc++ -I${ABS_LIBCXX_DIR}/include ${CFLAGS}"
fi

# Build bootstrap clang if requested.
if [[ -n "${bootstrap}" ]]; then
  ABS_INSTALL_DIR="${PWD}/${LLVM_BOOTSTRAP_INSTALL_DIR}"
  echo "Building bootstrap compiler"
  mkdir -p "${LLVM_BOOTSTRAP_DIR}"
  pushd "${LLVM_BOOTSTRAP_DIR}"
  if [[ ! -f ./config.status ]]; then
    # The bootstrap compiler only needs to be able to build the real compiler,
    # so it needs no cross-compiler output support. In general, the host
    # compiler should be as similar to the final compiler as possible, so do
    # keep --disable-threads & co.
    ../llvm/configure \
        --enable-optimized \
        --enable-targets=host-only \
        --enable-libedit=no \
        --disable-threads \
        --disable-pthreads \
        --without-llvmgcc \
        --without-llvmgxx \
        --prefix="${ABS_INSTALL_DIR}"
  fi

  ${MAKE} -j"${NUM_JOBS}"
  if [[ -n "${run_tests}" ]]; then
    ${MAKE} check-all
  fi

  ${MAKE} install
  if [[ -n "${gcc_toolchain}" ]]; then
    # Copy that gcc's stdlibc++.so.6 to the build dir, so the bootstrap
    # compiler can start.
    cp -v "$(${CXX} -print-file-name=libstdc++.so.6)" \
       "${ABS_INSTALL_DIR}/lib/"
  fi

  popd
  # From here on, build the real compiler with the bootstrap compiler.
  export CC="${ABS_INSTALL_DIR}/bin/clang"
  export CXX="${ABS_INSTALL_DIR}/bin/clang++"

  if [[ -n "${gcc_toolchain}" ]]; then
    # Tell the bootstrap compiler to use a specific gcc prefix to search
    # for standard library headers and shared object file.
    export CFLAGS="--gcc-toolchain=${gcc_toolchain}"
    export CXXFLAGS="--gcc-toolchain=${gcc_toolchain}"
  fi

  echo "Building final compiler"
fi

# Build clang (in a separate directory).
# The clang bots have this path hardcoded in built/scripts/slave/compile.py,
# so if you change it you also need to change these links.
mkdir -p "${LLVM_BUILD_DIR}"
pushd "${LLVM_BUILD_DIR}"

# Build libc++.dylib while some bots are still on OS X 10.6.
if [ "${OS}" = "Darwin" ]; then
  rm -rf libcxxbuild
  LIBCXXFLAGS="-O3 -std=c++11 -fstrict-aliasing"

  # libcxx and libcxxabi both have a file stdexcept.cpp, so put their .o files
  # into different subdirectories.
  mkdir -p libcxxbuild/libcxx
  pushd libcxxbuild/libcxx
  ${CXX:-c++} -c ${CXXFLAGS} ${LIBCXXFLAGS} "${ABS_LIBCXX_DIR}"/src/*.cpp
  popd

  mkdir -p libcxxbuild/libcxxabi
  pushd libcxxbuild/libcxxabi
  ${CXX:-c++} -c ${CXXFLAGS} ${LIBCXXFLAGS} "${ABS_LIBCXXABI_DIR}"/src/*.cpp -I"${ABS_LIBCXXABI_DIR}/include"
  popd

  pushd libcxxbuild
  ${CC:-cc} libcxx/*.o libcxxabi/*.o -o libc++.1.dylib -dynamiclib \
    -nodefaultlibs -current_version 1 -compatibility_version 1 \
    -lSystem -install_name @executable_path/libc++.dylib \
    -Wl,-unexported_symbols_list,${ABS_LIBCXX_DIR}/lib/libc++unexp.exp \
    -Wl,-force_symbols_not_weak_list,${ABS_LIBCXX_DIR}/lib/notweak.exp \
    -Wl,-force_symbols_weak_list,${ABS_LIBCXX_DIR}/lib/weak.exp
  ln -sf libc++.1.dylib libc++.dylib
  popd
  export LDFLAGS+="-stdlib=libc++ -L${PWD}/libcxxbuild"
fi

if [[ ! -f ./config.status ]]; then
  ../llvm/configure \
      --enable-optimized \
      --enable-libedit=no \
      --disable-threads \
      --disable-pthreads \
      --without-llvmgcc \
      --without-llvmgxx
fi

if [[ -n "${gcc_toolchain}" ]]; then
  # Copy in the right stdlibc++.so.6 so clang can start.
  mkdir -p Release+Asserts/lib
  cp -v "$(${CXX} ${CXXFLAGS} -print-file-name=libstdc++.so.6)" \
    Release+Asserts/lib/
fi

MACOSX_DEPLOYMENT_TARGET=10.5 ${MAKE} -j"${NUM_JOBS}"

STRIP_FLAGS=
if [ "${OS}" = "Darwin" ]; then
  # See http://crbug.com/256342
  STRIP_FLAGS=-x

  cp libcxxbuild/libc++.1.dylib Release+Asserts/bin
fi
strip ${STRIP_FLAGS} Release+Asserts/bin/clang
popd

if [[ -n "${with_android}" ]]; then
  # Make a standalone Android toolchain.
  ${ANDROID_NDK_DIR}/build/tools/make-standalone-toolchain.sh \
      --platform=android-14 \
      --install-dir="${LLVM_BUILD_DIR}/android-toolchain" \
      --system=linux-x86_64 \
      --stl=stlport

  # Android NDK r9d copies a broken unwind.h into the toolchain, see
  # http://crbug.com/357890
  rm -v "${LLVM_BUILD_DIR}"/android-toolchain/include/c++/*/unwind.h

  # Build ASan runtime for Android.
  # Note: LLVM_ANDROID_TOOLCHAIN_DIR is not relative to PWD, but to where we
  # build the runtime, i.e. third_party/llvm/projects/compiler-rt.
  pushd "${LLVM_BUILD_DIR}"
  ${MAKE} -C tools/clang/runtime/ \
    LLVM_ANDROID_TOOLCHAIN_DIR="../../../llvm-build/android-toolchain"
  popd
fi

# Build Chrome-specific clang tools. Paths in this list should be relative to
# tools/clang.
# For each tool directory, copy it into the clang tree and use clang's build
# system to compile it.
for CHROME_TOOL_DIR in ${chrome_tools}; do
  TOOL_SRC_DIR="${THIS_DIR}/../${CHROME_TOOL_DIR}"
  TOOL_DST_DIR="${LLVM_DIR}/tools/clang/tools/chrome-${CHROME_TOOL_DIR}"
  TOOL_BUILD_DIR="${LLVM_BUILD_DIR}/tools/clang/tools/chrome-${CHROME_TOOL_DIR}"
  rm -rf "${TOOL_DST_DIR}"
  cp -R "${TOOL_SRC_DIR}" "${TOOL_DST_DIR}"
  rm -rf "${TOOL_BUILD_DIR}"
  mkdir -p "${TOOL_BUILD_DIR}"
  cp "${TOOL_SRC_DIR}/Makefile" "${TOOL_BUILD_DIR}"
  MACOSX_DEPLOYMENT_TARGET=10.5 ${MAKE} -j"${NUM_JOBS}" -C "${TOOL_BUILD_DIR}"
done

if [[ -n "$run_tests" ]]; then
  # Run a few tests.
  for CHROME_TOOL_DIR in ${chrome_tools}; do
    TOOL_SRC_DIR="${THIS_DIR}/../${CHROME_TOOL_DIR}"
    if [[ -f "${TOOL_SRC_DIR}/tests/test.sh" ]]; then
      "${TOOL_SRC_DIR}/tests/test.sh" "${LLVM_BUILD_DIR}/Release+Asserts"
    fi
  done
  pushd "${LLVM_BUILD_DIR}"
  ${MAKE} check-all
  popd
fi

# After everything is done, log success for this revision.
echo "${CLANG_AND_PLUGINS_REVISION}" > "${STAMP_FILE}"
#!/bin/bash HELP_NOTIFICATION="use -h argument for detailed information" function usage { cat << END $(basename "$0") -e gtest_exec [-h] [-f \"filter[ filter...]\"] -- script to calculate peak memory usage of google unit tests where: -e path to google unit tests executable file -h show this help text -f filters for separate tests measurments END } TEST_EXEC="" TESTS_STR="" while getopts ':he:f:' option; do case "$option" in h) usage exit ;; e) TEST_EXEC=$OPTARG ;; f) TESTS_STR=$OPTARG ;; :) echo >&2 "missing argument for -$OPTARG" echo >&2 "$HELP_NOTIFICATION" exit 1 ;; \?) echo >&2 "illegal option: -$OPTARG" echo >&2 "$HELP_NOTIFICATION" exit 1 ;; esac done if [ -z "$TEST_EXEC" ] then echo test executable should be provided echo $HELP_NOTIFICATION exit 1 fi PROFILER_COMMAND="valgrind --tool=massif --pages-as-heap=yes --massif-out-file=massif.out" TESTS=() for TEST in $TESTS_STR do TESTS+=($TEST) done MEM_USAGE=() if [ -z "$TESTS_STR" ] then $PROFILER_COMMAND $TEST_EXEC MEM_USAGE=`cat massif.out | grep mem_heap_B | sed -e 's/mem_heap_B=\(.*\)/\1/' | sort -g | tail -n 1` printf "\nPeak memory usage: $MEM_USAGE \n" else for TEST in ${TESTS[@]} do $PROFILER_COMMAND $TEST_EXEC --gtest_filter=$TEST MEM_USAGE+=(`cat massif.out | grep mem_heap_B | sed -e 's/mem_heap_B=\(.*\)/\1/' | sort -g | tail -n 1`) done echo echo Peak memory usage per test: for i in $(seq 0 $((${#MEM_USAGE[@]}-1))) do echo ${TESTS[$i]} memory usage: ${MEM_USAGE[$i]} done fi # vim: set noet ts=4 sw=4:
const settings = require("./settings.json"); const db = require('./database.js'); const Discord = require('discord.js'); const client = new Discord.Client( { presence: { activity: { name: 'students', type: 'WATCHING' }, status: 'online' }, messageCacheLifetime: 300, messageSweepInterval: 600 } ); client.on('ready', async () => { //console.log('[Bot] Bot is ready!'); }); client.on('message', async message => { try { let prefix = await db.get(`${message.guild.id}-prefix`); prefix = prefix ? prefix : settings.default.prefix; if (message.content.startsWith(prefix)) { let cmd = message.content.slice(prefix.length).split(" ")[0].toLowerCase(); if (cmd == "assignment") { let assignment = await db.get(`${message.guild.id}-assignment`); if (assignment) { message.channel.send({ embed: { color: 0x023FF00, title: assignment.name, description: assignment.description, footer: { text: 'This text has been generated by user input.' } } }); } else { message.channel.send({ embed: { color: 0x0FF0000, title: "There are no assignments currently!", description: "[insert random message here]" } }); } } } } catch(err) { console.log(err); }; }); client.login(settings.discord.bot.token);
<reponame>fromariz/globo_sport_add_blocker_chrome_extension<gh_stars>0 hideStuff = () => { $('div[data-tracking-action="esporte"]').hide(); $(".shopping").hide(); $("#tns1-item1").hide(); $(".medium-8").addClass("medium-12").removeClass("medium-8"); $(".columns-container").css( "grid-template-columns", "repeat(2,minmax(0,1fr))" ); $(".highlight-column-1").hide(); $(".highlight-container").css( "grid-template-columns", "78.1% calc((100% - 75.1%) - 38.35px)" ); $(".highlight-container").css("grid-template-areas", '"e b" "c b" "d b"'); $(".homeui-tc-esporte").hide(); $(".header-titles").css("grid-template-columns", "repeat(4,auto)"); }; $(document).ready(function () { document.addEventListener("load", hideStuff()); });