text stringlengths 1 1.05M |
|---|
#!/bin/bash
# Apply any staged changes on a Pivotal Ops Manager via the om CLI.
# Expects OPS_MGR_HOST, OPS_MGR_USR and OPS_MGR_PWD in the environment;
# set DEBUG=true to additionally trace every command (set -x).
if [[ $DEBUG == true ]]; then
    set -ex
else
    set -e
fi
chmod +x om-cli/om-linux
CMD=./om-cli/om-linux
# -k skips TLS verification; --ignore-warnings proceeds despite apply warnings.
$CMD -t https://$OPS_MGR_HOST -k -u $OPS_MGR_USR -p $OPS_MGR_PWD apply-changes --ignore-warnings true
|
cd segger && sh install.sh
|
<gh_stars>1-10
def solution(S):
    """Return S with every space replaced by "%20" (URL-style encoding).

    Args:
        S: input string (may be empty).

    Returns:
        A new string; all non-space characters are unchanged.
    """
    # str.replace performs the whole scan in one pass instead of building the
    # result one character at a time with repeated string concatenation.
    return S.replace(" ", "%20")
# Quick manual check ("<NAME>" is a dataset anonymization placeholder).
S = "Mr <NAME>"
print(solution(S))
|
<reponame>googleapis/googleapis-gen<filename>google/example/library/v1/google-cloud-example-library-v1-ruby/lib/google/example/library/v1/library_pb.rb
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/example/library/v1/library.proto
require 'google/api/annotations_pb'
require 'google/api/client_pb'
require 'google/api/field_behavior_pb'
require 'google/api/resource_pb'
require 'google/protobuf/empty_pb'
require 'google/protobuf/field_mask_pb'
require 'google/protobuf'
# Register the message descriptors from google/example/library/v1/library.proto
# in the shared generated pool. Generated code: message/field names and field
# numbers must stay in sync with the .proto file — do not edit by hand.
Google::Protobuf::DescriptorPool.generated_pool.build do
  add_file("google/example/library/v1/library.proto", :syntax => :proto3) do
    add_message "google.example.library.v1.Book" do
      optional :name, :string, 1
      optional :author, :string, 2
      optional :title, :string, 3
      optional :read, :bool, 4
    end
    add_message "google.example.library.v1.Shelf" do
      optional :name, :string, 1
      optional :theme, :string, 2
    end
    add_message "google.example.library.v1.CreateShelfRequest" do
      optional :shelf, :message, 1, "google.example.library.v1.Shelf"
    end
    add_message "google.example.library.v1.GetShelfRequest" do
      optional :name, :string, 1
    end
    add_message "google.example.library.v1.ListShelvesRequest" do
      optional :page_size, :int32, 1
      optional :page_token, :string, 2
    end
    add_message "google.example.library.v1.ListShelvesResponse" do
      repeated :shelves, :message, 1, "google.example.library.v1.Shelf"
      optional :next_page_token, :string, 2
    end
    add_message "google.example.library.v1.DeleteShelfRequest" do
      optional :name, :string, 1
    end
    add_message "google.example.library.v1.MergeShelvesRequest" do
      optional :name, :string, 1
      optional :other_shelf, :string, 2
    end
    add_message "google.example.library.v1.CreateBookRequest" do
      optional :parent, :string, 1
      optional :book, :message, 2, "google.example.library.v1.Book"
    end
    add_message "google.example.library.v1.GetBookRequest" do
      optional :name, :string, 1
    end
    add_message "google.example.library.v1.ListBooksRequest" do
      optional :parent, :string, 1
      optional :page_size, :int32, 2
      optional :page_token, :string, 3
    end
    add_message "google.example.library.v1.ListBooksResponse" do
      repeated :books, :message, 1, "google.example.library.v1.Book"
      optional :next_page_token, :string, 2
    end
    add_message "google.example.library.v1.UpdateBookRequest" do
      optional :book, :message, 1, "google.example.library.v1.Book"
      optional :update_mask, :message, 2, "google.protobuf.FieldMask"
    end
    add_message "google.example.library.v1.DeleteBookRequest" do
      optional :name, :string, 1
    end
    add_message "google.example.library.v1.MoveBookRequest" do
      optional :name, :string, 1
      optional :other_shelf_name, :string, 2
    end
  end
end
# Bind a Ruby message class constant for each descriptor registered above,
# e.g. Google::Example::Library::V1::Book. Generated code — do not edit.
module Google
  module Example
    module Library
      module V1
        Book = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.example.library.v1.Book").msgclass
        Shelf = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.example.library.v1.Shelf").msgclass
        CreateShelfRequest = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.example.library.v1.CreateShelfRequest").msgclass
        GetShelfRequest = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.example.library.v1.GetShelfRequest").msgclass
        ListShelvesRequest = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.example.library.v1.ListShelvesRequest").msgclass
        ListShelvesResponse = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.example.library.v1.ListShelvesResponse").msgclass
        DeleteShelfRequest = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.example.library.v1.DeleteShelfRequest").msgclass
        MergeShelvesRequest = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.example.library.v1.MergeShelvesRequest").msgclass
        CreateBookRequest = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.example.library.v1.CreateBookRequest").msgclass
        GetBookRequest = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.example.library.v1.GetBookRequest").msgclass
        ListBooksRequest = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.example.library.v1.ListBooksRequest").msgclass
        ListBooksResponse = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.example.library.v1.ListBooksResponse").msgclass
        UpdateBookRequest = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.example.library.v1.UpdateBookRequest").msgclass
        DeleteBookRequest = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.example.library.v1.DeleteBookRequest").msgclass
        MoveBookRequest = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.example.library.v1.MoveBookRequest").msgclass
      end
    end
  end
end
|
#!/bin/bash
# Bring up the OSM (Open Service Mesh) demo: build and push images, install the
# control plane with the selected certificate manager, deploy the bookstore
# demo apps and SMI policies, then (outside CI) watch the demo pods.
#
# -a additionally auto-exports every variable assigned below, making the
# values visible to the child ./demo/*.sh scripts.
set -aueo pipefail
if [ ! -f .env ]; then
    echo -e "\nThere is no .env file in the root of this repository."
    echo -e "Copy the values from .env.example into .env."
    echo -e "Modify the values in .env to match your setup.\n"
    echo -e " cat .env.example > .env\n\n"
    exit 1
fi
# shellcheck disable=SC1091
source .env
# Set meaningful defaults for env vars we expect from .env
CI="${CI:-false}" # This is set to true by Github Actions
MESH_NAME="${MESH_NAME:-osm}"
K8S_NAMESPACE="${K8S_NAMESPACE:-osm-system}"
BOOKBUYER_NAMESPACE="${BOOKBUYER_NAMESPACE:-bookbuyer}"
BOOKSTORE_NAMESPACE="${BOOKSTORE_NAMESPACE:-bookstore}"
BOOKTHIEF_NAMESPACE="${BOOKTHIEF_NAMESPACE:-bookthief}"
BOOKWAREHOUSE_NAMESPACE="${BOOKWAREHOUSE_NAMESPACE:-bookwarehouse}"
CERT_MANAGER="${CERT_MANAGER:-tresor}"
CTR_REGISTRY="${CTR_REGISTRY:-localhost:5000}"
CTR_REGISTRY_CREDS_NAME="${CTR_REGISTRY_CREDS_NAME:-acr-creds}"
DEPLOY_TRAFFIC_SPLIT="${DEPLOY_TRAFFIC_SPLIT:-true}"
CTR_TAG="${CTR_TAG:-$(git rev-parse HEAD)}"
IMAGE_PULL_POLICY="${IMAGE_PULL_POLICY:-Always}"
ENABLE_DEBUG_SERVER="${ENABLE_DEBUG_SERVER:-true}"
ENABLE_EGRESS="${ENABLE_EGRESS:-false}"
DEPLOY_GRAFANA="${DEPLOY_GRAFANA:-false}"
DEPLOY_JAEGER="${DEPLOY_JAEGER:-false}"
ENABLE_FLUENTBIT="${ENABLE_FLUENTBIT:-false}"
DEPLOY_PROMETHEUS="${DEPLOY_PROMETHEUS:-false}"
ENABLE_PROMETHEUS_SCRAPING="${ENABLE_PROMETHEUS_SCRAPING:-true}"
DEPLOY_WITH_SAME_SA="${DEPLOY_WITH_SAME_SA:-false}"
ENVOY_LOG_LEVEL="${ENVOY_LOG_LEVEL:-debug}"
DEPLOY_ON_OPENSHIFT="${DEPLOY_ON_OPENSHIFT:-false}"
# For any additional installation arguments. Used heavily in CI.
optionalInstallArgs=$*
# NOTE(review): exit_error is currently unused (the docker check below inlines
# the same pattern); kept as a helper for future use — confirm before removing.
exit_error() {
    error="$1"
    echo "$error"
    exit 1
}
# Check if Docker daemon is running
docker info > /dev/null || { echo "Docker daemon is not running"; exit 1; }
make build-osm
# cleanup stale resources from previous runs
./demo/clean-kubernetes.sh
# The demo uses osm's namespace as defined by environment variables, K8S_NAMESPACE
# to house the control plane components.
#
# Note: `osm install` creates the namespace via Helm only if such a namespace already
# doesn't exist. We explicitly create the namespace below because of the need to
# create container registry credentials in this namespace for the purpose of testing.
# The side effect of creating the namespace here instead of letting Helm create it is
# that Helm no longer manages namespace creation, and as a result labels that it
# otherwise adds for using as a namespace selector are no longer available.
kubectl create namespace "$K8S_NAMESPACE"
# Mimic Helm namespace label behavior: https://github.com/helm/helm/blob/release-3.2/pkg/action/install.go#L292
kubectl label namespace "$K8S_NAMESPACE" name="$K8S_NAMESPACE"
echo "Certificate Manager in use: $CERT_MANAGER"
if [ "$CERT_MANAGER" = "vault" ]; then
    echo "Installing Hashi Vault"
    ./demo/deploy-vault.sh
fi
if [ "$CERT_MANAGER" = "cert-manager" ]; then
    echo "Installing cert-manager"
    ./demo/deploy-cert-manager.sh
fi
if [ "$DEPLOY_ON_OPENSHIFT" = true ] ; then
    optionalInstallArgs+=" --set=OpenServiceMesh.enablePrivilegedInitContainer=true"
fi
make docker-push
./scripts/create-container-registry-creds.sh "$K8S_NAMESPACE"
# Deploys Xds and Prometheus
echo "Certificate Manager in use: $CERT_MANAGER"
# TODO(review): the two install invocations below are identical except for the
# vault.* settings — consider building the common flag list once and appending
# the vault flags conditionally to avoid the duplication drifting apart.
if [ "$CERT_MANAGER" = "vault" ]; then
    # shellcheck disable=SC2086
    bin/osm install \
        --osm-namespace "$K8S_NAMESPACE" \
        --mesh-name "$MESH_NAME" \
        --set=OpenServiceMesh.certificateManager="$CERT_MANAGER" \
        --set=OpenServiceMesh.vault.host="$VAULT_HOST" \
        --set=OpenServiceMesh.vault.token="$VAULT_TOKEN" \
        --set=OpenServiceMesh.vault.protocol="$VAULT_PROTOCOL" \
        --set=OpenServiceMesh.image.registry="$CTR_REGISTRY" \
        --set=OpenServiceMesh.imagePullSecrets[0].name="$CTR_REGISTRY_CREDS_NAME" \
        --set=OpenServiceMesh.image.tag="$CTR_TAG" \
        --set=OpenServiceMesh.image.pullPolicy="$IMAGE_PULL_POLICY" \
        --set=OpenServiceMesh.enableDebugServer="$ENABLE_DEBUG_SERVER" \
        --set=OpenServiceMesh.enableEgress="$ENABLE_EGRESS" \
        --set=OpenServiceMesh.deployGrafana="$DEPLOY_GRAFANA" \
        --set=OpenServiceMesh.deployJaeger="$DEPLOY_JAEGER" \
        --set=OpenServiceMesh.enableFluentbit="$ENABLE_FLUENTBIT" \
        --set=OpenServiceMesh.deployPrometheus="$DEPLOY_PROMETHEUS" \
        --set=OpenServiceMesh.enablePrometheusScraping="$ENABLE_PROMETHEUS_SCRAPING" \
        --set=OpenServiceMesh.envoyLogLevel="$ENVOY_LOG_LEVEL" \
        --set=OpenServiceMesh.controllerLogLevel="trace" \
        --timeout=90s \
        $optionalInstallArgs
else
    # shellcheck disable=SC2086
    bin/osm install \
        --osm-namespace "$K8S_NAMESPACE" \
        --mesh-name "$MESH_NAME" \
        --set=OpenServiceMesh.certificateManager="$CERT_MANAGER" \
        --set=OpenServiceMesh.image.registry="$CTR_REGISTRY" \
        --set=OpenServiceMesh.imagePullSecrets[0].name="$CTR_REGISTRY_CREDS_NAME" \
        --set=OpenServiceMesh.image.tag="$CTR_TAG" \
        --set=OpenServiceMesh.image.pullPolicy="$IMAGE_PULL_POLICY" \
        --set=OpenServiceMesh.enableDebugServer="$ENABLE_DEBUG_SERVER" \
        --set=OpenServiceMesh.enableEgress="$ENABLE_EGRESS" \
        --set=OpenServiceMesh.deployGrafana="$DEPLOY_GRAFANA" \
        --set=OpenServiceMesh.deployJaeger="$DEPLOY_JAEGER" \
        --set=OpenServiceMesh.enableFluentbit="$ENABLE_FLUENTBIT" \
        --set=OpenServiceMesh.deployPrometheus="$DEPLOY_PROMETHEUS" \
        --set=OpenServiceMesh.enablePrometheusScraping="$ENABLE_PROMETHEUS_SCRAPING" \
        --set=OpenServiceMesh.envoyLogLevel="$ENVOY_LOG_LEVEL" \
        --set=OpenServiceMesh.controllerLogLevel="trace" \
        --timeout=90s \
        $optionalInstallArgs
fi
./demo/configure-app-namespaces.sh
./demo/deploy-apps.sh
# Apply SMI policies
if [ "$DEPLOY_TRAFFIC_SPLIT" = "true" ]; then
    ./demo/deploy-traffic-split.sh
fi
./demo/deploy-traffic-specs.sh
if [ "$DEPLOY_WITH_SAME_SA" = "true" ]; then
    ./demo/deploy-traffic-target-with-same-sa.sh
else
    ./demo/deploy-traffic-target.sh
fi
# Outside CI, keep a live view of the demo pods in every namespace.
if [[ "$CI" != "true" ]]; then
    watch -n5 "printf \"Namespace ${K8S_NAMESPACE}:\n\"; kubectl get pods -n ${K8S_NAMESPACE} -o wide; printf \"\n\n\"; printf \"Namespace ${BOOKBUYER_NAMESPACE}:\n\"; kubectl get pods -n ${BOOKBUYER_NAMESPACE} -o wide; printf \"\n\n\"; printf \"Namespace ${BOOKSTORE_NAMESPACE}:\n\"; kubectl get pods -n ${BOOKSTORE_NAMESPACE} -o wide; printf \"\n\n\"; printf \"Namespace ${BOOKTHIEF_NAMESPACE}:\n\"; kubectl get pods -n ${BOOKTHIEF_NAMESPACE} -o wide; printf \"\n\n\"; printf \"Namespace ${BOOKWAREHOUSE_NAMESPACE}:\n\"; kubectl get pods -n ${BOOKWAREHOUSE_NAMESPACE} -o wide"
fi
|
from flask import Flask, request, jsonify
app = Flask(__name__)
items = []
@app.route('/items', methods=['POST'])
def create_item():
    """Create an item from the JSON body ({'name': ..., 'price': ...}) and echo it back."""
    payload = request.get_json()
    new_item = {'name': payload['name'], 'price': payload['price']}
    items.append(new_item)
    return jsonify(new_item)
@app.route('/items/<string:name>')
def get_item(name):
    """Return the first stored item with the given name, or JSON null if absent."""
    match = None
    for candidate in items:
        if candidate['name'] == name:
            match = candidate
            break
    return jsonify(match)
@app.route('/items/<string:name>', methods=['DELETE'])
def delete_item(name):
    """Remove every item whose name matches; always reports success."""
    global items
    items = [entry for entry in items if entry['name'] != name]
    return jsonify({'message': 'Item deleted'})
<gh_stars>0
'use strict';
const _ = require('lodash');
const ObjectID = require("bson-objectid");
const {BAD_REQUEST, PRECONDITION_FAILED} = require('../../../config/errors');
const findLandings = require('./helpers/find-landings');
const getLandingMeta = require('./helpers/get-landing-meta');
const updateLandingData = require('./helpers/update-landing-data');
const getDbCollection = require('../../../common/utils/get-db-collection');
module.exports = async (ctx, next) => {
const id = ctx.params.id;
const body = ctx.request.body || {};
const name = (body.name || '').trim();
const previewUrl = (body.previewUrl || '').trim();
const landingUpdate = body.landing;
const baseVersion = +(body.baseVersion || '');
const html = body.html;
const update = {};
if (name) {
update.name = name;
}
if (previewUrl) {
update.previewUrl = previewUrl;
}
if (landingUpdate && !_.isEmpty(landingUpdate)) {
// update.landing = landingUpdate;
update.landing = html;
}
if (_.isEmpty(update) || !baseVersion) {
return ctx.throw(400, BAD_REQUEST);
}
let data = {};
try {
const landings = await findLandings(ctx, false, [id]);
const landing = landings[0];
if (landing) {
// prevent inconsistent updates
if (landing.currentVersion !== baseVersion) {
return ctx.throw(412, PRECONDITION_FAILED);
}
data = updateLandingData(ctx, landing, update);
const collection = getDbCollection.landings(ctx);
await ctx.mongoTransaction(
collection,
'updateOne',
[
{_id: ObjectID(id)},
{$set: data}
]
)
}
} catch (err) {
throw err
}
ctx.status = 200;
ctx.body = getLandingMeta(data);
next();
};
|
=begin rdoc
Base
This handles user interaction, loading the parameters, etc.
=end
require "open-uri"
require "ftools"
module PoolParty
  # Base holds the global PoolParty defaults (paths, ports, EC2 settings) and
  # class-level helpers for locating/storing AWS credentials and well-known
  # directories. Options declared via default_options are readable as
  # Base.<key> through Configurable/MethodMissingSugar.
  class Base
    include Configurable
    extend MethodMissingSugar
    default_options({
      :user => "root", # This should change here
      :base_keypair_path => "#{ENV["HOME"]}/.ec2",
      :tmp_path => "/tmp/poolparty",
      :remote_storage_path => "/var/poolparty",
      :remote_gem_path => "/var/poolparty/gems",
      :fileserver_base => "puppet://master/files",
      :base_config_directory => "/etc/poolparty",
      :template_directory => "templates",
      :template_path => "/var/lib/puppet/templates",
      :module_path => "/etc/puppet/modules/poolparty",
      :default_specfile_name => "clouds.rb",
      :port => "80",
      :forwarding_port => "8080",
      :proxy_mode => "http",
      :messenger_client_port => 7050,
      :minimum_runtime => 3000, #50.minutes in seconds
      # NOTE(review): existence is probed on poolparty_agent.pid but the value
      # used is agent.pid — confirm the filename mismatch is intended.
      :agent_pid_file => ::File.readable?("/var/run/poolparty_agent.pid") ? "/var/run/agent.pid" : "#{Dir.pwd}/agent.pid",
      :agent_port => 8081,
      # EC2 Options
      :ami => "ami-1cd73375",
      :size => 'm1.small', # must be 'm1.small', 'm1.large', 'm1.xlarge', 'c1.medium', or 'c1.xlarge'
      # NOTE(review): key is misspelled ("availabilty") but is part of the
      # public options interface — renaming it would break existing specfiles.
      :availabilty_zone => "us-east-1a",
      :security_group => ["default"],
      # Options that should not be touched pretty much ever
      :manifest_path => "/etc/puppet/manifests"
    })
    # Class methods
    class << self
      # Merge h over the defaults; memoized, so h is only honored on the
      # first call.
      def options(h={})
        @options ||= default_options.merge(h)
      end
      # Get the access_key: environment variables win over the key file.
      def access_key
        @access_key ||= load_access_keys_from_environment_var || load_keys_from_file[:access_key]
      end
      # First non-nil of AWS_ACCESS_KEY / AWS_ACCESS_KEY_ID.
      def load_access_keys_from_environment_var
        [ ENV["AWS_ACCESS_KEY"], ENV["AWS_ACCESS_KEY_ID"]].reject {|a| a.nil? }.first
      end
      # Secret key: environment variable wins over the key file.
      def secret_access_key
        @secret_access_key ||= load_secret_access_keys_from_environment_var || load_keys_from_file[:secret_access_key]
      end
      def load_secret_access_keys_from_environment_var
        [ ENV["AWS_SECRET_ACCESS_KEY"] ].reject {|a| a.nil? }.first
      end
      # Read the first existing key file (open comes from open-uri).
      def read_keyfile
        open(get_working_key_file_locations).read
      end
      # Load and memoize the YAML key file; {} when no key file exists.
      def load_keys_from_file
        @keys ||= get_working_key_file_locations ? YAML::load( read_keyfile ) : {}
      end
      # Store the keys in a yaml format to give the master access
      # So that the master has access to the files
      def store_keys_in_file
        unless access_key.nil? || secret_access_key.nil?
          write_to_file( key_file_locations.first, YAML::dump({:access_key => access_key, :secret_access_key => secret_access_key}))
        end
      end
      # Copy credentials from obj (anything responding to access_key and
      # secret_access_key) and persist them via store_keys_in_file.
      def store_keys_in_file_for(obj=nil)
        if obj
          @access_key = obj.access_key
          @secret_access_key = obj.secret_access_key
        end
        store_keys_in_file
      end
      # Drop the memoized key-file contents so they are re-read next access.
      def reset!
        @keys = nil
      end
      # Get the instance first instance file that exists on the system from the expected places
      # denoted in the local_instances_list_file_locations
      def get_working_key_file_locations
        key_file_locations.reject {|f| f unless ::File.file?(f) }.first
      end
      # Expected places for the instances.list to be located at on the machine
      def key_file_locations
        [
          ".ppkeys",
          "#{Base.base_config_directory}/.ppkeys",
          "#{Base.storage_directory}/ppkeys",
          "~/.ppkeys",
          "ppkeys"
        ]
      end
      # First viable storage directory, falling back to /tmp/poolparty.
      def storage_directory
        [
          "/var/poolparty"
        ].select do |dir|
          dir if viable_directory?(dir)
        end.first || ::File.join( "/tmp/poolparty")
      end
      # First viable log directory, falling back to ./log under the cwd.
      def logger_location
        [
          "/var/log/poolparty"
        ].select do |dir|
          dir if viable_directory?(dir)
        end.first || ::File.join(Dir.pwd, "log")
      end
      # Assume the logs will be at the pool.log location within the
      # logger_location set above
      def pool_logger_location
        ::File.join(logger_location, "pool.log")
      end
      # All viable custom monitor directories (may be empty).
      def custom_monitor_directories
        [
          "/var/poolparty/monitors",
          "/etc/poolparty/monitors",
          "#{Dir.pwd}/monitors"
        ].select {|d| d if viable_directory?(d) }
      end
      # All viable custom module directories (may be empty).
      def custom_modules_directories
        [
          "/var/poolparty/modules",
          "/etc/poolparty/modules",
          "#{Dir.pwd}/modules"
        ].select {|d| d if viable_directory?(d) }
      end
      # Only return true if the directory we are reading is both readable
      # and exists
      def viable_directory?(dir)
        ::File.directory?(dir) && ::File.readable?(dir)
      end
    end
  end
end
<filename>src/data/legendary/slot/quiver.js
// Legendary quiver definitions (Demon Hunter off-hand slot).
// Shared fields:
//   name        - item name ("<NAME>" placeholders come from dataset anonymization)
//   type        - always "Quiver"
//   weight      - drop weight (0 = excluded from the normal drop pool)
//   hc/season   - hardcore-only / season-only flags
//   craft       - crafting costs (rp/ad/vc/fs), present only on crafted items
//   smartLoot   - classes favored when smart loot rolls this item
//   primary/secondary - affix pools: null = affix always rolls,
//       {min, max} = ranged roll, RANDOM: n = n additional random affixes,
//       ELEMENTAL = elemental damage roll
//   image/flavor - display asset URL and flavor text
var quiver = [
    {
        name: "<NAME>",
        type: "Quiver",
        weight: 0,
        hc: false,
        season: false,
        craft: {
            rp: 40, ad: 38, vc: 30, fs: 2
        },
        smartLoot: [
            "Demon Hunter"
        ],
        primary: {
            AttackSpeed: null,
            CritChance: null,
            EliteDamage: {
                min: 5,
                max: 8
            },
            RANDOM: 2
        },
        secondary: {
            RANDOM: 2
        },
        image: '//media.blizzard.com/d3/icons/items/large/unique_quiver_003_x1_demonhunter_male.png',
        flavor: 'Very rare. Made from bones harvested from a live demon—not an easy thing to do.'
    },
    {
        name: "<NAME>",
        type: "Quiver",
        weight: 50,
        hc: false,
        season: false,
        smartLoot: [
            "Demon Hunter"
        ],
        primary: {
            Dexterity: null,
            AttackSpeed: null,
            CritChance: null,
            RANDOM: 2
        },
        secondary: {
            Rucksack: null,
            RANDOM: 1
        },
        image: '//media.blizzard.com/d3/icons/items/large/unique_quiver_102_x1_demonhunter_male.png',
        flavor: 'Designed by a watchmaker\'s apprentice turned demon hunter, this rucksack makes use of its maker\'s knowledge of clockworks to greatly miniaturize the turret mechanism, allowing its owner to field more of the devices without the case being any more heavy or cumbersome than an ordinary quiver.'
    },
    {
        name: "<NAME>",
        type: "Quiver",
        weight: 100,
        hc: false,
        season: true,
        smartLoot: [
            "Demon Hunter"
        ],
        primary: {
            Dexterity: null,
            AttackSpeed: null,
            CritChance: null,
            RANDOM: 2
        },
        secondary: {
            DeadMans: {
                min: 50,
                max: 60
            },
            RANDOM: 1
        },
        image: '//media.blizzard.com/d3/icons/items/large/p2_unique_quiver_007_demonhunter_male.png',
        flavor: '"Marteks would not flee before their superior numbers. He knew the time of his death was at hand. He stood his ground, loosing arrow after arrow as he was slowly overwhelmed." —Tales of the Wasteland'
    },
    {
        name: "<NAME>",
        type: "Quiver",
        weight: 100,
        hc: false,
        season: false,
        smartLoot: [
            "Demon Hunter"
        ],
        primary: {
            Dexterity: null,
            AttackSpeed: null,
            RANDOM: 3
        },
        secondary: {
            Emimei: null,
            RANDOM: 1
        },
        image: '//media.blizzard.com/d3/icons/items/large/unique_quiver_103_x1_demonhunter_male.png',
        flavor: '"Emimei was rumored to be the offspring of the famed fletcher Cirri. Possibly even more talented than her father, she discovered a way to integrate a superior ignition system into demon hunter bola weapons. Exert enough pressure on a bola\'s spike and the explosive charge within triggers, causing the bola to detonate upon impact with a target." —The Craft of War by Diadra the Scholar'
    },
    {
        name: "<NAME>",
        type: "Quiver",
        weight: 100,
        hc: false,
        season: false,
        smartLoot: [
            "Demon Hunter"
        ],
        primary: {
            Dexterity: null,
            AttackSpeed: null,
            CostReduc: null,
            RANDOM: 2
        },
        secondary: {
            RANDOM: 2
        },
        image: '//media.blizzard.com/d3/icons/items/large/unique_quiver_006_x1_demonhunter_male.png',
        flavor: 'The quality of these arrows is stunning. Even the fletchers of Jasender would be hard-pressed to match it.'
    },
    {
        name: "<NAME>",
        type: "Quiver",
        weight: 100,
        hc: false,
        season: false,
        smartLoot: [
            "Demon Hunter"
        ],
        primary: {
            Dexterity: null,
            AttackSpeed: null,
            ELEMENTAL: {
                min: 5,
                max: 10
            },
            RANDOM: 2
        },
        secondary: {
            RANDOM: 2
        },
        image: '//media.blizzard.com/d3/icons/items/large/unique_quiver_004_x1_demonhunter_male.png',
        flavor: 'Crafted by the Wandering Priest of the Low Hills during an ecstatic trance, this quiver bestows a blessing on the arrows it carries.'
    },
    {
        name: "<NAME>",
        type: "Quiver",
        weight: 100,
        hc: false,
        season: false,
        smartLoot: [
            "Demon Hunter"
        ],
        primary: {
            Dexterity: null,
            AttackSpeed: null,
            RANDOM: 3
        },
        secondary: {
            Meticulous: {
                min: 30,
                max: 40
            },
            RANDOM: 1
        },
        image: '//media.blizzard.com/d3/icons/items/large/unique_quiver_001_p1_demonhunter_male.png',
        flavor: '"We must take time to enjoy the good things in life. The death throes of demons, for instance." -<NAME>'
    },
    {
        name: "<NAME>",
        type: "Quiver",
        weight: 100,
        hc: false,
        season: false,
        smartLoot: [
            "Demon Hunter"
        ],
        primary: {
            Dexterity: null,
            AttackSpeed: null,
            CritChance: null,
            RANDOM: 2
        },
        secondary: {
            RANDOM: 2
        },
        image: '//media.blizzard.com/d3/icons/items/large/unique_quiver_002_x1_demonhunter_male.png',
        flavor: 'Bolts pulled from this quiver instinctively seek out demonic blood.'
    },
    {
        name: "<NAME>",
        type: "Quiver",
        weight: 100,
        hc: false,
        season: false,
        smartLoot: [
            "Demon Hunter"
        ],
        primary: {
            Dexterity: null,
            AttackSpeed: null,
            RANDOM: 3
        },
        secondary: {
            SeethingHatred: {
                min: 3,
                max: 4
            },
            RANDOM: 1
        },
        image: '//media.blizzard.com/d3/icons/items/large/unique_quiver_005_p1_demonhunter_male.png',
        flavor: 'Seeing their enemies flayed serves only to fuel the thirst for revenge that burns within a demon hunter’s breast.'
    },
    {
        name: "<NAME>",
        type: "Quiver",
        weight: 100,
        hc: false,
        season: false,
        smartLoot: [
            "Demon Hunter"
        ],
        primary: {
            Dexterity: null,
            AttackSpeed: null,
            RANDOM: 3
        },
        secondary: {
            NinthCirri: {
                min: 20,
                max: 25
            },
            RANDOM: 1
        },
        image: '//media.blizzard.com/d3/icons/items/large/unique_quiver_101_x1_demonhunter_male.png',
        flavor: '"This satchel is cleverly designed so that the heads of arrows placed within fit into slots that serve as whetstones, sharpening the edges whenever an arrow is inserted or removed. Cirri\'s artistry remained unrecognized in his lifetime, so that only this quiver, numbered nine on the interior, survived to the present." —The Craft of War by Diadra the Scholar'
    }
];
module.exports = quiver;
#!/bin/sh
#
#-----------------------BEGIN NOTICE -- DO NOT EDIT-----------------------
# NASA Goddard Space Flight Center Land Information System (LIS) v7.1
#
# Copyright (c) 2015 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#-------------------------END NOTICE -- DO NOT EDIT-----------------------
#
# Script: wget_native_srtm30.sh
#
# Description:
#  Downloads all required 2-D tiled-based 30arcsec SRTM v2.1 elevation files
#  When all files are fully downloaded/unzipped, the target directory
#  containing the files should be about ~2.8 GB.
#
# Sources:
#  http://dds.cr.usgs.gov/srtm/version2_1/SRTM30/
#  File Date: 8/23/2010
#
# Script Written by: K. Arsenault; Aug 30, 2013
# Updated: 07.04.2014 -- v1.1, David Mocko: Public testcase for LIS-7.0
# Updated: 03.04.2017 -- v1.2, K. Arsenault; latest LDT release
# ____________________________________________________________

rundir=$PWD
targetdir=./input/

echo "== Downloading the SRTM-30sec Elevation Tile Files == "

# Change directory to target directory where files are to be downloaded to.
# Abort if the cd fails (e.g. permissions); the original continued and would
# have downloaded and unzipped everything into the current directory instead.
mkdir -p "${targetdir}/SRTM30"
cd "${targetdir}/SRTM30" || { echo "ERR: could not cd to ${targetdir}/SRTM30"; exit 1; }
echo "- Change to target directory: ${targetdir}/SRTM30"

# Loop over each gridded tile and download + unzip its *.dem.zip file.
# NOTE(review): URLs are kept as-is from the original; the USGS host may have
# moved to https since — verify before relying on this script.
for nstile in n90 n40 s10; do
    for wetile in w180 w140 w100 w060 w020 e020 e060 e100 e140; do
        wget --no-check-certificate http://dds.cr.usgs.gov/srtm/version2_1/SRTM30/${wetile}${nstile}/${wetile}${nstile}.dem.zip -nv -a "${rundir}/download_srtm30native.log"
        unzip ${wetile}${nstile}.dem.zip >> "${rundir}/download_srtm30native.log"
    done
done

# Obtain SRTM30 documentation and version release info:
wget --no-check-certificate http://dds.cr.usgs.gov/srtm/version2_1/SRTM30/srtm30_documentation.pdf -nv -a "${rundir}/download_srtm30native.log"
wget --no-check-certificate http://dds.cr.usgs.gov/srtm/version2_1/SRTM30/srtm30_version_history.pdf -nv -a "${rundir}/download_srtm30native.log"

echo "== Done downloading SRTM30 tile fields."
|
#include <iostream>
using namespace std;
int main()
{
int arr[] = {-2, 1, 3, -5, 6};
int n = sizeof(arr) / sizeof(arr[0]);
int maxSum = 0;
for (int i = 0; i < n; i++)
{
int sum = 0;
for (int j = i; j < n; j++)
{
sum += arr[j];
maxSum = max(maxSum, sum);
}
}
cout << "Maximum Sum = " << maxSum << endl;
return 0;
} |
#!/usr/bin/bash
# Periodically sample all 1-wire (w1) temperature sensors and append the
# readings to a per-day, per-minute log file under $logdirroot.

logdirroot='/home/r2h2/logs/thermos'
lastlogfp='/var/log/sample_temp/lastlog'
sampling_interval=300   # seconds between samples

# Main loop: create today's log directory, write one timestamped sample file
# per iteration, then sleep. All expansions are quoted (the original left them
# unquoted, which breaks on paths containing spaces or glob characters).
function main() {
    while true; do
        local todaydir
        todaydir=$(date --iso-8601)
        mkdir -p "$logdirroot/$todaydir"
        local fn
        fn=$(date --iso-8601=minutes)
        fp="$logdirroot/$todaydir/$fn"   # intentionally global: read by write_temp
        write_temp
        # remove empty regular files -> not found where created
        # NOTE(review): this removes ANY regular file directly under
        # $logdirroot (not just empty ones), and errors on the date
        # subdirectories — confirm the intent before tightening to
        # `find -maxdepth 1 -type f -empty -delete`.
        rm "$logdirroot"/*
        sleep "$sampling_interval"
    done
}

# write_temp: append "<sensor-id> <raw-temperature>" for every family-28
# (DS18B20) sensor to $fp, then record $fp's size in $lastlogfp.
function write_temp() {
    for x in $(ls /sys/bus/w1/devices/ | grep 28); do
        echo "$x ""$(cat /sys/bus/w1/devices/w1_bus_master1/${x}/temperature)" >> "$fp"
    done
    # Use a %s format instead of expanding $fp inside the format string
    # (the original `printf "$fp created with "` misbehaves if the path
    # ever contains a % sequence).
    printf '%s created with ' "$fp" > "$lastlogfp"
    stat --printf="%s" "$fp" >> "$lastlogfp"
    printf ' bytes\n' >> "$lastlogfp"
}

main "$@"
|
# Launch PPO training (train/train.py) for experiment "test-stl-nw-m-db-l-0".
# Key settings: 100M environment steps, 100 rollout workers (50 for eval),
# GAE, linear LR decay from 2.5e-4, PopArt reward normalization, gamma=0.999.
python train/train.py \
    test-stl-nw-m-db-l-0 \
    --experiment-name=test-stl-nw-m-db-l-0 \
    --num-env-steps=100000000 \
    --algo=ppo \
    --use-gae \
    --lr=2.5e-4 \
    --clip-param=0.1 \
    --value-loss-coef=0.5 \
    --num-processes=100 \
    --eval-num-processes=50 \
    --num-steps=500 \
    --num-mini-batch=4 \
    --log-interval=1 \
    --save-interval=10 \
    --eval-interval=20 \
    --use-linear-lr-decay \
    --popart-reward \
    --entropy-coef=0.01 \
    --gamma=0.999
|
package notes
import (
"context"
"fmt"
"sort"
"time"
)
// Top returns a channel that yields at most limit notes, ordered by less,
// plus a channel carrying comparison errors. Both channels are closed once
// all notes have been sent (or the context is cancelled).
func Top(ctx context.Context, limit int, notes <-chan Note, less Less) (note <-chan Note, errors <-chan error) {
	out := make(chan Note)
	errs := make(chan error)
	go func() {
		defer close(out)
		defer close(errs)
		slice := collectNotes(ctx, notes)
		// NOTE(review): sortNotes sends on the unbuffered errs channel, so a
		// caller must drain errors concurrently with out, or a comparison
		// error will block the sort.
		sortNotes(slice, less, errs)
		if len(slice) > limit {
			slice = slice[:limit]
		}
		for _, n := range slice {
			select {
			case out <- n:
			case <-ctx.Done():
				// Receiver gave up: stop instead of blocking forever.
				// (The original unconditional send could leak this
				// goroutine when the consumer abandons the channel.)
				return
			}
		}
	}()
	return out, errs
}
// collectNotes drains the notes channel into a slice. It returns the
// collected notes once the channel closes, or nil if ctx is cancelled first.
func collectNotes(ctx context.Context, notes <-chan Note) []Note {
	var result []Note
	for {
		select {
		case <-ctx.Done():
			return nil
		case n, open := <-notes:
			if !open {
				return result
			}
			result = append(result, n)
		}
	}
}
// sortNotes orders slice in place using less. A comparison error is reported
// on errs and the failing pair is treated as "not less" (less's boolean).
// NOTE(review): the send on errs blocks until the error is read — with an
// unbuffered channel a caller that is not draining errors will deadlock here.
func sortNotes(slice []Note, less Less, errs chan<- error) {
	sort.Slice(slice, func(i, j int) bool {
		l, err := less(slice[i], slice[j])
		if err != nil {
			errs <- fmt.Errorf("comparing notes failed %s and %s: %v", slice[i].Path(), slice[j].Path(), err)
		}
		return l
	})
}
// Less reports whether note i orders before note j. Implementations may fail
// (e.g. unreadable note metadata), hence the error return.
type Less func(i, j Note) (bool, error)

// ModifiedDesc orders notes by modification time, newest first.
var ModifiedDesc Less = func(i, j Note) (bool, error) {
	firstModified, e := i.Modified()
	if e != nil {
		return false, e
	}
	secondModified, e := j.Modified()
	if e != nil {
		return false, e
	}
	return firstModified.After(secondModified), nil
}

// ModifiedAsc orders notes by modification time, oldest first.
var ModifiedAsc Less = func(i, j Note) (bool, error) {
	firstModified, e := i.Modified()
	if e != nil {
		return false, e
	}
	secondModified, e := j.Modified()
	if e != nil {
		return false, e
	}
	return firstModified.Before(secondModified), nil
}

// CreatedDesc orders notes by creation time, newest first.
var CreatedDesc Less = func(first, second Note) (bool, error) {
	firstCreated, e := first.Created()
	if e != nil {
		return false, e
	}
	secondCreated, e := second.Created()
	if e != nil {
		return false, e
	}
	return firstCreated.After(secondCreated), nil
}

// CreatedAsc orders notes by creation time, oldest first.
var CreatedAsc Less = func(first, second Note) (bool, error) {
	firstCreated, e := first.Created()
	if e != nil {
		return false, e
	}
	secondCreated, e := second.Created()
	if e != nil {
		return false, e
	}
	return firstCreated.Before(secondCreated), nil
}
// TagDateDesc orders notes by the date of the named tag, latest first.
func TagDateDesc(name string) Less {
	return tagDateLess(name, func(first, second time.Time) bool {
		return first.After(second)
	})
}

// TagDateAsc orders notes by the date of the named tag, earliest first.
func TagDateAsc(name string) Less {
	return tagDateLess(name, func(first, second time.Time) bool {
		return first.Before(second)
	})
}

// tagDateLess builds a Less comparing the AbsoluteDate of the named tag.
// Missing-tag handling is asymmetric by design: if the first note lacks the
// tag it is NOT less (false); if only the second lacks it, the first IS less
// (true) — so notes without the tag sort after notes that have it.
func tagDateLess(name string, less func(first, second time.Time) bool) Less {
	return func(first, second Note) (bool, error) {
		firstTag, found, err := FindTagByName(first, name)
		if err != nil || !found {
			return false, err
		}
		secondTag, found, err := FindTagByName(second, name)
		if err != nil || !found {
			return true, err
		}
		firstDate, err := firstTag.AbsoluteDate()
		if err != nil {
			return false, err
		}
		secondDate, err := secondTag.AbsoluteDate()
		if err != nil {
			return false, err
		}
		return less(firstDate, secondDate), nil
	}
}
// TagNumberDesc orders notes by the numeric value of the named tag, highest first.
func TagNumberDesc(name string) Less {
	return tagNumberLess(name, func(first, second int) bool {
		return first > second
	})
}

// TagNumberAsc orders notes by the numeric value of the named tag, lowest first.
func TagNumberAsc(name string) Less {
	return tagNumberLess(name, func(first, second int) bool {
		return first < second
	})
}

// tagNumberLess builds a Less comparing the Number() of the named tag.
// Missing-tag handling mirrors tagDateLess: notes without the tag sort after
// notes that have it.
func tagNumberLess(name string, less func(first, second int) bool) Less {
	return func(first, second Note) (bool, error) {
		firstTag, found, err := FindTagByName(first, name)
		if err != nil || !found {
			return false, err
		}
		secondTag, found, err := FindTagByName(second, name)
		if err != nil || !found {
			return true, err
		}
		firstNumber, err := firstTag.Number()
		if err != nil {
			return false, err
		}
		secondNumber, err := secondTag.Number()
		if err != nil {
			return false, err
		}
		return less(firstNumber, secondNumber), nil
	}
}
|
#!/bin/sh
# First-start TLS bootstrap for a Docker registry: generate a CA plus a server
# certificate (valid for every local IP and the hostname), then serve with TLS.
#
# Fixes vs. original:
#  - [ ] instead of [[ ]]: this runs under /bin/sh, and [[ ]] is a bash-ism
#    that fails on POSIX shells such as dash.
#  - Guard on servercert.pem: the original tested /certs/cert.pem, a file this
#    script never creates, so certificates were regenerated on every start.
if [ ! -f /certs/servercert.pem ]; then
    mkdir -p /certs
    cd /certs/ || exit 1
    generate_cert --cert=ca.pem --key=cakey.pem
    # All local inet addresses plus the hostname, comma-separated, as cert SANs.
    hostlist="$(ip a | grep "inet " | sed 's/.*inet \(.*\)\/.*/\1/g' | tr "\n" ",")$(hostname)"
    generate_cert --host=${hostlist} --ca=ca.pem --ca-key=cakey.pem --cert=servercert.pem --key=serverkey.pem
fi
export REGISTRY_HTTP_TLS_CERTIFICATE=/certs/servercert.pem
export REGISTRY_HTTP_TLS_KEY=/certs/serverkey.pem
registry serve /etc/docker/registry/config.yml
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.LogAggregationStatus;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.MiniYARNCluster;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl;
import org.junit.Assert;
import org.junit.Test;
import java.io.File;
import java.io.IOException;
import java.util.Random;
/**
 * Unit tests for the {@code HadoopArchiveLogs} tool: log-directory
 * eligibility filtering, capping the number of eligible applications,
 * discovering applications whose log aggregation has finished, and
 * generating the archive shell script.
 */
public class TestHadoopArchiveLogs {
  // One fixed timestamp so every ApplicationId created in a run belongs to
  // the same logical "cluster".
  private static final long CLUSTER_TIMESTAMP = System.currentTimeMillis();
  // Unit of file size used by createFile(); sizes below are multiples of it.
  private static final int FILE_SIZE_INCREMENT = 4096;
  private static final byte[] DUMMY_DATA = new byte[FILE_SIZE_INCREMENT];
  static {
    new Random().nextBytes(DUMMY_DATA);
  }
  // checkFiles() must drop apps with: no log dir, too few files, logs over
  // the size cap, or an existing .har archive — keeping only app5.
  @Test(timeout = 10000)
  public void testCheckFiles() throws Exception {
    Configuration conf = new Configuration();
    HadoopArchiveLogs hal = new HadoopArchiveLogs(conf);
    FileSystem fs = FileSystem.getLocal(conf);
    Path rootLogDir = new Path("target", "logs");
    String suffix = "logs";
    Path logDir = new Path(rootLogDir,
        new Path(System.getProperty("user.name"), suffix));
    fs.mkdirs(logDir);
    Assert.assertEquals(0, hal.eligibleApplications.size());
    ApplicationReport app1 = createAppReport(1);  // no files found
    ApplicationReport app2 = createAppReport(2);  // too few files
    Path app2Path = new Path(logDir, app2.getApplicationId().toString());
    fs.mkdirs(app2Path);
    createFile(fs, new Path(app2Path, "file1"), 1);
    hal.minNumLogFiles = 2;
    ApplicationReport app3 = createAppReport(3);  // too large
    Path app3Path = new Path(logDir, app3.getApplicationId().toString());
    fs.mkdirs(app3Path);
    createFile(fs, new Path(app3Path, "file1"), 2);
    createFile(fs, new Path(app3Path, "file2"), 5);
    // Cap is 6 increments; app3 has 7 increments total and must be dropped.
    hal.maxTotalLogsSize = FILE_SIZE_INCREMENT * 6;
    ApplicationReport app4 = createAppReport(4);  // has har already
    Path app4Path = new Path(logDir, app4.getApplicationId().toString());
    fs.mkdirs(app4Path);
    createFile(fs, new Path(app4Path, app4.getApplicationId() + ".har"), 1);
    ApplicationReport app5 = createAppReport(5);  // just right
    Path app5Path = new Path(logDir, app5.getApplicationId().toString());
    fs.mkdirs(app5Path);
    createFile(fs, new Path(app5Path, "file1"), 2);
    createFile(fs, new Path(app5Path, "file2"), 3);
    hal.eligibleApplications.add(app1);
    hal.eligibleApplications.add(app2);
    hal.eligibleApplications.add(app3);
    hal.eligibleApplications.add(app4);
    hal.eligibleApplications.add(app5);
    hal.checkFiles(fs, rootLogDir, suffix);
    Assert.assertEquals(1, hal.eligibleApplications.size());
    Assert.assertEquals(app5, hal.eligibleApplications.iterator().next());
  }
  // checkMaxEligible() must trim the set down to maxEligible entries; the
  // assertions show it evicts the most recently finished apps first
  // (app4 at +10, then app3 at +5, then app5 at 0, then app1 at -5).
  @Test(timeout = 10000)
  public void testCheckMaxEligible() throws Exception {
    Configuration conf = new Configuration();
    HadoopArchiveLogs hal = new HadoopArchiveLogs(conf);
    ApplicationReport app1 = createAppReport(1);
    app1.setFinishTime(CLUSTER_TIMESTAMP - 5);
    ApplicationReport app2 = createAppReport(2);
    app2.setFinishTime(CLUSTER_TIMESTAMP - 10);
    ApplicationReport app3 = createAppReport(3);
    app3.setFinishTime(CLUSTER_TIMESTAMP + 5);
    ApplicationReport app4 = createAppReport(4);
    app4.setFinishTime(CLUSTER_TIMESTAMP + 10);
    ApplicationReport app5 = createAppReport(5);
    app5.setFinishTime(CLUSTER_TIMESTAMP);
    Assert.assertEquals(0, hal.eligibleApplications.size());
    hal.eligibleApplications.add(app1);
    hal.eligibleApplications.add(app2);
    hal.eligibleApplications.add(app3);
    hal.eligibleApplications.add(app4);
    hal.eligibleApplications.add(app5);
    // A negative maxEligible means "no cap".
    hal.maxEligible = -1;
    hal.checkMaxEligible();
    Assert.assertEquals(5, hal.eligibleApplications.size());
    hal.maxEligible = 4;
    hal.checkMaxEligible();
    Assert.assertEquals(4, hal.eligibleApplications.size());
    Assert.assertFalse(hal.eligibleApplications.contains(app4));
    hal.maxEligible = 3;
    hal.checkMaxEligible();
    Assert.assertEquals(3, hal.eligibleApplications.size());
    Assert.assertFalse(hal.eligibleApplications.contains(app3));
    hal.maxEligible = 2;
    hal.checkMaxEligible();
    Assert.assertEquals(2, hal.eligibleApplications.size());
    Assert.assertFalse(hal.eligibleApplications.contains(app5));
    hal.maxEligible = 1;
    hal.checkMaxEligible();
    Assert.assertEquals(1, hal.eligibleApplications.size());
    Assert.assertFalse(hal.eligibleApplications.contains(app1));
  }
  // Starts a MiniYARNCluster, registers seven apps covering every
  // LogAggregationStatus, and expects findAggregatedApps() to pick exactly
  // 2 of them (the ones whose aggregation has completed).
  @Test(timeout = 10000)
  public void testFindAggregatedApps() throws Exception {
    MiniYARNCluster yarnCluster = null;
    try {
      Configuration conf = new Configuration();
      conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, true);
      yarnCluster =
          new MiniYARNCluster(TestHadoopArchiveLogs.class.getSimpleName(), 1,
              1, 1, 1);
      yarnCluster.init(conf);
      yarnCluster.start();
      conf = yarnCluster.getConfig();
      RMContext rmContext = yarnCluster.getResourceManager().getRMContext();
      RMAppImpl app1 = (RMAppImpl)createRMApp(1, conf, rmContext,
          LogAggregationStatus.DISABLED);
      RMAppImpl app2 = (RMAppImpl)createRMApp(2, conf, rmContext,
          LogAggregationStatus.FAILED);
      RMAppImpl app3 = (RMAppImpl)createRMApp(3, conf, rmContext,
          LogAggregationStatus.NOT_START);
      RMAppImpl app4 = (RMAppImpl)createRMApp(4, conf, rmContext,
          LogAggregationStatus.SUCCEEDED);
      RMAppImpl app5 = (RMAppImpl)createRMApp(5, conf, rmContext,
          LogAggregationStatus.RUNNING);
      RMAppImpl app6 = (RMAppImpl)createRMApp(6, conf, rmContext,
          LogAggregationStatus.RUNNING_WITH_FAILURE);
      RMAppImpl app7 = (RMAppImpl)createRMApp(7, conf, rmContext,
          LogAggregationStatus.TIME_OUT);
      rmContext.getRMApps().put(app1.getApplicationId(), app1);
      rmContext.getRMApps().put(app2.getApplicationId(), app2);
      rmContext.getRMApps().put(app3.getApplicationId(), app3);
      rmContext.getRMApps().put(app4.getApplicationId(), app4);
      rmContext.getRMApps().put(app5.getApplicationId(), app5);
      rmContext.getRMApps().put(app6.getApplicationId(), app6);
      rmContext.getRMApps().put(app7.getApplicationId(), app7);
      HadoopArchiveLogs hal = new HadoopArchiveLogs(conf);
      Assert.assertEquals(0, hal.eligibleApplications.size());
      hal.findAggregatedApps();
      Assert.assertEquals(2, hal.eligibleApplications.size());
    } finally {
      if (yarnCluster != null) {
        yarnCluster.stop();
      }
    }
  }
  // Verifies the generated distributed-shell script line by line: shebang,
  // shell options, one if/elif branch per app keyed on YARN_SHELL_ID, and
  // the final hadoop invocation. App order within the script is
  // unspecified (eligibleApplications is a set), hence the two-way check.
  @Test(timeout = 10000)
  public void testGenerateScript() throws Exception {
    Configuration conf = new Configuration();
    HadoopArchiveLogs hal = new HadoopArchiveLogs(conf);
    ApplicationReport app1 = createAppReport(1);
    ApplicationReport app2 = createAppReport(2);
    hal.eligibleApplications.add(app1);
    hal.eligibleApplications.add(app2);
    File localScript = new File("target", "script.sh");
    Path workingDir = new Path("/tmp", "working");
    Path remoteRootLogDir = new Path("/tmp", "logs");
    String suffix = "logs";
    localScript.delete();
    Assert.assertFalse(localScript.exists());
    hal.generateScript(localScript, workingDir, remoteRootLogDir, suffix);
    Assert.assertTrue(localScript.exists());
    String script = IOUtils.toString(localScript.toURI());
    String[] lines = script.split(System.lineSeparator());
    Assert.assertEquals(16, lines.length);
    Assert.assertEquals("#!/bin/bash", lines[0]);
    Assert.assertEquals("set -e", lines[1]);
    Assert.assertEquals("set -x", lines[2]);
    Assert.assertEquals("if [ \"$YARN_SHELL_ID\" == \"1\" ]; then", lines[3]);
    if (lines[4].contains(app1.getApplicationId().toString())) {
      Assert.assertEquals("\tappId=\"" + app1.getApplicationId().toString()
          + "\"", lines[4]);
      Assert.assertEquals("\tappId=\"" + app2.getApplicationId().toString()
          + "\"", lines[7]);
    } else {
      Assert.assertEquals("\tappId=\"" + app2.getApplicationId().toString()
          + "\"", lines[4]);
      Assert.assertEquals("\tappId=\"" + app1.getApplicationId().toString()
          + "\"", lines[7]);
    }
    Assert.assertEquals("\tuser=\"" + System.getProperty("user.name") + "\"",
        lines[5]);
    Assert.assertEquals("elif [ \"$YARN_SHELL_ID\" == \"2\" ]; then", lines[6]);
    Assert.assertEquals("\tuser=\"" + System.getProperty("user.name") + "\"",
        lines[8]);
    Assert.assertEquals("else", lines[9]);
    Assert.assertEquals("\techo \"Unknown Mapping!\"", lines[10]);
    Assert.assertEquals("\texit 1", lines[11]);
    Assert.assertEquals("fi", lines[12]);
    Assert.assertEquals("export HADOOP_CLIENT_OPTS=\"-Xmx1024m\"", lines[13]);
    Assert.assertTrue(lines[14].startsWith("export HADOOP_CLASSPATH="));
    Assert.assertEquals("\"$HADOOP_HOME\"/bin/hadoop org.apache.hadoop.tools." +
        "HadoopArchiveLogsRunner -appId \"$appId\" -user \"$user\" -workingDir "
        + workingDir.toString() + " -remoteRootLogDir " +
        remoteRootLogDir.toString() + " -suffix " + suffix, lines[15]);
  }
  // Builds a minimal FINISHED/SUCCEEDED ApplicationReport for the given id,
  // owned by the current user.
  private static ApplicationReport createAppReport(int id) {
    ApplicationId appId = ApplicationId.newInstance(CLUSTER_TIMESTAMP, id);
    return ApplicationReport.newInstance(
        appId,
        ApplicationAttemptId.newInstance(appId, 1),
        System.getProperty("user.name"),
        null, null, null, 0, null, YarnApplicationState.FINISHED, null,
        null, 0L, 0L, FinalApplicationStatus.SUCCEEDED, null, null, 100f,
        null, null);
  }
  // Writes sizeMultiple * FILE_SIZE_INCREMENT bytes of random data to p.
  private static void createFile(FileSystem fs, Path p, long sizeMultiple)
      throws IOException {
    FSDataOutputStream out = null;
    try {
      out = fs.create(p);
      for (int i = 0 ; i < sizeMultiple; i++) {
        out.write(DUMMY_DATA);
      }
    } finally {
      if (out != null) {
        out.close();
      }
    }
  }
  // Creates an RMAppImpl whose reports always carry the given
  // LogAggregationStatus, so findAggregatedApps() sees a fixed status.
  private static RMApp createRMApp(int id, Configuration conf, RMContext rmContext,
      final LogAggregationStatus aggStatus) {
    ApplicationId appId = ApplicationId.newInstance(CLUSTER_TIMESTAMP, id);
    ApplicationSubmissionContext submissionContext =
        ApplicationSubmissionContext.newInstance(appId, "test", "default",
            Priority.newInstance(0), null, false, true,
            2, Resource.newInstance(10, 2), "test");
    return new RMAppImpl(appId, rmContext, conf, "test",
        System.getProperty("user.name"), "default", submissionContext,
        rmContext.getScheduler(),
        rmContext.getApplicationMasterService(),
        System.currentTimeMillis(), "test",
        null, null) {
      @Override
      public ApplicationReport createAndGetApplicationReport(
          String clientUserName, boolean allowAccess) {
        ApplicationReport report =
            super.createAndGetApplicationReport(clientUserName, allowAccess);
        report.setLogAggregationStatus(aggStatus);
        return report;
      }
    };
  }
}
|
<filename>Scripts/js/cmds/view-alias-path/index.js
// CLI helpers: terminal colors, configured alias paths, and string polyfills.
const { chalk, configPaths, polyfills } = require('../../helpers');
// Installs the "stringCut" helpers (head/tail) on String.prototype.
polyfills.load("stringCut");
/**
 * Shortens a path for display: paths shorter than maxPathSize pass through;
 * longer ones keep their first and last few characters joined by "...".
 * Uses native String#slice instead of the project's head/tail polyfills
 * (assumed to mean "first n chars" / "last n chars" — behavior preserved),
 * so this function no longer depends on the "stringCut" polyfill being loaded.
 */
const parsePath = (path, maxPathSize) => {
  if (path.length < maxPathSize) return path;
  // Budget half the allowance (minus the ellipsis) for each end.
  const max = Math.floor((maxPathSize - 2) / 2);
  return `${path.slice(0, max)}...${path.slice(-max)}`;
};
/**
 * Renders one "key: path" row: two-space indent, cyan key, padding so every
 * path starts in the same column, and the path truncated to fit the terminal.
 */
const writePath = (key, path, maxKeySize, maxRowSize) => {
  const coloredKey = `  ${chalk.cyan(key)}`;
  const padding = " ".repeat(maxKeySize + 1 - key.length);
  // 6 accounts for the indent, ": " separator, and ellipsis margin.
  const shownPath = parsePath(path, maxRowSize - maxKeySize - 6);
  return `${coloredKey}: ${padding}${shownPath}`;
};
// Entry point: prints every configured alias path, grouped by category,
// with keys color-coded and padded so the paths line up in one column.
function main() {
  console.log();
  const categories = configPaths.getGroupedByCategories();
  // Longest key across ALL categories, so padding is consistent between groups.
  const maxKeySize = categories.reduce((catMax, { paths }) => paths.reduce(((pMax, { key }) => Math.max(pMax, key.length)), catMax), 0)
  // Current terminal width; each row is truncated to fit it.
  const maxRowSize = process.stdout.columns;
  categories.forEach((category) => {
    console.log(chalk.green(`[ ${category.name} ]`));
    category.paths.forEach(({ path, key }) => {
      console.log(writePath(key, path, maxKeySize, maxRowSize))
    });
    console.log();
  })
}
// Run the command; print the failure in red and exit non-zero so shell
// callers can detect the error.
try {
  main();
} catch (err) {
  console.error(chalk.red(err.message));
  process.exit(1);
}
|
<gh_stars>0
import { call, put, takeLatest, all } from 'redux-saga/effects';
import { getRequest } from 'utils/request';
import { object } from 'prop-types';
import {
FETCH_ALPS_CLASS_LIST_SUCCESS,
FETCH_ALPS_CLASS_LIST_FAIL,
FETCH_ALPS_CLASS_STUDENT_LIST_SUCCESS,
FETCH_ALPS_CLASS_STUDENT_LIST_FAIL,
FETCH_ALPS_CLASS_LIST_REQUEST,
FETCH_ALPS_CLASS_STUDENT_LIST_REQUEST,
} from './constants';
/**
 * Worker saga: loads the ALPS class list from the API and dispatches a
 * success action carrying it, or a failure action with the HTTP response.
 */
export function* fetchAlpsClassListRequestSaga() {
  try {
    const alpsClassList = yield call(getRequest, { url: '/account/alms-classes/' });
    yield put({ type: FETCH_ALPS_CLASS_LIST_SUCCESS, payload: { alpsClassList } });
  } catch (error) {
    yield put({ type: FETCH_ALPS_CLASS_LIST_FAIL, error: error.response });
  }
}
/**
 * Worker saga: loads the students of one ALPS class and dispatches them.
 * The API response nests students under results[0].student_users; each
 * student gets a derived full_name (last name + first name, no separator —
 * presumably an East-Asian naming convention; confirm with the API owner).
 * Fix: removed leftover debug console.log calls and dead commented-out code.
 */
export function* fetchAlpsClassStudentListRequestSaga(action) {
  const { alpsClassId } = action.payload;
  const url = `/account/students/?alms-class=${alpsClassId}`;
  try {
    let studentList = yield call(getRequest, { url });
    if (studentList.results.length > 0) {
      studentList = studentList.results[0].student_users.map(student => {
        const o = Object.assign({}, student);
        o.full_name = student.last_name + student.first_name;
        return o;
      });
    } else {
      studentList = [];
    }
    yield put({
      type: FETCH_ALPS_CLASS_STUDENT_LIST_SUCCESS,
      payload: {
        studentList,
      },
    });
  } catch (error) {
    yield put({
      type: FETCH_ALPS_CLASS_STUDENT_LIST_FAIL,
      error: error.response,
    });
  }
}
/**
 * Root saga: registers one takeLatest watcher per request action so a new
 * request cancels any still-running fetch of the same kind.
 */
export default function* rootSaga() {
  const watchers = [
    takeLatest(FETCH_ALPS_CLASS_LIST_REQUEST, fetchAlpsClassListRequestSaga),
    takeLatest(
      FETCH_ALPS_CLASS_STUDENT_LIST_REQUEST,
      fetchAlpsClassStudentListRequestSaga,
    ),
  ];
  yield all(watchers);
}
|
<reponame>drago2308/WaniKani-Classroom<gh_stars>0
// Page controller: wires up the property search form, the click-to-expand
// grid items, and a responsive masonry-style grid layout (size()).
$(document).ready(function(){
    size();
    /* Search Stuff: serialize the form fields into query-string params and
       reload the current page with them. */
    $('.submit-search-button').click(function(){
        //Get VARIABLES
        var suburb_or_town = $('.form-search-suburb').val();
        var property_category = $('.form-search-type').val();
        var min_price = $('.form-search-min-price').val();
        var max_price = $('.form-search-max-price').val();
        var property_id = $('.form-search-property-id-input').val();
        // The backend apparently expects the literal string "undefined" for a
        // blank id — NOTE(review): confirm the server contract.
        if (property_id == ""){
            property_id = "undefined";
        }
        window.location.href = window.location.pathname + "?suburb_or_town=" + suburb_or_town + "&property_category=" + property_category + "&min_price=" + min_price + "&max_price=" + max_price +"&property_id=" + property_id;
    });
    /* Search Styles */
    //
    /* Grid stuff: toggle a grid item open/closed by its element id. */
    // NOTE(review): $id is assigned without var/let — an implicit global.
    $('.grid-item').click(function(){
        $id = $(this).attr('id');
        $id = '#' + $id + '';
        console.log($id);
        detailClick($id);
    });
    $('.fixed-close-button').click(function(){
        $id = $(this).attr('id');
        $id = '#' + $id + '';
        console.log($id);
        removeDetailClick($id);
    });
    // Toggles the "open" class on the grid item matching the id selector.
    function detailClick (property_id){
        if ($(property_id + '.grid-item').hasClass('open')){
            $(property_id + '.grid-item').removeClass('open');
        }else {
            $(property_id + '.grid-item').addClass('open');
        }
    }
    // Hides the expanded overlay for the given property id.
    function removeDetailClick (property_id){
        $(property_id + '.fixed-large-property-box-background').css("display", "none");
    }
    //Added Grid system
    // version 1.2
    // changed to use container width rather than viewport so that it displays correctly on larger screens
    //
    //-------------------
    //Jquery
    // Setup Variables used for knowing the width of the view port and calculating the width of the blocks
    // $('.grid-loader').text('Go!');
    // $('.grid-loader').fadeOut();
    // $('.grid-container').fadeIn();
    // Measures the container, derives blocks-per-row from breakpoints, sizes
    // the blocks, and rearranges items; re-runs on every window resize.
    function size() {
        // Container width (not viewport — see version note above)
        var container = $('.grid-container').width();
        // Breakpoints
        var breakpointSmall = 640 - 58;
        var breakpointMedium = 800;
        var breakpointLarge = 1000;
        var breakpointXLarge = 1200;
        var breakpointXXLarge = 1600;
        var breakpointXXXLarge = 1900;
        var currentBreakPoint = "Small";
        var oldBreakPoint = "blah";
        //Default Block Sizes
        var blockSizeG1 = 200;
        var blockSizeG2 = 400;
        var blockHeight = 300;
        var blockHeightQ = 600;
        // Default
        var blocksPerRow = 5;
        //Important Ordering arrays
        var eOrder = new Array();
        var sizePE = new Array();
        //Startup functions (get blocks per row)
        blocksPerRowC(container);
        // Set the old breakpoint using the current one to begin with
        oldBreakPoint = currentBreakPoint;
        // Calculate the box sizing
        calcBlockSize(container);
        // Set the background size to go along with it
        setBackgroundSize();
        // Resize the actual elements
        resize();
        // Index all the elements // Important for ordering
        // index();
        // INTRODUCING THE ARRANGE FUNCTION
        arrange();
        // Work out how many blocks per row depending on screen size
        $( window ).resize(function() {
            // Container Width
            container = $('.grid-container').width();
            // Get blocks per row
            blocksPerRowC(container);
            // Calculate block sizing depending on the container size
            calcBlockSize(container);
            // Set background size along with it
            setBackgroundSize();
            // Resize the actual elements
            resize();
            // INTRODUCING THE ARRANGE FUNCTION
            // If a breakpoint has been reached
            if (currentBreakPoint != oldBreakPoint){
                // Run this Function
                breakPointChange();
                // Reset the old Breakpoint as the current one because now it is current
                oldBreakPoint = currentBreakPoint;
            }
        });
        //Set the placeholders
        function setBackgroundSize(){
            // Manipulate the background size. Is only 4.2 for scaling issue on chrome
            $('div.ccm-block-page-list-thumbnail-grid-wrapper').css("background-size", (blockSizeG1 + 10.2) + 'px ' + (blockHeight + 10.2) + 'px');
        };
        //Figure out how many blocks are meant per row for the given width
        function blocksPerRowC(vp){
            if (vp <= breakpointSmall){
                blocksPerRow = 1;
                currentBreakPoint = "Small";
            } else if (vp <= breakpointMedium && vp > breakpointSmall){
                blocksPerRow = 1;
                currentBreakPoint = "Medium";
            } else if (vp <= breakpointLarge && vp > breakpointMedium){
                blocksPerRow = 2;
                currentBreakPoint = "Large";
            } else if (vp <= breakpointXLarge && vp > breakpointLarge){
                blocksPerRow = 2;
                currentBreakPoint = "XLarge";
            } else if (vp <= breakpointXXLarge && vp > breakpointXLarge){
                blocksPerRow = 3;
                currentBreakPoint = "XXLarge";
            } else if (vp <= breakpointXXXLarge && vp > breakpointXXLarge){
                blocksPerRow = 4;
                currentBreakPoint = "XXXLarge";
            } else if (vp > breakpointXXXLarge){
                blocksPerRow = 5;
                currentBreakPoint = "XXXXLarge";
            }
            console.log(currentBreakPoint);
        }
        // On break point change (reset/arrange currently disabled)
        function breakPointChange () {
            // Reset The Layout
            // reset();
            // Arrange the Layout again
            // arrange();
        }
        //Calculate Block Sizes
        function calcBlockSize(vp){
            //Size of all grid elements without border and padding
            var gridSize = (vp - (20*blocksPerRow));
            //Single Block Size
            blockSizeG1 = gridSize / blocksPerRow;
            //Double Block Size
            blockSizeG2 = (blockSizeG1 * 2) + 4;
            //Heights * 1.3333 3:4 aspect ratio
            //blockHeight = (blockSizeG1 * 0.6);
            blockHeight = 150;
            blockHeightQ = (blockSizeG2 * 1.33333) - 2;
        }
        // Applies the computed width/height to every grid item.
        function resize() {
            //Element interaction is here
            $('.grid-item').css("width", blockSizeG1);
            $('.grid-item').css("height", blockHeight);
        };
        // Important indexing of grid for sorting (currently unused — see the
        // commented-out index() call above).
        function index (){
            // For each dom with the class ""
            $('div.ccm-block-page-list-page-entry-grid-item').each(function(index, element){
                var size = 0;
                // Set Sizes
                if ($(element).hasClass("Single")){ size = 1 };
                if ($(element).hasClass("Double")){ size = 2 };
                if ($(element).hasClass("Quad")){ size = 2 };
                // Store element
                eOrder.push(element);
                // Store Sizes
                sizePE.push(size);
                // console.log(index);
            });
        };
        // The arrange function: walks the indexed items, tracking the current
        // column/row, shifting single blocks back into gaps and pulling Quad
        // blocks to the front of their row.
        function arrange (){
            // console.log("aranging")
            var divRate = blocksPerRow;
            // A column counter
            var column = 0;
            // A row counter
            var rowCount = 0;
            // Global variable to be used to determine if the next block is to be moved back to an empty spot
            var toShiftBack;
            // For each item in eOrder (declared above)
            $(eOrder).each(function(index, element){
                //Find element's size
                var size = sizePE[index];
                // Add to the column
                column += size;
                // If size is only 1 column wide
                if (size == 1){
                    if (toShiftBack){
                        $(element).insertBefore(eOrder[index-1]);
                        // console.log("element shifted behind " + (index - 1));
                        rowCount += 1;
                        // console.log("Passed row " + rowCount);
                        toShiftBack = false;
                    }
                }
                if ($(element).hasClass("Quad")){
                    if (rowCount == 0){
                        // console.log("Quad to be inserted first in first row");
                        $(element).insertBefore(eOrder[0]);
                        // console.log("Quad moved behind" + 0);
                    }else{
                        var place = ((rowCount * divRate) - 1);
                        // console.log("Quad to be inserted first in other row place: " + place);
                        $(element).insertBefore(eOrder[place]);
                    }
                }
                if (column == divRate){
                    rowCount += 1;
                    // console.log("Passed Row " + rowCount);
                    column = 0;
                } else if (column == divRate + 1) {
                    toShiftBack = true;
                    // console.log("ready to shift element back")
                    column -= divRate - 1;
                }
                lastElement = eOrder[index];
                // console.log(column);
            });
        }
        // Finally the reset function
        function reset() {
            // console.log('resetting');
            // Simply goes through each item in eOrder which we indexed before and places the elements in order as they were before arranging, effectively resetting it.
            $(eOrder).each(function(index, element){
                $(eOrder[index + 1]).insertAfter(element);
            });
        }
    }
});
|
<gh_stars>0
package xyz.brassgoggledcoders.opentransport.api.transporttypes;
import net.minecraft.creativetab.CreativeTabs;
import net.minecraft.entity.Entity;
import xyz.brassgoggledcoders.opentransport.api.blockwrappers.IBlockWrapper;
import javax.annotation.Nonnull;
import java.util.Map;
/**
 * Describes a transport type backed by a game entity, able to register
 * items for wrapped blocks and its own entity classes.
 *
 * @param <E> base entity class used by transports of this type
 */
public interface ITransportType<E extends Entity> {
	/** @return unique name identifying this transport type */
	@Nonnull
	String getName();
	/** @return the entity class all transports of this type derive from */
	@Nonnull
	Class<E> getBaseEntity();
	/** @return the creative tab the transport items belong to */
	@Nonnull
	CreativeTabs getCreativeTab();
	// Registers items for the given block wrappers; keys are presumably the
	// wrapper registry names — NOTE(review): confirm against implementations.
	void registerItems(Map<String, IBlockWrapper> blockWrappers);
	void registerEntities();
	// Enable/disable flag for this transport type.
	boolean getIsActive();
	void setIsActive(boolean isActive);
}
|
package store;
import org.apache.commons.lang3.SerializationUtils;
import utils.StringUtil;
/**
 * A fixed-depth Merkle trie keyed by 32-byte keys (traversed as 64 hex
 * nibbles), with every node persisted content-addressed in a
 * {@link DataStore} (node hash -> serialized node). The root hash uniquely
 * identifies the whole trie state.
 *
 * Fix: removed leftover debug {@code System.out.println} calls from the
 * put() path — library code should not write to stdout on every insert.
 */
public class MerkleTrie implements Trie{
    private Node root;
    private DataStore db;
    /** Creates an empty trie and persists its empty root node. */
    public MerkleTrie(DataStore db){
        this.db = db;
        root = new Node();
        db.put(root.getHash(), root.serialize());
    }
    /** Reopens an existing trie whose root node is stored under {@code rootHash}. */
    public MerkleTrie(DataStore db, byte[] rootHash) {
        this.db = db;
        root = getNode(rootHash);
    }
    // Loads and deserializes a node from the backing store by its hash.
    private Node getNode(byte[] key) {
        byte[] branch = db.get(key);
        return SerializationUtils.deserialize(branch);
    }
    // Value (0-15) of the hex nibble at position i of the key path.
    private int getIndex(String path, int i) {
        return Integer.parseInt(String.valueOf(path.charAt(i)), 16);
    }
    /**
     * Inserts/overwrites the value stored under a 32-byte key.
     *
     * @throws IllegalArgumentException if the key is not exactly 32 bytes
     */
    @Override
    public void put(byte[] key, byte[] value) {
        String path = StringUtil.bytesToHex(key);
        if(path.length() != 64)
            throw new IllegalArgumentException("key must be able to transform to 32 bytes hex string");
        root = put(value, root, path, 0);
    }
    // Recursive insert: walks one nibble per level, creating missing branch
    // nodes, then re-hashes and re-persists every node on the way back up.
    private Node put(byte[] value, Node root, String path, int pointer) {
        // Past the last nibble: store the value in a leaf node.
        if(pointer > 63) {
            Node node = new Node(value);
            db.put(node.getHash(), node.serialize());
            return node;
        }
        int index = getIndex(path, pointer);
        if(root.getChild(index) == null) {
            // create new branch if not exist
            Node node = new Node();
            db.put(node.getHash(), node.serialize());
            root.addChild(index, node.getHash());
        }
        // if branch exists, retrieve node from db and recurse
        Node node = getNode(root.getChild(index));
        Node node1 = put(value, node, path, pointer + 1);
        // update state and db recursively after updating the child
        root.addChild(index, node1.getHash());
        db.put(root.getHash(), root.serialize());
        return root;
    }
    /**
     * Looks up the value stored under a 32-byte key.
     *
     * @return the value, or {@code null} if the path is absent
     * @throws IllegalArgumentException if the key is not exactly 32 bytes
     */
    @Override
    public byte[] get(byte[] key) {
        String path = StringUtil.bytesToHex(key);
        Node node = root;
        if(path.length() != 64)
            throw new IllegalArgumentException("key must be able to transform to 32 bytes hex string");
        for (int i = 0; i < 64; i++) {
            int index = getIndex(path, i);
            if(node.getChild(index) == null){
                return null;
            }
            node = getNode(node.getChild(index));
        }
        return node.getValue();
    }
    /** @return the hash of the current root node (the trie's state id). */
    @Override
    public byte[] getRootHash() {
        return root.getHash();
    }
    /** Points this trie at a previously persisted root hash. */
    @Override
    public void setRoot(byte[] root) {
        this.root = getNode(root);
    }
}
|
<reponame>GeneralNZR/maths-and-javascript
/**
* Différentes fonctions pour manipuler des matrices.
* @author <NAME>
* @version 1.0
*/
/**
 * @description Builds the n×n identity matrix.
 * @param {number} n - Dimension of the matrix.
 * @return {Array} The identity matrix as an array of row arrays.
 */
const matriceIdentite = (n) =>
    Array.from({ length: n }, (_, row) =>
        Array.from({ length: n }, (_, col) => (row === col ? 1 : 0))
    );
/**
 * @description Vérifie la dimension d'une matrice (checks a matrix is n×n).
 * Fixes: the original read matrice[0].length only, so it threw a TypeError
 * on an empty matrix and accepted ragged matrices whose later rows had the
 * wrong length. Now every row is checked.
 * @param {Array} matrice - La matrice à vérifier.
 * @param {Number} n - La dimension attendue.
 * @return {boolean} - True si la matrice est valide, false sinon.
 */
const dimensionMatrice = (matrice, n) =>
    matrice.length == n && matrice.every((ligne) => ligne.length == n);
/* ---------------- *
* Travail en cours *
* ---------------- */
// Tests
console.table({
'matriceIdentite(3)': matriceIdentite(3),
"dimensionMatrice([[0, 0, 0], [0, 0, 0], [0, 0, 0]], 3)": dimensionMatrice([[0, 0, 0], [0, 0, 0], [0, 0, 0]], 3),
}) |
<reponame>cstoquer/rtc-cars
// HTTP + socket.io signalling server for the RTC cars demo.
var express = require('express');
var http = require('http');
var io = require('socket.io');
var app = express();
// Hosting environment may override the port (e.g. PaaS); default 3000.
app.set('port', process.env.PORT || 3000);
app.use(express.favicon());
// Everything under ./www is served as static client assets.
app.use('/', express.static(process.cwd() + '/www'));
//█████████████████████████████████████████████
//█████████████████████████████████████████████
//█▀▄▄▄▄ █▀▄▄▄▄▀█▄ ▀▄▄▄█▄ ▄██▄ ▄█▀▄▄▄▄▀█▄ ▀▄▄▄█
//██▄▄▄▄▀█ ▄▄▄▄▄██ ███████ ██ ███ ▄▄▄▄▄██ █████
//█ ▀▀▀▀▄█▄▀▀▀▀▀█▀ ▀▀▀█████ ████▄▀▀▀▀▀█▀ ▀▀▀██
//█████████████████████████████████████████████
// Start the HTTP server that socket.io piggybacks on below.
var server = http.createServer(app);
server.listen(app.get('port'), function(){
  console.log('Express server listening on port ' + app.get('port'));
});
//████████████████████████████████████████████████████
//████████████████████▄░████████████▀████████▄████████
//██▀▄▄▄░█▀▄▄▄▀█▀▄▄▀░██░█▄░▄█▀▄▄▄▀█▄░▄▄█████▄░██▀▄▄▄▀█
//███▄▄▄▀█░███░█░██████░▄░███░▄▄▄▄██░████████░██░███░█
//██░▀▀▀▄█▄▀▀▀▄█▄▀▀▀▄█▀░██░▀█▄▀▀▀▀██▄▀▀▄█░░█▀░▀█▄▀▀▀▄█
//████████████████████████████████████████████████████
// Pool of connected sockets keyed by socket id, used to route signalling
// messages between peers.
var sockets = {};
var sock = io.listen(server);
sock.set('log level', 1);
sock.sockets.on('connection', function (socket) {
  // adding socket in pool
  var clientId = socket.id;
  sockets[clientId] = socket;
  // broadcast: relay a message to every connected client (sender included).
  socket.on('broadcast', function (data) {
    // TODO: we can use broadcast function available in socket.io
    data = data || {};
    if (!data.from) data.from = clientId;
    for (var id in sockets) {
      sockets[id].emit('message', data);
    }
  });
  // simple message: relay to a single destination socket.
  socket.on('message', function (data) {
    data = data || {};
    // BUGFIX: "destination" was assigned without var, creating an implicit
    // global shared across all connections (and a ReferenceError in strict
    // mode). Declare it locally.
    var destination = sockets[data.to];
    if (!destination) return console.log('No client with socket id ' + data.to);
    if (!data.from) data.from = clientId;
    destination.emit('message', data);
  });
  // Remove the client from the pool and tell remaining peers it left.
  socket.on('disconnect', function () {
    delete sockets[clientId];
    for (var id in sockets) {
      sockets[id].emit('disconnection', { from: clientId });
    }
  });
});
|
package com.acgist.snail.net.application;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.acgist.snail.config.SystemConfig;
import com.acgist.snail.net.TcpClient;
import com.acgist.snail.pojo.message.ApplicationMessage;
import com.acgist.snail.utils.NetUtils;
/**
 * <p>System client: connects to the already-running application instance
 * over the loopback interface and sends it {@link ApplicationMessage}s.</p>
 *
 * @author acgist
 */
public final class ApplicationClient extends TcpClient<ApplicationMessageHandler> {
	private static final Logger LOGGER = LoggerFactory.getLogger(ApplicationClient.class);
	private ApplicationClient() {
		super("Application Client", SystemConfig.CONNECT_TIMEOUT, new ApplicationMessageHandler());
	}
	public static final ApplicationClient newInstance() {
		return new ApplicationClient();
	}
	@Override
	public boolean connect() {
		// Always connects to the local service port on the loopback address.
		return this.connect(NetUtils.LOOPBACK_HOST_ADDRESS, SystemConfig.getServicePort());
	}
	/**
	 * <p>Sends a system message.</p>
	 *
	 * @param message the system message to send
	 */
	public void send(ApplicationMessage message) {
		this.handler.send(message);
	}
	/**
	 * <p>Wakes up the main window.</p>
	 * <p>Sends a NOTIFY message to the already-started instance, followed by
	 * a CLOSE message, then closes this client connection.</p>
	 */
	public static final void notifyWindow() {
		final ApplicationClient client = ApplicationClient.newInstance();
		try {
			final boolean success = client.connect();
			if(success) {
				client.send(ApplicationMessage.message(ApplicationMessage.Type.NOTIFY));
				client.send(ApplicationMessage.message(ApplicationMessage.Type.CLOSE));
			}
		} catch (Exception e) {
			// Runtime log message intentionally left untranslated ("failed to
			// wake the main window").
			LOGGER.error("唤醒主窗口异常", e);
		} finally {
			client.close();
		}
	}
}
|
<reponame>schnappischnap/Advent-of-Code-2016<gh_stars>0
def dragon_curve(s):
    """One dragon-curve expansion: s + "0" + (s reversed, with '1'->'0' and
    any other character -> '1', matching the original bit-flip rule)."""
    complement = "".join("0" if bit == "1" else "1" for bit in reversed(s))
    return s + "0" + complement
def checksum(s):
    """Pairwise checksum: each pair collapses to '1' if its two characters
    match, else '0'; repeat until the result has odd length. Like the
    original, input length is expected to be even (odd input raises
    IndexError on the dangling last character)."""
    while True:
        s = "".join(
            "1" if s[i] == s[i + 1] else "0" for i in range(0, len(s), 2)
        )
        if len(s) % 2 == 1:
            return s
def solve(input, length):
    """Grow `input` with dragon_curve until it covers `length` characters,
    truncate to exactly `length`, and return the checksum of the result."""
    data = input
    while len(data) < length:
        data = dragon_curve(data)
    return checksum(data[:length])
if __name__ == '__main__':
    # Puzzle input is the initial state; disk lengths are fixed by the puzzle.
    with open("day_16_input.txt") as f:
        puzzle_input = f.read()
    # Fix: the original used Python 2 print statements, which are a syntax
    # error on Python 3; print() with a single string works on both.
    print("Part 1 answer: " + str(solve(puzzle_input, 272)))
    print("Part 2 answer: " + str(solve(puzzle_input, 35651584)))
/**
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License.
*/
/**
 * Constants used when processing speech interactions.
 */
export declare class SpeechConstants {
    /**
     * XML tag structure for an empty speak tag, for use in the 'speak'
     * property of an Activity. Setting it signals the channel that no
     * speech should be generated for the activity.
     */
    static readonly EmptySpeakTag: string;
}
//# sourceMappingURL=speechConstants.d.ts.map
|
#!/bin/sh
# Launch the double-countdown app from its project directory.
# Fix (ShellCheck SC2164): abort if cd fails instead of running `npm start`
# from whatever directory the script happened to be invoked in.
cd /home/pi/Documents/escapetools/doublecountdown/ || exit 1
npm start
# | chromium-browser --noerrdialogs --kiosk http://localhost:3000/player/green
|
#############################################################################
# Bash script using Azure CLI to create a document in CosmosDB
# Docs: https://docs.microsoft.com/en-us/rest/api/cosmos-db/create-a-document
#############################################################################
# Args: $1 = CosmosDB account name, $2 = account master (primary) key.
# Fix: variable "comsosDbInstanceName" was misspelled; renamed to
# cosmosDbInstanceName at both its definition and single use.
cosmosDbInstanceName=$1
masterKey=$2
dbName="maindb"
containerName="data"
partitionKeyName="itemType"
# If TRUE provided document can be created or updated automatically.
# If FALSE and an existing "id" is provided, there will be an error.
isUpsert=true
# JSON data to upload is stored in a file.
# Notes:
# - The "id" property is always required and must not be empty. If you don't have an ID, set it to a GUID.
# - When using the REST API, there must also be a property matching the partition key name with a value.
baseUrl="https://$cosmosDbInstanceName.documents.azure.com/"
verb="post"
resourceType="docs"
resourceLink="dbs/$dbName/colls/$containerName/docs"
resourceId="dbs/$dbName/colls/$containerName"
# URIs together with required parameter values can be found at: https://docs.microsoft.com/en-us/rest/api/cosmos-db/cosmosdb-resource-uri-syntax-for-rest
# CosmosDB REST API requires a hashed authorization header: https://docs.microsoft.com/de-de/rest/api/cosmos-db/access-control-on-cosmosdb-resources#authorization-header
# To get date in HTTP format, locale must be set to US. Otherwise day names would be localized (to German, for example)
# HTTP format is not directly supported by bash. To make it work, set the current timezone to GMT.
now=$(env LANG=en_US TZ=GMT date '+%a, %d %b %Y %T %Z')
echo "Date: " $now
# Concat verb, resource type, resource ID and date in the expected format. REST API expects the signature to be lowercase.
# The "little" problem I was not aware of: trailing newlines (`\n`) are always truncated when outputting a string.
# This would break the hash, because CosmosDB expects them to be there. That's why the two trailing newlines are appended back after the lowercase operation.
signature="$(printf "%s" "$verb\n$resourceType\n$resourceId\n$now" | tr '[A-Z]' '[a-z]')\n\n"
echo "Signature: $signature"
# Calculate a hash of the signature using the primary key of the CosmosDB instance.
# See https://superuser.com/questions/1546027/what-is-the-openssl-equivalent-of-this-given-c-hashing-code/1546036 for details on why
# this is so tricky.
hexKey=$(printf "$masterKey" | base64 --decode | hexdump -v -e '/1 "%02x"')
echo "Hex key: " $hexKey
hashedSignature=$(printf "$signature" | openssl dgst -sha256 -mac hmac -macopt hexkey:$hexKey -binary | base64)
echo "Hashed signature: $hashedSignature"
# Build the authorization header using the format "type={typeoftoken}&ver={tokenversion}&sig={hashsignature}"
authString="type=master&ver=1.0&sig=$hashedSignature"
echo "Auth string: $authString"
# Auth string is expected to be URL encoded. But of course there's no built-in way in bash to do that. Geez.
# This is not a full URL encoding but instead only changes the characters we may see: = -> %3d, & -> %26, + => %2b, / => %2f
urlEncodedAuthString=$(printf "$authString" | sed 's/=/%3d/g' | sed 's/&/%26/g' | sed 's/+/%2b/g' | sed 's/\//%2f/g')
echo "URL encoded auth string: $urlEncodedAuthString"
# Make the API call by combining base URL and resource link.
url="$baseUrl$resourceLink"
echo "URL: $url"
partitionKeyValue="default"
# Download ten sample product documents and upsert each into the container.
for prodId in {1..10}
do
    echo "Creating product data $prodId..."
    documentJson="sampleproduct$prodId.json"
    wget -O $documentJson https://raw.githubusercontent.com/MicrosoftDocs/mslearn-live-azure-fundamentals/master/scripts/sampledata/$documentJson
    az rest --verbose -m $verb -b "@$documentJson" -u $url --headers x-ms-date="$now" x-ms-documentdb-partitionkey=[\"$partitionKeyValue\"] x-ms-documentdb-is-upsert=$isUpsert x-ms-version=2018-12-31 x-ms-documentdb-isquery=true Content-Type=application/json Authorization=$urlEncodedAuthString
done
class MovesAppOAuthDisconnect(views.OAuthDisconnectView):
    """
    View that handles the disconnect of a Moves app account.

    Relies on the generic ``OAuthDisconnectView`` for the disconnect flow;
    this subclass only wires in the Moves-specific OAuth client and the
    setup URL name.
    """
    # OAuth client class used to talk to the Moves API during disconnect.
    client_class = MovesAppClient
    # Named URL of the setup view (project constant); presumably where the
    # user is sent to re-connect after disconnecting — TODO confirm in base view.
    setup_url = SETUP_URL_NAME
using System;
/// <summary>Minimal contract for anything that exposes a street address.</summary>
internal interface ILugar
{
    /// <summary>Gets or sets the address ("dirección").</summary>
    string Direccion { get; set; }
}
/// <summary>
/// Plain <see cref="ILugar"/> implementation that stores a single address.
/// The address starts out as the empty string.
/// </summary>
internal class Lugar : ILugar
{
    /// <summary>The stored address.</summary>
    public string Direccion { get; set; }

    /// <summary>Creates a location with an empty address.</summary>
    public Lugar() => Direccion = string.Empty;

    /// <summary>Assigns the address.</summary>
    public void SetAddress(string address) => Direccion = address;

    /// <summary>Replaces the address (same effect as <see cref="SetAddress"/>).</summary>
    public void UpdateAddress(string newAddress) => Direccion = newAddress;

    /// <summary>Returns the current address.</summary>
    public string RetrieveAddress() => Direccion;

    /// <summary>Writes the address to standard output.</summary>
    public void DisplayAddressDetails() => Console.WriteLine($"Address: {Direccion}");
}
/// <summary>Demo driver: creates two locations, prints and updates their addresses.</summary>
class Program
{
    static void Main()
    {
        // First location: set once and display.
        var first = new Lugar();
        first.SetAddress("123 Main Street");
        first.DisplayAddressDetails();

        // Second location: display before and after an update.
        var second = new Lugar();
        second.SetAddress("456 Elm Street");
        second.DisplayAddressDetails();
        second.UpdateAddress("789 Oak Avenue");
        second.DisplayAddressDetails();
    }
}
//! Constructor
//! \param[in] mesh Shared pointer to the MPM mesh
//! \param[in] dt   Analysis time increment
template <unsigned Tdim>
mpm::MPMSchemeNewmark<Tdim>::MPMSchemeNewmark(
    const std::shared_ptr<mpm::Mesh<Tdim>>& mesh, double dt)
    : mpm::MPMScheme<Tdim>(mesh, dt) {}
//! Initialize nodes, cells and shape functions
//! Node/cell initialisation and particle shape-function computation are
//! independent, so they run as two parallel OpenMP sections.
template <unsigned Tdim>
inline void mpm::MPMSchemeNewmark<Tdim>::initialise() {
#pragma omp parallel sections
  {
    // Spawn a task for initialising nodes and cells
#pragma omp section
    {
      // Initialise nodes (implicit-scheme variant of node initialisation)
      mesh_->iterate_over_nodes(std::bind(
          &mpm::NodeBase<Tdim>::initialise_implicit, std::placeholders::_1));
      mesh_->iterate_over_cells(
          std::bind(&mpm::Cell<Tdim>::activate_nodes, std::placeholders::_1));
    }
    // Spawn a task for particles
#pragma omp section
    {
      // Iterate over each particle to compute shapefn
      mesh_->iterate_over_particles(std::bind(
          &mpm::ParticleBase<Tdim>::compute_shapefn, std::placeholders::_1));
    }
  }  // Wait to complete
}
//! Compute nodal kinematics - map mass, momentum and inertia to nodes
//! \param[in] phase Phase index the nodal quantities are accumulated into
template <unsigned Tdim>
inline void mpm::MPMSchemeNewmark<Tdim>::compute_nodal_kinematics(
    unsigned phase) {
  // Assign mass, momentum and inertia to nodes
  mesh_->iterate_over_particles(
      std::bind(&mpm::ParticleBase<Tdim>::map_mass_momentum_inertia_to_nodes,
                std::placeholders::_1));
#ifdef USE_MPI
  // Run if there is more than a single MPI task
  if (mpi_size_ > 1) {
    // MPI all reduce nodal mass
    mesh_->template nodal_halo_exchange<double, 1>(
        std::bind(&mpm::NodeBase<Tdim>::mass, std::placeholders::_1, phase),
        std::bind(&mpm::NodeBase<Tdim>::update_mass, std::placeholders::_1,
                  false, phase, std::placeholders::_2));
    // MPI all reduce nodal momentum
    mesh_->template nodal_halo_exchange<Eigen::Matrix<double, Tdim, 1>, Tdim>(
        std::bind(&mpm::NodeBase<Tdim>::momentum, std::placeholders::_1, phase),
        std::bind(&mpm::NodeBase<Tdim>::update_momentum, std::placeholders::_1,
                  false, phase, std::placeholders::_2));
    // MPI all reduce nodal inertia
    mesh_->template nodal_halo_exchange<Eigen::Matrix<double, Tdim, 1>, Tdim>(
        std::bind(&mpm::NodeBase<Tdim>::inertia, std::placeholders::_1, phase),
        std::bind(&mpm::NodeBase<Tdim>::update_inertia, std::placeholders::_1,
                  false, phase, std::placeholders::_2));
  }
#endif
  // Compute nodal velocity and acceleration (only on active nodes, selected
  // by the NodeBase::status predicate)
  mesh_->iterate_over_nodes_predicate(
      std::bind(&mpm::NodeBase<Tdim>::compute_velocity_acceleration,
                std::placeholders::_1),
      std::bind(&mpm::NodeBase<Tdim>::status, std::placeholders::_1));
}
//! Update nodal kinematics by Newmark scheme
//! \param[in] phase         Phase index
//! \param[in] newmark_beta  Newmark beta parameter
//! \param[in] newmark_gamma Newmark gamma parameter
template <unsigned Tdim>
inline void mpm::MPMSchemeNewmark<Tdim>::update_nodal_kinematics_newmark(
    unsigned phase, double newmark_beta, double newmark_gamma) {
  // Update nodal velocity and acceleration (active nodes only)
  mesh_->iterate_over_nodes_predicate(
      std::bind(&mpm::NodeBase<Tdim>::update_velocity_acceleration_newmark,
                std::placeholders::_1, phase, newmark_beta, newmark_gamma, dt_),
      std::bind(&mpm::NodeBase<Tdim>::status, std::placeholders::_1));
}
//! Compute stress and strain by Newmark scheme
//! \param[in] phase              Phase index
//! \param[in] pressure_smoothing Whether to smooth particle pressure
template <unsigned Tdim>
inline void mpm::MPMSchemeNewmark<Tdim>::compute_stress_strain(
    unsigned phase, bool pressure_smoothing) {
  // Iterate over each particle to calculate strain using nodal displacement
  mesh_->iterate_over_particles(std::bind(
      &mpm::ParticleBase<Tdim>::compute_strain_newmark, std::placeholders::_1));
  // Pressure smoothing
  if (pressure_smoothing) this->pressure_smoothing(phase);
  // Iterate over each particle to compute stress
  mesh_->iterate_over_particles(std::bind(
      &mpm::ParticleBase<Tdim>::compute_stress_newmark, std::placeholders::_1));
}
//! Precompute stresses and strains
//! Intentionally a no-op for the Newmark (implicit) scheme: stresses are
//! updated after the solve, in postcompute_stress_strain.
template <unsigned Tdim>
inline void mpm::MPMSchemeNewmark<Tdim>::precompute_stress_strain(
    unsigned phase, bool pressure_smoothing) {}
//! Postcompute stresses and strains
template <unsigned Tdim>
inline void mpm::MPMSchemeNewmark<Tdim>::postcompute_stress_strain(
    unsigned phase, bool pressure_smoothing) {
  this->compute_stress_strain(phase, pressure_smoothing);
}
// Compute forces
//! \param[in] gravity Gravity vector
//! \param[in] phase   Phase index
//! \param[in] step    Current analysis step (used for time-dependent loads)
//! \param[in] concentrated_nodal_forces Apply concentrated nodal forces
template <unsigned Tdim>
inline void mpm::MPMSchemeNewmark<Tdim>::compute_forces(
    const Eigen::Matrix<double, Tdim, 1>& gravity, unsigned phase,
    unsigned step, bool concentrated_nodal_forces) {
  // Spawn a task for external force
#pragma omp parallel sections
  {
#pragma omp section
    {
      // Iterate over each particle to compute nodal body force
      mesh_->iterate_over_particles(
          std::bind(&mpm::ParticleBase<Tdim>::map_body_force,
                    std::placeholders::_1, gravity));
      // Iterate over each particle to compute nodal inertial force
      mesh_->iterate_over_particles(std::bind(
          &mpm::ParticleBase<Tdim>::map_inertial_force, std::placeholders::_1));
      // Apply particle traction and map to nodes (step * dt_ = current time)
      mesh_->apply_traction_on_particles(step * dt_);
      // Iterate over each node to add concentrated node force to external
      // force
      if (concentrated_nodal_forces)
        mesh_->iterate_over_nodes(
            std::bind(&mpm::NodeBase<Tdim>::apply_concentrated_force,
                      std::placeholders::_1, phase, (step * dt_)));
    }
#pragma omp section
    {
      // Spawn a task for internal force
      // Iterate over each particle to compute nodal internal force
      mesh_->iterate_over_particles(std::bind(
          &mpm::ParticleBase<Tdim>::map_internal_force, std::placeholders::_1));
    }
  }  // Wait for tasks to finish
}
// Update particle kinematics
//! NOTE(review): velocity_update, damping_type and damping_factor are unused
//! here; the signature matches the generic scheme API. Position update uses
//! the Newmark-specific particle routine.
template <unsigned Tdim>
inline void mpm::MPMSchemeNewmark<Tdim>::compute_particle_kinematics(
    bool velocity_update, unsigned phase, const std::string& damping_type,
    double damping_factor) {
  // Iterate over each particle to compute updated position
  mesh_->iterate_over_particles(
      std::bind(&mpm::ParticleBase<Tdim>::compute_updated_position_newmark,
                std::placeholders::_1, dt_));
}
// Update particle stress, strain and volume
template <unsigned Tdim>
inline void
    mpm::MPMSchemeNewmark<Tdim>::update_particle_stress_strain_volume() {
  // Iterate over each particle to update particle stress and strain
  mesh_->iterate_over_particles(std::bind(
      &mpm::ParticleBase<Tdim>::update_stress_strain, std::placeholders::_1));
  // Iterate over each particle to update particle volume
  mesh_->iterate_over_particles(std::bind(
      &mpm::ParticleBase<Tdim>::update_volume, std::placeholders::_1));
}
//! Postcompute nodal kinematics - map mass and momentum to nodes
//! Intentionally a no-op for the Newmark scheme.
template <unsigned Tdim>
inline void mpm::MPMSchemeNewmark<Tdim>::postcompute_nodal_kinematics(
    unsigned phase) {}
//! Stress update scheme
//! \return Name of the scheme ("Newmark")
template <unsigned Tdim>
inline std::string mpm::MPMSchemeNewmark<Tdim>::scheme() const {
  return "Newmark";
}
|
class AirflowNetworkDistributionComponentCoil:
    """Holder for an ``AirflowNetwork:Distribution:Component:Coil`` object.

    Sets pyidf validation to error level on construction (note: this
    mutates global ``pyidf`` state) and initialises the field defaults.
    """

    def __init__(self):
        # Set validation level to error
        pyidf.validation_level = ValidationLevel.error
        # Initialize attributes
        # Reference to the coil ("object-list" field in the data dictionary).
        self.coil_name = "object-list|Coil Name"
        # IDF object type of the referenced coil.
        self.coil_object_type = "Coil:Cooling:DX:SingleSpeed"
        # Air path length — presumably metres and the schema minimum; TODO confirm.
        self.air_path_length = 0.0001
#!/usr/bin/env bash
# Collect Django static files into STATIC_ROOT using the Docker settings module.
python manage.py collectstatic --settings=config.settings.docker
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from enumfields import Enum, EnumIntegerField
from jsonfield import JSONField
from shuup.core.fields import InternalIdentifierField
from shuup.core.modules import ModuleInterface
from shuup.utils.analog import define_log_model
from ._base import ShuupModel
class SupplierType(Enum):
    """Whether a supplier is managed inside the shop or by an external party."""
    INTERNAL = 1
    EXTERNAL = 2

    class Labels:
        # Human-readable, translatable labels used by enumfields.
        INTERNAL = _('internal')
        EXTERNAL = _('external')
@python_2_unicode_compatible
class Supplier(ModuleInterface, ShuupModel):
    """A product supplier; delegates stock/orderability logic to a pluggable module."""
    # Module spec used when no module_identifier is configured.
    default_module_spec = "shuup.core.suppliers:BaseSupplierModule"
    module_provides_key = "supplier_module"
    identifier = InternalIdentifierField(unique=True)
    name = models.CharField(verbose_name=_("name"), max_length=64)
    type = EnumIntegerField(SupplierType, verbose_name=_("supplier type"), default=SupplierType.INTERNAL)
    stock_managed = models.BooleanField(verbose_name=_("stock managed"), default=False)
    module_identifier = models.CharField(max_length=64, blank=True, verbose_name=_('module'))
    module_data = JSONField(blank=True, null=True, verbose_name=_("module data"))

    def __str__(self):
        return self.name

    def get_orderability_errors(self, shop_product, quantity, customer):
        """
        :param shop_product: Shop Product
        :type shop_product: shuup.core.models.ShopProduct
        :param quantity: Quantity to order
        :type quantity: decimal.Decimal
        :param customer: Ordering contact.
        :type customer: shuup.core.models.Contact
        :rtype: iterable[ValidationError]
        """
        return self.module.get_orderability_errors(shop_product=shop_product, quantity=quantity, customer=customer)

    def get_stock_statuses(self, product_ids):
        """
        :param product_ids: Iterable of product IDs
        :return: Dict of {product_id: ProductStockStatus}
        :rtype: dict[int, shuup.core.stocks.ProductStockStatus]
        """
        return self.module.get_stock_statuses(product_ids)

    def get_stock_status(self, product_id):
        """
        :param product_id: Product ID
        :type product_id: int
        :rtype: shuup.core.stocks.ProductStockStatus
        """
        return self.module.get_stock_status(product_id)

    def get_suppliable_products(self, shop, customer):
        """
        :param shop: Shop to check for suppliability
        :type shop: shuup.core.models.Shop
        :param customer: Customer contact to check for suppliability
        :type customer: shuup.core.models.Contact
        :rtype: list[int]
        """
        return [
            shop_product.pk
            for shop_product
            in self.shop_products.filter(shop=shop)
            if shop_product.is_orderable(self, customer, shop_product.minimum_purchase_quantity)
        ]

    def adjust_stock(self, product_id, delta, created_by=None, type=None):
        # ``type`` shadows the builtin; kept for backwards-compatible API.
        from shuup.core.suppliers.base import StockAdjustmentType
        adjustment_type = type or StockAdjustmentType.INVENTORY
        return self.module.adjust_stock(product_id, delta, created_by=created_by, type=adjustment_type)

    def update_stock(self, product_id):
        return self.module.update_stock(product_id)

    def update_stocks(self, product_ids):
        return self.module.update_stocks(product_ids)


# Log model tracking changes to Supplier instances.
SupplierLogEntry = define_log_model(Supplier)
<reponame>Nelias/smashing-ui<filename>stories/badge.stories.js<gh_stars>0
import React from 'react'
import {storiesOf, addDecorator} from '@storybook/react'
import {Badge} from '@smashing/badge'
import {withA11y} from '@storybook/addon-a11y'
import {SmashingThemeProvider} from '@smashing/theme'
// Run accessibility (a11y) checks on every story in this file.
addDecorator(withA11y)
// Badge stories: render the badge in each supported color, in both the
// default and "solid" appearances, wrapped in the Smashing theme provider
// so theme defaults are available.
storiesOf('Core|Badge', module)
  .addDecorator(story => (
    <SmashingThemeProvider
      theme={{
        defaults: {
          alert: {
            hasTrim: true,
            hasIcon: true
          }
        }
      }}
    >
      {story()}
    </SmashingThemeProvider>
  ))
  .add('appearance:default', () => (
    <React.Fragment>
      <Badge color="green" marginLeft={8}>
        Green
      </Badge>
      <Badge color="red" marginLeft={8}>
        Red
      </Badge>
      <Badge color="blue" marginLeft={8}>
        Blue
      </Badge>
      <Badge color="teal" marginLeft={8}>
        Teal
      </Badge>
    </React.Fragment>
  ))
  .add('appearance:solid', () => (
    <React.Fragment>
      <Badge color="green" marginLeft={8} appearance="solid">
        Green
      </Badge>
      <Badge color="red" marginLeft={8} appearance="solid">
        Red
      </Badge>
      <Badge color="blue" marginLeft={8} appearance="solid">
        Blue
      </Badge>
      <Badge color="teal" marginLeft={8} appearance="solid">
        Teal
      </Badge>
    </React.Fragment>
  ))
|
def findMaxElement(arr):
    """Return the largest element of ``arr``.

    Replaces the manual index-based scan with the built-in ``max``. The
    explicit empty check preserves the original behavior of raising
    ``IndexError`` (from ``arr[0]``) on an empty sequence, which ``max``
    alone would instead report as ``ValueError``.

    :param arr: sequence of mutually comparable values
    :return: the maximum value in ``arr``
    :raises IndexError: if ``arr`` is empty
    """
    if not arr:
        raise IndexError("findMaxElement() arg is an empty sequence")
    return max(arr)
# Demo: the maximum of the sample list is 9.
result = findMaxElement([2, 3, 5, 4, 9])
print(result)
<gh_stars>0
package de.unibi.agbi.biodwh2.dgidb;
import de.unibi.agbi.biodwh2.core.DataSource;
import de.unibi.agbi.biodwh2.core.etl.GraphExporter;
import de.unibi.agbi.biodwh2.core.etl.Parser;
import de.unibi.agbi.biodwh2.core.etl.RDFExporter;
import de.unibi.agbi.biodwh2.core.etl.Updater;
import de.unibi.agbi.biodwh2.dgidb.etl.DGIdbGraphExporter;
import de.unibi.agbi.biodwh2.dgidb.etl.DGIdbParser;
import de.unibi.agbi.biodwh2.dgidb.etl.DGIdbRDFExporter;
import de.unibi.agbi.biodwh2.dgidb.etl.DGIdbUpdater;
/**
 * BioDWH2 data source module for DGIdb (the Drug-Gene Interaction database).
 * Wires the DGIdb-specific ETL steps (updater, parser, exporters) into the
 * generic {@code DataSource} pipeline.
 */
public class DGIdbDataSource extends DataSource {
    /** @return the unique identifier of this data source */
    @Override
    public String getId() {
        return "DGIdb";
    }

    /** @return the updater that fetches new DGIdb source files */
    @Override
    public Updater getUpdater() {
        return new DGIdbUpdater();
    }

    /** @return the parser for the raw DGIdb files */
    @Override
    public Parser getParser() {
        return new DGIdbParser();
    }

    /** @return the exporter producing the RDF representation */
    @Override
    public RDFExporter getRdfExporter() {
        return new DGIdbRDFExporter();
    }

    /** @return the exporter producing the graph representation */
    @Override
    public GraphExporter getGraphExporter() {
        return new DGIdbGraphExporter();
    }
}
|
#!/bin/bash
# Demo driver for an ethdkg run: lays out six terminator windows in i3,
# deploys the DKG contract, then starts honest and adversarial nodes by
# typing commands into the windows with xdotool.
cd "$(dirname "$0")"
cd ..

###################################################################################
### WINDOW SETUP
###################################################################################

# Spawn a terminator window, give it time to map, and echo its X window id.
spawn_term() {
    terminator &>/dev/null &
    sleep 0.5
    xdotool search --pid $! | tail -n1
}

# Focus the i3 window with id $1, then type command $2 and press Return.
run_in() {
    i3-msg "[id=$1] focus" &>/dev/null
    xdotool type "$2"
    xdotool key Return
}

i3-msg "workspace 7" &>/dev/null
i3-msg "split h" &>/dev/null
sleep 0.1
# Close up to six leftover windows on the workspace (was six copy-pasted lines).
for _ in 1 2 3 4 5 6; do
    i3-msg "kill" &>/dev/null && sleep 0.1
done

# Top row: deployment window plus the first two node windows.
D=$(spawn_term)
N1=$(spawn_term)
N2=$(spawn_term)
# Bottom row: split each column vertically and spawn the remaining nodes.
i3-msg "focus left" &>/dev/null
i3-msg "focus left" &>/dev/null
i3-msg "split v" &>/dev/null
N3=$(spawn_term)
i3-msg "focus right" &>/dev/null
i3-msg "split v" &>/dev/null
N4=$(spawn_term)
i3-msg "focus right" &>/dev/null
i3-msg "split v" &>/dev/null
N5=$(spawn_term)
i3-msg "workspace back_and_forth" &>/dev/null

###################################################################################
### MAIN SCRIPT
###################################################################################
read -p "Press enter to start contract deployment..."
run_in "$D" 'python -m ethdkg deploy'
i3-msg "workspace back_and_forth" &>/dev/null

read -p "Press enter to start nodes... "
# Contract address is the 8th field of the 8th line of the deployment log.
CONTRACT=$(head -n8 ./logs/deployment.log | tail -n1 | cut -d" " -f8)
echo "Contract address: $CONTRACT"
run_in "$N1" "python -m ethdkg run $CONTRACT --account-index 1"
run_in "$N2" "python -m ethdkg run $CONTRACT --account-index 2"
run_in "$N3" "python -m ethdkg run $CONTRACT --account-index 3"
# Open an interactive session in the deployment window for manual inspection.
run_in "$D" 'ipython -i -c "from ethdkg import utils"'
xdotool type "contract = utils.get_contract(\"ETHDKG\", \"$CONTRACT\")"
xdotool key Return
i3-msg "workspace back_and_forth" &>/dev/null

read -p "Press enter to start adversarial nodes... "
run_in "$N4" "python -m ethdkg run $CONTRACT --account-index 4 --send-invalid-shares 0"
run_in "$N5" "python -m ethdkg run $CONTRACT --account-index 5 --abort-on-key-share-submission"
|
#!/usr/bin/env bash
# 1. Parse command line arguments
# 2. cd to the test directory
# 3. run tests
# 4. Print summary of successes and failures, exit with 0 if
#    all tests pass, else exit with 1

# Uncomment the line below if you want more debugging information
# about this script.
#set -x

# The name of this test script
this_program_name="biodemo2-test.sh"
# The program we want to test (either a full path to an executable, or the name of an executable in $PATH)
test_program=""
# Directory containing the test data files and expected outputs
test_data_dir=""
# Number of failed test cases
num_errors=0
# Total number of tests run
num_tests=0
# NOTE: "verbose" is intentionally left unset here; it becomes "true" only
# when -v is given (verbose_message compares it against the string "true").
# Print the usage/help message to stdout (the heredoc below is user-facing text).
function show_help {
    cat << UsageMessage

${this_program_name}: run integration/regression tests for biodemo2 

Usage:
    ${this_program_name} [-h] [-v] -p program -d test_data_dir 

Example:
    ${this_program_name} -p bin/biodemo2 -d data/tests

-h shows this help message

-v verbose output
UsageMessage
}
# Print an error message ($1) to stdout and exit with status $2.
function exit_with_error {
    # Use %s placeholders so a message containing '%' or backslash escapes is
    # printed literally (previously $1 was interpolated into the format string).
    printf '%s: ERROR: %s\n' "${this_program_name}" "$1"
    exit "$2"
}
# If -v is specified on the command line, print a more verbose message to stdout
function verbose_message {
    if [ "${verbose}" = true ]; then
        echo "${this_program_name} $1"
    fi
}
# Parse the command line arguments and set the global variables
# test_program and test_data_dir (and verbose when -v is given).
function parse_args {
    local OPTIND opt

    while getopts "hp:d:v" opt; do
        case "${opt}" in
            h)
                show_help
                exit 0
                ;;
            p)  test_program="${OPTARG}"
                ;;
            d)  test_data_dir="${OPTARG}"
                ;;
            v)  verbose=true
                ;;
            *)  # Unknown option: getopts has already printed a diagnostic;
                # previously this was silently ignored and the script ran on.
                exit_with_error "invalid command line argument, use -h for help" 2
                ;;
        esac
    done

    shift $((OPTIND-1))

    [ "$1" = "--" ] && shift

    if [[ -z ${test_program} ]]; then
        exit_with_error "missing command line argument: -p program, use -h for help" 2
    fi

    if [[ -z ${test_data_dir} ]]; then
        exit_with_error "missing command line argument: -d test_data_dir, use -h for help" 2
    fi
}
# Run a command and check that its stdout is exactly equal to the contents
# of a specified file, and that its exit status matches.
# ARG1: command we want to test, as a string (run through eval so that
#       redirections and pipes inside the string work)
# ARG2: a file path containing the expected output
# ARG3: expected exit status
# Side effects: increments num_tests, and num_errors on failure.
function test_stdout_exit {
    let num_tests+=1
    output=$(eval $1)
    exit_status=$?
    expected_output_file=$2
    expected_exit_status=$3
    verbose_message "Testing stdout and exit status: $1"
    # Compare actual vs expected output via process substitution.
    difference=$(diff <(echo "$output") $expected_output_file)
    if [ -n "$difference" ]; then
        let num_errors+=1
        echo "Test output failed: $1"
        echo "Actual output:"
        echo "$output"
        expected_output=$(cat $2)
        echo "Expected output:"
        echo "$expected_output"
        echo "Difference:"
        echo "$difference"
    elif [ "$exit_status" -ne "$expected_exit_status" ]; then
        let num_errors+=1
        echo "Test exit status failed: $1"
        echo "Actual exit status: $exit_status"
        echo "Expected exit status: $expected_exit_status"
    fi
}
# Run a command and check that the exit status is
# equal to an expected value (stdout/stderr are ignored).
# ARG1: command we want to test as a string
# ARG2: expected exit status
# NB: this is mostly for checking erroneous conditions, where the
# exact output message is not crucial, but the exit status is
# important
function test_exit_status {
    let num_tests+=1
    output=$(eval $1)
    exit_status=$?
    expected_exit_status=$2
    verbose_message "Testing exit status: $1"
    if [ "$exit_status" -ne "$expected_exit_status" ]; then
        let num_errors+=1
        echo "Test exit status failed: $1"
        echo "Actual exit status: $exit_status"
        echo "Expected exit status: $expected_exit_status"
    fi
}
# 1. Parse command line arguments (quoted so arguments containing spaces survive).
parse_args "$@"
# 2. Change to the test directory; abort if it does not exist.
cd "$test_data_dir" || exit_with_error "cannot change to test_data_dir: ${test_data_dir}" 1
# 3. Run tests
test_stdout_exit "$test_program one_sequence.fasta" one_sequence.fasta.expected 0
test_stdout_exit "$test_program two_sequence.fasta" two_sequence.fasta.expected 0
test_stdout_exit "$test_program --minlen 200 two_sequence.fasta" two_sequence.fasta.minlen_200.expected 0
test_stdout_exit "$test_program --minlen 200 < two_sequence.fasta" two_sequence.fasta.minlen_200.stdin.expected 0
test_stdout_exit "$test_program --maxlen 200 < two_sequence.fasta" \
    two_sequence.fasta.maxlen_200.stdin.expected 0
test_stdout_exit "$test_program empty_file" empty_file.expected 0
# Test when --minlen filters out ALL sequences (empty result)
test_stdout_exit "$test_program --minlen 1000 two_sequence.fasta" two_sequence.fasta.minlen_1000.expected 0
# Test exit status for a bad command line invocation
test_exit_status "$test_program --this_is_not_a_valid_argument > /dev/null 2>&1" 2
# Test exit status for a non existent input FASTA file
test_exit_status "$test_program this_file_does_not_exist.fasta > /dev/null 2>&1" 1
# 4. End of testing - check if any errors occurred
if [ "$num_errors" -gt 0 ]; then
    echo "$test_program failed $num_errors out of $num_tests tests"
    exit 1
else
    echo "$test_program passed all $num_tests successfully"
    exit 0
fi
|
// Package loglevel wraps the standard library logger with syslog-style
// priority filtering (fatal/error/warn/info/debug/trace).
package loglevel

import (
	"fmt"
	"io"
	"log"
	"strings"
)

// Logger defines our wrapper around the system logger
type Logger struct {
	priority int         // highest priority level that will be emitted
	prefix   string      // user-supplied prefix, without the priority tag
	logger   *log.Logger // underlying standard-library logger
}

// New creates a new Logger.
// out/prefix/flag are passed through to log.New; priority sets the
// filtering threshold (messages with a higher value are suppressed).
func New(out io.Writer, prefix string, flag int, priority int) *Logger {
	return &Logger{
		priority: priority,
		prefix:   prefix,
		logger:   log.New(out, prefix, flag),
	}
}

// SetPrefix sets the output prefix for the logger.
func (me *Logger) SetPrefix(prefix string) {
	me.prefix = prefix
	me.logger.SetPrefix(prefix)
}

// Prefix returns the current logger prefix
func (me *Logger) Prefix() string {
	return me.prefix
}

// setFullPrefix prepends the priority name to the prefix when the Lpriority
// flag is set. NOTE(review): Lpriority and the priorityName table (and the
// P* priority constants used below) are defined elsewhere in this package.
func (me *Logger) setFullPrefix(priority int) {
	if me.logger.Flags()&Lpriority != 0 {
		me.logger.SetPrefix(fmt.Sprintf("%s ", priorityName[priority]) + me.prefix)
	}
}

// Calls Output to print to the logger; suppressed when priority is above
// the configured threshold.
func (me *Logger) print(priority int, v ...interface{}) {
	if priority <= me.priority {
		me.setFullPrefix(priority)
		me.logger.Print(v...)
	}
}

// Calls Output to printf to the logger.
func (me *Logger) printf(priority int, format string, v ...interface{}) {
	if priority <= me.priority {
		me.setFullPrefix(priority)
		me.logger.Printf(format, v...)
	}
}

// Calls Output to println to the logger.
func (me *Logger) println(priority int, v ...interface{}) {
	if priority <= me.priority {
		me.setFullPrefix(priority)
		me.logger.Println(v...)
	}
}

// Priority returns the output priority for the logger.
func (me *Logger) Priority() int {
	return me.priority
}

// SetPriority sets the output priority for the logger.
func (me *Logger) SetPriority(priority int) {
	me.priority = priority
}

// SetPriorityString sets the output priority by the name of a debug level
// (case-insensitive); returns an error for unknown level names.
func (me *Logger) SetPriorityString(s string) error {
	s = strings.ToUpper(s)
	for i, name := range priorityName {
		if name == s {
			me.SetPriority(i)
			return nil
		}
	}
	return fmt.Errorf("Unable to find priority %s", s)
}

// Flags returns the output layouts for the logger.
func (me *Logger) Flags() int {
	return me.logger.Flags()
}

// SetFlags sets the output layouts for the logger.
func (me *Logger) SetFlags(layouts int) {
	me.logger.SetFlags(layouts)
}

// Fatal/Panic variants are never filtered: they always log (at Pfatal)
// and then terminate via the underlying logger.

// Fatal prints the message it's given and quits the program
func (me *Logger) Fatal(v ...interface{}) {
	me.setFullPrefix(Pfatal)
	me.logger.Fatal(v...)
}

// Fatalf prints the message it's given and quits the program
func (me *Logger) Fatalf(format string, v ...interface{}) {
	me.setFullPrefix(Pfatal)
	me.logger.Fatalf(format, v...)
}

// Fatalln prints the message it's given and quits the program
func (me *Logger) Fatalln(v ...interface{}) {
	me.setFullPrefix(Pfatal)
	me.logger.Fatalln(v...)
}

// Panic prints the message it's given and panic()s the program
func (me *Logger) Panic(v ...interface{}) {
	me.setFullPrefix(Pfatal)
	me.logger.Panic(v...)
}

// Panicf prints the message it's given and panic()s the program
func (me *Logger) Panicf(format string, v ...interface{}) {
	me.setFullPrefix(Pfatal)
	me.logger.Panicf(format, v...)
}

// Panicln prints the message it's given and panic()s the program
func (me *Logger) Panicln(v ...interface{}) {
	me.setFullPrefix(Pfatal)
	me.logger.Panicln(v...)
}

// Error prints to the standard logger with the Error level.
func (me *Logger) Error(v ...interface{}) {
	me.print(Perror, v...)
}

// Errorf prints to the standard logger with the Error level.
func (me *Logger) Errorf(format string, v ...interface{}) {
	me.printf(Perror, format, v...)
}

// Errorln prints to the standard logger with the Error level.
func (me *Logger) Errorln(v ...interface{}) {
	me.println(Perror, v...)
}

// Warn prints to the standard logger with the Warn level.
func (me *Logger) Warn(v ...interface{}) {
	me.print(Pwarn, v...)
}

// Warnf prints to the standard logger with the Warn level.
func (me *Logger) Warnf(format string, v ...interface{}) {
	me.printf(Pwarn, format, v...)
}

// Warnln prints to the standard logger with the Warn level.
func (me *Logger) Warnln(v ...interface{}) {
	me.println(Pwarn, v...)
}

// Info prints to the standard logger with the Info level.
func (me *Logger) Info(v ...interface{}) {
	me.print(Pinfo, v...)
}

// Infof prints to the standard logger with the Info level.
func (me *Logger) Infof(format string, v ...interface{}) {
	me.printf(Pinfo, format, v...)
}

// Infoln prints to the standard logger with the Info level.
func (me *Logger) Infoln(v ...interface{}) {
	me.println(Pinfo, v...)
}

// Debug prints to the standard logger with the Debug level.
func (me *Logger) Debug(v ...interface{}) {
	me.print(Pdebug, v...)
}

// Debugf prints to the standard logger with the Debug level.
func (me *Logger) Debugf(format string, v ...interface{}) {
	me.printf(Pdebug, format, v...)
}

// Debugln prints to the standard logger with the Debug level.
func (me *Logger) Debugln(v ...interface{}) {
	me.println(Pdebug, v...)
}

// Trace prints to the standard logger with the Trace level.
func (me *Logger) Trace(v ...interface{}) {
	me.print(Ptrace, v...)
}

// Tracef prints to the standard logger with the Trace level.
func (me *Logger) Tracef(format string, v ...interface{}) {
	me.printf(Ptrace, format, v...)
}

// Traceln prints to the standard logger with the Trace level.
func (me *Logger) Traceln(v ...interface{}) {
	me.println(Ptrace, v...)
}
|
/**
 * Dispatches a datastore "put" call based on a loosely-typed argument list.
 * <p>
 * Expected shapes (first element must be the String "put"):
 * <ul>
 *   <li>{"put", Entity}</li>
 *   <li>{"put", Transaction, Entity}</li>
 *   <li>{"put", List&lt;Entity&gt;}</li>
 *   <li>{"put", Transaction, List&lt;Entity&gt;}</li>
 * </ul>
 * NOTE(review): the List variants require a non-empty list whose first
 * element is an Entity (an empty list is rejected as invalid input), the
 * casts are unchecked, and {@code .get()} suggests the delegate returns a
 * future-like object — confirm against adminDsWithMockDelegate's API.
 *
 * @param input operation name followed by its arguments (see above)
 * @return the result of the delegated put, cast to T
 * @throws IllegalArgumentException if the argument list does not match any
 *         supported shape or the operation is not "put"
 */
public <T> T processTransaction(Object... input) throws IllegalArgumentException {
    if (input.length < 1) {
        throw new IllegalArgumentException("Invalid input: At least one parameter is required");
    }
    if (input[0] instanceof String && "put".equals(input[0])) {
        if (input.length == 2 && input[1] instanceof Entity) {
            // Handle single entity transaction
            return adminDsWithMockDelegate.put((Entity) input[1]).get();
        } else if (input.length == 3 && input[1] instanceof Transaction && input[2] instanceof Entity) {
            // Handle transaction with single entity
            return adminDsWithMockDelegate.put((Transaction) input[1], (Entity) input[2]).get();
        } else if (input.length == 2 && input[1] instanceof List && !((List) input[1]).isEmpty() && ((List) input[1]).get(0) instanceof Entity) {
            // Handle multiple entity transaction
            return adminDsWithMockDelegate.put((List<Entity>) input[1]).get();
        } else if (input.length == 3 && input[1] instanceof Transaction && input[2] instanceof List && !((List) input[2]).isEmpty() && ((List) input[2]).get(0) instanceof Entity) {
            // Handle transaction with multiple entities
            return adminDsWithMockDelegate.put((Transaction) input[1], (List<Entity>) input[2]).get();
        } else {
            throw new IllegalArgumentException("Invalid input parameters for put method");
        }
    } else {
        throw new IllegalArgumentException("Unsupported operation: " + input[0]);
    }
}
<reponame>pageobject-io/pageobject-generator<gh_stars>10-100
'use strict';
const expect = require('chai').expect;
const LinkTextLocatorStrategy = require('../../../lib/protractor/locator/link-text-locator-strategy');
const locator = require('../../locator/locator-strategy-spec-helper');
// A plain-text anchor yields by.linkText(...); an interpolated ({{...}})
// anchor cannot be located by link text, so the strategy returns null.
describe('LinkTextLocatorStrategy', () => {
  let source = new LinkTextLocatorStrategy();
  it('should extract locator', () => {
    expect(locator('<a>text</a>', source)).to.equal('by.linkText(\'text\')');
    expect(locator('<a>{{text}}</a>', source)).to.be.null;
  });
});
// Print the integers 0 through 10 (inclusive), one per line.
let n = 0;
while (n <= 10) {
  console.log(n);
  n += 1;
}
<reponame>ernestyalumni/CompPhys
/**
 * PDE.h
 * \file PDE.h
 * Navier-Stokes equation solver in 2-dimensions, incompressible flow, by Lattice Boltzmann method
 * \brief PDE, partial differential equation, dynamics
 * Simulation of flow inside a 2D square cavity using the lattice Boltzmann method (LBM)
 * \author (author name redacted in original); based in part on Joshi's LBM GPU code
 * \date 20170112
 *
 * cf. http://joshiscorner.com/files/src/blog/LBM-gpu-code.html
 * */
#ifndef __PDE_H__
#define __PDE_H__

// the base vectors and weight coefficients (GPU)
#include "init.h"  // dev_e, dev_alpha, dev_ant

// CUDA kernel for one LBM time step on an N_x x N_y lattice with NDIR
// discrete directions, updating density rh and velocity u.
// NOTE(review): parameter roles inferred from names — f/feq/f_new are
// presumably the current/equilibrium/post-step distribution functions;
// confirm against the kernel definition (PDE.cu).
__global__ void timeIntegration(
    float *rh, float2 *u,
    float *f, float *feq, float *f_new,
    const float LID_VELOCITY, const float REYNOLDS_NUMBER , const float DENSITY,
    const int N_x, const int N_y, const int NDIR
    ) ;

#endif  // __PDE_H__
|
require 'will_paginate/array'
class CamaleonCms::Admin::CommentsController < CamaleonCms::AdminController
include CamaleonCms::CommentHelper
add_breadcrumb I18n.t("camaleon_cms.admin.sidebar.comments"), :cama_admin_comments_url
before_action :validate_role
before_action :set_post, except: :list
before_action :set_comment, except: [:list, :index, :new, :create]
def list
@posts = current_site.posts.no_trash.joins(:comments).select("#{CamaleonCms::Post.table_name}.*, #{CamaleonCms::PostComment.table_name}.post_id").uniq.paginate(:page => params[:page], :per_page => current_site.admin_per_page)
end
# list of post comments for current post
def index
@comments = @post.comments.main.paginate(:page => params[:page], :per_page => current_site.admin_per_page)
end
def edit
render 'form', layout: false
end
# render a form to register a new comment
def answer
@answer = @comment.children.new
render 'form_answer', layout: false
end
# save a new anwer for this comment
def save_answer
answer = @comment.children.create(cama_comments_get_common_data.merge({post_id: @post.id, content: params[:comment][:content]}))
flash[:notice] = t('camaleon_cms.admin.comments.message.responses')
redirect_to action: :index
end
# toggle status of a comment
def toggle_status
_s = {a: "approved", s: "spam", p: "pending"}
k = _s[params[:s].to_sym]
@comment.update(approved: k)
flash[:notice] = "#{t('camaleon_cms.admin.comments.message.change_status')} #{t("camaleon_cms.admin.comments.message.#{k}")}"
redirect_to action: :index
end
def update
if @comment.update(content: params[:comment][:content])
flash[:notice] = t('camaleon_cms.admin.comments.message.updated')
redirect_to action: :index
else
render 'form'
end
end
def new
@comment = @post.comments.new
render 'form', layout: false
end
def create
comment = @post.comments.create(cama_comments_get_common_data.merge({post_id: @post.id, content: params[:comment][:content]}))
flash[:notice] = t('camaleon_cms.admin.comments.message.responses')
redirect_to action: :index
end
def destroy
flash[:notice] = t('camaleon_cms.admin.comments.message.destroy') if @comment.destroy
redirect_to action: :index
end
private
# define the parent post
def set_post
@post = current_site.posts.find(params[:post_id]).decorate
add_breadcrumb I18n.t("camaleon_cms.admin.table.post")
add_breadcrumb @post.the_title, @post.the_edit_url
end
# define the parent or current comment
def set_comment
begin
@comment = @post.comments.find(params[:id] || params[:comment_id])
rescue
flash[:error] = t('camaleon_cms.admin.comments.message.error')
redirect_to cama_admin_path
end
end
# Before-filter: raises CanCan::AccessDenied unless the current user may
# manage comments.
def validate_role
  authorize! :manage, :comments
end
end
|
#!/usr/bin/expect
#*******************************************************************************
# Copyright 2017 Talentica Software Pvt. Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#*******************************************************************************
# Automates the first SSH login to a freshly provisioned node: accepts the
# host key, supplies the initial password, and answers the forced
# password-change prompts. "node_ip" and "node_initial_pwd" are placeholder
# tokens that are substituted into this script before it runs.
# NOTE(review): the new password ("Jan2016!") is hard-coded in plain text;
# consider injecting it the same way node_initial_pwd is substituted.
match_max 5000
set expect_out(buffer) {}
spawn ssh root@node_ip
# Patterns are tried in order; exp_continue keeps the loop alive until the
# shell prompt ("$") is finally matched.
expect {
"*(yes/no)?" {send "yes\r";exp_continue}
"'s password:" {send "node_initial_pwd\r";exp_continue}
"*current*" {send "node_initial_pwd\r";exp_continue}
"Enter*" {send "Jan2016!\r";exp_continue}
"Retype*" {send "Jan2016!\r";exp_continue}
"\\\$" { puts "matched prompt"}
}
|
# Test driver for the ACL single-operator GEMM sample.
project_name="cplusplus_acl_execute_gemm"
# File name (without the .om extension) of the converted single-op model.
model_name="0_GEMM_1_2_16_16_1_2_16_16_1_2_16_16_1_2_1_2_1_2_16_16"
# Toolkit/driver version, e.g. "c73" or "c75", given as the first argument.
version=$1
script_path="$( cd "$(dirname $BASH_SOURCE)" ; pwd -P)"
project_path=${script_path}/..
# Exit codes returned by main().
declare -i success=0
declare -i inferenceError=1
declare -i verifyResError=2
function setAtcEnv() {
    # Configure the environment variables required by the ATC model
    # converter. The c73 and c75 toolkits use the same layout, so both
    # versions share one branch (the original duplicated it verbatim).
    # Unknown/other versions are left untouched, exactly as before.
    if [[ ${version} = "c73" ]] || [[ ${version} = "C73" ]] || [[ ${version} = "c75" ]] || [[ ${version} = "C75" ]];then
        export install_path=$HOME/Ascend
        export ASCEND_OPP_PATH=${install_path}/opp
        export LD_LIBRARY_PATH=/usr/local/python3.7.5/lib
        export PATH=${install_path}/atc/ccec_compiler/bin:${install_path}/atc/bin:/usr/local/python3.7.5/bin:${PATH}
    fi
    return 0
}
function setBuildEnv() {
    # Configure the environment variables needed to compile the sample.
    # c73 and c75 use identical settings, so the duplicated branches of the
    # original are merged into one. Other versions are left untouched.
    if [[ ${version} = "c73" ]] || [[ ${version} = "C73" ]] || [[ ${version} = "c75" ]] || [[ ${version} = "C75" ]];then
        export DDK_PATH=/home/HwHiAiUser/Ascend
        export NPU_HOST_LIB=${DDK_PATH}/acllib/lib64/stub
    fi
    return 0
}
function main() {
    # A version argument is mandatory; everything below depends on it.
    if [[ ${version}"x" = "x" ]];then
        echo "ERROR: version is invalid"
        return ${inferenceError}
    fi
    mkdir -p ${HOME}/models/${project_name}
    mkdir -p ${project_path}/run/out/op_models
    # Convert the model only when no cached .om file exists yet.
    if [[ $(find ${HOME}/models/${project_name} -name ${model_name}".om")"x" = "x" ]];then
        # Set the environment variables needed for model conversion.
        setAtcEnv
        if [ $? -ne 0 ];then
            echo "ERROR: set atc environment failed"
            return ${inferenceError}
        fi
        # Convert the single-op description into an offline model (.om).
        cd ${project_path}/
        atc --singleop=${project_path}/run/out/test_data/config/gemm.json --soc_version=Ascend710 --output=${HOME}/models/${project_name}
        if [ $? -ne 0 ];then
            echo "ERROR: convert model failed"
            return ${inferenceError}
        fi
        # Copy the cached model into the runtime op_models directory.
        cp ${HOME}/models/${project_name}/${model_name}".om" ${project_path}/run/out/op_models/${model_name}".om"
        if [ $? -ne 0 ];then
            echo "ERROR: failed to set model soft connection"
            return ${inferenceError}
        fi
    else
        # Cached model found: just copy it into place.
        cp ${HOME}/models/${project_name}/${model_name}".om" ${project_path}/run/out/op_models/${model_name}".om"
        if [ $? -ne 0 ];then
            echo "ERROR: failed to set model soft connection"
            return ${inferenceError}
        fi
    fi
    # Generate the random input matrices and the expected output.
    cd ${project_path}/run/out/test_data/data
    export LD_LIBRARY_PATH=/usr/local/python3.7.5/lib
    export PATH=/usr/local/python3.7.5/bin:${PATH}
    python3.7.5 generate_data.py
    if [[ $? -ne 0 ]] || [[ ! -f ${project_path}/run/out/test_data/data/matrix_a.bin ]] || [[ ! -f ${project_path}/run/out/test_data/data/matrix_b.bin ]] || [[ ! -f ${project_path}/run/out/test_data/data/matrix_c.bin ]];then
        echo "ERROR: generate input data failed. please check your project"
        return ${inferenceError}
    fi
    # Build the host-side executable with cmake/make.
    mkdir -p ${project_path}/build/intermediates/host
    if [ $? -ne 0 ];then
        echo "ERROR: mkdir build folder failed. please check your project"
        return ${inferenceError}
    fi
    cd ${project_path}/build/intermediates/host
    setBuildEnv
    if [ $? -ne 0 ];then
        echo "ERROR: set build environment failed"
        return ${inferenceError}
    fi
    cmake ${project_path}/src -DCMAKE_CXX_COMPILER=g++ -DCMAKE_SKIP_RPATH=TRUE
    if [ $? -ne 0 ];then
        echo "ERROR: cmake failed. please check your project"
        return ${inferenceError}
    fi
    make
    if [ $? -ne 0 ];then
        echo "ERROR: make failed. please check your project"
        return ${inferenceError}
    fi
    # Run the operator on the device.
    cd ${project_path}/run/out
    export LD_LIBRARY_PATH=${HOME}/Ascend/acllib/lib64:${LD_LIBRARY_PATH}
    chmod +x execute_gemm_op
    if [ $? -ne 0 ];then
        echo "ERROR: chmod +x to executable program failed. please check your project"
        return ${inferenceError}
    fi
    ./execute_gemm_op
    if [ $? -ne 0 ];then
        echo "ERROR: run failed. please check your project"
        return ${inferenceError}
    fi
    if [[ ! -f "${project_path}/run/out/result_files/matrix_c.bin" ]];then
        echo "ERROR: not find result file!"
        return ${inferenceError}
    fi
    # Compare the device result against the expected output.
    python3 ${script_path}/verify_result.py "${project_path}/run/out/test_data/data/output.bin" "${project_path}/run/out/result_files/matrix_c.bin"
    if [ $? -ne 0 ];then
        echo "ERROR: The result of reasoning is wrong!"
        return ${verifyResError}
    fi
    echo "run success"
    return ${success}
}
main
|
#!/bin/bash
# -*-mode: Shell-script; indent-tabs-mode: nil; sh-basic-offset: 2 -*-
# Locate this package's directory robustly, regardless of how $0 was invoked:
dollar0=`which $0`; PACKAGE_DIR=$(cd $(dirname $dollar0); pwd) # NEVER export PACKAGE_DIR
# Pull in BUILD_DIR/INSTALL_DIR defaults and helper functions such as
# BuildDependentPackage:
. $PACKAGE_DIR/../../../support-files/build_platform_util.bash
CLEAN=0
# Command-line processing: one case arm per recognized option.
while [ $# -gt 0 ]
do
  case "$1" in
    -builddir)
      BUILD_DIR="$2"
      shift
      ;;
    -installdir)
      INSTALL_DIR="$2"
      shift
      ;;
    -clean)
      CLEAN=1
      ;;
    -h)
      EmitStandardUsage
      exit 0
      ;;
    *)
      echo "Undefined parameter $1"
      exit 1
      ;;
  esac
  shift
done
# --------------------------------------------------------------------------------
# Dependent packages are installed into $INSTALL_DIR/bin, so that directory
# is added to the PATH:
# --------------------------------------------------------------------------------
SetupBasicEnvironment
# --------------------------------------------------------------------------------
# Build required dependent packages:
# --------------------------------------------------------------------------------
# No known dependent packages.
# --------------------------------------------------------------------------------
# Create build directory structure:
# --------------------------------------------------------------------------------
CreateAndChdirIntoBuildDir gettext
# --------------------------------------------------------------------------------
# Download and build tarball into the build directory:
# --------------------------------------------------------------------------------
DownloadExtractBuildGnuPackage gettext
exit 0
|
#!/bin/bash
# Supervise the four-in-a-row backend: start the server, then restart it
# whenever the repository-update helper reports new commits (exit status 0).
python3 four_in_a_row_online/backend/server.py &
server_pid=$!
# Give the server a moment to come up before entering the watch loop.
sleep 5s
while true
do
    # Test the helper's exit status directly instead of the nonportable
    # `[ $? == 0 ]` of the original (`==` is not POSIX inside `[`).
    if four_in_a_row_online/tools/repo_updated.sh
    then
        echo "Repo has been updated. Service will restart server (pid $server_pid)."
        kill $server_pid
        python3 four_in_a_row_online/backend/server.py &
        server_pid=$!
        echo "Server has been restarted, pid: $server_pid"
    else
        echo "Repo has not been updated, so the server will remain running."
    fi
    sleep 1m
done
|
// Convert an integer to its string representation in the given base (2-36).
// The original loop returned "" for 0 and for any negative input; zero now
// yields "0" and negatives are converted via their absolute value with a
// leading "-", matching Number.prototype.toString semantics.
function baseConverter(num, base) {
  if (num === 0) return '0';
  const negative = num < 0;
  let n = Math.abs(num);
  let converted = '';
  while (n > 0) {
    converted = (n % base).toString(base) + converted;
    n = Math.floor(n / base);
  }
  return negative ? '-' + converted : converted;
}
// Usage
baseConverter(10, 2); // Output: "1010"
<filename>sites/docs/decks/demo/theme.js
// Minimal mdx-deck/theme-ui style theme: dark-blue body text, and links
// ("a") reuse the `text` color token.
export default {
  colors: { text: "#0D0543" },
  styles: { a: { color: "text" } }
};
|
<filename>src/components/prendus-assignment/prendus-unauthorized-modal.ts<gh_stars>0
import {
AuthResult
} from '../../prendus.d';
import {
navigate
} from '../../node_modules/prendus-shared/services/utilities-service';
// Modal shown when the current user is not authorized to view a resource.
// Depending on the AuthResult it offers payment, sign-in, or going home.
class PrendusUnauthorizedModal extends Polymer.Element {
    open: boolean;      // whether the modal is currently visible
    result: AuthResult; // outcome of the authorization check
    static get is() { return 'prendus-unauthorized-modal' }
    static get properties() {
        return {
            open: {
                type: Boolean,
                value: false,
            },
            result: {
                type: Object,
                // Default: treat the user as unauthenticated until told otherwise.
                value: () => ({
                    authenticated: false
                })
            }
        }
    }
    // Sends the user to the course payment page, returning here afterwards.
    // NOTE(review): this uses window.location.href while _authenticate uses
    // only the pathname — confirm the asymmetry is intentional.
    _pay(e: Event) {
        navigate(`/course/${this.result.courseId}/payment?redirectUrl=${encodeURIComponent(`${window.location.href}`)}`);
    }
    // Sends the user to the sign-in page, returning to the current path afterwards.
    _authenticate(e: Event) {
        navigate(`/authenticate?redirectUrl=${encodeURIComponent(`${window.location.pathname}`)}`);
    }
    // Navigates back to the home page.
    _home(e: Event) {
        navigate('/');
    }
    // True when the user is signed in but has not paid (note: field is
    // spelled "payed" in AuthResult).
    _payMessage(result: AuthResult): boolean {
        return result.authenticated && !result.payed;
    }
}
window.customElements.define(PrendusUnauthorizedModal.is, PrendusUnauthorizedModal);
|
public class Solution {
public static int sum(int x, int y) {
return x + y;
}
} |
#!/bin/bash
# Mostly this just copies the below XML, but inserting random MAC address
# and UUID strings, and other options as appropriate.
SCRIPT_ROOT=$(readlink -f $(dirname "$0")/..)
. "${SCRIPT_ROOT}/common.sh" || exit 1
# Flag definitions (shflags-style DEFINE_* helpers from common.sh).
DEFINE_string vm_name "CoreOS" "Name for this VM"
DEFINE_string disk_vmdk "" "Disk image to reference, only basename is used."
DEFINE_integer memory_size 1024 "Memory size in MB"
DEFINE_string output_ovf "" "Path to write ofv file to, required."
DEFINE_string output_vagrant "" "Path to write Vagrantfile to, optional."
# Parse command line
FLAGS "$@" || exit 1
eval set -- "${FLAGS_ARGV}"
# Die on any errors.
switch_to_strict_mode
if [[ ! -e "${FLAGS_disk_vmdk}" ]]; then
  echo "No such disk image '${FLAGS_disk_vmdk}'" >&2
  exit 1
fi
DISK_NAME=$(basename "${FLAGS_disk_vmdk}")
DISK_UUID=$(uuidgen)
# Virtual disk size in bytes, extracted from qemu-img's human output.
DISK_SIZE_BYTES=$(qemu-img info -f vmdk "${FLAGS_disk_vmdk}" \
  | gawk 'match($0, /^virtual size:.*\(([0-9]+) bytes\)/, a) {print a[1]}')
if [[ -z "${DISK_SIZE_BYTES}" ]]; then
  echo "Unable to determine virtual size of ${FLAGS_disk_vmdk}" >&2
  exit 1
fi
# Generate random MAC addresses just as VirtualBox does, the format is
# their assigned prefix for the first 3 bytes followed by 3 random bytes.
VBOX_MAC_PREFIX=080027
macgen() {
  hexdump -n3 -e "\"${VBOX_MAC_PREFIX}%X\n\"" /dev/urandom
}
# Used in both the ovf and Vagrantfile
PRIMARY_MAC=$(macgen)
# Date format as used in ovf (UTC, ISO-8601 with Z suffix)
datez() {
  date -u "+%Y-%m-%dT%H:%M:%SZ"
}
# Optionally emit a Vagrantfile that pins the generated MAC address and
# SSHes in as the image's default 'core' user (heredoc is unquoted so
# ${PRIMARY_MAC} expands).
if [[ -n "${FLAGS_output_vagrant}" ]]; then
  cat >"${FLAGS_output_vagrant}" <<EOF
if Vagrant::VERSION < "1.2.3"
raise "Need at least vagrant version 1.2.3, please update"
end
Vagrant.configure("2") do |config|
config.vm.base_mac = "${PRIMARY_MAC}"
# SSH in as the default 'core' user, it has the vagrant ssh key.
config.ssh.username = "core"
# Disable the base shared folder, guest additions are unavailable.
config.vm.synced_folder ".", "/vagrant", disabled: true
end
EOF
fi
# Emit the OVF envelope itself; the heredoc is unquoted so the disk name,
# UUIDs, MACs, VM name and memory size are substituted inline.
if [[ -n "${FLAGS_output_ovf}" ]]; then
  cat >"${FLAGS_output_ovf}" <<EOF
<?xml version="1.0"?>
<Envelope ovf:version="1.0" xml:lang="en-US" xmlns="http://schemas.dmtf.org/ovf/envelope/1" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1" xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:vbox="http://www.virtualbox.org/ovf/machine">
<References>
<File ovf:href="${DISK_NAME}" ovf:id="file1"/>
</References>
<DiskSection>
<Info>List of the virtual disks used in the package</Info>
<Disk ovf:capacity="${DISK_SIZE_BYTES}" ovf:diskId="vmdisk1" ovf:fileRef="file1" ovf:format="http://www.vmware.com/interfaces/specifications/vmdk.html#streamOptimized" vbox:uuid="${DISK_UUID}"/>
</DiskSection>
<NetworkSection>
<Info>Logical networks used in the package</Info>
<Network ovf:name="NAT">
<Description>Logical network used by this appliance.</Description>
</Network>
</NetworkSection>
<VirtualSystem ovf:id="${FLAGS_vm_name}">
<Info>A virtual machine</Info>
<OperatingSystemSection ovf:id="100">
<Info>The kind of installed guest operating system</Info>
<Description>Linux26_64</Description>
<vbox:OSType ovf:required="false">Linux26_64</vbox:OSType>
</OperatingSystemSection>
<VirtualHardwareSection>
<Info>Virtual hardware requirements for a virtual machine</Info>
<System>
<vssd:ElementName>Virtual Hardware Family</vssd:ElementName>
<vssd:InstanceID>0</vssd:InstanceID>
<vssd:VirtualSystemIdentifier>${FLAGS_vm_name}</vssd:VirtualSystemIdentifier>
<vssd:VirtualSystemType>virtualbox-2.2</vssd:VirtualSystemType>
</System>
<Item>
<rasd:Caption>1 virtual CPU</rasd:Caption>
<rasd:Description>Number of virtual CPUs</rasd:Description>
<rasd:ElementName>1 virtual CPU</rasd:ElementName>
<rasd:InstanceID>1</rasd:InstanceID>
<rasd:ResourceType>3</rasd:ResourceType>
<rasd:VirtualQuantity>1</rasd:VirtualQuantity>
</Item>
<Item>
<rasd:AllocationUnits>MegaBytes</rasd:AllocationUnits>
<rasd:Caption>${FLAGS_memory_size} MB of memory</rasd:Caption>
<rasd:Description>Memory Size</rasd:Description>
<rasd:ElementName>${FLAGS_memory_size} MB of memory</rasd:ElementName>
<rasd:InstanceID>2</rasd:InstanceID>
<rasd:ResourceType>4</rasd:ResourceType>
<rasd:VirtualQuantity>${FLAGS_memory_size}</rasd:VirtualQuantity>
</Item>
<Item>
<rasd:Address>0</rasd:Address>
<rasd:Caption>ideController0</rasd:Caption>
<rasd:Description>IDE Controller</rasd:Description>
<rasd:ElementName>ideController0</rasd:ElementName>
<rasd:InstanceID>3</rasd:InstanceID>
<rasd:ResourceSubType>PIIX4</rasd:ResourceSubType>
<rasd:ResourceType>5</rasd:ResourceType>
</Item>
<Item>
<rasd:Address>1</rasd:Address>
<rasd:Caption>ideController1</rasd:Caption>
<rasd:Description>IDE Controller</rasd:Description>
<rasd:ElementName>ideController1</rasd:ElementName>
<rasd:InstanceID>4</rasd:InstanceID>
<rasd:ResourceSubType>PIIX4</rasd:ResourceSubType>
<rasd:ResourceType>5</rasd:ResourceType>
</Item>
<Item>
<rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
<rasd:Caption>Ethernet adapter on 'NAT'</rasd:Caption>
<rasd:Connection>NAT</rasd:Connection>
<rasd:ElementName>Ethernet adapter on 'NAT'</rasd:ElementName>
<rasd:InstanceID>5</rasd:InstanceID>
<rasd:ResourceSubType>E1000</rasd:ResourceSubType>
<rasd:ResourceType>10</rasd:ResourceType>
</Item>
<Item>
<rasd:AddressOnParent>0</rasd:AddressOnParent>
<rasd:Caption>disk1</rasd:Caption>
<rasd:Description>Disk Image</rasd:Description>
<rasd:ElementName>disk1</rasd:ElementName>
<rasd:HostResource>/disk/vmdisk1</rasd:HostResource>
<rasd:InstanceID>6</rasd:InstanceID>
<rasd:Parent>3</rasd:Parent>
<rasd:ResourceType>17</rasd:ResourceType>
</Item>
</VirtualHardwareSection>
<vbox:Machine ovf:required="false" version="1.12-linux" uuid="{$(uuidgen)}" name="${FLAGS_vm_name}" OSType="Linux26_64" snapshotFolder="Snapshots" lastStateChange="$(datez)">
<ovf:Info>Complete VirtualBox machine configuration in VirtualBox format</ovf:Info>
<Hardware version="2">
<CPU count="1" hotplug="false">
<HardwareVirtEx enabled="true" exclusive="true"/>
<HardwareVirtExNestedPaging enabled="true"/>
<HardwareVirtExVPID enabled="true"/>
<PAE enabled="true"/>
<HardwareVirtExLargePages enabled="false"/>
<HardwareVirtForce enabled="false"/>
</CPU>
<Memory RAMSize="${FLAGS_memory_size}" PageFusion="false"/>
<HID Pointing="PS2Mouse" Keyboard="PS2Keyboard"/>
<HPET enabled="false"/>
<Chipset type="PIIX3"/>
<Boot>
<Order position="1" device="HardDisk"/>
<Order position="2" device="DVD"/>
<Order position="3" device="None"/>
<Order position="4" device="None"/>
</Boot>
<Display VRAMSize="8" monitorCount="1" accelerate3D="false" accelerate2DVideo="false"/>
<VideoRecording enabled="false" file="Test.webm" horzRes="640" vertRes="480"/>
<RemoteDisplay enabled="false" authType="Null"/>
<BIOS>
<ACPI enabled="true"/>
<IOAPIC enabled="true"/>
<Logo fadeIn="true" fadeOut="true" displayTime="0"/>
<BootMenu mode="MessageAndMenu"/>
<TimeOffset value="0"/>
<PXEDebug enabled="false"/>
</BIOS>
<USBController enabled="false" enabledEhci="false"/>
<Network>
<Adapter slot="0" enabled="true" MACAddress="${PRIMARY_MAC}" cable="true" speed="0" type="82540EM">
<DisabledModes/>
<NAT>
<DNS pass-domain="true" use-proxy="false" use-host-resolver="false"/>
<Alias logging="false" proxy-only="false" use-same-ports="false"/>
</NAT>
</Adapter>
<Adapter slot="1" enabled="false" MACAddress="$(macgen)" cable="true" speed="0" type="82540EM">
<DisabledModes>
<NAT>
<DNS pass-domain="true" use-proxy="false" use-host-resolver="false"/>
<Alias logging="false" proxy-only="false" use-same-ports="false"/>
</NAT>
</DisabledModes>
</Adapter>
<Adapter slot="2" enabled="false" MACAddress="$(macgen)" cable="true" speed="0" type="82540EM">
<DisabledModes>
<NAT>
<DNS pass-domain="true" use-proxy="false" use-host-resolver="false"/>
<Alias logging="false" proxy-only="false" use-same-ports="false"/>
</NAT>
</DisabledModes>
</Adapter>
<Adapter slot="3" enabled="false" MACAddress="$(macgen)" cable="true" speed="0" type="82540EM">
<DisabledModes>
<NAT>
<DNS pass-domain="true" use-proxy="false" use-host-resolver="false"/>
<Alias logging="false" proxy-only="false" use-same-ports="false"/>
</NAT>
</DisabledModes>
</Adapter>
<Adapter slot="4" enabled="false" MACAddress="$(macgen)" cable="true" speed="0" type="82540EM">
<DisabledModes>
<NAT>
<DNS pass-domain="true" use-proxy="false" use-host-resolver="false"/>
<Alias logging="false" proxy-only="false" use-same-ports="false"/>
</NAT>
</DisabledModes>
</Adapter>
<Adapter slot="5" enabled="false" MACAddress="$(macgen)" cable="true" speed="0" type="82540EM">
<DisabledModes>
<NAT>
<DNS pass-domain="true" use-proxy="false" use-host-resolver="false"/>
<Alias logging="false" proxy-only="false" use-same-ports="false"/>
</NAT>
</DisabledModes>
</Adapter>
<Adapter slot="6" enabled="false" MACAddress="$(macgen)" cable="true" speed="0" type="82540EM">
<DisabledModes>
<NAT>
<DNS pass-domain="true" use-proxy="false" use-host-resolver="false"/>
<Alias logging="false" proxy-only="false" use-same-ports="false"/>
</NAT>
</DisabledModes>
</Adapter>
<Adapter slot="7" enabled="false" MACAddress="$(macgen)" cable="true" speed="0" type="82540EM">
<DisabledModes>
<NAT>
<DNS pass-domain="true" use-proxy="false" use-host-resolver="false"/>
<Alias logging="false" proxy-only="false" use-same-ports="false"/>
</NAT>
</DisabledModes>
</Adapter>
</Network>
<UART>
<Port slot="0" enabled="false" IOBase="0x3f8" IRQ="4" hostMode="Disconnected"/>
<Port slot="1" enabled="false" IOBase="0x2f8" IRQ="3" hostMode="Disconnected"/>
</UART>
<LPT>
<Port slot="0" enabled="false" IOBase="0x378" IRQ="7"/>
<Port slot="1" enabled="false" IOBase="0x378" IRQ="7"/>
</LPT>
<AudioAdapter controller="AC97" driver="Pulse" enabled="false"/>
<RTC localOrUTC="local"/>
<SharedFolders/>
<Clipboard mode="Disabled"/>
<DragAndDrop mode="Disabled"/>
<IO>
<IoCache enabled="true" size="5"/>
<BandwidthGroups/>
</IO>
<HostPci>
<Devices/>
</HostPci>
<EmulatedUSB>
<CardReader enabled="false"/>
</EmulatedUSB>
<Guest memoryBalloonSize="0"/>
<GuestProperties/>
</Hardware>
<StorageControllers>
<StorageController name="IDE Controller" type="PIIX4" PortCount="2" useHostIOCache="true" Bootable="true">
<AttachedDevice type="HardDisk" port="0" device="0">
<Image uuid="{${DISK_UUID}}"/>
</AttachedDevice>
</StorageController>
</StorageControllers>
</vbox:Machine>
</VirtualSystem>
</Envelope>
EOF
fi
|
<gh_stars>1000+
package cmd
import "github.com/spf13/cobra"
// getCmd implements the `keptn get` parent command: it displays an event or
// Keptn entities such as project, stage, or service. Concrete subcommands
// are attached to it by their own init functions. (The original comment
// incorrectly called this the "send" command.)
var getCmd = &cobra.Command{
	Use:   "get [event | project | projects | stage | stages | service | services]",
	Short: "Displays an event or Keptn entities such as project, stage, or service",
	Long:  `Displays an event or Keptn entities such as project, stage, or service.`,
}
// init registers getCmd on the root command at package load time.
func init() {
	rootCmd.AddCommand(getCmd)
}
|
<reponame>sebastianbrunnert/Advanced-Shuffle
import { Component, OnInit } from '@angular/core';
import { AppComponent } from '../app.component';
@Component({
  selector: 'app-login',
  templateUrl: './login.component.html',
  styleUrls: ['./login.component.css']
})
// On load, immediately redirects the browser to Spotify's OAuth authorize
// endpoint using the app credentials held by the root AppComponent.
export class LoginComponent implements OnInit {
  constructor(
    private app: AppComponent
  ) { }
  ngOnInit(): void {
    // Authorization Code flow: Spotify redirects back to redirect_uri with
    // a code. The state parameter is a random CSRF token.
    // NOTE(review): the space-separated scope list is not URL-encoded here;
    // browsers encode spaces on navigation, but confirm this is relied upon.
    window.location.href = "https://accounts.spotify.com/authorize" +
      "?response_type=code" +
      "&client_id=" + this.app.spotifyAppCredentials.client_id +
      "&scope=user-library-read playlist-read-private user-modify-playback-state playlist-modify-private playlist-read-collaborative" +
      "&redirect_uri=" + this.app.spotifyAppCredentials.redirect_uri +
      "&state=" + this.generateRandomString(16)
  }
  // Returns a random alphanumeric string of the given length (Math.random
  // based — not cryptographically strong).
  generateRandomString = function(length) {
    var text = '';
    var possible = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789';
    for (var i = 0; i < length; i++) {
      text += possible.charAt(Math.floor(Math.random() * possible.length));
    }
    return text;
  };
}
|
<reponame>rainmaple/duckdb
#include "catch.hpp"
#include "duckdb/common/file_system.hpp"
#include "dbgen.hpp"
#include "test_helpers.hpp"
using namespace duckdb;
using namespace std;
// Regression test ported from MonetDB bug SF-1284791: an UPDATE whose SET
// expression is a correlated scalar subselect must see the matching row of
// the other table.
TEST_CASE("MonetDB Test: update_with_correlated_subselect.SF-1284791.sql", "[monetdb]") {
	unique_ptr<QueryResult> result;
	DuckDB db(nullptr);
	Connection con(db);
	con.EnableQueryVerification();
	// Two tables joined on id1 = id2; a has val '1', b has val '2'.
	REQUIRE_NO_FAIL(con.Query("create table t1284791b (id2 int, val2 varchar(255))"));
	REQUIRE_NO_FAIL(con.Query("create table t1284791a (id1 int, val1 varchar(255))"));
	REQUIRE_NO_FAIL(con.Query("insert into t1284791a values (1,'1')"));
	REQUIRE_NO_FAIL(con.Query("insert into t1284791b values (1,'2')"));
	// The correlated subselect should copy val2 ('2') into val1.
	REQUIRE_NO_FAIL(con.Query("update t1284791a set val1 = (select val2 from t1284791b where id1 = id2) where id1 in "
	                          "(select id2 from t1284791b);"));
	result = con.Query("select * from t1284791a");
	REQUIRE(CHECK_COLUMN(result, 0, {1}));
	REQUIRE(CHECK_COLUMN(result, 1, {"2"}));
	// b must be untouched by the update.
	result = con.Query("select * from t1284791b");
	REQUIRE(CHECK_COLUMN(result, 0, {1}));
	REQUIRE(CHECK_COLUMN(result, 1, {"2"}));
	REQUIRE_NO_FAIL(con.Query("drop table t1284791a;"));
	REQUIRE_NO_FAIL(con.Query("drop table t1284791b;"));
}
|
<gh_stars>0
package com.tactbug.ddd.common.utils;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.core.type.TypeReference;
import java.io.IOException;
import java.net.URI;
import java.net.UnknownServiceException;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.time.Duration;
import java.time.temporal.ChronoUnit;
import java.util.Objects;
import java.util.Queue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.PriorityBlockingQueue;
import java.util.concurrent.atomic.AtomicBoolean;
/**
* @Author tactbug
* @Email <EMAIL>
* @Time 2021/10/4 0:11
*/
// Per-aggregate ID allocator: keeps a local queue of pre-fetched IDs per
// aggregate class and refills it in batches from a remote ID service.
public class IdUtil {
    private String application;          // service name sent to the ID service
    private Class<?> aggregate;          // aggregate type this util serves
    private Integer maxSize = 150000;    // target size of the local ID queue
    private Integer warningSize = 20000; // refill threshold
    private Integer perQuantity = 50000; // IDs fetched per remote request
    // Shared ID queues, one per aggregate class.
    private static final ConcurrentHashMap<Class<?>, PriorityBlockingQueue<Long>> ID_MAP = new ConcurrentHashMap<>();
    private static final HttpClient HTTP_CLIENT = HttpClient.newHttpClient();
    // NOTE(review): hard-coded LAN address of the ID service — should come
    // from configuration.
    private static final String URL = "http://192.168.1.200:10001/id/batch";
    // Cache of IdUtil instances, one per aggregate class.
    private static final ConcurrentHashMap<Class<?>, IdUtil> UTIL_MAP = new ConcurrentHashMap<>();
    // Returns the cached util for the aggregate, or builds one with the
    // given settings when none exists yet.
    public static IdUtil getOrGenerate(
            String application, Class<?> aggregate, Integer maxSize, Integer warningSize, Integer perQuantity
    ){
        if (UTIL_MAP.containsKey(aggregate)){
            return UTIL_MAP.get(aggregate);
        }else {
            return getAndUpdate(application, aggregate, maxSize, warningSize, perQuantity);
        }
    }
    // Builds (or re-configures) and caches the util for the aggregate.
    // NOTE(review): containsKey/get and getOrDefault/put are not atomic, so
    // concurrent callers can race — confirm single-threaded initialization.
    public static IdUtil getAndUpdate(
            String application, Class<?> aggregate, Integer maxSize, Integer warningSize, Integer perQuantity
    ){
        IdUtil idUtil = UTIL_MAP.getOrDefault(aggregate, new IdUtil());
        assemble(idUtil, application, aggregate, maxSize, warningSize, perQuantity);
        UTIL_MAP.put(aggregate, idUtil);
        return idUtil;
    }
    // Pops the next ID for this util's aggregate, refilling the queue first
    // when it has drained below the warning threshold.
    public Long getId(){
        PriorityBlockingQueue<Long> idQueue = ID_MAP.getOrDefault(aggregate, new PriorityBlockingQueue<>(maxSize));
        ID_MAP.putIfAbsent(aggregate, idQueue);
        return generateId(idQueue);
    }
    // Copies the (optional) settings onto the util and validates them.
    private static void assemble(IdUtil idUtil, String application, Class<?> aggregate, Integer maxSize, Integer warningSize, Integer perQuantity){
        if (Objects.nonNull(maxSize)){
            idUtil.maxSize = maxSize;
        }
        if (Objects.nonNull(warningSize)){
            idUtil.warningSize = warningSize;
        }
        if (Objects.nonNull(perQuantity)){
            idUtil.perQuantity = perQuantity;
        }
        idUtil.application = application;
        idUtil.aggregate = aggregate;
        idUtil.check();
    }
    // Validates the configured settings; throws IllegalArgumentException on
    // missing application/aggregate or out-of-range batch sizes.
    private void check(){
        if (Objects.isNull(application) || application.isBlank()){
            throw new IllegalArgumentException("服务名不能为空");
        }
        if (Objects.isNull(aggregate)){
            throw new IllegalArgumentException("聚合类型不能为空");
        }
        if (maxSize < perQuantity){
            throw new IllegalArgumentException("最大ID数量不能小于每次补充数量");
        }
        if (perQuantity < 10000 || perQuantity > 300000){
            throw new IllegalArgumentException("每次补充数量不能大于300000或小于10000");
        }
    }
    // Refills the queue when it is at/below warningSize, then pops one ID.
    // When some IDs remain, the refill happens asynchronously; when the
    // queue is empty, it blocks on a synchronous refill.
    // NOTE(review): the while-loop can spawn a new refill thread on every
    // iteration until the queue reaches maxSize — under load this risks a
    // thread storm / busy wait (topUp itself is synchronized, so the spawned
    // threads serialize). Confirm this is acceptable or bound the spawning.
    private Long generateId(PriorityBlockingQueue<Long> idQueue){
        if (idQueue.size() <= warningSize) {
            AtomicBoolean topUpTag = new AtomicBoolean(true);
            while (idQueue.size() < maxSize && topUpTag.get()){
                if (!idQueue.isEmpty()) {
                    new Thread(() -> {
                        try {
                            topUp(idQueue);
                        } catch (UnknownServiceException | JsonProcessingException e) {
                            e.printStackTrace();
                            topUpTag.set(false);
                        }
                    }).start();
                } else {
                    try {
                        topUp(idQueue);
                    } catch (UnknownServiceException | JsonProcessingException e) {
                        throw new UnsupportedOperationException("ID获取异常", e);
                    }
                }
            }
        }
        return idQueue.poll();
    }
    // Fetches one batch of IDs from the remote service (up to 3 attempts)
    // and adds them to the queue. Synchronized so only one refill runs at a
    // time per util instance.
    private synchronized void topUp(PriorityBlockingQueue<Long> idQueue) throws JsonProcessingException, UnknownServiceException {
        if (idQueue.size() > maxSize){
            return;
        }
        int retryTimes = 3;
        while (retryTimes > 0){
            HttpResponse<String> response;
            try {
                response = HTTP_CLIENT.send(request(), HttpResponse.BodyHandlers.ofString());
            } catch (IOException | InterruptedException e) {
                retryTimes --;
                continue;
            }
            Queue<Long> ids = SerializeUtil.jsonToObject(response.body(), new TypeReference<>() {});
            idQueue.addAll(ids);
            return;
        }
        throw new UnknownServiceException("ID服务异常, ID队列补充失败");
    }
    // Builds the GET request for one ID batch: /id/batch/{app}/{aggregate}/{n}.
    private HttpRequest request(){
        String url =URL + "/" + application + "/" + aggregate.getName() + "/" + perQuantity;
        return HttpRequest.newBuilder()
                .GET()
                .timeout(Duration.of(3L, ChronoUnit.SECONDS))
                .uri(URI.create(url))
                .build();
    }
}
|
<filename>src/tools/deep-clone.ts
// Thin typed wrapper around the `clone-deep` package.
const cloneDeep = require('clone-deep')

/**
 * Returns a deep copy of `src`: nested objects and arrays are duplicated,
 * so mutating the clone never affects the original. The cast preserves the
 * input type for callers.
 */
export function deepClone<T>(src: T) {
  return cloneDeep(src) as T
}
|
# Classic iterative binary search over an ascending array.
# Returns the index of +target+ in +sorted_arr+, or -1 when absent.
def binary_search(sorted_arr, target)
  lo = 0
  hi = sorted_arr.length - 1
  until lo > hi
    mid = (lo + hi) / 2
    case target <=> sorted_arr[mid]
    when 0
      return mid
    when -1
      hi = mid - 1 # target is in the left half
    else
      lo = mid + 1 # target is in the right half
    end
  end
  -1
end
# Demo: search for 5 in a sorted array; its index (2) is printed.
sorted_arr = [1, 3, 5, 7, 8, 9]
target = 5
result = binary_search(sorted_arr, target)
puts result # -> 2
// Define the payload structure for EzsigntemplatepackagemembershipCreateObjectV1Response
// Payload (mPayload) of the response to
// POST /1/object/ezsigntemplatepackagemembership. Fields are stubbed out.
public struct EzsigntemplatepackagemembershipCreateObjectV1ResponseMPayload: Codable, Hashable {
    // Define properties for EzsigntemplatepackagemembershipCreateObjectV1ResponseMPayload
    // ...
}
// Define the debug payload structure for CommonResponseObjDebugPayload
// Debug payload attached to API responses when debug output is requested.
// Fields are stubbed out.
public struct CommonResponseObjDebugPayload: Codable, Hashable {
    // Define properties for CommonResponseObjDebugPayload
    // ...
}
// Define the response object for POST /1/object/ezsigntemplatepackagemembership
// Response object for POST /1/object/ezsigntemplatepackagemembership:
// mPayload carries the created membership data; objDebugPayload is present
// only when debug information was requested.
public struct EzsigntemplatepackagemembershipCreateObjectV1Response: Codable, JSONEncodable, Hashable {
    public var mPayload: EzsigntemplatepackagemembershipCreateObjectV1ResponseMPayload
    public var objDebugPayload: CommonResponseObjDebugPayload?
    // Implement initializers, encoding, decoding, and hashable conformance as per requirements
    // ...
}
<filename>musiclibrary/src/main/java/com/cyl/musiclake/ui/zone/EditActivity.java
package com.cyl.musiclake.ui.zone;
import android.text.TextUtils;
import android.view.Menu;
import android.view.MenuItem;
import android.widget.EditText;
import com.cyl.musiclake.R;
import com.cyl.musiclake.R2;
import com.cyl.musiclake.base.BaseActivity;
import com.cyl.musiclake.ui.my.user.User;
import com.cyl.musiclake.ui.my.user.UserStatus;
import com.cyl.musiclake.utils.ToastUtils;
import butterknife.BindView;
/**
* Created by 永龙 on 2016/3/15.
*/
/**
 * Screen for composing and sending a "secret" post. Validates that the text
 * is non-empty and the user is logged in before submitting.
 *
 * Fix: the original checked {@code content.length() == 0 || content == null}
 * (and similarly for user_id), i.e. the null check came AFTER dereferencing —
 * TextUtils.isEmpty covers both cases in the right order.
 *
 * Created by 永龙 on 2016/3/15.
 */
public class EditActivity extends BaseActivity {
    @BindView(R2.id.edit_content)
    EditText mEditText;
    String user_id;
    String content;

    @Override
    protected int getLayoutResID() {
        return R.layout.activity_edit;
    }

    @Override
    protected void initView() {
    }

    @Override
    protected void initData() {
        // Pre-fill the editor when existing content was passed in.
        content = getIntent().getStringExtra("content");
        if (!TextUtils.isEmpty(content))
            mEditText.setText(content + "");
    }

    @Override
    protected void initInjector() {
    }

    @Override
    protected void listener() {
    }

    @Override
    public boolean onCreateOptionsMenu(Menu menu) {
        getMenuInflater().inflate(R.menu.menu_edit, menu);
        return super.onCreateOptionsMenu(menu);
    }

    @Override
    public boolean onOptionsItemSelected(MenuItem item) {
        int id = item.getItemId();
        if (id == android.R.id.home) {
            finish();
            return true;
        } else if (id == R.id.menu_send) {
            content = mEditText.getText().toString().trim();
            // Null-safe emptiness check (original risked an NPE by calling
            // length() before the null comparison).
            if (TextUtils.isEmpty(content)) {
                ToastUtils.show(this, "不能发送空");
                return true;
            }
            User user = UserStatus.getUserInfo(this);
            user_id = user.getId();
            if (TextUtils.isEmpty(user_id)) {
                ToastUtils.show(this, "请登录");
                return true;
            }
            sendSecret(user_id, content);
            return true;
        }
        return super.onOptionsItemSelected(item);
    }

    /**
     * Submits the secret to the backend. The network call is currently
     * commented out, so this is a no-op placeholder.
     */
    private void sendSecret(String user_id, String comment) {
//        OkHttpUtils.post().url(Constants.DEFAULT_URL)
//                .addParams(Constants.FUNC, Constants.SECRET_ADD)
//                .addParams(Constants.USER_ID, user_id)
//                .addParams(Constants.CONTENT, comment)
//                .build()
//                .execute(new StatusCallback() {
//                    @Override
//                    public void onError(Call call, Exception e) {
//                        ToastUtils.show(EditActivity.this,"网络错误,请检查连接!");
//                    }
//
//                    @Override
//                    public void onResponse(StatusInfo response) {
//                        ToastUtils.show(EditActivity.this,response.getMessage());
//                        LogUtil.e("re",response.getStatus()+response.getMessage());
//                        finish();
//                    }
//                });
    }
}
|
<filename>src/main/java/org/quark/microapidemo/business/AbstractBusinessService.java
package org.quark.microapidemo.business;
import java.time.*;
// Base class for business services that need a "today so far" date range.
public abstract class AbstractBusinessService {
    // Simple holder for a begin/end timestamp pair.
    protected class SearchDateStamp {
        public SearchDateStamp(ZonedDateTime beginDate, ZonedDateTime endDate) {
            this.beginDate = beginDate;
            this.endDate = endDate;
        }
        private ZonedDateTime beginDate;
        private ZonedDateTime endDate;
        public ZonedDateTime getBeginDate() {
            return beginDate;
        }
        public void setBeginDate(ZonedDateTime beginDate) {
            this.beginDate = beginDate;
        }
        public ZonedDateTime getEndDate() {
            return endDate;
        }
        public void setEndDate(ZonedDateTime endDate) {
            this.endDate = endDate;
        }
    }
    // Returns [start of today, now]. With useUTC the pair is *expressed* in
    // UTC; the begin instant is still the start of the *system-local* day
    // (it is derived from LocalDate.now() with the system zone's offset).
    // NOTE(review): confirm that local-midnight-rendered-in-UTC is the
    // intended semantics when useUTC is true, rather than UTC midnight.
    protected SearchDateStamp getTodaySearchDate(boolean useUTC) {
        ZoneId zoneId = (useUTC ? ZoneId.of("UTC") : ZoneId.systemDefault());
        LocalDateTime dt = LocalDate.now().atStartOfDay();
        Instant instant = Instant.now();
        ZonedDateTime endDate = ZonedDateTime.ofInstant(instant , zoneId);
        ZonedDateTime zdt = dt.atZone(ZoneId.systemDefault());
        instant = LocalDate.now().atStartOfDay().toInstant(zdt.getOffset());
        ZonedDateTime beginDate = ZonedDateTime.ofInstant(instant , zoneId);
        return new SearchDateStamp(beginDate, endDate);
    }
}
|
def insertionSort(arr):
    """Sort ``arr`` in place with insertion sort and return it.

    Stable, O(n^2) worst case; efficient for small or nearly-sorted input.
    """
    for idx in range(1, len(arr)):
        current = arr[idx]
        pos = idx
        # Shift every larger element in arr[0:idx] one slot to the right,
        # then drop the current value into the gap.
        while pos > 0 and arr[pos - 1] > current:
            arr[pos] = arr[pos - 1]
            pos -= 1
        arr[pos] = current
    return arr
# Demo: sort a sample list and display the result.
# (Renamed the original variable `list`, which shadowed the built-in type.)
numbers = [5, 8, 6, 3, 2, 4, 7, 1]
sorted_list = insertionSort(numbers)
# Print the sorted list
print("The sorted list is:", sorted_list)
import { VersionedObject } from './versioned-object.js';
/**
 * @class
 * @name pc.ScopeId
 * @classdesc The scope for a variable.
 * @param {string} name - The variable name.
 * @property {string} name The variable name.
 */
function ScopeId(name) {
    // The variable name this scope refers to.
    this.name = name;
    // Current value; null until a caller assigns one via setValue().
    this.value = null;
    // Revision counter, bumped on every setValue() so consumers can
    // cheaply detect that the value changed.
    this.versionObject = new VersionedObject();
}
Object.assign(ScopeId.prototype, {
    /**
     * @function
     * @name pc.ScopeId#setValue
     * @description Set variable value and increment the revision so
     * observers of versionObject see the change.
     * @param {*} value - The value.
     */
    setValue: function (value) {
        // Set the new value
        this.value = value;
        // Increment the revision
        this.versionObject.increment();
    },
    /**
     * @function
     * @name pc.ScopeId#getValue
     * @description Get variable value. Does not touch the revision.
     * @returns {*} The value.
     */
    getValue: function () {
        return this.value;
    }
});
export { ScopeId };
|
<reponame>akovari/reactive-data-federation-poc
package com.github.akovari.rdfp.api.ql.db
import com.github.akovari.rdfp.api.ql.UQLContext
import com.github.akovari.rdfp.api.ql.UQLContext.IllegalUQLFieldException
import com.github.akovari.typesafeSalesforce.query.SimpleColumn
import com.typesafe.config.ConfigFactory
import scala.collection.JavaConverters._
/**
* Created by akovari on 12.09.14.
*/
/**
 * Resolves UQL field names to SOQL columns using the static mapping in
 * ql/soqlMappings.conf.
 */
object SOQLContext {
  private val config = ConfigFactory.load("ql/soqlMappings.conf")

  /**
   * Looks through every table configured for the implicit resource type
   * and returns the mapped column for `field`.
   *
   * @throws IllegalUQLFieldException when no table maps the field.
   */
  def fieldToEntityField(field: String)(implicit resourceType: UQLContext.UnifiedResult.UnifiedResultFromResourceType): SimpleColumn[Any] = {
    val resourceKey = resourceType.value.toString
    val tablesConf = config.getConfig(resourceKey)
    val tableNames = config.getObject(resourceKey).keySet().asScala
    tableNames.find(table => tablesConf.hasPath(s"$table.fields.$field")) match {
      case Some(table) => SimpleColumn(tablesConf.getString(s"$table.fields.$field"))
      case None        => throw IllegalUQLFieldException(s"""Invalid Field "$field"""")
    }
  }
}
|
#!/usr/bin/env bash
# Print "<artist> - <title>" for the current playerctl track; a leading
# space marks the paused state. No output for any other player state.
status=$(playerctl status)
artist=$(playerctl metadata artist)
title=$(playerctl metadata title)
case "$status" in
    Playing) echo "$artist" - "$title" ;;
    Paused)  echo " $artist" - "$title" ;;
esac
|
<filename>24-Redux/src/actions/actionTypes.js
// Redux action type constant; value mirrors the name by convention.
// Presumably dispatched when a click should update the stored value —
// confirm against the matching reducer/action creators.
export const CLICK_UPDATE_VALUE = 'CLICK_UPDATE_VALUE'
#!/bin/bash
# This script ensures that a transparent Tor proxy is running
# and that requests are routed via the proxy.
export LC_ALL=C.UTF-8
export LANG=C.UTF-8
# Output colors
NORMAL="\\033[0;39m"
RED="\\033[1;31m"
BLUE="\\033[1;34m"
GREEN="\\033[1;32m"
# Print an informational message in blue.
log_info() {
    echo ""
    echo -e "$BLUE > $1 $NORMAL"
}
# Print a success message in green.
log_success() {
    echo ""
    echo -e "$GREEN > $1 $NORMAL"
}
# Print an error message in red.
log_error() {
    echo ""
    echo -e "$RED >>> ERROR - $1$NORMAL"
}
random_string() {
    # Generate a random string with $1 bytes of entropy. It is base64 encoded,
    # so the resulting string is longer than $1 characters. You should
    # probably make $1 a multiple of 3, otherwise the base64 string will be
    # padded with '=' at the end.
    head -c "$1" /dev/urandom | base64
}
generate_tor_hash_password() {
    # Generate a hash password from $1. Quoted because base64 output can
    # contain '/', '+' and '=' and must reach tor as a single argument.
    tor --hash-password "$1" --quiet
}
wait_for_tor() {
    # Block until tor circuits are established, polling once per second.
    python /tor_status.py 2> /dev/null
    result=$?
    while [ $result -ne 0 ]; do
        log_info "Waiting for tor..."
        python /tor_status.py 2> /dev/null
        result=$?
        sleep 1
    done
    log_success "Tor circuit established"
}
check_ip() {
    # Report the current external IP address using https://wtfismyip.com
    log_info "Checking IP address with wtfismyip...."
    curl -s -m 60 https://wtfismyip.com/text
}
check_tor_is_used() {
    # Check that requests are routed via Tor
    # using https://check.torproject.org
    log_info "Checking Tor...."
    curl -s -m 60 https://check.torproject.org | sed -n "/<h1/,/<\/h1>/p" | grep -iq "This browser is configured to use Tor" 2> /dev/null
    result=$?
    if [ $result -ne 0 ]; then
        log_error "Tor is not working!"
        exit 1
    else
        log_success "Tor is working!"
    fi
}
# Create a one-off control-port password for this container run.
TOR_PASSWORD=$(random_string 12)
TOR_HASH_PASSWORD=$(generate_tor_hash_password "$TOR_PASSWORD")
setup_tor_control() {
    # Enable the control port, protected by the hashed password; export
    # the plaintext so tor_status.py can authenticate.
    echo "ControlPort 9051" >> /etc/tor/torrc
    echo "HashedControlPassword ${TOR_HASH_PASSWORD}" >> /etc/tor/torrc
    export TOR_PASSWORD=$TOR_PASSWORD
}
setup_tor_control
# Start Tor
service tor restart
wait_for_tor
# Config IP Tables
sh /iptables.sh
# Check IP
check_ip
# Check Tor is being used for outbound connections
check_tor_is_used
# Start squid via the container entrypoint
log_info "Starting squid.."
/sbin/entrypoint.sh
|
<reponame>khatchadourian-lab/guava
/*
* Copyright (C) 2008 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.collect;
import com.google.common.annotations.GwtCompatible;
import com.google.common.annotations.GwtIncompatible;
import com.google.common.collect.Table.Cell;
import com.google.common.testing.EqualsTester;
import com.google.common.testing.SerializableTester;
import junit.framework.TestCase;
/**
* Tests for {@link Tables}.
*
* @author <NAME>
*/
@GwtCompatible(emulated = true)
@GwtCompatible(emulated = true)
public class TablesTest extends TestCase {

  /** The canonical ("foo", 1, 'a') cell exercised throughout the tests. */
  private static Cell<String, Integer, Character> fooCell() {
    return Tables.immutableCell("foo", 1, 'a');
  }

  @GwtIncompatible // SerializableTester
  public void testImmutableEntrySerialization() {
    SerializableTester.reserializeAndAssert(fooCell());
  }

  public void testImmutableEntryToString() {
    assertEquals("(foo,1)=a", fooCell().toString());
    Cell<String, Integer, Character> allNulls = Tables.immutableCell(null, null, null);
    assertEquals("(null,null)=null", allNulls.toString());
  }

  public void testEntryEquals() {
    // Equal iff row, column and value all match; differs from cells that
    // vary any single component.
    new EqualsTester()
        .addEqualityGroup(fooCell(), Tables.immutableCell("foo", 1, 'a'))
        .addEqualityGroup(Tables.immutableCell("bar", 1, 'a'))
        .addEqualityGroup(Tables.immutableCell("foo", 2, 'a'))
        .addEqualityGroup(Tables.immutableCell("foo", 1, 'b'))
        .addEqualityGroup(Tables.immutableCell(null, null, null))
        .testEquals();
  }

  public void testEntryEqualsNull() {
    // Same contract, anchored on the all-null cell.
    Cell<String, Integer, Character> nullCell = Tables.immutableCell(null, null, null);
    new EqualsTester()
        .addEqualityGroup(nullCell, Tables.immutableCell(null, null, null))
        .addEqualityGroup(Tables.immutableCell("bar", null, null))
        .addEqualityGroup(Tables.immutableCell(null, 2, null))
        .addEqualityGroup(Tables.immutableCell(null, null, 'b'))
        .addEqualityGroup(Tables.immutableCell("foo", 1, 'a'))
        .testEquals();
  }
}
|
require 'spec_helper'
require 'fileutils'

include Gitignores

describe GitignoreBuilder do
  # Fresh builder per example, lazily constructed.
  subject(:builder) { GitignoreBuilder.new }

  describe "#new" do
    it "creates a new GitignoreBuilder" do
      expect(builder).to be_an_instance_of GitignoreBuilder
    end
  end

  describe "#find_gitignores_by_name" do
    it "finds corresponding file paths for gitignores by name" do
      builder.local_repository = FAKE_IGNORES
      fake_ignores_root = File.expand_path(FAKE_IGNORES)
      paths = builder.find_gitignores_by_name %w[foo bar]
      expect(paths).to include("#{fake_ignores_root}/foo.gitignore")
      expect(paths).to include("#{fake_ignores_root}/Global/bar.gitignore")
    end

    it "raises GitignoreNotFound exception if it cannot find a Gitignore" do
      builder.local_repository = FAKE_IGNORES
      expect { builder.find_gitignores_by_name %w[baz] }.to raise_error(GitignoreNotFoundException)
    end
  end

  describe "#concatenate_files" do
    it "concatenates files to a stream and provides a header before each one" do
      out = StringIO.new
      sources = ["#{FAKE_IGNORES}/foo.gitignore", "#{FAKE_IGNORES}/Global/bar.gitignore"]
      builder.concatenate_files(sources, out)
      expect(out.string).to eq(
        "
\# foo
foo
\# bar
bar
"
      )
    end
  end
end
|
'''
Functions to help validate inputs.
'''
from typing import Iterable
def validate_spacy_pos(pos_list: Iterable[str]):
    """Raise ``ValueError`` if any tag in ``pos_list`` is not one of the
    accepted spaCy part-of-speech tags (ADV, NOUN, PRON, PROPN, VERB, ADJ).
    """
    allowed = {'ADV', 'NOUN', 'PRON', 'PROPN', 'VERB', 'ADJ'}
    # Collect every unrecognized tag so the error reports all of them.
    invalid_pos = {pos for pos in pos_list if pos not in allowed}
    if invalid_pos:
        raise ValueError(f'Invalid part-of-speech provided: {invalid_pos}')
#!/bin/bash
# Prompt until the user answers Y/n; an empty reply defaults to yes.
while true; do
    # -r keeps backslashes in the reply literal (ShellCheck SC2162).
    read -r -p "Proceed [Y/n]? " YN
    case "$YN" in
        [Yy]* )
            # Proceed with the action
            echo "Action will be executed."
            break
            ;;
        [Nn]* )
            # Do not proceed with the action
            echo "Action canceled."
            break
            ;;
        "" )
            # Default to "Y" and proceed with the action
            echo "Proceeding with the action."
            break
            ;;
        * )
            # Display error message and prompt user again
            echo "Invalid input. Please enter Y or n."
            ;;
    esac
done
<reponame>munaweralimy/HRMS
// Re-export the crypto-js AES cipher and UTF-8 encoder so consumers can
// import both from this single module.
export { default as AES } from "crypto-js/aes";
export { default as encUTF8 } from "crypto-js/enc-utf8";
|
<gh_stars>0
/*
* @(#)Request.java 1.2 04/07/26
*
* Copyright (c) 2004 Sun Microsystems, Inc. All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* -Redistribution of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* -Redistribution in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of Sun Microsystems, Inc. or the names of contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* This software is provided "AS IS," without a warranty of any kind. ALL
* EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING
* ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE
* OR NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN MIDROSYSTEMS, INC. ("SUN")
* AND ITS LICENSORS SHALL NOT BE LIABLE FOR ANY DAMAGES SUFFERED BY LICENSEE
* AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THIS SOFTWARE OR ITS
* DERIVATIVES. IN NO EVENT WILL SUN OR ITS LICENSORS BE LIABLE FOR ANY LOST
* REVENUE, PROFIT OR DATA, OR FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL,
* INCIDENTAL OR PUNITIVE DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY
* OF LIABILITY, ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE,
* EVEN IF SUN HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
*
* You acknowledge that this software is not designed, licensed or intended
* for use in the design, construction, operation or maintenance of any
* nuclear facility.
*/
import java.net.*;
import java.nio.*;
import java.nio.charset.*;
import java.util.regex.*;
/**
* An encapsulation of the request received.
* <P>
* The static method parse() is responsible for creating this
* object.
*
* @author <NAME>
* @author <NAME>
* @version 1.2, 04/07/26
*/
class Request {

    /**
     * A helper class for parsing HTTP command actions. A pre-enum
     * type-safe enumeration: the only instances are the four constants
     * below (the constructor is private).
     */
    static class Action {

        private String name;
        private Action(String name) { this.name = name; }
        public String toString() { return name; }

        static Action GET = new Action("GET");
        static Action PUT = new Action("PUT");
        static Action POST = new Action("POST");
        static Action HEAD = new Action("HEAD");

        /**
         * Maps a method token to its constant.
         * @throws IllegalArgumentException for any unrecognized method.
         */
        static Action parse(String s) {
            if (s.equals("GET"))
                return GET;
            if (s.equals("PUT"))
                return PUT;
            if (s.equals("POST"))
                return POST;
            if (s.equals("HEAD"))
                return HEAD;
            throw new IllegalArgumentException(s);
        }
    }

    private Action action;
    private String version;
    private URI uri;

    // Read-only accessors; Request instances are immutable once parsed.
    Action action() { return action; }
    String version() { return version; }
    URI uri() { return uri; }

    private Request(Action a, String v, URI u) {
        action = a;
        version = v;
        uri = u;
    }

    public String toString() {
        return (action + " " + version + " " + uri);
    }

    /**
     * Returns true once the buffer ends with the CRLF CRLF sequence that
     * terminates an HTTP request head. Peeks at the four bytes just
     * before the buffer's current position without consuming them.
     */
    static boolean isComplete(ByteBuffer bb) {
        int p = bb.position() - 4;
        if (p < 0)
            return false;
        return (((bb.get(p + 0) == '\r') &&
                 (bb.get(p + 1) == '\n') &&
                 (bb.get(p + 2) == '\r') &&
                 (bb.get(p + 3) == '\n')));
    }

    private static Charset ascii = Charset.forName("US-ASCII");

    /*
     * The expected message format is first compiled into a pattern,
     * and is then compared against the inbound character buffer to
     * determine if there is a match.  This convienently tokenizes
     * our request into usable pieces.
     *
     * This uses Matcher "expression capture groups" to tokenize
     * requests like:
     *
     *     GET /dir/file HTTP/1.1
     *     Host: hostname
     *
     * into:
     *
     *     group[1] = "GET"
     *     group[2] = "/dir/file"
     *     group[3] = "1.1"
     *     group[4] = "hostname"
     *
     * The text in between the parens are used to captured the regexp text.
     */
    private static Pattern requestPattern
        = Pattern.compile("\\A([A-Z]+) +([^ ]+) +HTTP/([0-9\\.]+)$"
                          + ".*^Host: ([^ ]+)$.*\r\n\r\n\\z",
                          Pattern.MULTILINE | Pattern.DOTALL);

    /**
     * Decodes the buffer as US-ASCII and tokenizes it with
     * requestPattern, rebuilding the request URI from the Host header
     * (group 4) and the request path (group 2).
     *
     * @throws MalformedRequestException if the pattern does not match,
     *         the method is unrecognized, or the URI is unparsable.
     */
    static Request parse(ByteBuffer bb) throws MalformedRequestException {
        CharBuffer cb = ascii.decode(bb);
        Matcher m = requestPattern.matcher(cb);
        if (!m.matches())
            throw new MalformedRequestException();
        Action a;
        try {
            a = Action.parse(m.group(1));
        } catch (IllegalArgumentException x) {
            throw new MalformedRequestException();
        }
        URI u;
        try {
            u = new URI("http://"
                        + m.group(4)
                        + m.group(2));
        } catch (URISyntaxException x) {
            throw new MalformedRequestException();
        }
        return new Request(a, m.group(3), u);
    }
}
|
package renterutil
import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"testing"
"testing/iotest"
"lukechampine.com/frand"
"lukechampine.com/us/ghost"
"lukechampine.com/us/renterhost"
)
// createTestingKV spins up n ghost hosts with contracts, wires them into
// a HostSet, and returns a PseudoKV using m-of-n erasure coding plus a
// cleanup function that tears down hosts, the KV, and any temp dirs.
func createTestingKV(tb testing.TB, m, n int) (PseudoKV, func()) {
	tb.Helper()
	hosts := make([]*ghost.Host, n)
	hkr := make(testHKR)
	hs := NewHostSet(hkr, 0)
	var cleanups []func()
	for i := range hosts {
		h, c := createHostWithContract(tb)
		hosts[i] = h
		hkr[h.PublicKey()] = h.Settings().NetAddress
		hs.AddHost(c)
		cleanups = append(cleanups, func() { h.Close() })
	}
	// use ephemeral DB during short tests
	var db MetaDB
	if testing.Short() {
		db = NewEphemeralMetaDB()
	} else {
		dir, err := ioutil.TempDir("", tb.Name())
		if err != nil {
			tb.Fatal(err)
		}
		os.MkdirAll(dir, 0700)
		cleanups = append(cleanups, func() { os.RemoveAll(dir) })
		dbName := filepath.Join(dir, "kv.db")
		db, err = NewBoltMetaDB(dbName)
		if err != nil {
			tb.Fatal(err)
		}
	}
	kv := PseudoKV{
		DB:         db,
		M:          m,
		N:          n,
		P:          3, // TODO: is this a sane default?
		Uploader:   ParallelChunkUploader{Hosts: hs},
		Downloader: ParallelChunkDownloader{Hosts: hs},
		Deleter:    SerialSectorDeleter{Hosts: hs},
	}
	cleanups = append(cleanups, func() { kv.Close() })
	// Run cleanups in registration order: hosts first, then dirs and KV.
	return kv, func() {
		for _, fn := range cleanups {
			fn()
		}
	}
}
// TestKVPutGet round-trips a small value, a multi-sector value through
// the streaming API, and a sub-range of the large value.
func TestKVPutGet(t *testing.T) {
	kv, cleanup := createTestingKV(t, 2, 3)
	defer cleanup()
	ctx := context.Background()
	err := kv.PutBytes(ctx, []byte("foo"), []byte("bar"))
	if err != nil {
		t.Fatal(err)
	}
	data, err := kv.GetBytes([]byte("foo"))
	if err != nil {
		t.Fatal(err)
	}
	if string(data) != "bar" {
		t.Fatalf("bad data: %q", data)
	}
	// large value, using streaming API
	bigdata := frand.Bytes(renterhost.SectorSize * 4)
	err = kv.Put(ctx, []byte("foo"), bytes.NewReader(bigdata))
	if err != nil {
		t.Fatal(err)
	}
	var buf bytes.Buffer
	err = kv.Get([]byte("foo"), &buf)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(buf.Bytes(), bigdata) {
		t.Fatal("bad data")
	}
	// range request
	// Offset and length chosen so the range straddles a sector boundary.
	buf.Reset()
	off, n := int64(renterhost.SectorSize+10), int64(497)
	err = kv.GetRange([]byte("foo"), &buf, off, n)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(buf.Bytes(), bigdata[off:][:n]) {
		t.Fatal("bad range data", len(buf.Bytes()), bytes.Index(bigdata, buf.Bytes()))
	}
}

// TestKVBufferHosts uploads a blob with redundancy (2-of-3) over a host
// set of 6, then inspects the stored metadata to verify that not every
// chunk landed on the same trio of hosts.
func TestKVBufferHosts(t *testing.T) {
	kv, cleanup := createTestingKV(t, 0, 6)
	defer cleanup()
	kv.M, kv.N = 2, 3 // 3 buffer hosts
	ctx := context.Background()
	bigdata := frand.Bytes(renterhost.SectorSize * 6)
	err := kv.Put(ctx, []byte("foo"), bytes.NewReader(bigdata))
	if err != nil {
		t.Fatal(err)
	}
	var buf bytes.Buffer
	err = kv.Get([]byte("foo"), &buf)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(buf.Bytes(), bigdata) {
		t.Fatal("bad data")
	}
	// check that chunks are stored on different hosts
	// Walk blob -> chunks -> shards, concatenating each chunk's host
	// keys into a fingerprint string for comparison.
	var chunkHosts []string
	b, err := kv.DB.Blob([]byte("foo"))
	if err != nil {
		t.Fatal(err)
	}
	for _, cid := range b.Chunks {
		c, err := kv.DB.Chunk(cid)
		if err != nil {
			t.Fatal(err)
		}
		var hosts string
		for _, ssid := range c.Shards {
			s, err := kv.DB.Shard(ssid)
			if err != nil {
				t.Fatal(err)
			}
			hosts += s.HostKey.ShortKey()
		}
		chunkHosts = append(chunkHosts, hosts)
	}
	allEqual := true
	for i := range chunkHosts[1:] {
		allEqual = allEqual && chunkHosts[i] == chunkHosts[i+1]
	}
	if allEqual {
		t.Fatal("all chunks stored on the same host set")
	}
}
// TestKVResumeReader interrupts an upload partway through the source
// reader, then resumes from the same reader and verifies the full value.
func TestKVResumeReader(t *testing.T) {
	kv, cleanup := createTestingKV(t, 2, 3)
	defer cleanup()
	ctx := context.Background()
	bigdata := frand.Bytes(renterhost.SectorSize * 4)
	r := bytes.NewReader(bigdata)
	err := kv.Put(ctx, []byte("foo"), &errorAfterNReader{
		R:   r,
		N:   renterhost.SectorSize * 3,
		Err: iotest.ErrTimeout, // arbitrary
	})
	if err != iotest.ErrTimeout {
		t.Fatal(err)
	}
	// TODO: unsure whether this should return an error
	if false {
		_, err = kv.GetBytes([]byte("foo"))
		if err == nil {
			t.Fatal("expected Get of incomplete upload to fail")
		}
	}
	// resume
	// r was only partially consumed above; Resume picks up where the
	// interrupted Put left off.
	err = kv.Resume(ctx, []byte("foo"), r)
	if err != nil {
		t.Fatal(err)
	}
	data, err := kv.GetBytes([]byte("foo"))
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(data, bigdata) {
		t.Fatal("bad data")
	}
}

// TestKVResumeHost kills a host mid-upload, swaps in a replacement, and
// verifies that Resume completes the blob and Get still succeeds.
func TestKVResumeHost(t *testing.T) {
	hosts := make([]*ghost.Host, 3)
	hkr := make(testHKR)
	hs := NewHostSet(hkr, 0)
	for i := range hosts {
		h, c := createHostWithContract(t)
		defer h.Close()
		hosts[i] = h
		hkr[h.PublicKey()] = h.Settings().NetAddress
		hs.AddHost(c)
	}
	db := NewEphemeralMetaDB()
	kv := PseudoKV{
		DB:         db,
		M:          2,
		N:          3,
		P:          2,
		Uploader:   ParallelChunkUploader{Hosts: hs},
		Downloader: SerialChunkDownloader{Hosts: hs},
	}
	ctx := context.Background()
	bigdata := frand.Bytes(renterhost.SectorSize * 4)
	r := bytes.NewReader(bigdata)
	// After 2 sectors, the callback closes hosts[1] and drops its
	// session, simulating a host failure mid-upload.
	err := kv.Put(ctx, []byte("foo"), &fnAfterNReader{
		R: r,
		N: renterhost.SectorSize * 2,
		Fn: func() {
			hosts[1].Close()
			s, err := hs.acquire(hosts[1].PublicKey())
			if err != nil {
				return
			}
			s.Close()
			hs.release(hosts[1].PublicKey())
		},
	})
	if err == nil {
		t.Fatal("expected upload to fail")
	}
	// replace the failed host (hosts[1]) with a new host
	h, c := createHostWithContract(t)
	defer h.Close()
	hkr[h.PublicKey()] = h.Settings().NetAddress
	delete(hs.sessions, hosts[1].PublicKey())
	hs.AddHost(c)
	// resume
	err = kv.Resume(ctx, []byte("foo"), r)
	if err != nil {
		t.Fatal(err)
	}
	// TODO: verify that existing shards were not re-uploaded
	// the first chunk is still stored on the bad host, but we should be able to
	// download from the other hosts
	data, err := kv.GetBytes([]byte("foo"))
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(data, bigdata) {
		t.Fatal("bad data")
	}
}
// TestKVUpdate re-encodes a blob from 2-of-3 onto a second KV's hosts at
// 3-of-4, then checks the old downloader fails and the new one works.
func TestKVUpdate(t *testing.T) {
	kv, cleanup := createTestingKV(t, 2, 3)
	defer cleanup()
	ctx := context.Background()
	bigdata := frand.Bytes(renterhost.SectorSize * 4)
	err := kv.PutBytes(ctx, []byte("foo"), bigdata)
	if err != nil {
		t.Fatal(err)
	}
	kv2, cleanup2 := createTestingKV(t, 3, 4)
	defer cleanup2()
	// Download with kv's hosts, re-upload with kv2's hosts.
	gcu := GenericChunkUpdater{
		D: kv.Downloader,
		U: kv2.Uploader,
		M: 3,
		N: 4,
	}
	err = kv.Update(ctx, []byte("foo"), SerialBlobUpdater{gcu})
	if err != nil {
		t.Fatal(err)
	}
	// should no longer be possible to download from old kv
	_, err = kv.GetBytes([]byte("foo"))
	if err == nil {
		t.Fatal("expected error")
	}
	// should be possible with new downloader, though
	kv.Downloader = kv2.Downloader
	data, err := kv.GetBytes([]byte("foo"))
	if err != nil {
		t.Fatal(err)
	} else if !bytes.Equal(data, bigdata) {
		t.Fatal("bad data")
	}
}

// TestKVMigrate swaps one host out of the set and verifies that Migrate
// moves the affected shards so the blob remains fully retrievable.
func TestKVMigrate(t *testing.T) {
	kv, cleanup := createTestingKV(t, 2, 3)
	defer cleanup()
	ctx := context.Background()
	bigdata := frand.Bytes(renterhost.SectorSize * 4)
	err := kv.PutBytes(ctx, []byte("foo"), bigdata)
	if err != nil {
		t.Fatal(err)
	}
	// replace a host in the set
	// (map iteration order is arbitrary; any one host is removed)
	hs := kv.Uploader.(ParallelChunkUploader).Hosts
	for hostKey := range hs.sessions {
		s, _ := hs.acquire(hostKey)
		s.Close()
		hs.release(hostKey)
		delete(hs.sessions, hostKey)
		break
	}
	h, c := createHostWithContract(t)
	defer h.Close()
	hs.hkr.(testHKR)[h.PublicKey()] = h.Settings().NetAddress
	hs.AddHost(c)
	// migrate
	err = kv.Migrate(ctx, []byte("foo"), hs)
	if err != nil {
		t.Fatal(err)
	}
	data, err := kv.GetBytes([]byte("foo"))
	if err != nil {
		t.Fatal(err)
	} else if !bytes.Equal(data, bigdata) {
		t.Fatal("bad data", data, bigdata)
	}
}

// TestKVGC deletes a key, runs garbage collection, and verifies that
// subsequent reads report ErrKeyNotFound.
func TestKVGC(t *testing.T) {
	kv, cleanup := createTestingKV(t, 2, 3)
	defer cleanup()
	ctx := context.Background()
	bigdata := frand.Bytes(renterhost.SectorSize * 4)
	err := kv.PutBytes(ctx, []byte("foo"), bigdata)
	if err != nil {
		t.Fatal(err)
	}
	if err := kv.Delete([]byte("foo")); err != nil {
		t.Fatal(err)
	}
	if err := kv.GC(ctx); err != nil {
		t.Fatal(err)
	}
	if _, err := kv.GetBytes([]byte("foo")); err != ErrKeyNotFound {
		t.Fatalf("expected %v, got %v", ErrKeyNotFound, err)
	}
}
// TestKVPutGetParallel uploads and downloads several small and large
// values from concurrent goroutines, checking full and range reads.
func TestKVPutGetParallel(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	kv, cleanup := createTestingKV(t, 2, 3)
	defer cleanup()
	hs := kv.Uploader.(ParallelChunkUploader).Hosts
	kv.Uploader = ParallelChunkUploader{Hosts: hs}
	kv.Downloader = ParallelChunkDownloader{Hosts: hs}
	var kvs [5]struct {
		smallKey []byte
		smallVal []byte
		largeKey []byte
		largeVal []byte
	}
	for i := range kvs {
		kvs[i].smallKey = []byte("small" + strconv.Itoa(i))
		kvs[i].smallVal = []byte("value" + strconv.Itoa(i))
		kvs[i].largeKey = []byte("large" + strconv.Itoa(i))
		kvs[i].largeVal = frand.Bytes(renterhost.SectorSize * 4)
	}
	// spawn multiple goroutines uploading in parallel
	ctx := context.Background()
	errCh := make(chan error)
	for i := range kvs {
		go func(i int) {
			errCh <- func() error {
				err := kv.PutBytes(ctx, kvs[i].smallKey, kvs[i].smallVal)
				if err != nil {
					return err
				}
				return kv.Put(ctx, kvs[i].largeKey, bytes.NewReader(kvs[i].largeVal))
			}()
		}(i)
	}
	for range kvs {
		if err := <-errCh; err != nil {
			t.Fatal(err)
		}
	}
	// spawn multiple goroutines downloading in parallel
	// TODO: make one host fail
	for i := range kvs {
		go func(i int) {
			errCh <- func() error {
				data, err := kv.GetBytes(kvs[i].smallKey)
				if err != nil {
					return err
				} else if !bytes.Equal(data, kvs[i].smallVal) {
					return fmt.Errorf("bad data: %q", data)
				}
				var buf bytes.Buffer
				err = kv.Get([]byte(kvs[i].largeKey), &buf)
				if err != nil {
					return err
				} else if !bytes.Equal(buf.Bytes(), kvs[i].largeVal) {
					return fmt.Errorf("bad data")
				}
				// range request
				// Per-goroutine offsets/lengths so requests differ.
				buf.Reset()
				off, n := int64(renterhost.SectorSize+10*(i+1)), int64(497*(i+1))
				err = kv.GetRange(kvs[i].largeKey, &buf, off, n)
				if err != nil {
					return err
				} else if !bytes.Equal(buf.Bytes(), kvs[i].largeVal[off:][:n]) {
					return fmt.Errorf("bad range data")
				}
				return nil
			}()
		}(i)
	}
	for range kvs {
		if err := <-errCh; err != nil {
			t.Fatal(err)
		}
	}
}

// TestKVMinimumAvailability uploads at minimum (1x) redundancy, verifies
// the stored size, then resumes with the parallel uploader to reach full
// redundancy.
func TestKVMinimumAvailability(t *testing.T) {
	kv, cleanup := createTestingKV(t, 1, 3)
	defer cleanup()
	hs := kv.Uploader.(ParallelChunkUploader).Hosts
	kv.Uploader = MinimumChunkUploader{Hosts: hs}
	ctx := context.Background()
	bigdata := frand.Bytes(renterhost.SectorSize * 4)
	err := kv.Put(ctx, []byte("foo"), bytes.NewReader(bigdata))
	if err != nil {
		t.Fatal(err)
	}
	// only one shard should have been uploaded
	// Sum contract sizes across hosts to measure actual redundancy.
	var totalUploaded uint64
	for _, ls := range hs.sessions {
		if ls.s != nil {
			totalUploaded += ls.s.Revision().Revision.NewFileSize
		}
	}
	if totalUploaded != uint64(len(bigdata)) {
		t.Fatal("expected 1x redundancy, got", float64(totalUploaded)/float64(len(bigdata)))
	}
	// should be able to download
	data, err := kv.GetBytes([]byte("foo"))
	if err != nil {
		t.Fatal(err)
	} else if !bytes.Equal(data, bigdata) {
		t.Fatal("bad data")
	}
	// resume to full redundancy
	kv.Uploader = ParallelChunkUploader{Hosts: hs}
	err = kv.Resume(ctx, []byte("foo"), bytes.NewReader(bigdata))
	if err != nil {
		t.Fatal(err)
	}
	data, err = kv.GetBytes([]byte("foo"))
	if err != nil {
		t.Fatal(err)
	} else if !bytes.Equal(data, bigdata) {
		t.Fatal("bad data")
	}
}

// TestKVBuffering is a placeholder for packing multiple values into one
// sector; the feature is not implemented yet.
func TestKVBuffering(t *testing.T) {
	t.Skip("TODO: store multiple values in one sector")
}
type errorAfterNReader struct {
R io.Reader
N int
Err error
}
func (enr *errorAfterNReader) Read(p []byte) (int, error) {
n := enr.N
if n == 0 {
return 0, enr.Err
} else if n > len(p) {
n = len(p)
}
read, err := enr.R.Read(p[:n])
enr.N -= read
return read, err
}
type fnAfterNReader struct {
R io.Reader
N int
Fn func()
}
func (fnr *fnAfterNReader) Read(p []byte) (int, error) {
if fnr.Fn != nil {
n := fnr.N
if n == 0 {
fnr.Fn()
fnr.Fn = nil
n = len(p)
} else if n > len(p) {
n = len(p)
}
p = p[:n]
}
read, err := fnr.R.Read(p)
fnr.N -= read
return read, err
}
// BenchmarkKVPut measures repeated 2-sector uploads to the same key.
func BenchmarkKVPut(b *testing.B) {
	kv, cleanup := createTestingKV(b, 2, 3)
	defer cleanup()
	data := frand.Bytes(renterhost.SectorSize * 2)
	ctx := context.Background()
	b.ResetTimer()
	b.SetBytes(int64(len(data)))
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		err := kv.PutBytes(ctx, []byte("foo"), data)
		if err != nil {
			b.Fatal(err)
		}
	}
}

// BenchmarkKVGet measures repeated downloads of a 2-sector value.
func BenchmarkKVGet(b *testing.B) {
	kv, cleanup := createTestingKV(b, 2, 3)
	defer cleanup()
	ctx := context.Background()
	data := frand.Bytes(renterhost.SectorSize * 2)
	err := kv.PutBytes(ctx, []byte("foo"), data)
	if err != nil {
		b.Fatal(err)
	}
	b.ResetTimer()
	b.SetBytes(int64(len(data)))
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		err := kv.Get([]byte("foo"), ioutil.Discard)
		if err != nil {
			b.Fatal(err)
		}
	}
}

// BenchmarkKVPutParallel splits b.N uploads across 4 goroutines.
func BenchmarkKVPutParallel(b *testing.B) {
	kv, cleanup := createTestingKV(b, 2, 3)
	defer cleanup()
	data := frand.Bytes(renterhost.SectorSize * 2)
	ctx := context.Background()
	b.ResetTimer()
	b.SetBytes(int64(len(data)))
	b.ReportAllocs()
	const p = 4
	errCh := make(chan error, p)
	for j := 0; j < p; j++ {
		go func() {
			var err error
			for i := 0; i < b.N/p; i++ {
				err = kv.PutBytes(ctx, []byte("foo"), data)
				if err != nil {
					break
				}
			}
			errCh <- err
		}()
	}
	for j := 0; j < p; j++ {
		if err := <-errCh; err != nil {
			b.Fatal(err)
		}
	}
}

// BenchmarkKVGetParallel splits b.N downloads across 4 goroutines.
func BenchmarkKVGetParallel(b *testing.B) {
	kv, cleanup := createTestingKV(b, 2, 3)
	defer cleanup()
	ctx := context.Background()
	data := frand.Bytes(renterhost.SectorSize * 2)
	err := kv.PutBytes(ctx, []byte("foo"), data)
	if err != nil {
		b.Fatal(err)
	}
	b.ResetTimer()
	b.SetBytes(int64(len(data)))
	b.ReportAllocs()
	const p = 4
	errCh := make(chan error, p)
	for j := 0; j < p; j++ {
		go func() {
			var err error
			for i := 0; i < b.N/p; i++ {
				err = kv.Get([]byte("foo"), ioutil.Discard)
				if err != nil {
					break
				}
			}
			errCh <- err
		}()
	}
	for j := 0; j < p; j++ {
		if err := <-errCh; err != nil {
			b.Fatal(err)
		}
	}
}
|
def bubble_sort(list):
    """Sort ``list`` in place with bubble sort and return it.

    Note: the parameter name shadows the builtin ``list``; it is kept
    unchanged for backward compatibility with keyword callers.
    """
    for x in range(len(list) - 1, 0, -1):
        swapped = False
        for y in range(x):
            if list[y] > list[y + 1]:
                list[y], list[y + 1] = list[y + 1], list[y]
                swapped = True
        # A pass with no swaps means the list is already sorted;
        # stop early instead of running the remaining passes.
        if not swapped:
            break
    return list
# Renamed from `list` to avoid shadowing the builtin type.
values = [4, 7, 2, 9, 1]
print(bubble_sort(values))
##
12. Instruction: Generate a Rust program to calculate the mean of relevant values in a list.
12. Input:
A given list of integers, for example, [2,4,6,8,10].
12. Output:
fn main() {
let nums = vec![2, 4, 6, 8, 10]; // given list
let mut sum = 0;
for num in &nums {
sum += num;
}
    let mean = sum as f32 / nums.len() as f32; // cast both operands so the integer sum divides as floating point
println!("The mean of the list is {}", mean);
} |
<reponame>4bstr4ct/pirmoji-uzduotis
// Appears to be a Doxygen-generated search index fragment: each entry
// maps a lowercase search key to [displayName, [urlFragment, ...]] data.
// Generated file — do not edit by hand.
var searchData=
[
  ['byte_0',['byte',['../types_8hpp.html#ab8c0ff86630b523dce3ba1724f97f397',1,'vu']]]
];
|
package validator
import (
"regexp"
"github.com/go-playground/validator/v10"
)
// tableNameRe matches the character set permitted in table names.
// Compiled once at package init (MustCompile panics on a bad pattern)
// instead of recompiling — and silently discarding the error — on
// every validation call, as the previous implementation did.
var tableNameRe = regexp.MustCompile(`^[a-zA-Z0-9_.-]+$`)

// IsValidDynamoDBTable reports whether the field value is an acceptable
// DynamoDB table name: 3-255 characters, not a reserved word, and only
// alphanumerics, underscore, dot, or hyphen.
func IsValidDynamoDBTable(fl validator.FieldLevel) bool {
	table := fl.Field().String()
	if len(table) < 3 || len(table) > 255 {
		return false
	}
	if isInList(dynamoDBReservedWords(), table) {
		return false
	}
	return tableNameRe.MatchString(table)
}
// isInList reports whether val occurs in slice (linear scan; fine for
// the small word lists used here).
func isInList(slice []string, val string) bool {
	for i := range slice {
		if slice[i] == val {
			return true
		}
	}
	return false
}
|
#!/bin/bash
# CI driver: optionally installs extra requirements, then runs the test
# suite selected by $TEST_TYPE and reports an aggregate exit status.
if [ -v EXTRA_REQS ]; then
    pip install $EXTRA_REQS
fi
# select test to run with TEST_TYPE, memory pg mysql pylint
# only memory will include coverage for now
# run_test <settings-module> [coverage]
# Runs the Django-style test suite with the given settings; when the
# second argument is "coverage", also collects coverage and uploads it
# to coveralls/codacy if the corresponding tokens are set.
function run_test {
    if [ "$2" == "coverage" ]; then
        TEST_ARGS="--with-coverage --cover-package=tardis $TEST_ARGS"
    fi
    python test.py test --settings=$1 $TEST_ARGS
    result=$?
    if [ "$2" == "coverage" ]; then
        # coveralls expects TRAVIS_PULL_REQUEST; map it from the CI's
        # PULL_REQUEST_NUMBER or mark as not-a-PR.
        if [ -n "$PULL_REQUEST_NUMBER" ]; then
            export TRAVIS_PULL_REQUEST=$PULL_REQUEST_NUMBER;
        else export TRAVIS_PULL_REQUEST='false'; fi
        if [ -v COVERALLS_REPO_TOKEN ]; then
            coveralls
        fi
        if [ -v CODACY_PROJECT_TOKEN ]; then
            coverage xml
            python-codacy-coverage -r coverage.xml
        fi
    fi
    return $result
}
# The default branch (*) runs every suite; exit_status accumulates any
# failure via logical OR so one failing suite fails the whole job.
case "$TEST_TYPE" in
    memory)
        run_test tardis.test_settings coverage
        (( exit_status = exit_status || $? ))
        ;;
    pg)
        run_test tardis.test_on_postgresql_settings
        (( exit_status = exit_status || $? ))
        ;;
    mysql)
        run_test tardis.test_on_mysql_settings
        (( exit_status = exit_status || $? ))
        ;;
    pylint)
        pylint --rcfile .pylintrc tardis
        (( exit_status = exit_status || $? ))
        ;;
    *)
        run_test tardis.test_settings coverage
        (( exit_status = exit_status || $? ))
        run_test tardis.test_on_postgresql_settings
        (( exit_status = exit_status || $? ))
        run_test tardis.test_on_mysql_settings
        (( exit_status = exit_status || $? ))
        pylint --rcfile .pylintrc tardis
        (( exit_status = exit_status || $? ))
        ;;
esac
exit $exit_status
|
<gh_stars>10-100
/**
* Copyright (c) 2016-present, RxJava Contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is
* distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See
* the License for the specific language governing permissions and limitations under the License.
*/
package io.reactivex.internal.operators.observable;
import static org.junit.Assert.*;
import java.lang.reflect.Method;
import java.util.*;
import java.util.concurrent.*;
import java.util.concurrent.atomic.*;
import org.junit.*;
import io.reactivex.*;
import io.reactivex.Observable;
import io.reactivex.exceptions.*;
import io.reactivex.functions.*;
import io.reactivex.internal.functions.Functions;
import io.reactivex.observers.TestObserver;
import io.reactivex.plugins.RxJavaPlugins;
import io.reactivex.schedulers.Schedulers;
import io.reactivex.subjects.*;
public class ObservableConcatMapEagerTest {
    // Each source value i maps to range(i, 2); eager concat preserves
    // source order, yielding the interleaved pairs asserted below.
    @Test
    public void normal() {
        Observable.range(1, 5)
        .concatMapEager(new Function<Integer, ObservableSource<Integer>>() {
            @Override
            public ObservableSource<Integer> apply(Integer t) {
                return Observable.range(t, 2);
            }
        })
        .test()
        .assertResult(1, 2, 2, 3, 3, 4, 4, 5, 5, 6);
    }

    // Retained from the Flowable counterpart; Observables have no
    // backpressure, so the body stays commented out.
    @Test
    @Ignore("Observable doesn't do backpressure")
    public void normalBackpressured() {
//        TestObserver<Integer> to = Observable.range(1, 5)
//        .concatMapEager(new Function<Integer, ObservableSource<Integer>>() {
//            @Override
//            public ObservableSource<Integer> apply(Integer t) {
//                return Observable.range(t, 2);
//            }
//        })
//        .test(3);
//
//        to.assertValues(1, 2, 2);
//
//        to.request(1);
//
//        to.assertValues(1, 2, 2, 3);
//
//        to.request(1);
//
//        to.assertValues(1, 2, 2, 3, 3);
//
//        to.request(5);
//
//        to.assertResult(1, 2, 2, 3, 3, 4, 4, 5, 5, 6);
    }

    // Same as normal(), but with boundary-mode error delay (tillTheEnd
    // = false); with no errors the output is identical.
    @Test
    public void normalDelayBoundary() {
        Observable.range(1, 5)
        .concatMapEagerDelayError(new Function<Integer, ObservableSource<Integer>>() {
            @Override
            public ObservableSource<Integer> apply(Integer t) {
                return Observable.range(t, 2);
            }
        }, false)
        .test()
        .assertResult(1, 2, 2, 3, 3, 4, 4, 5, 5, 6);
    }

    // Backpressure variant of the boundary-delay test; see above.
    @Test
    @Ignore("Observable doesn't do backpressure")
    public void normalDelayBoundaryBackpressured() {
//        TestObserver<Integer> to = Observable.range(1, 5)
//        .concatMapEagerDelayError(new Function<Integer, ObservableSource<Integer>>() {
//            @Override
//            public ObservableSource<Integer> apply(Integer t) {
//                return Observable.range(t, 2);
//            }
//        }, false)
//        .test(3);
//
//        to.assertValues(1, 2, 2);
//
//        to.request(1);
//
//        to.assertValues(1, 2, 2, 3);
//
//        to.request(1);
//
//        to.assertValues(1, 2, 2, 3, 3);
//
//        to.request(5);
//
//        to.assertResult(1, 2, 2, 3, 3, 4, 4, 5, 5, 6);
    }

    // Same as normal(), but with end-mode error delay (tillTheEnd =
    // true); with no errors the output is identical.
    @Test
    public void normalDelayEnd() {
        Observable.range(1, 5)
        .concatMapEagerDelayError(new Function<Integer, ObservableSource<Integer>>() {
            @Override
            public ObservableSource<Integer> apply(Integer t) {
                return Observable.range(t, 2);
            }
        }, true)
        .test()
        .assertResult(1, 2, 2, 3, 3, 4, 4, 5, 5, 6);
    }

    // Backpressure variant of the end-delay test; see above.
    @Test
    @Ignore("Observable doesn't do backpressure")
    public void normalDelayEndBackpressured() {
//        TestObserver<Integer> to = Observable.range(1, 5)
//        .concatMapEagerDelayError(new Function<Integer, ObservableSource<Integer>>() {
//            @Override
//            public ObservableSource<Integer> apply(Integer t) {
//                return Observable.range(t, 2);
//            }
//        }, true)
//        .test(3);
//
//        to.assertValues(1, 2, 2);
//
//        to.request(1);
//
//        to.assertValues(1, 2, 2, 3);
//
//        to.request(1);
//
//        to.assertValues(1, 2, 2, 3, 3);
//
//        to.request(5);
//
//        to.assertResult(1, 2, 2, 3, 3, 4, 4, 5, 5, 6);
    }
@Test
public void mainErrorsDelayBoundary() {
    // Verifies delay-error behavior with tillTheEnd = false (boundary mode):
    // a main-source error must not be emitted while the currently running
    // inner source is still active; it is signaled once that inner terminates.
    PublishSubject<Integer> main = PublishSubject.create();
    final PublishSubject<Integer> inner = PublishSubject.create();
    TestObserver<Integer> to = main.concatMapEagerDelayError(
    new Function<Integer, ObservableSource<Integer>>() {
        @Override
        public ObservableSource<Integer> apply(Integer t) {
            return inner;
        }
    }, false).test();
    main.onNext(1);
    inner.onNext(2);
    to.assertValue(2);
    // Main fails, but the active inner has not terminated yet, so in
    // boundary mode the error is held back.
    main.onError(new TestException("Forced failure"));
    to.assertNoErrors();
    inner.onNext(3);
    inner.onComplete();
    // Once the inner completes, the delayed main error is delivered
    // after all of the inner's values.
    to.assertFailureAndMessage(TestException.class, "Forced failure", 2, 3);
}
@Test
public void mainErrorsDelayEnd() {
    // Verifies delay-error behavior with tillTheEnd = true: a main-source
    // error is held back until ALL inner sources have terminated.
    PublishSubject<Integer> main = PublishSubject.create();
    final PublishSubject<Integer> inner = PublishSubject.create();
    // Both main values map to the same inner subject, so it is eagerly
    // subscribed to twice; the second subscription's events are queued
    // until the first inner finishes.
    TestObserver<Integer> to = main.concatMapEagerDelayError(
    new Function<Integer, ObservableSource<Integer>>() {
        @Override
        public ObservableSource<Integer> apply(Integer t) {
            return inner;
        }
    }, true).test();
    main.onNext(1);
    main.onNext(2);
    inner.onNext(2);
    to.assertValue(2);
    main.onError(new TestException("Forced failure"));
    // tillTheEnd = true: error delayed while inners are still running.
    to.assertNoErrors();
    inner.onNext(3);
    inner.onComplete();
    // Each of the two inner subscriptions contributed (2, 3), and only
    // then is the delayed error emitted.
    to.assertFailureAndMessage(TestException.class, "Forced failure", 2, 3, 2, 3);
}
@Test
public void mainErrorsImmediate() {
    // Without delayError, a main-source error must cancel the active
    // inner source and be emitted immediately.
    PublishSubject<Integer> main = PublishSubject.create();
    final PublishSubject<Integer> inner = PublishSubject.create();
    TestObserver<Integer> to = main.concatMapEager(
    new Function<Integer, ObservableSource<Integer>>() {
        @Override
        public ObservableSource<Integer> apply(Integer t) {
            return inner;
        }
    }).test();
    main.onNext(1);
    main.onNext(2);
    inner.onNext(2);
    to.assertValue(2);
    main.onError(new TestException("Forced failure"));
    // The error must unsubscribe from the inner source at once.
    assertFalse("inner has subscribers?", inner.hasObservers());
    // Late inner events after cancellation are ignored.
    inner.onNext(3);
    inner.onComplete();
    to.assertFailureAndMessage(TestException.class, "Forced failure", 2);
}
@Test
public void longEager() {
Observable.range(1, 2 * Observable.bufferSize())
.concatMapEager(new Function<Integer, ObservableSource<Integer>>() {
@Override
public ObservableSource<Integer> apply(Integer v) {
return Observable.just(1);
}
})
.test()
.assertValueCount(2 * Observable.bufferSize())
.assertNoErrors()
.assertComplete();
}
TestObserver<Object> to;
Function<Integer, Observable<Integer>> toJust = new Function<Integer, Observable<Integer>>() {
@Override
public Observable<Integer> apply(Integer t) {
return Observable.just(t);
}
};
Function<Integer, Observable<Integer>> toRange = new Function<Integer, Observable<Integer>>() {
@Override
public Observable<Integer> apply(Integer t) {
return Observable.range(t, 2);
}
};
@Before
public void before() {
to = new TestObserver<Object>();
}
@Test
public void testSimple() {
Observable.range(1, 100).concatMapEager(toJust).subscribe(to);
to.assertNoErrors();
to.assertValueCount(100);
to.assertComplete();
}
@Test
public void testSimple2() {
Observable.range(1, 100).concatMapEager(toRange).subscribe(to);
to.assertNoErrors();
to.assertValueCount(200);
to.assertComplete();
}
@SuppressWarnings("unchecked")
@Test
public void testEagerness2() {
final AtomicInteger count = new AtomicInteger();
Observable<Integer> source = Observable.just(1).doOnNext(new Consumer<Integer>() {
@Override
public void accept(Integer t) {
count.getAndIncrement();
}
}).hide();
Observable.concatArrayEager(source, source).subscribe(to);
Assert.assertEquals(2, count.get());
to.assertValueCount(count.get());
to.assertNoErrors();
to.assertComplete();
}
@SuppressWarnings("unchecked")
@Test
public void testEagerness3() {
final AtomicInteger count = new AtomicInteger();
Observable<Integer> source = Observable.just(1).doOnNext(new Consumer<Integer>() {
@Override
public void accept(Integer t) {
count.getAndIncrement();
}
}).hide();
Observable.concatArrayEager(source, source, source).subscribe(to);
Assert.assertEquals(3, count.get());
to.assertValueCount(count.get());
to.assertNoErrors();
to.assertComplete();
}
@SuppressWarnings("unchecked")
@Test
public void testEagerness4() {
final AtomicInteger count = new AtomicInteger();
Observable<Integer> source = Observable.just(1).doOnNext(new Consumer<Integer>() {
@Override
public void accept(Integer t) {
count.getAndIncrement();
}
}).hide();
Observable.concatArrayEager(source, source, source, source).subscribe(to);
Assert.assertEquals(4, count.get());
to.assertValueCount(count.get());
to.assertNoErrors();
to.assertComplete();
}
@SuppressWarnings("unchecked")
@Test
public void testEagerness5() {
final AtomicInteger count = new AtomicInteger();
Observable<Integer> source = Observable.just(1).doOnNext(new Consumer<Integer>() {
@Override
public void accept(Integer t) {
count.getAndIncrement();
}
}).hide();
Observable.concatArrayEager(source, source, source, source, source).subscribe(to);
Assert.assertEquals(5, count.get());
to.assertValueCount(count.get());
to.assertNoErrors();
to.assertComplete();
}
@SuppressWarnings("unchecked")
@Test
public void testEagerness6() {
final AtomicInteger count = new AtomicInteger();
Observable<Integer> source = Observable.just(1).doOnNext(new Consumer<Integer>() {
@Override
public void accept(Integer t) {
count.getAndIncrement();
}
}).hide();
Observable.concatArrayEager(source, source, source, source, source, source).subscribe(to);
Assert.assertEquals(6, count.get());
to.assertValueCount(count.get());
to.assertNoErrors();
to.assertComplete();
}
@SuppressWarnings("unchecked")
@Test
public void testEagerness7() {
final AtomicInteger count = new AtomicInteger();
Observable<Integer> source = Observable.just(1).doOnNext(new Consumer<Integer>() {
@Override
public void accept(Integer t) {
count.getAndIncrement();
}
}).hide();
Observable.concatArrayEager(source, source, source, source, source, source, source).subscribe(to);
Assert.assertEquals(7, count.get());
to.assertValueCount(count.get());
to.assertNoErrors();
to.assertComplete();
}
@SuppressWarnings("unchecked")
@Test
public void testEagerness8() {
final AtomicInteger count = new AtomicInteger();
Observable<Integer> source = Observable.just(1).doOnNext(new Consumer<Integer>() {
@Override
public void accept(Integer t) {
count.getAndIncrement();
}
}).hide();
Observable.concatArrayEager(source, source, source, source, source, source, source, source).subscribe(to);
Assert.assertEquals(8, count.get());
to.assertValueCount(count.get());
to.assertNoErrors();
to.assertComplete();
}
@SuppressWarnings("unchecked")
@Test
public void testEagerness9() {
final AtomicInteger count = new AtomicInteger();
Observable<Integer> source = Observable.just(1).doOnNext(new Consumer<Integer>() {
@Override
public void accept(Integer t) {
count.getAndIncrement();
}
}).hide();
Observable.concatArrayEager(source, source, source, source, source, source, source, source, source).subscribe(to);
Assert.assertEquals(9, count.get());
to.assertValueCount(count.get());
to.assertNoErrors();
to.assertComplete();
}
@Test
public void testMainError() {
    // An upstream error with no emitted items terminates the sequence
    // with that error and no values.
    Observable.<Integer>error(new TestException()).concatMapEager(toJust).subscribe(to);
    to.assertNoValues();
    to.assertError(TestException.class);
    to.assertNotComplete();
}
@SuppressWarnings("unchecked")
@Test
public void testInnerError() {
// TODO verify: concatMapEager subscribes first then consumes the sources is okay
PublishSubject<Integer> ps = PublishSubject.create();
Observable.concatArrayEager(Observable.just(1), ps)
.subscribe(to);
ps.onError(new TestException());
to.assertValue(1);
to.assertError(TestException.class);
to.assertNotComplete();
}
@SuppressWarnings("unchecked")
@Test
public void testInnerEmpty() {
Observable.concatArrayEager(Observable.empty(), Observable.empty()).subscribe(to);
to.assertNoValues();
to.assertNoErrors();
to.assertComplete();
}
@Test
public void testMapperThrows() {
    // A mapper that throws must surface the exception to the observer
    // as an onError, with no values emitted.
    Observable.just(1).concatMapEager(new Function<Integer, Observable<Integer>>() {
        @Override
        public Observable<Integer> apply(Integer t) {
            throw new TestException();
        }
    }).subscribe(to);
    to.assertNoValues();
    to.assertNotComplete();
    to.assertError(TestException.class);
}
// maxConcurrency = 0 must be rejected eagerly at assembly time.
@Test(expected = IllegalArgumentException.class)
public void testInvalidMaxConcurrent() {
    Observable.just(1).concatMapEager(toJust, 0, Observable.bufferSize());
}
// prefetch (capacity hint) = 0 must be rejected eagerly at assembly time.
@Test(expected = IllegalArgumentException.class)
public void testInvalidCapacityHint() {
    Observable.just(1).concatMapEager(toJust, Observable.bufferSize(), 0);
}
@Test
// @SuppressWarnings("unchecked")
@Ignore("Observable doesn't do backpressure")
public void testBackpressure() {
// Observable.concatArrayEager(Observable.just(1), Observable.just(1)).subscribe(ts);
//
// ts.assertNoErrors();
// ts.assertNoValues();
// ts.assertNotComplete();
//
// ts.request(1);
// ts.assertValue(1);
// ts.assertNoErrors();
// ts.assertNotComplete();
//
// ts.request(1);
// ts.assertValues(1, 1);
// ts.assertNoErrors();
// ts.assertComplete();
}
@Test
public void testAsynchronousRun() {
Observable.range(1, 2).concatMapEager(new Function<Integer, Observable<Integer>>() {
@Override
public Observable<Integer> apply(Integer t) {
return Observable.range(1, 1000).subscribeOn(Schedulers.computation());
}
}).observeOn(Schedulers.newThread()).subscribe(to);
to.awaitTerminalEvent(5, TimeUnit.SECONDS);
to.assertNoErrors();
to.assertValueCount(2000);
}
@Test
public void testReentrantWork() {
final PublishSubject<Integer> subject = PublishSubject.create();
final AtomicBoolean once = new AtomicBoolean();
subject.concatMapEager(new Function<Integer, Observable<Integer>>() {
@Override
public Observable<Integer> apply(Integer t) {
return Observable.just(t);
}
})
.doOnNext(new Consumer<Integer>() {
@Override
public void accept(Integer t) {
if (once.compareAndSet(false, true)) {
subject.onNext(2);
}
}
})
.subscribe(to);
subject.onNext(1);
to.assertNoErrors();
to.assertNotComplete();
to.assertValues(1, 2);
}
@Test
@Ignore("Observable doesn't do backpressure so it can't bound its input count")
public void testPrefetchIsBounded() {
final AtomicInteger count = new AtomicInteger();
TestObserver<Object> to = TestObserver.create();
Observable.just(1).concatMapEager(new Function<Integer, Observable<Integer>>() {
@Override
public Observable<Integer> apply(Integer t) {
return Observable.range(1, Observable.bufferSize() * 2)
.doOnNext(new Consumer<Integer>() {
@Override
public void accept(Integer t) {
count.getAndIncrement();
}
}).hide();
}
}).subscribe(to);
to.assertNoErrors();
to.assertNoValues();
to.assertNotComplete();
Assert.assertEquals(Observable.bufferSize(), count.get());
}
@Test
@Ignore("Null values are not allowed in RS")
public void testInnerNull() {
Observable.just(1).concatMapEager(new Function<Integer, Observable<Integer>>() {
@Override
public Observable<Integer> apply(Integer t) {
return Observable.just(null);
}
}).subscribe(to);
to.assertNoErrors();
to.assertComplete();
to.assertValue(null);
}
@Test
@Ignore("Observable doesn't do backpressure")
public void testMaxConcurrent5() {
// final List<Long> requests = new ArrayList<Long>();
// Observable.range(1, 100).doOnRequest(new LongConsumer() {
// @Override
// public void accept(long reqCount) {
// requests.add(reqCount);
// }
// }).concatMapEager(toJust, 5, Observable.bufferSize()).subscribe(ts);
//
// ts.assertNoErrors();
// ts.assertValueCount(100);
// ts.assertComplete();
//
// Assert.assertEquals(5, (long) requests.get(0));
// Assert.assertEquals(1, (long) requests.get(1));
// Assert.assertEquals(1, (long) requests.get(2));
// Assert.assertEquals(1, (long) requests.get(3));
// Assert.assertEquals(1, (long) requests.get(4));
// Assert.assertEquals(1, (long) requests.get(5));
}
@SuppressWarnings("unchecked")
@Test
@Ignore("Currently there are no 2-9 argument variants, use concatArrayEager()")
public void many() throws Exception {
for (int i = 2; i < 10; i++) {
Class<?>[] clazz = new Class[i];
Arrays.fill(clazz, Observable.class);
Observable<Integer>[] obs = new Observable[i];
Arrays.fill(obs, Observable.just(1));
Integer[] expected = new Integer[i];
Arrays.fill(expected, 1);
Method m = Observable.class.getMethod("concatEager", clazz);
TestObserver<Integer> to = TestObserver.create();
((Observable<Integer>)m.invoke(null, (Object[])obs)).subscribe(to);
to.assertValues(expected);
to.assertNoErrors();
to.assertComplete();
}
}
@SuppressWarnings("unchecked")
@Test
public void capacityHint() {
    // Minimal maxConcurrency and prefetch (1, 1) must still deliver
    // every item from every source, in order.
    Observable<Integer> source = Observable.just(1);
    TestObserver<Integer> to = TestObserver.create();
    Observable.concatEager(Arrays.asList(source, source, source), 1, 1).subscribe(to);
    to.assertValues(1, 1, 1);
    to.assertNoErrors();
    to.assertComplete();
}
// NOTE(review): the method name `Observable` shadows the class name and
// breaks the camelCase test-naming convention used elsewhere in this
// file; consider renaming (JUnit discovers tests by annotation, so a
// rename would be safe).
@Test
public void Observable() {
    // concatEager over an Observable of sources (rather than an
    // Iterable) must concatenate all of them.
    Observable<Integer> source = Observable.just(1);
    TestObserver<Integer> to = TestObserver.create();
    Observable.concatEager(Observable.just(source, source, source)).subscribe(to);
    to.assertValues(1, 1, 1);
    to.assertNoErrors();
    to.assertComplete();
}
@Test
public void ObservableCapacityHint() {
    // Same as Observable() above but with minimal maxConcurrency and
    // prefetch (1, 1); all items must still be delivered.
    Observable<Integer> source = Observable.just(1);
    TestObserver<Integer> to = TestObserver.create();
    Observable.concatEager(Observable.just(source, source, source), 1, 1).subscribe(to);
    to.assertValues(1, 1, 1);
    to.assertNoErrors();
    to.assertComplete();
}
/**
 * concatEager must reject a non-positive prefetch with an
 * IllegalArgumentException carrying a descriptive message.
 */
@SuppressWarnings("unchecked")
@Test
public void badCapacityHint() throws Exception {
    Observable<Integer> source = Observable.just(1);
    try {
        Observable.concatEager(Arrays.asList(source, source, source), 1, -99);
        // Bug fix: previously the test passed silently when no exception
        // was thrown; fail explicitly in that case.
        Assert.fail("Expected IllegalArgumentException was not thrown");
    } catch (IllegalArgumentException ex) {
        assertEquals("prefetch > 0 required but it was -99", ex.getMessage());
    }
}
/**
 * concatMapEager must reject a non-positive prefetch with an
 * IllegalArgumentException carrying a descriptive message.
 */
@SuppressWarnings({ "unchecked", "rawtypes" })
@Test
public void mappingBadCapacityHint() throws Exception {
    Observable<Integer> source = Observable.just(1);
    try {
        Observable.just(source, source, source).concatMapEager((Function)Functions.identity(), 10, -99);
        // Bug fix: previously the test passed silently when no exception
        // was thrown; fail explicitly in that case.
        Assert.fail("Expected IllegalArgumentException was not thrown");
    } catch (IllegalArgumentException ex) {
        assertEquals("prefetch > 0 required but it was -99", ex.getMessage());
    }
}
@SuppressWarnings("unchecked")
@Test
public void concatEagerIterable() {
Observable.concatEager(Arrays.asList(Observable.just(1), Observable.just(2)))
.test()
.assertResult(1, 2);
}
@Test
public void dispose() {
TestHelper.checkDisposed(Observable.just(1).hide().concatMapEager(new Function<Integer, ObservableSource<Integer>>() {
@Override
public ObservableSource<Integer> apply(Integer v) throws Exception {
return Observable.range(1, 2);
}
}));
}
@Test
public void empty() {
Observable.<Integer>empty().hide().concatMapEager(new Function<Integer, ObservableSource<Integer>>() {
@Override
public ObservableSource<Integer> apply(Integer v) throws Exception {
return Observable.range(1, 2);
}
})
.test()
.assertResult();
}
@Test
public void innerError() {
Observable.<Integer>just(1).hide().concatMapEager(new Function<Integer, ObservableSource<Integer>>() {
@Override
public ObservableSource<Integer> apply(Integer v) throws Exception {
return Observable.error(new TestException());
}
})
.test()
.assertFailure(TestException.class);
}
@Test
public void innerErrorMaxConcurrency() {
Observable.<Integer>just(1).hide().concatMapEager(new Function<Integer, ObservableSource<Integer>>() {
@Override
public ObservableSource<Integer> apply(Integer v) throws Exception {
return Observable.error(new TestException());
}
}, 1, 128)
.test()
.assertFailure(TestException.class);
}
@Test
public void innerCallableThrows() {
Observable.<Integer>just(1).hide().concatMapEager(new Function<Integer, ObservableSource<Integer>>() {
@Override
public ObservableSource<Integer> apply(Integer v) throws Exception {
return Observable.fromCallable(new Callable<Integer>() {
@Override
public Integer call() throws Exception {
throw new TestException();
}
});
}
})
.test()
.assertFailure(TestException.class);
}
@Test
public void innerOuterRace() {
for (int i = 0; i < TestHelper.RACE_DEFAULT_LOOPS; i++) {
List<Throwable> errors = TestHelper.trackPluginErrors();
try {
final PublishSubject<Integer> ps1 = PublishSubject.create();
final PublishSubject<Integer> ps2 = PublishSubject.create();
TestObserver<Integer> to = ps1.concatMapEager(new Function<Integer, ObservableSource<Integer>>() {
@Override
public ObservableSource<Integer> apply(Integer v) throws Exception {
return ps2;
}
}).test();
final TestException ex1 = new TestException();
final TestException ex2 = new TestException();
ps1.onNext(1);
Runnable r1 = new Runnable() {
@Override
public void run() {
ps1.onError(ex1);
}
};
Runnable r2 = new Runnable() {
@Override
public void run() {
ps2.onError(ex2);
}
};
TestHelper.race(r1, r2);
to.assertSubscribed().assertNoValues().assertNotComplete();
Throwable ex = to.errors().get(0);
if (ex instanceof CompositeException) {
List<Throwable> es = TestHelper.errorList(to);
TestHelper.assertError(es, 0, TestException.class);
TestHelper.assertError(es, 1, TestException.class);
} else {
to.assertError(TestException.class);
if (!errors.isEmpty()) {
TestHelper.assertUndeliverable(errors, 0, TestException.class);
}
}
} finally {
RxJavaPlugins.reset();
}
}
}
@Test
public void nextCancelRace() {
for (int i = 0; i < TestHelper.RACE_DEFAULT_LOOPS; i++) {
final PublishSubject<Integer> ps1 = PublishSubject.create();
final TestObserver<Integer> to = ps1.concatMapEager(new Function<Integer, ObservableSource<Integer>>() {
@Override
public ObservableSource<Integer> apply(Integer v) throws Exception {
return Observable.never();
}
}).test();
Runnable r1 = new Runnable() {
@Override
public void run() {
ps1.onNext(1);
}
};
Runnable r2 = new Runnable() {
@Override
public void run() {
to.cancel();
}
};
TestHelper.race(r1, r2);
to.assertEmpty();
}
}
@Test
public void mapperCancels() {
final TestObserver<Integer> to = new TestObserver<Integer>();
Observable.just(1).hide()
.concatMapEager(new Function<Integer, ObservableSource<Integer>>() {
@Override
public ObservableSource<Integer> apply(Integer v) throws Exception {
to.cancel();
return Observable.never();
}
}, 1, 128)
.subscribe(to);
to.assertEmpty();
}
@Test
public void innerErrorFused() {
Observable.<Integer>just(1).hide().concatMapEager(new Function<Integer, ObservableSource<Integer>>() {
@Override
public ObservableSource<Integer> apply(Integer v) throws Exception {
return Observable.range(1, 2).map(new Function<Integer, Integer>() {
@Override
public Integer apply(Integer v) throws Exception {
throw new TestException();
}
});
}
})
.test()
.assertFailure(TestException.class);
}
@Test
public void innerErrorAfterPoll() {
final UnicastSubject<Integer> us = UnicastSubject.create();
us.onNext(1);
TestObserver<Integer> to = new TestObserver<Integer>() {
@Override
public void onNext(Integer t) {
super.onNext(t);
us.onError(new TestException());
}
};
Observable.<Integer>just(1).hide()
.concatMapEager(new Function<Integer, ObservableSource<Integer>>() {
@Override
public ObservableSource<Integer> apply(Integer v) throws Exception {
return us;
}
}, 1, 128)
.subscribe(to);
to
.assertFailure(TestException.class, 1);
}
@Test
public void fuseAndTake() {
UnicastSubject<Integer> us = UnicastSubject.create();
us.onNext(1);
us.onComplete();
us.concatMapEager(new Function<Integer, ObservableSource<Integer>>() {
@Override
public ObservableSource<Integer> apply(Integer v) throws Exception {
return Observable.just(1);
}
})
.take(1)
.test()
.assertResult(1);
}
@Test
public void doubleOnSubscribe() {
TestHelper.checkDoubleOnSubscribeObservable(new Function<Observable<Object>, ObservableSource<Object>>() {
@Override
public ObservableSource<Object> apply(Observable<Object> o) throws Exception {
return o.concatMapEager(new Function<Object, ObservableSource<Object>>() {
@Override
public ObservableSource<Object> apply(Object v) throws Exception {
return Observable.just(v);
}
});
}
});
}
@Test
public void oneDelayed() {
Observable.just(1, 2, 3, 4, 5)
.concatMapEager(new Function<Integer, ObservableSource<Integer>>() {
@Override
public ObservableSource<Integer> apply(Integer i) throws Exception {
return i == 3 ? Observable.just(i) : Observable
.just(i)
.delay(1, TimeUnit.MILLISECONDS, Schedulers.io());
}
})
.observeOn(Schedulers.io())
.test()
.awaitDone(5, TimeUnit.SECONDS)
.assertResult(1, 2, 3, 4, 5)
;
}
@Test
@SuppressWarnings("unchecked")
public void maxConcurrencyOf2() {
List<Integer>[] list = new ArrayList[100];
for (int i = 0; i < 100; i++) {
List<Integer> lst = new ArrayList<Integer>();
list[i] = lst;
for (int k = 1; k <= 10; k++) {
lst.add((i) * 10 + k);
}
}
Observable.range(1, 1000)
.buffer(10)
.concatMapEager(new Function<List<Integer>, ObservableSource<List<Integer>>>() {
@Override
public ObservableSource<List<Integer>> apply(List<Integer> v)
throws Exception {
return Observable.just(v)
.subscribeOn(Schedulers.io())
.doOnNext(new Consumer<List<Integer>>() {
@Override
public void accept(List<Integer> v)
throws Exception {
Thread.sleep(new Random().nextInt(20));
}
});
}
}
, 2, 3)
.test()
.awaitDone(5, TimeUnit.SECONDS)
.assertResult(list);
}
}
|
package ch15;
import javax.swing.*;
import java.awt.*;
import java.awt.geom.Line2D;
import java.util.Random;
import static java.awt.Color.*;
import static java.awt.BasicStroke.*;
/**
* Project: ch15
* Date: 2/27/2018
*
* @author <NAME>
*/
public class ex152a extends JApplet
{
private final static int COMPONENT_WIDTH = 500;
private final static int COMPONENT_HEIGHT = 500;
private final static int MAX_THICKNESS = 50;
private final static Random RAND = new Random();
private final static Color[] COLOR = {WHITE, LIGHT_GRAY, GRAY, DARK_GRAY, BLACK, RED,
PINK, ORANGE, YELLOW, GREEN, MAGENTA, CYAN, BLUE};
/** Sizes the applet to its fixed 500x500 drawing area before painting. */
@Override
public void init()
{
    setSize(COMPONENT_WIDTH, COMPONENT_HEIGHT);
}
/**
 * Paints the applet by stacking horizontal bands from top to bottom.
 * Each band gets a random color from COLOR, a random thickness of
 * 1..MAX_THICKNESS (clipped so it never spills past the bottom edge)
 * and a random length starting at the left edge.
 */
@Override
public void paint(Graphics g)
{
    super.paint(g);
    final Graphics2D g2d = (Graphics2D) g;

    double y = 0.0;
    while (y < getHeight())
    {
        int remaining = (int) (getHeight() - y);
        int randomThickness = RAND.nextInt(MAX_THICKNESS) + 1;
        // Clip the stroke to the space that is left below the pointer.
        int thickness = Math.min(randomThickness, remaining);

        g2d.setColor(COLOR[RAND.nextInt(COLOR.length)]);
        g2d.setStroke(new BasicStroke(thickness, CAP_ROUND, JOIN_ROUND));

        // Draw through the vertical middle of the band, then advance
        // past its lower half.
        y += thickness / 2.0;
        g2d.draw(new Line2D.Double(0, y, RAND.nextInt(getWidth()), y));
        y += thickness / 2.0;
    }
}
} |
# Shared flask-praetorian instance used for token-based authentication.
# Created unbound here; presumably attached to the Flask app elsewhere
# via guard.init_app(app) -- TODO confirm against the app factory.
from flask_praetorian import Praetorian
guard = Praetorian()
|
/// <summary>
/// Returns the sum of every element in a two-dimensional array.
/// </summary>
/// <param name="arr">The 2-D array to total.</param>
/// <returns>The sum of all elements (0 for an empty array).</returns>
static int SumArray(int[,] arr)
{
    int total = 0;

    // foreach over a multidimensional array visits every element in
    // row-major order, so no explicit index bookkeeping is needed.
    foreach (int value in arr)
        total += value;

    return total;
}
import ZubhubAPI from '../../api';
import { toast } from 'react-toastify';
const API = new ZubhubAPI();
/**
* @function setAuthUser
* @author <NAME> <<EMAIL>>
*
* @todo - describe function's signature
*/
/**
 * Action creator: stores the authenticated user's data in the redux store.
 *
 * @param {Object} auth_user - auth payload to store (token, username, ...).
 * @returns {Function} thunk that dispatches SET_AUTH_USER.
 */
export const setAuthUser = auth_user => dispatch => {
  dispatch({ type: 'SET_AUTH_USER', payload: auth_user });
};
/**
* @function login
* @author <NAME> <<EMAIL>>
*
* @todo - describe function's signature
*/
/**
 * Thunk: logs the user in, stores the returned token in the store,
 * then navigates to the profile page.
 *
 * @param {Object} args - { values, history }.
 * @returns {Function} thunk resolving once navigation has been triggered;
 *   rejects with the serialized response when no token is returned.
 */
export const login = args => {
  return async dispatch => {
    const res = await API.login(args.values);
    if (!res.key) {
      throw new Error(JSON.stringify(res));
    }
    dispatch({ type: 'SET_AUTH_USER', payload: { token: res.key } });
    return args.history.push('/profile');
  };
};
/**
* @function logout
* @author <NAME> <<EMAIL>>
*
* @todo - describe function's signature
*/
/**
 * Thunk: logs the user out, clears the auth slice of the store and
 * navigates home; shows a toast warning if any step fails.
 *
 * @param {Object} args - { token, history, t }.
 * @returns {Function} thunk.
 */
export const logout = args => {
  return async dispatch => {
    try {
      await API.logout(args.token);
      dispatch({
        type: 'SET_AUTH_USER',
        payload: {
          token: null,
          username: null,
          id: null,
          avatar: null,
          members_count: null,
          tags: [],
        },
      });
      args.history.push('/');
    } catch (e) {
      toast.warning(args.t('pageWrapper.errors.logoutFailed'));
    }
  };
};
/**
* @function getAuthUser
* @author <NAME> <<EMAIL>>
*
* @todo - describe function's signature
*/
export const getAuthUser = props => {
  return dispatch => {
    return API.getAuthUser(props.auth.token)
      .then(res => {
        if (!res.id) {
          // Token no longer maps to a usable account: force a logout,
          // then send the user to the account-status page once the
          // logout thunk's promise resolves.
          dispatch(
            logout({
              token: props.auth.token,
              history: props.history,
              t: props.t,
            }),
          ).then(() => {
            props.history.push('/account-status');
          });
          // Thrown synchronously while the redirect above is still
          // pending; caught below and surfaced as a toast warning.
          throw new Error(props.t('pageWrapper.errors.unexpected'));
        } else {
          // Merge the fetched profile fields into the existing auth state.
          dispatch({
            type: 'SET_AUTH_USER',
            payload: {
              ...props.auth,
              username: res.username,
              id: res.id,
              avatar: res.avatar,
              members_count: res.members_count,
              tags: res.tags,
            },
          });
        }
        return res;
      })
      .catch(error => toast.warning(error.message));
  };
};
/**
 * @function AccountStatus
 * Thunk: fetches the account status for the given token; shows a toast
 * warning when the request fails.
 * NOTE(review): named in PascalCase unlike the other action creators in
 * this file; renaming would require updating importers.
 *
 * @param {Object} args - { token, t }.
 */
export const AccountStatus = args => {
  return () => {
    return API.getAccountStatus(args.token).catch(() => {
      toast.warning(args.t('pageWrapper.errors.unexpected'));
    });
  };
};
/**
* @function signup
* @author <NAME> <<EMAIL>>
*
* @todo - describe function's signature
*/
/**
 * Thunk: registers a new user, stores the returned token in the store,
 * then navigates to the profile page.
 *
 * @param {Object} args - { values, history }.
 * @returns {Function} thunk resolving once navigation has been triggered;
 *   rejects with the serialized response when no token is returned.
 */
export const signup = args => {
  return async dispatch => {
    const res = await API.signup(args.values);
    if (!res.key) {
      throw new Error(JSON.stringify(res));
    }
    dispatch({ type: 'SET_AUTH_USER', payload: { token: res.key } });
    return args.history.push('/profile');
  };
};
/**
* @function sendEmailConfirmation
* @author <NAME> <<EMAIL>>
*
* @todo - describe function's signature
*/
/**
 * Thunk: confirms the user's email with the given key, shows a success
 * toast and redirects home after a short delay.
 *
 * @param {Object} args - { key, history, t }.
 * @returns {Function} thunk; rejects with the server detail on failure.
 */
export const sendEmailConfirmation = args => {
  return async () => {
    const res = await API.sendEmailConfirmation(args.key);
    if (res.detail !== 'ok') {
      throw new Error(res.detail);
    }
    toast.success(args.t('emailConfirm.toastSuccess'));
    setTimeout(() => {
      args.history.push('/');
    }, 4000);
  };
};
/**
* @function sendPhoneConfirmation
* @author <NAME> <<EMAIL>>
*
* @todo - describe function's signature
*/
export const sendPhoneConfirmation = args => {
return () => {
return API.sendPhoneConfirmation(args.key).then(res => {
if (res.detail !== 'ok') {
throw new Error(res.detail);
} else {
toast.success(args.t('phoneConfirm.toastSuccess'));
setTimeout(() => {
args.history.push('/');
}, 4000);
}
});
};
};
/**
* @function sendPasswordResetLink
* @author <NAME> <<EMAIL>>
*
* @todo - describe function's signature
*/
export const sendPasswordResetLink = args => {
return () => {
return API.sendPasswordResetLink(args.email).then(res => {
if (res.detail !== 'ok') {
throw new Error(JSON.stringify(res));
} else {
toast.success(args.t('passwordReset.toastSuccess'));
setTimeout(() => {
args.history.push('/');
}, 4000);
}
});
};
};
/**
* @function passwordResetConfirm
* @author <NAME> <<EMAIL>>
*
* @todo - describe function's signature
*/
export const passwordResetConfirm = args => {
return () => {
return API.passwordResetConfirm(args).then(res => {
if (res.detail !== 'ok') {
throw new Error(JSON.stringify(res));
} else {
toast.success(args.t('passwordResetConfirm.toastSuccess'));
setTimeout(() => {
args.history.push('/login');
}, 4000);
}
});
};
};
/**
* @function getLocations
* @author <NAME> <<EMAIL>>
*
* @todo - describe function's signature
*/
export const getLocations = args => {
  return () => {
    return API.getLocations()
      .then(res => {
        // A non-empty array whose entries carry `name` is treated as a
        // valid location list.
        if (Array.isArray(res) && res.length > 0 && res[0].name) {
          return { locations: res };
        } else {
          // Otherwise the response is assumed to be a field -> errors
          // map; flatten it into one newline-separated message.
          res = Object.keys(res)
            .map(key => res[key])
            .join('\n');
          throw new Error(res);
        }
      })
      .catch(error => {
        // "Unexpected ..." messages indicate a non-JSON (server error)
        // response; show a generic message instead of raw parser output.
        if (error.message.startsWith('Unexpected')) {
          return {
            error: args.t('signup.errors.unexpected'),
          };
        } else {
          return { error: error.message };
        }
      });
  };
};
/**
* @function deleteAccount
* @author <NAME> <<EMAIL>>
*
* @todo - describe function's signature
*/
export const deleteAccount = args => {
  return () => {
    return API.deleteAccount(args)
      .then(res => {
        if (res.detail !== 'ok') {
          throw new Error(res.detail);
        } else {
          toast.success(args.t('profile.delete.toastSuccess'));
          // Clear local auth state and redirect; `logout` is expected
          // to be supplied by the caller in args -- TODO confirm call
          // sites pass it.
          args.logout(args);
        }
      })
      .catch(error => {
        // "Unexpected ..." indicates a non-JSON response; map it to a
        // generic localized dialog error.
        if (error.message.startsWith('Unexpected')) {
          return {
            dialog_error: args.t('profile.delete.errors.unexpected'),
          };
        } else {
          return { dialog_error: error.message };
        }
      });
  };
};
/**
* @function getSignature
* @author <NAME> <<EMAIL>>
*
* @todo - describe function's signature
*/
/**
 * Thunk: requests an upload signature for the given arguments; shows a
 * toast warning (localized via args.t) when the server returns no
 * signature or the request fails.
 *
 * @param {Object} args - signature request payload plus `t` (translator).
 * @returns {Function} thunk resolving with the signature response, or
 *   undefined after warning on failure.
 */
export const getSignature = args => {
  return () => {
    // Bug fix: previously `delete args.t` mutated the caller's object.
    // Rest-destructuring extracts `t` and builds the API payload without
    // touching `args`.
    const { t, ...payload } = args;
    return API.getSignature(payload)
      .then(res => {
        if (!res.signature) {
          throw new Error();
        } else {
          return res;
        }
      })
      .catch(() => {
        toast.warning(t('createProject.errors.unexpected'));
      });
  };
};
|
// Generated from /home/clustfuzz/Documents/LLVM/KLEE-KQueryParser/langRef/kquery.g4 by ANTLR 4.8
import org.antlr.v4.runtime.Lexer;
import org.antlr.v4.runtime.CharStream;
import org.antlr.v4.runtime.Token;
import org.antlr.v4.runtime.TokenStream;
import org.antlr.v4.runtime.*;
import org.antlr.v4.runtime.atn.*;
import org.antlr.v4.runtime.dfa.DFA;
import org.antlr.v4.runtime.misc.*;
@SuppressWarnings({"all", "warnings", "unchecked", "unused", "cast"})
public class kqueryLexer extends Lexer {
static { RuntimeMetaData.checkVersion("4.8", RuntimeMetaData.VERSION); }
protected static final DFA[] _decisionToDFA;
protected static final PredictionContextCache _sharedContextCache =
new PredictionContextCache();
public static final int
T__0=1, T__1=2, T__2=3, T__3=4, T__4=5, T__5=6, T__6=7, T__7=8, T__8=9,
T__9=10, T__10=11, T__11=12, T__12=13, T__13=14, T__14=15, T__15=16, T__16=17,
T__17=18, T__18=19, T__19=20, T__20=21, T__21=22, T__22=23, T__23=24,
T__24=25, T__25=26, T__26=27, T__27=28, T__28=29, T__29=30, T__30=31,
T__31=32, T__32=33, T__33=34, T__34=35, T__35=36, T__36=37, T__37=38,
T__38=39, T__39=40, T__40=41, T__41=42, T__42=43, T__43=44, T__44=45,
T__45=46, T__46=47, T__47=48, T__48=49, TYPE=50, LeftParen=51, RightParen=52,
LeftBracket=53, RightBracket=54, LeftBrace=55, RightBrace=56, HexExtend=57,
OctalExtend=58, BinExtend=59, Digit=60, StartLetter=61, AlphaNumericSpl=62;
public static String[] channelNames = {
"DEFAULT_TOKEN_CHANNEL", "HIDDEN"
};
public static String[] modeNames = {
"DEFAULT_MODE"
};
private static String[] makeRuleNames() {
return new String[] {
"T__0", "T__1", "T__2", "T__3", "T__4", "T__5", "T__6", "T__7", "T__8",
"T__9", "T__10", "T__11", "T__12", "T__13", "T__14", "T__15", "T__16",
"T__17", "T__18", "T__19", "T__20", "T__21", "T__22", "T__23", "T__24",
"T__25", "T__26", "T__27", "T__28", "T__29", "T__30", "T__31", "T__32",
"T__33", "T__34", "T__35", "T__36", "T__37", "T__38", "T__39", "T__40",
"T__41", "T__42", "T__43", "T__44", "T__45", "T__46", "T__47", "T__48",
"TYPE", "LeftParen", "RightParen", "LeftBracket", "RightBracket", "LeftBrace",
"RightBrace", "HexExtend", "OctalExtend", "BinExtend", "Digit", "StartLetter",
"AlphaNumericSpl"
};
}
public static final String[] ruleNames = makeRuleNames();
private static String[] makeLiteralNames() {
return new String[] {
null, "'array'", "':'", "'->'", "'='", "'symbolic'", "'Not'", "'Shl'",
"'LShr'", "'AShr'", "'Concat'", "'Extract'", "'ZExt'", "'SExt'", "'Read'",
"'Select'", "'Neg'", "'ReadLSB'", "'ReadMSB'", "'And'", "'Or'", "'Xor'",
"'Eq'", "'Ne'", "'Ult'", "'Ule'", "'Ugt'", "'Uge'", "'Slt'", "'Sle'",
"'Sgt'", "'Sge'", "'Add'", "'Sub'", "'Mul'", "'UDiv'", "'URem'", "'SDiv'",
"'SRem'", "','", "'true'", "'false'", "'+'", "'-'", "'0b'", "'0o'", "'0x'",
"'fp'", "'i'", "'@'", "'w(0-9)+'", "'('", "')'", "'['", "']'", "'{'",
"'}'"
};
}
private static final String[] _LITERAL_NAMES = makeLiteralNames();
private static String[] makeSymbolicNames() {
return new String[] {
null, null, null, null, null, null, null, null, null, null, null, null,
null, null, null, null, null, null, null, null, null, null, null, null,
null, null, null, null, null, null, null, null, null, null, null, null,
null, null, null, null, null, null, null, null, null, null, null, null,
null, null, "TYPE", "LeftParen", "RightParen", "LeftBracket", "RightBracket",
"LeftBrace", "RightBrace", "HexExtend", "OctalExtend", "BinExtend", "Digit",
"StartLetter", "AlphaNumericSpl"
};
}
private static final String[] _SYMBOLIC_NAMES = makeSymbolicNames();
// Combined literal + symbolic name lookup used by ANTLR tooling and error messages.
public static final Vocabulary VOCABULARY = new VocabularyImpl(_LITERAL_NAMES, _SYMBOLIC_NAMES);

/**
 * @deprecated Use {@link #VOCABULARY} instead.
 */
@Deprecated
public static final String[] tokenNames;
static {
    // Legacy flat token-name table kept for backward compatibility: prefer
    // the literal name, fall back to the symbolic name, and finally use
    // "<INVALID>" for token types with neither.
    tokenNames = new String[_SYMBOLIC_NAMES.length];
    for (int i = 0; i < tokenNames.length; i++) {
        tokenNames[i] = VOCABULARY.getLiteralName(i);
        if (tokenNames[i] == null) {
            tokenNames[i] = VOCABULARY.getSymbolicName(i);
        }
        if (tokenNames[i] == null) {
            tokenNames[i] = "<INVALID>";
        }
    }
}
@Override
@Deprecated
public String[] getTokenNames() {
    return tokenNames;
}

@Override
public Vocabulary getVocabulary() {
    return VOCABULARY;
}

// Builds a lexer over the given character stream; tokenization is driven by
// the ATN simulator over the deserialized grammar automaton (_ATN).
public kqueryLexer(CharStream input) {
    super(input);
    _interp = new LexerATNSimulator(this,_ATN,_decisionToDFA,_sharedContextCache);
}

// Boilerplate accessors required by the ANTLR Recognizer/Lexer contract.
@Override
public String getGrammarFileName() { return "kquery.g4"; }

@Override
public String[] getRuleNames() { return ruleNames; }

@Override
public String getSerializedATN() { return _serializedATN; }

@Override
public String[] getChannelNames() { return channelNames; }

@Override
public String[] getModeNames() { return modeNames; }

@Override
public ATN getATN() { return _ATN; }
public static final String _serializedATN =
"\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2@\u0172\b\1\4\2\t"+
"\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13"+
"\t\13\4\f\t\f\4\r\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22"+
"\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31\t\31"+
"\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36\t\36\4\37\t\37\4 \t \4!"+
"\t!\4\"\t\"\4#\t#\4$\t$\4%\t%\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4"+
",\t,\4-\t-\4.\t.\4/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64\t"+
"\64\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\49\t9\4:\t:\4;\t;\4<\t<\4=\t="+
"\4>\t>\4?\t?\3\2\3\2\3\2\3\2\3\2\3\2\3\3\3\3\3\4\3\4\3\4\3\5\3\5\3\6\3"+
"\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\7\3\7\3\7\3\7\3\b\3\b\3\b\3\b\3\t\3\t"+
"\3\t\3\t\3\t\3\n\3\n\3\n\3\n\3\n\3\13\3\13\3\13\3\13\3\13\3\13\3\13\3"+
"\f\3\f\3\f\3\f\3\f\3\f\3\f\3\f\3\r\3\r\3\r\3\r\3\r\3\16\3\16\3\16\3\16"+
"\3\16\3\17\3\17\3\17\3\17\3\17\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\21"+
"\3\21\3\21\3\21\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\23\3\23\3\23"+
"\3\23\3\23\3\23\3\23\3\23\3\24\3\24\3\24\3\24\3\25\3\25\3\25\3\26\3\26"+
"\3\26\3\26\3\27\3\27\3\27\3\30\3\30\3\30\3\31\3\31\3\31\3\31\3\32\3\32"+
"\3\32\3\32\3\33\3\33\3\33\3\33\3\34\3\34\3\34\3\34\3\35\3\35\3\35\3\35"+
"\3\36\3\36\3\36\3\36\3\37\3\37\3\37\3\37\3 \3 \3 \3 \3!\3!\3!\3!\3\"\3"+
"\"\3\"\3\"\3#\3#\3#\3#\3$\3$\3$\3$\3$\3%\3%\3%\3%\3%\3&\3&\3&\3&\3&\3"+
"\'\3\'\3\'\3\'\3\'\3(\3(\3)\3)\3)\3)\3)\3*\3*\3*\3*\3*\3*\3+\3+\3,\3,"+
"\3-\3-\3-\3.\3.\3.\3/\3/\3/\3\60\3\60\3\60\3\61\3\61\3\62\3\62\3\63\3"+
"\63\3\63\3\63\3\63\3\63\3\63\3\63\3\64\3\64\3\65\3\65\3\66\3\66\3\67\3"+
"\67\38\38\39\39\3:\3:\3;\3;\3<\3<\3=\3=\3>\3>\3?\3?\2\2@\3\3\5\4\7\5\t"+
"\6\13\7\r\b\17\t\21\n\23\13\25\f\27\r\31\16\33\17\35\20\37\21!\22#\23"+
"%\24\'\25)\26+\27-\30/\31\61\32\63\33\65\34\67\359\36;\37= ?!A\"C#E$G"+
"%I&K\'M(O)Q*S+U,W-Y.[/]\60_\61a\62c\63e\64g\65i\66k\67m8o9q:s;u<w=y>{"+
"?}@\3\2\b\6\2\62;CHaach\4\2\629aa\4\2\62\63aa\3\2\62;\5\2C\\aac|\7\2\60"+
"\60\62;C\\aac|\2\u0171\2\3\3\2\2\2\2\5\3\2\2\2\2\7\3\2\2\2\2\t\3\2\2\2"+
"\2\13\3\2\2\2\2\r\3\2\2\2\2\17\3\2\2\2\2\21\3\2\2\2\2\23\3\2\2\2\2\25"+
"\3\2\2\2\2\27\3\2\2\2\2\31\3\2\2\2\2\33\3\2\2\2\2\35\3\2\2\2\2\37\3\2"+
"\2\2\2!\3\2\2\2\2#\3\2\2\2\2%\3\2\2\2\2\'\3\2\2\2\2)\3\2\2\2\2+\3\2\2"+
"\2\2-\3\2\2\2\2/\3\2\2\2\2\61\3\2\2\2\2\63\3\2\2\2\2\65\3\2\2\2\2\67\3"+
"\2\2\2\29\3\2\2\2\2;\3\2\2\2\2=\3\2\2\2\2?\3\2\2\2\2A\3\2\2\2\2C\3\2\2"+
"\2\2E\3\2\2\2\2G\3\2\2\2\2I\3\2\2\2\2K\3\2\2\2\2M\3\2\2\2\2O\3\2\2\2\2"+
"Q\3\2\2\2\2S\3\2\2\2\2U\3\2\2\2\2W\3\2\2\2\2Y\3\2\2\2\2[\3\2\2\2\2]\3"+
"\2\2\2\2_\3\2\2\2\2a\3\2\2\2\2c\3\2\2\2\2e\3\2\2\2\2g\3\2\2\2\2i\3\2\2"+
"\2\2k\3\2\2\2\2m\3\2\2\2\2o\3\2\2\2\2q\3\2\2\2\2s\3\2\2\2\2u\3\2\2\2\2"+
"w\3\2\2\2\2y\3\2\2\2\2{\3\2\2\2\2}\3\2\2\2\3\177\3\2\2\2\5\u0085\3\2\2"+
"\2\7\u0087\3\2\2\2\t\u008a\3\2\2\2\13\u008c\3\2\2\2\r\u0095\3\2\2\2\17"+
"\u0099\3\2\2\2\21\u009d\3\2\2\2\23\u00a2\3\2\2\2\25\u00a7\3\2\2\2\27\u00ae"+
"\3\2\2\2\31\u00b6\3\2\2\2\33\u00bb\3\2\2\2\35\u00c0\3\2\2\2\37\u00c5\3"+
"\2\2\2!\u00cc\3\2\2\2#\u00d0\3\2\2\2%\u00d8\3\2\2\2\'\u00e0\3\2\2\2)\u00e4"+
"\3\2\2\2+\u00e7\3\2\2\2-\u00eb\3\2\2\2/\u00ee\3\2\2\2\61\u00f1\3\2\2\2"+
"\63\u00f5\3\2\2\2\65\u00f9\3\2\2\2\67\u00fd\3\2\2\29\u0101\3\2\2\2;\u0105"+
"\3\2\2\2=\u0109\3\2\2\2?\u010d\3\2\2\2A\u0111\3\2\2\2C\u0115\3\2\2\2E"+
"\u0119\3\2\2\2G\u011d\3\2\2\2I\u0122\3\2\2\2K\u0127\3\2\2\2M\u012c\3\2"+
"\2\2O\u0131\3\2\2\2Q\u0133\3\2\2\2S\u0138\3\2\2\2U\u013e\3\2\2\2W\u0140"+
"\3\2\2\2Y\u0142\3\2\2\2[\u0145\3\2\2\2]\u0148\3\2\2\2_\u014b\3\2\2\2a"+
"\u014e\3\2\2\2c\u0150\3\2\2\2e\u0152\3\2\2\2g\u015a\3\2\2\2i\u015c\3\2"+
"\2\2k\u015e\3\2\2\2m\u0160\3\2\2\2o\u0162\3\2\2\2q\u0164\3\2\2\2s\u0166"+
"\3\2\2\2u\u0168\3\2\2\2w\u016a\3\2\2\2y\u016c\3\2\2\2{\u016e\3\2\2\2}"+
"\u0170\3\2\2\2\177\u0080\7c\2\2\u0080\u0081\7t\2\2\u0081\u0082\7t\2\2"+
"\u0082\u0083\7c\2\2\u0083\u0084\7{\2\2\u0084\4\3\2\2\2\u0085\u0086\7<"+
"\2\2\u0086\6\3\2\2\2\u0087\u0088\7/\2\2\u0088\u0089\7@\2\2\u0089\b\3\2"+
"\2\2\u008a\u008b\7?\2\2\u008b\n\3\2\2\2\u008c\u008d\7u\2\2\u008d\u008e"+
"\7{\2\2\u008e\u008f\7o\2\2\u008f\u0090\7d\2\2\u0090\u0091\7q\2\2\u0091"+
"\u0092\7n\2\2\u0092\u0093\7k\2\2\u0093\u0094\7e\2\2\u0094\f\3\2\2\2\u0095"+
"\u0096\7P\2\2\u0096\u0097\7q\2\2\u0097\u0098\7v\2\2\u0098\16\3\2\2\2\u0099"+
"\u009a\7U\2\2\u009a\u009b\7j\2\2\u009b\u009c\7n\2\2\u009c\20\3\2\2\2\u009d"+
"\u009e\7N\2\2\u009e\u009f\7U\2\2\u009f\u00a0\7j\2\2\u00a0\u00a1\7t\2\2"+
"\u00a1\22\3\2\2\2\u00a2\u00a3\7C\2\2\u00a3\u00a4\7U\2\2\u00a4\u00a5\7"+
"j\2\2\u00a5\u00a6\7t\2\2\u00a6\24\3\2\2\2\u00a7\u00a8\7E\2\2\u00a8\u00a9"+
"\7q\2\2\u00a9\u00aa\7p\2\2\u00aa\u00ab\7e\2\2\u00ab\u00ac\7c\2\2\u00ac"+
"\u00ad\7v\2\2\u00ad\26\3\2\2\2\u00ae\u00af\7G\2\2\u00af\u00b0\7z\2\2\u00b0"+
"\u00b1\7v\2\2\u00b1\u00b2\7t\2\2\u00b2\u00b3\7c\2\2\u00b3\u00b4\7e\2\2"+
"\u00b4\u00b5\7v\2\2\u00b5\30\3\2\2\2\u00b6\u00b7\7\\\2\2\u00b7\u00b8\7"+
"G\2\2\u00b8\u00b9\7z\2\2\u00b9\u00ba\7v\2\2\u00ba\32\3\2\2\2\u00bb\u00bc"+
"\7U\2\2\u00bc\u00bd\7G\2\2\u00bd\u00be\7z\2\2\u00be\u00bf\7v\2\2\u00bf"+
"\34\3\2\2\2\u00c0\u00c1\7T\2\2\u00c1\u00c2\7g\2\2\u00c2\u00c3\7c\2\2\u00c3"+
"\u00c4\7f\2\2\u00c4\36\3\2\2\2\u00c5\u00c6\7U\2\2\u00c6\u00c7\7g\2\2\u00c7"+
"\u00c8\7n\2\2\u00c8\u00c9\7g\2\2\u00c9\u00ca\7e\2\2\u00ca\u00cb\7v\2\2"+
"\u00cb \3\2\2\2\u00cc\u00cd\7P\2\2\u00cd\u00ce\7g\2\2\u00ce\u00cf\7i\2"+
"\2\u00cf\"\3\2\2\2\u00d0\u00d1\7T\2\2\u00d1\u00d2\7g\2\2\u00d2\u00d3\7"+
"c\2\2\u00d3\u00d4\7f\2\2\u00d4\u00d5\7N\2\2\u00d5\u00d6\7U\2\2\u00d6\u00d7"+
"\7D\2\2\u00d7$\3\2\2\2\u00d8\u00d9\7T\2\2\u00d9\u00da\7g\2\2\u00da\u00db"+
"\7c\2\2\u00db\u00dc\7f\2\2\u00dc\u00dd\7O\2\2\u00dd\u00de\7U\2\2\u00de"+
"\u00df\7D\2\2\u00df&\3\2\2\2\u00e0\u00e1\7C\2\2\u00e1\u00e2\7p\2\2\u00e2"+
"\u00e3\7f\2\2\u00e3(\3\2\2\2\u00e4\u00e5\7Q\2\2\u00e5\u00e6\7t\2\2\u00e6"+
"*\3\2\2\2\u00e7\u00e8\7Z\2\2\u00e8\u00e9\7q\2\2\u00e9\u00ea\7t\2\2\u00ea"+
",\3\2\2\2\u00eb\u00ec\7G\2\2\u00ec\u00ed\7s\2\2\u00ed.\3\2\2\2\u00ee\u00ef"+
"\7P\2\2\u00ef\u00f0\7g\2\2\u00f0\60\3\2\2\2\u00f1\u00f2\7W\2\2\u00f2\u00f3"+
"\7n\2\2\u00f3\u00f4\7v\2\2\u00f4\62\3\2\2\2\u00f5\u00f6\7W\2\2\u00f6\u00f7"+
"\7n\2\2\u00f7\u00f8\7g\2\2\u00f8\64\3\2\2\2\u00f9\u00fa\7W\2\2\u00fa\u00fb"+
"\7i\2\2\u00fb\u00fc\7v\2\2\u00fc\66\3\2\2\2\u00fd\u00fe\7W\2\2\u00fe\u00ff"+
"\7i\2\2\u00ff\u0100\7g\2\2\u01008\3\2\2\2\u0101\u0102\7U\2\2\u0102\u0103"+
"\7n\2\2\u0103\u0104\7v\2\2\u0104:\3\2\2\2\u0105\u0106\7U\2\2\u0106\u0107"+
"\7n\2\2\u0107\u0108\7g\2\2\u0108<\3\2\2\2\u0109\u010a\7U\2\2\u010a\u010b"+
"\7i\2\2\u010b\u010c\7v\2\2\u010c>\3\2\2\2\u010d\u010e\7U\2\2\u010e\u010f"+
"\7i\2\2\u010f\u0110\7g\2\2\u0110@\3\2\2\2\u0111\u0112\7C\2\2\u0112\u0113"+
"\7f\2\2\u0113\u0114\7f\2\2\u0114B\3\2\2\2\u0115\u0116\7U\2\2\u0116\u0117"+
"\7w\2\2\u0117\u0118\7d\2\2\u0118D\3\2\2\2\u0119\u011a\7O\2\2\u011a\u011b"+
"\7w\2\2\u011b\u011c\7n\2\2\u011cF\3\2\2\2\u011d\u011e\7W\2\2\u011e\u011f"+
"\7F\2\2\u011f\u0120\7k\2\2\u0120\u0121\7x\2\2\u0121H\3\2\2\2\u0122\u0123"+
"\7W\2\2\u0123\u0124\7T\2\2\u0124\u0125\7g\2\2\u0125\u0126\7o\2\2\u0126"+
"J\3\2\2\2\u0127\u0128\7U\2\2\u0128\u0129\7F\2\2\u0129\u012a\7k\2\2\u012a"+
"\u012b\7x\2\2\u012bL\3\2\2\2\u012c\u012d\7U\2\2\u012d\u012e\7T\2\2\u012e"+
"\u012f\7g\2\2\u012f\u0130\7o\2\2\u0130N\3\2\2\2\u0131\u0132\7.\2\2\u0132"+
"P\3\2\2\2\u0133\u0134\7v\2\2\u0134\u0135\7t\2\2\u0135\u0136\7w\2\2\u0136"+
"\u0137\7g\2\2\u0137R\3\2\2\2\u0138\u0139\7h\2\2\u0139\u013a\7c\2\2\u013a"+
"\u013b\7n\2\2\u013b\u013c\7u\2\2\u013c\u013d\7g\2\2\u013dT\3\2\2\2\u013e"+
"\u013f\7-\2\2\u013fV\3\2\2\2\u0140\u0141\7/\2\2\u0141X\3\2\2\2\u0142\u0143"+
"\7\62\2\2\u0143\u0144\7d\2\2\u0144Z\3\2\2\2\u0145\u0146\7\62\2\2\u0146"+
"\u0147\7q\2\2\u0147\\\3\2\2\2\u0148\u0149\7\62\2\2\u0149\u014a\7z\2\2"+
"\u014a^\3\2\2\2\u014b\u014c\7h\2\2\u014c\u014d\7r\2\2\u014d`\3\2\2\2\u014e"+
"\u014f\7k\2\2\u014fb\3\2\2\2\u0150\u0151\7B\2\2\u0151d\3\2\2\2\u0152\u0153"+
"\7y\2\2\u0153\u0154\7*\2\2\u0154\u0155\7\62\2\2\u0155\u0156\7/\2\2\u0156"+
"\u0157\7;\2\2\u0157\u0158\7+\2\2\u0158\u0159\7-\2\2\u0159f\3\2\2\2\u015a"+
"\u015b\7*\2\2\u015bh\3\2\2\2\u015c\u015d\7+\2\2\u015dj\3\2\2\2\u015e\u015f"+
"\7]\2\2\u015fl\3\2\2\2\u0160\u0161\7_\2\2\u0161n\3\2\2\2\u0162\u0163\7"+
"}\2\2\u0163p\3\2\2\2\u0164\u0165\7\177\2\2\u0165r\3\2\2\2\u0166\u0167"+
"\t\2\2\2\u0167t\3\2\2\2\u0168\u0169\t\3\2\2\u0169v\3\2\2\2\u016a\u016b"+
"\t\4\2\2\u016bx\3\2\2\2\u016c\u016d\t\5\2\2\u016dz\3\2\2\2\u016e\u016f"+
"\t\6\2\2\u016f|\3\2\2\2\u0170\u0171\t\7\2\2\u0171~\3\2\2\2\3\2\2";
// Grammar automaton deserialized once from the serialized ATN string above;
// shared (read-only) by every lexer instance.
public static final ATN _ATN =
    new ATNDeserializer().deserialize(_serializedATN.toCharArray());
static {
    // One DFA cache per ATN decision point; populated lazily at lex time.
    _decisionToDFA = new DFA[_ATN.getNumberOfDecisions()];
    for (int i = 0; i < _ATN.getNumberOfDecisions(); i++) {
        _decisionToDFA[i] = new DFA(_ATN.getDecisionState(i), i);
    }
}
} |
import {
GraphQLBoolean,
GraphQLSchema,
GraphQLID,
GraphQLInt,
GraphQLList,
GraphQLNonNull,
GraphQLObjectType,
GraphQLString,
GraphQLEnumType,
} from 'graphql'
import * as keystoneTypes from './keystoneTypes'
import keystone from 'keystone'
// Keystone list models backing the landing-page content types.
const Carousel = keystone.list('Carousel-Image')
const Slogan = keystone.list('Slogan')
const ExhibCard = keystone.list('Exhib-Card')
const Footprint = keystone.list('Footprint')
// Root resolver helper: IndexType's fields resolve themselves from the models
// above, so the root "index" object can be empty.
const returnEmptyObj = () => ({})
// GraphQL shape of one carousel image: id plus the stored image location.
const CarouselImageType = new GraphQLObjectType({
  name: 'CarouselImage',
  fields: () => {
    return {
      id: { type: new GraphQLNonNull(GraphQLID) },
      srcLocation: { type: GraphQLString },
    };
  },
});
// GraphQL shape of the site slogan record.
const SloganType = new GraphQLObjectType({
  name: 'Slogan',
  fields: () => {
    return {
      id: { type: new GraphQLNonNull(GraphQLID) },
      content: { type: GraphQLString },
    };
  },
});
// GraphQL shape of one exhibition card (name + preview image).
const ExhibCardType = new GraphQLObjectType({
  name: 'ExhibCard',
  fields: () => {
    return {
      id: { type: new GraphQLNonNull(GraphQLID) },
      name: { type: GraphQLString },
      imageSrc: { type: GraphQLString },
    };
  },
});
// GraphQL shape of a map footprint: a named point with string-typed coordinates.
const FootprintType = new GraphQLObjectType({
  name: 'Footprint',
  fields: () => {
    return {
      id: { type: new GraphQLNonNull(GraphQLID) },
      name: { type: GraphQLString },
      longitude: { type: GraphQLString },
      latitude: { type: GraphQLString },
    };
  },
});
// Aggregate "Index" object: every field resolves its own data straight from
// the Keystone models, so the parent value is never consulted.
const IndexType = new GraphQLObjectType({
  name: 'Index',
  fields: () => {
    return {
      carouselImages: {
        type: new GraphQLList(CarouselImageType),
        resolve: () =>
          Carousel.model.find().sort('+publishedAt').select('srcLocation').exec(),
      },
      slogan: {
        type: SloganType,
        resolve: () => Slogan.model.findOne().select('content').exec(),
      },
      exhibCards: {
        type: new GraphQLList(ExhibCardType),
        resolve: () =>
          ExhibCard.model.find().sort('+publishedAt').select('name imageSrc').exec(),
      },
      footprints: {
        type: new GraphQLList(FootprintType),
        resolve: () =>
          Footprint.model.find().sort('-publishedAt').select('name longitude latitude').exec(),
      },
    };
  },
});
// Root query type. The `index` resolver hands back an empty object on
// purpose: IndexType's own field resolvers do all of the actual fetching.
// Fix: declared with `const` -- the rest of this module uses const, and the
// binding is never reassigned (`var` also leaked it across the module scope).
const queryRootType = new GraphQLObjectType({
  name: 'Query',
  fields: {
    index: {
      type: IndexType,
      resolve: () => returnEmptyObj(),
    },
  },
});

export default new GraphQLSchema({
  query: queryRootType,
});
|
package com.semmle.js.ast.json;
import com.semmle.js.ast.SourceLocation;
import java.util.List;
import java.util.StringJoiner;
/** A JSON array. */
public class JSONArray extends JSONValue {
private final List<JSONValue> elements;
public JSONArray(SourceLocation loc, List<JSONValue> elements) {
super("Array", loc);
this.elements = elements;
}
/** The elements of the array. */
public List<JSONValue> getElements() {
return elements;
}
@Override
public <C, R> R accept(Visitor<C, R> v, C c) {
return v.visit(this, c);
}
@Override
public String toString() {
StringBuilder res = new StringBuilder("[");
String sep = "";
for (JSONValue element : elements) {
res.append(sep);
res.append(element.toString());
sep = ", ";
}
res.append("]");
return res.toString();
}
}
|
<filename>src/components/Profile/ViewProfile/index.js<gh_stars>0
import React, { useEffect } from "react";
import { useDispatch, useSelector } from "react-redux";
import { useParams } from "react-router-dom";
import { clearUserProfile, getUserProfileData } from "../../../store/actions";
import { useFirebase, useFirestore } from "react-redux-firebase";
import noImageAvailable from "../../../assets/images/no-image-available.svg";
import Card from "@material-ui/core/Card";
import Grid from "@material-ui/core/Grid";
import Divider from "@material-ui/core/Divider";
import LinearProgress from "@material-ui/core/LinearProgress";
import Box from "@material-ui/core/Box";
import ThemeProvider from "@material-ui/core/styles/MuiThemeProvider";
import createMuiTheme from "@material-ui/core/styles/createMuiTheme";
import FacebookIcon from "@material-ui/icons/Facebook";
import TwitterIcon from "@material-ui/icons/Twitter";
import GitHubIcon from "@material-ui/icons/GitHub";
import LinkedInIcon from "@material-ui/icons/LinkedIn";
import LinkIcon from "@material-ui/icons/Link";
import FlagIcon from "@material-ui/icons/Flag";
// Flat Material-UI theme: every elevation shadow disabled, slate-grey primary.
const primaryMain = "#455a64";
const theme = createMuiTheme({
  shadows: ["none"],
  palette: {
    primary: {
      main: primaryMain,
    },
  },
});
// Read-only public profile page. Fetches the profile for the `:handle`
// route param on mount, clears it on unmount, and renders the avatar,
// bio, social links and country for that user.
const ProfileView = () => {
  const { handle } = useParams();
  const firestore = useFirestore();
  const firebase = useFirebase();
  const dispatch = useDispatch();

  // Load the profile whenever the handle (or a firebase/firestore instance)
  // changes; the cleanup clears stale profile state on navigation away.
  useEffect(() => {
    getUserProfileData(handle)(firebase, firestore, dispatch);
    return () => {
      clearUserProfile()(dispatch);
    };
  }, [firebase, firestore, dispatch, handle]);

  // Fetched profile document; `false` appears to mean "lookup finished,
  // no such handle" (see the message at the bottom) -- TODO confirm.
  const profileData = useSelector(
    ({
      profile: {
        user: { data },
      },
    }) => data
  );

  // NOTE(review): this selector reads the `error` slice but the variable is
  // named `loading`; the spinner below therefore shows while errored OR
  // while data is still absent. Confirm which state slice was intended.
  const loading = useSelector(
    ({
      profile: {
        user: { error },
      },
    }) => error
  );

  // True when `data` is a non-empty string/array.
  const checkAvailable = (data) => {
    return !!(data && data.length > 0);
  };

  // Progress bar until the profile has arrived (or while errored -- see note).
  if (loading || !profileData) {
    return (
      <ThemeProvider theme={theme}>
        <LinearProgress theme={theme} />
      </ThemeProvider>
    );
  }

  return (
    <ThemeProvider theme={theme}>
      <Card className="p-0">
        {profileData && (
          <div>
            <Box mt={2} mb={2} m={3}>
              <Grid container>
                <span style={{ fontSize: "1.3em", fontWeight: "480" }}>
                  Profile Details
                </span>
              </Grid>
            </Box>
            <Divider></Divider>
            <Box mt={2} mb={2} m={3}>
              <Grid container>
                <Grid xs={12} md={3} lg={3} item={true}>
                  {profileData.photoURL && profileData.photoURL.length > 0 ? (
                    <img
                      style={{
                        width: "100%",
                        height: "auto",
                        borderRadius: "8px",
                      }}
                      src={profileData.photoURL}
                      alt={profileData.displayName}
                      className="org-image"
                    />
                  ) : (
                    <img
                      style={{
                        width: "100%",
                        height: "auto",
                        borderRadius: "8px",
                      }}
                      src={noImageAvailable}
                      alt={"Not Available"}
                      className="org-image"
                    />
                  )}
                </Grid>
                <Grid
                  xs={12}
                  md={9}
                  lg={9}
                  className="pl-24-d pt-24-m"
                  item={true}
                >
                  <p>
                    <span style={{ fontSize: "1.3em", fontWeight: "bold" }}>
                      {profileData.displayName}
                    </span>
                  </p>
                  {checkAvailable(profileData.description) && (
                    <p className="text-justified">{profileData.description}</p>
                  )}
                  {checkAvailable(profileData.link_facebook) && (
                    <p>
                      <a
                        href={
                          "https://www.facebook.com/" +
                          profileData.link_facebook
                        }
                        target="_blank"
                        rel="noopener noreferrer"
                      >
                        <div
                          style={{
                            display: "flex",
                          }}
                        >
                          <FacebookIcon
                            fontSize="small"
                            className="facebook-color"
                          />{" "}
                          {profileData.link_facebook}
                        </div>
                      </a>
                    </p>
                  )}
                  {checkAvailable(profileData.link_twitter) && (
                    <p>
                      <a
                        href={"https://twitter.com/" + profileData.link_twitter}
                        target="_blank"
                        rel="noopener noreferrer"
                      >
                        <div
                          style={{
                            display: "flex",
                          }}
                        >
                          <TwitterIcon
                            fontSize="small"
                            className="twitter-color"
                          />{" "}
                          {profileData.link_twitter}
                        </div>
                      </a>
                    </p>
                  )}
                  {checkAvailable(profileData.link_github) && (
                    <p>
                      <a
                        href={"https://github.com/" + profileData.link_github}
                        target="_blank"
                        rel="noopener noreferrer"
                      >
                        <div
                          style={{
                            display: "flex",
                          }}
                        >
                          <GitHubIcon
                            fontSize="small"
                            className="github-color"
                          />{" "}
                          {profileData.link_github}
                        </div>
                      </a>
                    </p>
                  )}
                  {checkAvailable(profileData.link_linkedin) && (
                    <p>
                      <a
                        href={
                          "https://www.linkedin.com/in/" +
                          profileData.link_linkedin
                        }
                        target="_blank"
                        rel="noopener noreferrer"
                      >
                        <div
                          style={{
                            display: "flex",
                          }}
                        >
                          <LinkedInIcon
                            fontSize="small"
                            className="linkedin-color"
                          />{" "}
                          {profileData.link_linkedin}
                        </div>
                      </a>
                    </p>
                  )}
                  {checkAvailable(profileData.website) && (
                    <p>
                      <a
                        href={profileData.website}
                        target="_blank"
                        rel="noopener noreferrer"
                      >
                        <div
                          style={{
                            display: "flex",
                          }}
                        >
                          <LinkIcon
                            fontSize="small"
                            className="website-color"
                          />{" "}
                          {profileData.website}
                        </div>
                      </a>
                    </p>
                  )}
                  {checkAvailable(profileData.country) && (
                    <p className="mb-0">
                      <a
                        href={
                          "https://www.google.com/search?q=" +
                          profileData.country
                        }
                        target="_blank"
                        rel="noopener noreferrer"
                      >
                        <div
                          style={{
                            display: "flex",
                          }}
                        >
                          <FlagIcon
                            fontSize="small"
                            className="website-color"
                          />{" "}
                          {profileData.country}
                        </div>
                      </a>
                    </p>
                  )}
                </Grid>
              </Grid>
            </Box>
          </div>
        )}
        {profileData === false && "No profile with the provided handle"}
      </Card>
    </ThemeProvider>
  );
};
export default ProfileView;
|
<reponame>robisacommonusername/SVGBuilder
require_relative '../Base/SVGTextContainer'
class SVG < SVGAbstract::SVGContainer
  # SVG <text> element.
  #
  # x, y:       text anchor position in SVG user units (default: origin)
  # txt:        optional initial text run appended to the element
  # do_escape:  whether text content is XML-escaped (forwarded to superclass)
  #
  # Yields itself when a block is given, so nested content can be added
  # DSL-style: SVG::Text.new(10, 20) { |t| ... }
  class Text < SVGAbstract::SVGTextContainer
    def initialize(x = 0, y = 0, txt = nil, do_escape = true)
      super do_escape
      @name = 'text'
      @attributes.merge!(x: x, y: y)
      @text_elements << txt unless txt.nil?
      yield self if block_given?
      # Fix: dropped the trailing `return self` -- `initialize`'s return
      # value is discarded by `.new`, so the explicit return was dead code.
    end
  end
end
|
<reponame>dima7a14/FamilyBudget-client
const rewireReactHotLoader = require('react-app-rewire-hot-loader');
/* config-overrides.js */
module.exports = function override(config, env) {
config = rewireReactHotLoader(config, env)
// see https://github.com/gaearon/react-hot-loader#react--dom
config.resolve.alias = {
...config.resolve.alias,
'react-dom': '@hot-loader/react-dom',
}
return config;
} |
#!/bin/bash
# Resets ownership and permissions for the TerpTube Symfony app inside the
# Vagrant VM: reclaims the home dir for `vagrant`, wipes Symfony cache/logs,
# and re-creates the upload directories with web-server-writable permissions.
export VAGRANT_HOME=/home/vagrant
export TERPTUBE_HOME="$VAGRANT_HOME/dev-work/workspace/terptube/trunk"
# Fix: quote every path expansion (they were unquoted) and use $(...) instead
# of legacy backticks for the whoami substitution.
cd "$VAGRANT_HOME" && \
sudo chown -R vagrant:vagrant ./ && \
cd "$TERPTUBE_HOME" && \
rm -rf app/cache/* app/logs/* && \
sudo chown -R "$(whoami)":www-data app/cache app/logs && \
chmod -R 775 app/cache app/logs && \
mkdir -p web/uploads/media/thumbnails/ && \
chmod 777 web/uploads/media/ web/uploads/media/thumbnails/
<reponame>ksmit799/POTCO-PS<filename>pirates/uberdog/AITrade.py
# File: A (Python 2.4)
from AITradeBase import AITradeBase
from pirates.uberdog.UberDogGlobals import *
from pirates.reputation import ReputationGlobals
from direct.directnotify.DirectNotifyGlobal import directNotify
from pirates.piratesbase import Freebooter
class AITrade(AITradeBase):
    """AI-side trade transaction helper.

    Thin convenience layer over AITradeBase: each method queues a give/take
    of a specific inventory category, stackable, accumulator or distributed
    object for the avatar the trade was opened for.

    Fixes relative to the decompiled original:
      * canTeleportTo: the guard `if not inv and inv.isReady()` crashed when
        the inventory was missing and never rejected an unready inventory.
      * grantTeleportToken: the Kingshead branch granted the Tortuga token
        (copy/paste error).
    """
    notify = directNotify.newCategory('AITrade')

    def __init__(self, distObj, avatarId=None, inventoryId=None, timeout=4.0):
        AITradeBase.__init__(self, distObj, avatarId, inventoryId, timeout=timeout)

    # ---- Quests -----------------------------------------------------------
    def giveQuestCategoryLimit(self, addToLimit):
        # Quests and quest slots are raised together so capacity stays in sync.
        self.giveCategoryLimit(InventoryCategory.QUESTS, addToLimit)
        self.giveCategoryLimit(InventoryCategory.QUEST_SLOTS, addToLimit)

    def giveNewQuest(self, dClassName, questValues):
        # A granted quest consumes one open quest slot.
        self.giveNewDistObj(InventoryCategory.QUESTS, dClassName, questValues)
        self.takeStack(InventoryType.OpenQuestSlot, 1)

    def giveQuest(self, questDoId):
        self.giveDoId(InventoryCategory.QUESTS, questDoId)
        self.takeStack(InventoryType.OpenQuestSlot, 1)

    def takeQuest(self, questDoId):
        # Removing a quest frees its slot again.
        self.takeDoId(InventoryCategory.QUESTS, questDoId)
        self.giveStack(InventoryType.OpenQuestSlot, 1)

    # ---- Treasure maps ----------------------------------------------------
    def giveTreasureMapCategoryLimit(self, addToLimit):
        self.giveCategoryLimit(InventoryCategory.TREASURE_MAPS, addToLimit)

    def giveNewTreasureMap(self, dClassName, values=None):
        self.giveNewDistObj(InventoryCategory.TREASURE_MAPS, dClassName, values)

    def giveTreasureMap(self, treasureMapDoID):
        self.giveDoId(InventoryCategory.TREASURE_MAPS, treasureMapDoID)

    def takeTreasureMap(self, treasureMapDoID):
        self.takeDoId(InventoryCategory.TREASURE_MAPS, treasureMapDoID)

    # ---- Ships and wagers --------------------------------------------------
    def giveShipCategoryLimit(self, addToLimit):
        self.giveCategoryLimit(InventoryCategory.SHIPS, addToLimit)

    def giveShip(self, shipDoId):
        self.giveDoId(InventoryCategory.SHIPS, shipDoId)

    def takeShip(self, shipDoId):
        self.takeDoId(InventoryCategory.SHIPS, shipDoId)

    def giveWagerCategoryLimit(self, addToLimit):
        self.giveCategoryLimit(InventoryCategory.WAGERS, addToLimit)

    def giveWager(self, wagerDoId):
        self.giveDoId(InventoryCategory.WAGERS, wagerDoId)

    def takeWager(self, wagerDoId):
        self.takeDoId(InventoryCategory.WAGERS, wagerDoId)

    # ---- Money -------------------------------------------------------------
    def giveMoneyCategories(self, addToLimit):
        self.giveCategoryLimit(InventoryCategory.MONEY, addToLimit)

    def giveGoldInPocketLimit(self, addToLimit):
        self.giveStackableTypeLimit(InventoryType.GoldInPocket, addToLimit)

    # ---- One-shot reward tokens -------------------------------------------
    def takeNewPlayerToken(self, amount=1):
        self.takeStack(InventoryType.NewPlayerToken, amount)

    def takeNewShipToken(self, amount=1):
        self.takeStack(InventoryType.NewShipToken, amount)

    def giveNewShipToken(self, amount=1):
        self.giveStack(InventoryType.NewShipToken, amount)

    def giveDinghy(self, amount=1):
        self.giveStack(InventoryType.Dinghy, amount)

    # ---- Teleport tokens ---------------------------------------------------
    def canTeleportTo(self, av, token):
        """Return True iff `av` has a ready inventory holding exactly one
        of the given teleport token.

        BUGFIX: the original guard `if not inv and inv.isReady()` raised
        AttributeError when the inventory was missing and let an unready
        inventory fall through; both now return False.
        """
        inv = av.getInventory()
        if not (inv and inv.isReady()):
            return False
        return inv.getStackQuantity(token) == 1

    def grantTeleportToken(self, token):
        """Grant the teleport token matching `token`."""
        if token == TortugaTeleportToken:
            self.giveTortugaTeleportToken()
        elif token == KingsheadTeleportToken:
            # BUGFIX: previously granted the Tortuga token here (copy/paste
            # error); Kingshead requests now grant the Kingshead token.
            self.giveKingsheadTeleportToken()
        elif token == PortRoyalTeleportToken:
            self.givePortRoyalTeleportToken()
        elif token == CubaTeleportToken:
            self.giveCubaTeleportToken()
        elif token == PadresDelFuegoTeleportToken:
            self.givePadresDelFuegoTeleportToken()
        else:
            print('AITrade.grantTeleportToken() encountered an unexpected error')

    def takeTortugaTeleportToken(self, amount=1):
        self.takeStack(InventoryType.TortugaTeleportToken, amount)

    def giveTortugaTeleportToken(self, amount=1):
        self.giveStack(InventoryType.TortugaTeleportToken, amount)

    def takeKingsheadTeleportToken(self, amount=1):
        self.takeStack(InventoryType.KingsheadTeleportToken, amount)

    def giveKingsheadTeleportToken(self, amount=1):
        self.giveStack(InventoryType.KingsheadTeleportToken, amount)

    def takeRavensCoveTeleportToken(self, amount=1):
        self.takeStack(InventoryType.RavensCoveTeleportToken, amount)

    def giveRavensCoveTeleportToken(self, amount=1):
        self.giveStack(InventoryType.RavensCoveTeleportToken, amount)

    def takePortRoyalTeleportToken(self, amount=1):
        self.takeStack(InventoryType.PortRoyalTeleportToken, amount)

    def givePortRoyalTeleportToken(self, amount=1):
        self.giveStack(InventoryType.PortRoyalTeleportToken, amount)

    def takeCubaTeleportToken(self, amount=1):
        self.takeStack(InventoryType.CubaTeleportToken, amount)

    def giveCubaTeleportToken(self, amount=1):
        self.giveStack(InventoryType.CubaTeleportToken, amount)

    def takePadresDelFuegoTeleportToken(self, amount=1):
        self.takeStack(InventoryType.PadresDelFuegoTeleportToken, amount)

    def givePadresDelFuegoTeleportToken(self, amount=1):
        self.giveStack(InventoryType.PadresDelFuegoTeleportToken, amount)

    # ---- Weapons and training ---------------------------------------------
    def takeNewWeaponToken(self, amount=1):
        self.takeStack(InventoryType.NewWeaponToken, amount)

    def giveCutlassWeapon(self, amount=1):
        self.giveStack(InventoryType.CutlassWeapon, amount)

    def givePistolWeapon(self, amount=1):
        self.giveStack(InventoryType.PistolWeapon, amount)

    def giveMusketWeapon(self, amount=1):
        self.giveStack(InventoryType.MusketWeapon, amount)

    def giveDaggersWeapon(self, amount=1):
        self.giveStack(InventoryType.DaggersWeapon, amount)

    def giveGrenadeWeapon(self, amount=1):
        self.giveStack(InventoryType.GrenadeWeapon, amount)

    def giveDollWeapon(self, amount=1):
        self.giveStack(InventoryType.DollWeapon, amount)

    def giveWandWeapon(self, amount=1):
        self.giveStack(InventoryType.WandWeapon, amount)

    def giveKettleWeapon(self, amount=1):
        self.giveStack(InventoryType.KettleWeapon, amount)

    def giveCannonWeapon(self, amount=1):
        self.giveStack(InventoryType.CannonWeapon, amount)

    def giveCutlassTraining(self, amount=1):
        self.giveStack(InventoryType.CutlassToken, amount)

    def givePistolTraining(self, amount=1):
        self.giveStack(InventoryType.PistolToken, amount)

    def giveMusketTraining(self, amount=1):
        self.giveStack(InventoryType.MusketToken, amount)

    def giveDaggersTraining(self, amount=1):
        # Note: the stackable is named DaggerToken (singular) in InventoryType.
        self.giveStack(InventoryType.DaggerToken, amount)

    def giveGrenadeTraining(self, amount=1):
        self.giveStack(InventoryType.GrenadeToken, amount)

    def giveDollTraining(self, amount=1):
        self.giveStack(InventoryType.DollToken, amount)

    def giveWandTraining(self, amount=1):
        self.giveStack(InventoryType.WandToken, amount)

    def giveKettleTraining(self, amount=1):
        self.giveStack(InventoryType.KettleToken, amount)

    def giveCutlassSlash(self, amount=1):
        self.giveStack(InventoryType.CutlassSlash, amount)

    def givePlayingCard(self, card):
        self.givePlayingCard
        self.giveStack(card, 1)

    # ---- Reputation --------------------------------------------------------
    def giveReputation(self, category, amount):
        """Award reputation, scaled by the avatar's exp multiplier and
        clamped so the award cannot push past the applicable level cap.

        NOTE(review): indentation was lost in the decompiled source; the
        nesting below is reconstructed as the only reading in which each
        category path calls giveAccumulatorAddition exactly once -- confirm
        against the original bytecode if available.
        """
        if self.avatarId:
            av = self.air.doId2do.get(self.avatarId)
            if av:
                inv = av.getInventory()
                avExpMult = av.getExpMult()
                amount = int(avExpMult * amount)
                if inv:
                    if category == InventoryType.OverallRep:
                        curLevel = av.getLevel()
                        if Freebooter.getPaidStatusAI(self.avatarId):
                            levelCap = ReputationGlobals.GlobalLevelCap
                        else:
                            levelCap = Freebooter.FreeOverallLevelCap
                        if amount > 0 and curLevel < levelCap:
                            curRepTotal = inv.getAccumulator(InventoryType.OverallRep)
                            (newLevel, left) = ReputationGlobals.getLevelFromTotalReputation(InventoryType.OverallRep, curRepTotal + amount)
                            if newLevel >= levelCap:
                                # Trim the award so it lands exactly on the cap.
                                amount = max(0, amount - left)
                        self.giveAccumulatorAddition(category, amount)
                    elif category == InventoryType.GeneralRep:
                        self.giveAccumulatorAddition(category, amount)
                    else:
                        # Per-skill reputation: pick the cap for this account
                        # type and category, then clamp the award to it.
                        if Freebooter.getPaidStatusAI(self.avatarId):
                            if category in [
                                InventoryType.PotionsRep,
                                InventoryType.FishingRep]:
                                levelCap = ReputationGlobals.MinigameLevelCap
                            else:
                                levelCap = ReputationGlobals.LevelCap
                        else:
                            levelCap = Freebooter.FreeLevelCap
                        repAmt = inv.getAccumulator(category)
                        (curLevel, curLeft) = ReputationGlobals.getLevelFromTotalReputation(category, repAmt)
                        if curLevel >= levelCap:
                            amount = 0
                        (expLevel, left) = ReputationGlobals.getLevelFromTotalReputation(category, repAmt + amount)
                        if expLevel >= levelCap and curLevel < levelCap:
                            amount = max(0, amount - left)
                        self.giveAccumulatorAddition(category, amount)

    def setAccumulator(self, accumulatorType, quantity):
        # Queues an absolute accumulator assignment (not an addition).
        setAccumulator = InventoryId.getLimitChange(accumulatorType)
        self.giving.append((setAccumulator, quantity))

    def setReputation(self, category, amount):
        self.setAccumulator(category, amount)

    def takeReputation(self, category, amount):
        self.takeAccumulatorAddition(category, amount)

    # ---- Stats and consumables --------------------------------------------
    def giveMaxHitPoints(self, amount):
        self.giveStack(InventoryType.Hp, amount)

    def giveMaxMojo(self, amount):
        self.giveStack(InventoryType.Mojo, amount)

    def takeShipRepairToken(self, amount=1):
        self.takeStack(InventoryType.ShipRepairToken, amount)

    def giveShipRepairToken(self, amount=1):
        self.giveStack(InventoryType.ShipRepairToken, amount)

    def takePlayerHealToken(self, amount=1):
        self.takeStack(InventoryType.PlayerHealToken, amount)

    def givePlayerHealToken(self, amount=1):
        self.giveStack(InventoryType.PlayerHealToken, amount)

    def takePlayerMojoHealToken(self, amount=1):
        self.takeStack(InventoryType.PlayerHealMojoToken, amount)

    def givePlayerMojoHealToken(self, amount=1):
        self.giveStack(InventoryType.PlayerHealMojoToken, amount)

    def takeTonic(self, tonicId, amount=1):
        self.takeStack(tonicId, amount)

    def takeShipRepairKit(self, amount=1):
        self.takeStack(InventoryType.ShipRepairKit, amount)
|
#!/usr/bin/env bash
# Approval test for `op --edit`: stubs out $EDITOR so the test output is
# deterministic, then restores the user's editor afterwards.
set -e
source 'approvals.bash'
cd ./fixtures/empty-dir

describe "op --edit"

# Fix: quote both the save and the restore -- an EDITOR value containing
# spaces (e.g. "code --wait") previously broke the restore assignment.
original_editor="$EDITOR"
export EDITOR="echo stubbed editor with: "
approve "op --edit"
export EDITOR="$original_editor"
#!/bin/bash
# Fake gcov used in tests: answers the version probe with a plausible
# banner, then always emits malformed output, as if the real gcov had
# crashed or been killed partway through.
if [ "$1" = "-v" ]; then
  echo "gcov (Ubuntu 9.3.0-17ubuntu1~20.04) 9.3.0"
fi
echo "{bad json" # Pretend gcov crashed or was killed
<filename>pages/opengraph/user/[username].tsx
import useGalleries from 'hooks/api/galleries/useGalleries';
import useUser from 'hooks/api/users/useUser';
import { useRouter } from 'next/router';
import { OpenGraphPreview } from 'components/opengraph/OpenGraphPreview';
// Renders the OpenGraph preview card for a user's first gallery; a
// screenshot service captures #opengraph-image to produce og:image assets.
// Width/height come from the query string, defaulting to 600x300.
export default function OpenGraphUserPage() {
  const { query } = useRouter();

  const user = useUser({ username: query.username as string });
  const [gallery] = useGalleries({ userId: user?.id ?? '' }) ?? [];

  // Fix: pass an explicit radix -- parseInt without one can misinterpret
  // unusual input; `|| default` also covers the NaN case.
  const width = parseInt(query.width as string, 10) || 600;
  const height = parseInt(query.height as string, 10) || 300;

  if (!user) {
    // TODO: 404?
    throw new Error('no username provided');
  }

  if (!gallery) {
    // TODO: render something nice?
    throw new Error('no gallery found');
  }

  // Flatten every collection's NFTs; only the first four with images render.
  const nfts = gallery.collections.flatMap((collection) => collection.nfts);

  return (
    <>
      <div className="page">
        <div id="opengraph-image" style={{ width, height }}>
          <OpenGraphPreview
            title={user.username}
            description={user.bio}
            imageUrls={nfts
              .filter((nft) => nft.image_url)
              .slice(0, 4)
              .map((nft) => nft.image_url)}
          />
        </div>
      </div>
      <style jsx>{`
        .page {
          min-height: 100vh;
          display: flex;
          align-items: center;
          justify-content: center;
          background-color: #e7e5e4;
        }
        #opengraph-image {
          box-shadow: 0 25px 50px -12px rgb(0 0 0 / 0.25);
        }
      `}</style>
    </>
  );
}
|
<reponame>dailynodejs/ice-scripts<gh_stars>10-100
const c = require('./c.plugin');
module.exports = {
publicPath: '/',
plugins: [
'./a.plugin',
['./b.plugin', { alias: 'b' }],
c,
],
};
|
/**
* KML Plugin mantle controller classes.
*/
package io.opensphere.kml.mantle.controller;
|
<gh_stars>0
# The Book of Ruby - http://www.sapphiresteel.com
# Show string representations of various objects
# using the to_s method
# A treasure with a name and a description.
#
# Deliberately does NOT override #to_s, so instances fall back to Object's
# default "#<Treasure:0x...>" representation -- that is what the print
# statements below demonstrate.
class Treasure
  def initialize(name, description)
    @name = name
    @description = description
  end
end
# Print each expression label followed by its to_s/inspect result, showing
# the default string representations of classes, an Integer and a Treasure.
t = Treasure.new("Sword", "A lovely Elvish weapon")
{
  " Class.to_s" => Class.to_s,
  " Object.to_s" => Object.to_s,
  " String.to_s" => String.to_s,
  " 100.to_s" => 100.to_s,
  " Treasure.to_s" => Treasure.to_s,
  " t.to_s" => t.to_s,
  " t.inspect" => t.inspect
}.each do |label, value|
  puts(label)
  puts(value)
end
#!/bin/bash
# Integration-test driver: for each supported Solr version, start a Docker
# container with the OCR-highlighting plugin installed, wait for the `ocr`
# core to answer on the mapped port, run the Python test suite, then tear
# the container down. Solr 7 needs a different init-script layout.
set -e

SOLR_HOST="${SOLR_HOST:-localhost}"
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
SOLR7_VERSIONS="7.6 7.5"
SOLR8_VERSIONS="latest 8.3 8.2 8.1"

# Poll until the ocr core responds with HTTP 200.
wait_for_solr() {
    while [[ "$(curl -s -o /dev/null http://$SOLR_HOST:31337/solr/ocr/select -w '%{http_code}')" != "200" ]]; do
        sleep 3;
    done
}

# Make sure we're in the test directory.
# Fix: quote the expansion -- the checkout path may contain spaces.
cd "$SCRIPT_DIR"
find ../target

# Solr 8 versions
for version in $SOLR8_VERSIONS; do
    echo "Testing $version"
    container_name="ocrhltest-$version"
    # `... &` backgrounds the foreground solr process; the continuation puts
    # wait_for_solr on the same command line so it runs immediately after.
    docker run \
        --name "$container_name" \
        -e SOLR_LOG_LEVEL=ERROR \
        -v "$(pwd)/solr/install-plugin.sh:/docker-entrypoint-initdb.d/install-plugin.sh" \
        -v "$(pwd)/solr/core/v8:/opt/core-config" \
        -v "$(pwd)/data:/ocr-data" \
        -v "$(realpath ..)/target:/build" \
        -p "31337:8983" \
        solr:$version \
        solr-precreate ocr /opt/core-config & \
    wait_for_solr
    python3 test.py
    docker stop "$container_name" > /dev/null
    docker rm "$container_name" > /dev/null
done

# Solr 7 has a different Docker setup
for version in $SOLR7_VERSIONS; do
    echo "Testing $version"
    docker run \
        --name "ocrhltest-$version" \
        -e SOLR_LOG_LEVEL=ERROR \
        -v "$(pwd)/solr/install-plugin-v7.sh:/docker-entrypoint-initdb.d/install-plugin-v7.sh" \
        -v "$(pwd)/solr/core/v7:/opt/core-config" \
        -v "$(pwd)/data:/ocr-data" \
        -v "$(realpath ..)/target:/build" \
        -p "31337:8983" \
        solr:$version \
        solr-precreate ocr /opt/core-config & \
    wait_for_solr
    python3 test.py
    docker stop "ocrhltest-$version" > /dev/null
    docker rm "ocrhltest-$version" > /dev/null
done

echo "INTEGRATION TESTS SUCCEEDED"
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.