text stringlengths 1 1.05M |
|---|
package main

import (
	"errors"
	"fmt"
)

// main demonstrates constructing a new error value and printing it.
func main() {
	// Create a fresh error carrying a fixed message.
	testErr := errors.New("test error")
	fmt.Println(testErr)
}
module ProviderInterface
  # Persists the provider user described by a wizard object: creates a new
  # ProviderUser or updates an existing one (matched by email address),
  # together with their per-provider permissions. All work runs inside an
  # impersonation audit for the acting user.
  class SaveProviderUserService
    include ImpersonationAuditHelper

    attr_accessor :wizard

    # actor:  the user performing the action (audited and permission-checked)
    # wizard: exposes email_address, first_name, last_name and
    #         provider_permissions (hash of provider_id => { 'permissions' => [...] })
    def initialize(actor:, wizard:)
      @actor = actor
      self.wizard = wizard
    end

    # Creates or updates the user and their permissions.
    # Raises ProviderAuthorisation::NotAuthorisedError if the actor may not
    # manage users for every provider referenced by the wizard.
    def call!
      audit(@actor) do
        assert_permissions_for_providers!

        if email_exists?
          update_user
        else
          create_user
        end
      end
    end

    private

    def assert_permissions_for_providers!
      authorisation = ProviderAuthorisation.new(actor: @actor)
      return if @wizard.provider_permissions.keys.all? { |provider_id| authorisation.can_manage_users_for?(provider: Provider.find(provider_id)) }

      raise ProviderAuthorisation::NotAuthorisedError, 'You are not allowed to add users to these providers'
    end

    def email_exists?
      existing_user.present?
    end

    # Memoised lookup so the existence check and the update path share a
    # single query instead of issuing the same find_by twice.
    def existing_user
      @existing_user ||= ProviderUser.find_by(email_address: wizard.email_address)
    end

    def update_user
      existing_user.update(
        email_address: wizard.email_address,
        first_name: wizard.first_name,
        last_name: wizard.last_name,
      )
      update_provider_permissions(existing_user)
    end

    def create_user
      user = ProviderUser.create(
        email_address: wizard.email_address,
        first_name: wizard.first_name,
        last_name: wizard.last_name,
      )
      create_provider_permissions(user)
      user
    end

    # For brand-new users: turn on only the permissions the wizard selected
    # (their permission rows cannot pre-exist).
    def create_provider_permissions(user)
      wizard.provider_permissions.each do |provider_id, permission|
        provider_permission = ProviderPermissions.new(
          provider_id: provider_id,
          provider_user_id: user.id,
        )
        # send accepts a string directly; no to_sym needed.
        permission.fetch('permissions', []).reject(&:blank?).each do |permission_name|
          provider_permission.send("#{permission_name}=", true)
        end
        provider_permission.save!
      end
    end

    # For existing users: reset every known permission flag from the wizard's
    # selection so deselected permissions are switched off as well.
    def update_provider_permissions(user)
      wizard.provider_permissions.each do |provider_id, permission|
        provider_permission = ProviderPermissions.find_or_initialize_by(
          provider_id: provider_id,
          provider_user_id: user.id,
        )
        ProviderPermissions::VALID_PERMISSIONS.each do |permission_type|
          provider_permission.send(
            "#{permission_type}=",
            permission['permissions'].include?(permission_type.to_s),
          )
        end
        provider_permission.save!
      end
    end
  end
end
|
#! /bin/bash
#
# bash_common/xml_to_txt.sh
#
# Transform the XML file given as $1 with the cities XSL stylesheet and
# print the first four fields of every row that has more than two fields.
#
# Nov/19/2014
#
file_xsl=/var/www/data_base/common/bash_common/xsl_files/xml_to_txt_cities.xsl
#
#xalan -xsl $file_xsl | awk '{if (2 < NF) print $1,$2,$3,$4}'
# Quote both arguments so paths containing spaces or glob characters
# survive word splitting.
Xalan "$1" "$file_xsl" | awk '{if (2 < NF) print $1,$2,$3,$4}'
#
|
<reponame>tamaynard/vtk-js
import { vtkSubscription, vtkDebouncedFunction, vtkProperty, vtkPropertyDomain } from "./interfaces";
/**
 * Allow user to redefine vtkXXXMacro method call.
 * @param name name of the macro type [Log, Info, Debug, Error, Warning]
 * @param fn function to use when vtkXXXMacro is called.
 */
export function setLoggerFunction(name: string, fn: (...args: any) => void): void;
/**
 * Logging function used for level: Log
 * @param args arguments to print
 */
export function vtkLogMacro(...args: any): void;
/**
 * Logging function used for level: Info
 * @param args arguments to print
 */
export function vtkInfoMacro(...args: any): void;
/**
 * Logging function used for level: Debug
 * @param args arguments to print
 */
export function vtkDebugMacro(...args: any): void;
/**
 * Logging function used for level: Error
 * @param args arguments to print
 */
export function vtkErrorMacro(...args: any): void;
/**
 * Logging function used for level: Warning
 * @param args arguments to print
 */
export function vtkWarningMacro(...args: any): void;
/**
 * Output an error only once.
 * This is convenient when the error happens in a loop: it lets you catch
 * the issue without overloading your output console.
 * @param str error message to print (at most once per distinct string)
 */
export function vtkOnceErrorMacro(str: string): void;
/**
 * A way to create typed array based on its name without using the window namespace
 */
export enum TYPED_ARRAYS {
  Float32Array,
  Float64Array,
  Uint8Array,
  Int8Array,
  Uint16Array,
  Int16Array,
  Uint32Array,
  Int32Array,
}
/**
 * Capitalize provided string.
 * This is typically used to convert the name of a field into its method name.
 *
 * ```
 * const set = `set${capitalize(fieldName)}`;
 * ```
 * @param str string to capitalize
 */
export function capitalize(str: string): string;
/**
 * Lowercase the first letter of the provided string
 * @param str string to uncapitalize
 */
export function uncapitalize(str: string): string;
/**
 * Convert byte size into a well formatted string.
 *
 * @param size number of bytes
 * @param precision (default: 2) how many digit you want behind the unit
 * @param chunkSize (default: 1000) base 1000 or 1024
 */
export function formatBytesToProperUnit(size: number, precision?: number, chunkSize?: number): string;
// ----------------------------------------------------------------------------
// Convert thousand number with proper separator
// ----------------------------------------------------------------------------
/**
 * Insert a thousand separator into a number (e.g. 1234567 -> '1 234 567').
 * @param n number to format
 * @param separator (default: ' ')
 */
export function formatNumbersWithThousandSeparator(n: number, separator?: string): string;
// ----------------------------------------------------------------------------
// Array helper
// ----------------------------------------------------------------------------
/**
 * Replace internal arrays with new reference but with same content
 *
 * @param model model object whose array-valued fields are re-created
 */
declare function safeArrays(model: object): void;
/**
 * Extract the key of an object where the given value match the given one
 *
 * @param e enum object to search key/value from
 * @param value to look for inside object
 */
declare function enumToString(e: object, value: any): string;
/**
 * If item is a VtkObject, return its getState() otherwise return itself.
 *
 * @param item object to extract its state from
 */
declare function getStateArrayMapFunc(item: any): any;
/**
 * Call provided function on the next EDT pass
 *
 * @param fn function to execute
 */
export function setImmediateVTK(fn: () => void ): void;
/**
 * Turns the provided publicAPI into a VtkObject
 *
 * @param publicAPI (default: {}) object on which public methods get attached to
 * @param model (default: {}) object on which protected fields are stored
 * @returns publicAPI
 */
export function obj(publicAPI?: object, model?: object): object;
/**
 * Add getter methods to the provided publicAPI
 *
 * @param publicAPI object on which public methods get attached to
 * @param model object on which protected fields are stored
 * @param fieldNames list of fields available in model that we want to expose as get{FieldName} methods on the publicAPI
 */
export function get(publicAPI: object, model: object, fieldNames: Array<string>): void;
/**
 * Add setter methods to the provided publicAPI
 *
 * @param publicAPI object on which public methods get attached to
 * @param model object on which protected fields are stored
 * @param fields list of fields available in model that we want to expose as set{FieldName} methods on the publicAPI
 */
export function set(publicAPI: object, model: object, fields: Array<string>): void;
/**
 * Add setter+getter methods to the provided publicAPI
 *
 * @param publicAPI object on which public methods get attached to
 * @param model object on which protected fields are stored
 * @param fields list of fields available in model that we want to expose as set{FieldName}+get{FieldName} methods on the publicAPI
 */
export function setGet(publicAPI: object, model: object, fields: Array<string>): void;
/**
 * Add getter methods to the provided publicAPI for arrays.
 * A new array is used as returned value with get{FieldName}() method
 * unless get{FieldName}ByReference() is used.
 *
 * ```
 * get{FieldName}() // add getters for object of type array with copy to be safe
 * get{FieldName}ByReference() // add getters for object of type array without copy
 * ```
 *
 * @param publicAPI object on which public methods get attached to
 * @param model object on which protected fields are stored
 * @param fields list of fields available in model that we want to expose as get{FieldName}+get{FieldName}ByReference methods on the publicAPI
 */
export function getArray(publicAPI: object, model: object, fields: Array<string>): void;
/**
 * Add setter methods to the provided publicAPI for arrays.
 * If 'defaultVal' is supplied, shorter arrays will be padded to 'size' with
 * 'defaultVal'.
 * set{FieldName}From(abc) will copy the content of abc into current field
 * without calling modified.
 *
 * ```
 * set{FieldName}(a, b, c) / set{FieldName}(abc)
 * set{FieldName}From(abc)
 * ```
 *
 * @param publicAPI object on which public methods get attached to
 * @param model object on which protected fields are stored
 * @param fieldNames list of array fields to expose as set{FieldName} methods
 * @param size expected length of each array field
 * @param defaultVal (optional) value used to pad shorter arrays up to 'size'
 */
export function setArray(publicAPI: object, model: object, fieldNames: Array<string>, size: number, defaultVal?: any): void;
/**
 * Add both setter and getter methods for object of type array
 * (combination of setArray and getArray).
 *
 * @param publicAPI object on which public methods get attached to
 * @param model object on which protected fields are stored
 * @param fieldNames list of array fields to expose as set{FieldName}+get{FieldName} methods
 * @param size expected length of each array field
 * @param defaultVal (optional) value used to pad shorter arrays up to 'size'
 */
export function setGetArray(publicAPI: object, model: object, fieldNames: Array<string>, size: number, defaultVal?: any): void;
/**
 * Add algorithm methods onto the provided publicAPI
 *
 * @param publicAPI object on which methods will be bounds (public)
 * @param model object on which data structure will be bounds (protected)
 * @param numberOfInputs number of input ports
 * @param numberOfOutputs number of output ports
 */
export function algo(publicAPI: object, model: object, numberOfInputs: number, numberOfOutputs: number): void;
/**
 * Symbol used as return value for callbacks.
 */
export const VOID: symbol;
/**
 * Symbol used as return value for callback when you want to stop
 * any further callback calls after yours.
 */
export const EVENT_ABORT: symbol;
/**
 * Add event handling methods (on{EventName}, invoke{EventName}) to the
 * provided publicAPI/model pair for the named event.
 *
 * @param publicAPI object on which public methods get attached to
 * @param model object on which protected fields are stored
 * @param eventName name of the event to generate methods for
 */
export function event(publicAPI: object, model: object, eventName: string): void;
/**
 * Event callback
 * @param args
 * @returns symbol to either keep going or interrupt existing callback call stack
 */
export function VtkCallback(...args: any): void | symbol;
// Example of event(,, 'change')
export interface VtkChangeEvent {
  /**
   * Call any registered callbacks with the given arguments
   * @param args
   */
  invokeChange(...args: any): void;
  /**
   * Register a callback for the event.
   * Execute higher priority callback first;
   * negative priority uses setTimeout(cb, -priority) for later callback.
   *
   * @param callback callback to invoke (see VtkCallback)
   * @param priority (default 0.0)
   */
  onChange(callback: typeof VtkCallback, priority?: number): vtkSubscription;
}
// ----------------------------------------------------------------------------
// newInstance
// ----------------------------------------------------------------------------
/** Signature of an extend() function that decorates a publicAPI/model pair. */
export type VtkExtend = (publicAPI: object, model: object, initialValues: object) => void;
/**
 * Build a newInstance() factory from the given extend() function.
 * @param extend function that decorates a fresh publicAPI/model pair
 * @param className class name registered on created instances
 */
export function newInstance(extend: VtkExtend, className: string): any;
// ----------------------------------------------------------------------------
// Chain function calls
// ----------------------------------------------------------------------------
/**
 * Create a new closure that will chain the call of any provided methods
 * @param fn list of function to call
 */
export function chain(...fn: Array<Function>): Function;
/**
 * Test if provided object is an actual vtkObject or not
 * @param instance object to test
 */
export function isVtkObject(instance: any): boolean;
/**
 * Traverse an instance tree of vtkObjects
 *
 * @param instance root of the tree to traverse
 * @param extractFunction function used to decorate vtkObject
 * @param accumulator (default []) array use to capture decorated by the extractFunction
 * @param visitedInstances (default []) array use to capture visitedInstances
 * @returns the accumulator is actually returned
 */
export function traverseInstanceTree(
  instance: any,
  extractFunction: any,
  accumulator?: Array<any>,
  visitedInstances?: Array<any>
): Array<any>;
/**
 * Returns a function, that, as long as it continues to be invoked, will not
 * be triggered. The function will be called after it stops being called for
 * N milliseconds. If `immediate` is passed, trigger the function on the
 * leading edge, instead of the trailing.
 *
 * @param func function to debounce
 * @param wait quiet period in milliseconds
 * @param immediate (default false) fire on the leading edge instead of trailing
 * @returns vtkDebouncedFunction A debounced function that can be called.
 *          Use .cancel() to clear any pending debounced call.
 */
export function debounce(func: (...args: any) => any, wait: number, immediate?: boolean): vtkDebouncedFunction;
/**
 * Creates a throttled function that only invokes `func` at most once per
 * every `wait` milliseconds.
 *
 * @param callback function to throttle
 * @param delay minimum interval between invocations, in milliseconds
 */
export function throttle(callback: (...args: any) => any, delay: number): (...args: any) => any;
/**
 * keystore(publicAPI, model, initialKeystore)
 *
 * - initialKeystore: Initial keystore. This can be either a Map or an
 *   object.
 *
 * Generated API
 *  setKey(key, value) : mixed (returns value)
 *  getKey(key) : mixed
 *  getAllKeys() : [mixed]
 *  deleteKey(key) : Boolean
 */
export interface VtkKeyStore {
  setKey: (key: string, value: any) => void;
  getKey: (key: string) => any;
  getAllKeys: () => Array<string>;
  deleteKey: (key: string) => void;
  clearKeystore: () => void;
}
/**
 * Attach key/value store methods (see VtkKeyStore) to the publicAPI.
 * @param publicAPI object on which public methods get attached to
 * @param model object on which protected fields are stored
 * @param initialKeystore (default {}) initial content of the store
 */
export function keystore(publicAPI: object, model: object, initialKeystore?: object): void;
// ----------------------------------------------------------------------------
// proxy(publicAPI, model, sectionName, propertyUI)
//
// - sectionName: Name of the section for UI
// - propertyUI: List of props with their UI description
//
// Generated API
//  getProxyId() : String
//  listProxyProperties() : [string]
//  updateProxyProperty(name, prop)
//  getProxySection() => List of properties for UI generation
// ----------------------------------------------------------------------------
/** Marker interface for the proxy manager (full shape declared elsewhere). */
export interface VtkProxyManager {
}
/** One UI section: its id/name, UI description and property list. */
export interface VtkProxySection {
  id: string;
  name: string;
  ui: object;
  properties: Array<any>,
}
/** A (possibly persistent) property link between proxies. */
export interface VtkLink {
  /**
   * Bind a proxy property into this link.
   * @param instance proxy to bind
   * @param propertyName name of the property on that proxy
   * @param updateMe (default: false)
   */
  bind(instance: VtkProxy, propertyName: string, updateMe?: boolean): void;
  unbind: (instance: VtkProxy, propertyName: string) => void;
  unsubscribe: () => void;
  persistent: boolean;
}
export interface VtkProxy extends VtkKeyStore {
  getProxyId(): string;
  getProxyGroup(): string;
  getProxyName: () => string;
  setProxyManager: (pxm: VtkProxyManager) => boolean;
  getProxyManager: () => VtkProxyManager;
  updateUI: (ui: object) => void;
  listProxyProperties: (groupName: string) => Array<vtkProperty>;
  updateProxyProperty: (propertyName: string, propUI: object) => void;
  activate: () => void;
  registerPropertyLinkForGC: (otherLink: VtkLink, type: string) => void;
  gcPropertyLinks(type: string): void;
  /**
   * @param id link id
   * @param persistent (default: false)
   */
  getPropertyLink(id: string, persistent?: boolean): VtkLink;
  /**
   * @param groupName (default: ROOT_GROUP_NAME)
   */
  getProperties(groupName?: string): Array<any>;
  listPropertyNames: () => Array<string>;
  getPropertyByName: (name: string) => vtkProperty;
  getPropertyDomainByName: (name: string) => vtkPropertyDomain;
  getProxySection: () => VtkProxySection;
  delete: () => void;
}
export function proxy(publicAPI: object, model: object): void;
/**
 * proxyPropertyMapping(publicAPI, model, map)
 *
 * ```
 * map = {
 *   opacity: { modelKey: 'property', property: 'opacity' },
 * }
 * ```
 *
 * Generated API:
 *  Elevate set/get methods from internal object stored in the model to current one
 *
 * @param publicAPI object on which public methods get attached to
 * @param model object on which protected fields are stored
 * @param map mapping of elevated property name => { modelKey, property }
 */
export function proxyPropertyMapping(publicAPI: object, model: object, map: object): void;
/**
 * proxyPropertyState(publicAPI, model, state, defaults)
 *
 * ```
 * state = {
 *   representation: {
 *     'Surface with edges': { property: { edgeVisibility: true, representation: 2 } },
 *     Surface: { property: { edgeVisibility: false, representation: 2 } },
 *     Wireframe: { property: { edgeVisibility: false, representation: 1 } },
 *     Points: { property: { edgeVisibility: false, representation: 0 } },
 *   },
 * }
 *
 * defaults = {
 *   representation: 'Surface',
 * }
 * ```
 *
 * Generated API
 *  get / set Representation ( string ) => push state to various internal objects
 *
 * @param publicAPI object on which public methods get attached to
 * @param model object on which protected fields are stored
 * @param state (default: {})
 * @param defaults (default: {})
 */
export function proxyPropertyState(publicAPI: object, model: object, state?: object, defaults?: object): void;
// ----------------------------------------------------------------------------
// From : https://github.com/facebookarchive/fixed-data-table/blob/master/src/vendor_upstream/dom/normalizeWheel.js
//
//
// Copyright (c) 2015, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
//
// Mouse wheel (and 2-finger trackpad) support on the web sucks. It is
// complicated, thus this doc is long and (hopefully) detailed enough to answer
// your questions.
//
// If you need to react to the mouse wheel in a predictable way, this code is
// like your bestest friend.// hugs//
//
// As of today, there are 4 DOM event types you can listen to:
//
// 'wheel' -- Chrome(31+), FF(17+), IE(9+)
// 'mousewheel' -- Chrome, IE(6+), Opera, Safari
// 'MozMousePixelScroll' -- FF(3.5 only!) (2010-2013) -- don't bother!
// 'DOMMouseScroll' -- FF(0.9.7+) since 2003
//
// So what to do? This is the best approach:
//
// normalizeWheel.getEventType();
//
// In your event callback, use this code to get sane interpretation of the
// deltas. This code will return an object with properties:
//
// spinX -- normalized spin speed (use for zoom) - x plane
// spinY -- " - y plane
// pixelX -- normalized distance (to pixels) - x plane
// pixelY -- " - y plane
//
// Wheel values are provided by the browser assuming you are using the wheel to
// scroll a web page by a number of lines or pixels (or pages). Values can vary
// significantly on different platforms and browsers, forgetting that you can
// scroll at different speeds. Some devices (like trackpads) emit more events
// at smaller increments with fine granularity, and some emit massive jumps with
// linear speed or acceleration.
//
// This code does its best to normalize the deltas for you:
//
// - spin is trying to normalize how far the wheel was spun (or trackpad
// dragged). This is super useful for zoom support where you want to
// throw away the chunky scroll steps on the PC and make those equal to
// the slow and smooth tiny steps on the Mac. Key data: This code tries to
// resolve a single slow step on a wheel to 1.
//
// - pixel is normalizing the desired scroll delta in pixel units. You'll
// get the crazy differences between browsers, but at least it'll be in
// pixels!
//
// - positive value indicates scrolling DOWN/RIGHT, negative UP/LEFT. This
// should translate to positive value zooming IN, negative zooming OUT.
// This matches the newer 'wheel' event.
//
// Why are there spinX, spinY (or pixels)?
//
// - spinX is a 2-finger side drag on the trackpad, and a shift + wheel turn
// with a mouse. It results in side-scrolling in the browser by default.
//
// - spinY is what you expect -- it's the classic axis of a mouse wheel.
//
// - I dropped spinZ/pixelZ. It is supported by the DOM 3 'wheel' event and
// probably is by browsers in conjunction with fancy 3D controllers .. but
// you know.
//
// Implementation info:
//
// Examples of 'wheel' event if you scroll slowly (down) by one step with an
// average mouse:
//
// OS X + Chrome (mouse) - 4 pixel delta (wheelDelta -120)
// OS X + Safari (mouse) - N/A pixel delta (wheelDelta -12)
// OS X + Firefox (mouse) - 0.1 line delta (wheelDelta N/A)
// Win8 + Chrome (mouse) - 100 pixel delta (wheelDelta -120)
// Win8 + Firefox (mouse) - 3 line delta (wheelDelta -120)
//
// On the trackpad:
//
// OS X + Chrome (trackpad) - 2 pixel delta (wheelDelta -6)
// OS X + Firefox (trackpad) - 1 pixel delta (wheelDelta N/A)
//
// On other/older browsers.. it's more complicated as there can be multiple and
// also missing delta values.
//
// The 'wheel' event is more standard:
//
// http://www.w3.org/TR/DOM-Level-3-Events/#events-wheelevents
//
// The basics is that it includes a unit, deltaMode (pixels, lines, pages), and
// deltaX, deltaY and deltaZ. Some browsers provide other values to maintain
// backward compatibility with older events. Those other values help us
// better normalize spin speed. Example of what the browsers provide:
//
// | event.wheelDelta | event.detail
// ------------------+------------------+--------------
// Safari v5/OS X | -120 | 0
// Safari v5/Win7 | -120 | 0
// Chrome v17/OS X | -120 | 0
// Chrome v17/Win7 | -120 | 0
// IE9/Win7 | -120 | undefined
// Firefox v4/OS X | undefined | 1
// Firefox v4/Win7 | undefined | 3
//
// ----------------------------------------------------------------------------
/** Normalized wheel deltas produced by normalizeWheel(). */
export interface VtkNormalizedWheelEvent {
  spinX: number;
  spinY: number;
  pixelX: number;
  pixelY: number;
}
/**
 * Normalize a browser wheel/scroll event into consistent spin/pixel deltas
 * (see the long explanatory comment block above for background).
 * @param wheelEvent raw browser wheel event
 */
export function normalizeWheel(wheelEvent: object): VtkNormalizedWheelEvent;
// ----------------------------------------------------------------------------
// Default export
// ----------------------------------------------------------------------------
/** Aggregate default export exposing every macro helper of this module. */
declare const Macro: {
  algo: typeof algo,
  capitalize: typeof capitalize,
  chain: typeof chain,
  debounce: typeof debounce,
  enumToString: typeof enumToString,
  event: typeof event,
  EVENT_ABORT: typeof EVENT_ABORT,
  formatBytesToProperUnit: typeof formatBytesToProperUnit,
  formatNumbersWithThousandSeparator: typeof formatNumbersWithThousandSeparator,
  get: typeof get,
  getArray: typeof getArray,
  // primitive 'number' (the boxed 'Number' wrapper type was a mistake here),
  // and a ',' separator to match the rest of this type literal
  getCurrentGlobalMTime(): number,
  getStateArrayMapFunc: typeof getStateArrayMapFunc,
  isVtkObject: typeof isVtkObject,
  keystore: typeof keystore,
  newInstance: typeof newInstance,
  normalizeWheel: typeof normalizeWheel,
  obj: typeof obj,
  proxy: typeof proxy,
  proxyPropertyMapping: typeof proxyPropertyMapping,
  proxyPropertyState: typeof proxyPropertyState,
  safeArrays: typeof safeArrays,
  set: typeof set,
  setArray: typeof setArray,
  setGet: typeof setGet,
  setGetArray: typeof setGetArray,
  setImmediate: typeof setImmediateVTK,
  setLoggerFunction: typeof setLoggerFunction,
  throttle: typeof throttle,
  traverseInstanceTree: typeof traverseInstanceTree,
  TYPED_ARRAYS: typeof TYPED_ARRAYS,
  uncapitalize: typeof uncapitalize,
  VOID: typeof VOID,
  vtkDebugMacro: typeof vtkDebugMacro,
  vtkErrorMacro: typeof vtkErrorMacro,
  vtkInfoMacro: typeof vtkInfoMacro,
  vtkLogMacro: typeof vtkLogMacro,
  vtkOnceErrorMacro: typeof vtkOnceErrorMacro,
  vtkWarningMacro: typeof vtkWarningMacro,
};
export default Macro;
|
/*
* Copyright 2021 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.dongliu.apk.parser.bean;
import net.dongliu.apk.parser.struct.dex.DexClassStruct;
import javax.annotation.Nullable;
/**
* @author dongliu
*/
public class DexClass implements Cloneable {
    /**
     * the class name, as a JVM type descriptor (e.g. "Lcom/example/Foo;")
     */
    private String classType;
    private String superClass;
    private int accessFlags;

    @Override
    public Object clone() throws CloneNotSupportedException {
        return super.clone();
    }

    public DexClass() {
    }

    public DexClass(String classType, String superClass, int accessFlags) {
        this.classType = classType;
        this.superClass = superClass;
        this.accessFlags = accessFlags;
    }

    public void setClassType(String classType) {
        this.classType = classType;
    }

    public void setSuperClass(String superClass) {
        this.superClass = superClass;
    }

    public void setAccessFlags(int accessFlags) {
        this.accessFlags = accessFlags;
    }

    /**
     * Derive the dotted package name from the descriptor stored in classType,
     * e.g. "Lcom/example/Foo;" -> "com.example".
     */
    public String getPackageName() {
        String packageName = classType;
        if (packageName.length() > 0) {
            // strip the leading 'L' of a class descriptor
            if (packageName.charAt(0) == 'L') {
                packageName = packageName.substring(1);
            }
        }
        if (packageName.length() > 0) {
            // Compute the package boundary on packageName itself. The previous
            // code took the index from classType and subtracted 1, which is
            // only correct when a leading 'L' was stripped; for any other
            // input it cut one character too many.
            int idx = packageName.lastIndexOf('/');
            if (idx > 0) {
                packageName = packageName.substring(0, idx);
            } else if (packageName.charAt(packageName.length() - 1) == ';') {
                // no package separator: just drop the trailing ';'
                packageName = packageName.substring(0, packageName.length() - 1);
            }
        }
        return packageName.replace('/', '.');
    }

    public String getClassType() {
        return classType;
    }

    @Nullable
    public String getSuperClass() {
        return superClass;
    }

    public boolean isInterface() {
        return (this.accessFlags & DexClassStruct.ACC_INTERFACE) != 0;
    }

    public boolean isEnum() {
        return (this.accessFlags & DexClassStruct.ACC_ENUM) != 0;
    }

    public boolean isAnnotation() {
        return (this.accessFlags & DexClassStruct.ACC_ANNOTATION) != 0;
    }

    public boolean isPublic() {
        return (this.accessFlags & DexClassStruct.ACC_PUBLIC) != 0;
    }

    public boolean isProtected() {
        return (this.accessFlags & DexClassStruct.ACC_PROTECTED) != 0;
    }

    public boolean isStatic() {
        return (this.accessFlags & DexClassStruct.ACC_STATIC) != 0;
    }

    @Override
    public String toString() {
        return classType;
    }
}
|
#!/bin/bash
#
# provision.sh
#
# This file is specified in Vagrantfile and is loaded by Vagrant as the primary
# provisioning script whenever the commands `vagrant up`, `vagrant provision`,
# or `vagrant reload` are used. It provides all of the default packages and
# configurations included with Varying Vagrant Vagrants.
# By storing the date now, we can calculate the duration of provisioning at the
# end of this script.
start_seconds="$(date +%s)"
# PACKAGE INSTALLATION
#
# Build a bash array to pass all of the packages we want to install to a single
# apt-get command. This avoids doing all the leg work each time a package is
# set to install. It also allows us to easily comment out or add single
# packages. We set the array as empty to begin with so that we can append
# individual packages to it as required.
apt_package_install_list=()
# Start with a bash array containing all packages we want to install in the
# virtual machine. We'll then loop through each of these and check individual
# status before adding them to the apt_package_install_list array.
apt_package_check_list=(
# PHP5
#
# Our base packages for php5. As long as php5-fpm and php5-cli are
# installed, there is no need to install the general php5 package, which
# can sometimes install apache as a requirement.
# NOTE(review): php5-* packages presume a pre-16.04 Ubuntu base box — confirm.
php5-fpm
php5-cli
# Common and dev packages for php
php5-common
php5-dev
# Extra PHP modules that we find useful
php5-imagick
php5-mcrypt
php5-mysqlnd
php5-imap
php5-curl
php-pear
php5-gd
php5-xmlrpc
php5-xsl
# nginx is installed as the default web server
nginx
# mysql is the default database
mariadb-server
# other packages that come in handy
imagemagick
subversion
git-core
zip
unzip
ngrep
curl
make
vim
colordiff
postfix
# ntp service to keep clock current
ntp
# Req'd for i18n tools
gettext
# Req'd for Webgrind
graphviz
# dos2unix
# Allows conversion of DOS style line endings to something we'll have less
# trouble with in Linux.
dos2unix
# nodejs for use by grunt
g++
nodejs
# Mailcatcher requirement
libsqlite3-dev
)
### FUNCTIONS
network_detection() {
  # Probe http://google.com (3 tries, 5s timeout, HEAD-only via --spider) to
  # decide whether the VM has outside network access. Sets the global
  # $ping_result, which later steps consult so that offline provisioning skips
  # downloads instead of producing a wall of errors.
  local probe
  probe="$(wget --tries=3 --timeout=5 --spider http://google.com 2>&1 | grep 'connected')"
  if [[ -n "$probe" ]]; then
    echo "Network connection detected..."
    ping_result="Connected"
  else
    echo "Network connection not detected. Unable to reach google.com..."
    ping_result="Not Connected"
  fi
}
network_check() {
  # Refresh the network probe and bail out of provisioning (exit 0, not an
  # error) when we are offline — package installation cannot succeed anyway.
  network_detection
  if [[ "$ping_result" != "Connected" ]]; then
    echo -e "\nNo network connection available, skipping package installation"
    exit 0
  fi
}
noroot() {
# Run the given command as the unprivileged vagrant user (preserving the
# environment and setting HOME via -EH), since this provisioner runs as root.
sudo -EH -u "vagrant" "$@";
}
profile_setup() {
# Install the vagrant user's shell/editor configuration from /srv/config and
# mirror the helper scripts into ~/bin. Safe to re-run on every provision.
# Copy custom dotfiles and bin file for the vagrant user from local
cp "/srv/config/bash_profile" "/home/vagrant/.bash_profile"
cp "/srv/config/bash_aliases" "/home/vagrant/.bash_aliases"
cp "/srv/config/vimrc" "/home/vagrant/.vimrc"
if [[ ! -d "/home/vagrant/.subversion" ]]; then
mkdir "/home/vagrant/.subversion"
fi
cp "/srv/config/subversion-servers" "/home/vagrant/.subversion/servers"
if [[ ! -d "/home/vagrant/bin" ]]; then
mkdir "/home/vagrant/bin"
fi
# --delete keeps ~/bin an exact mirror of /srv/config/homebin
rsync -rvzh --delete "/srv/config/homebin/" "/home/vagrant/bin/"
echo " * Copied /srv/config/bash_profile to /home/vagrant/.bash_profile"
echo " * Copied /srv/config/bash_aliases to /home/vagrant/.bash_aliases"
echo " * Copied /srv/config/vimrc to /home/vagrant/.vimrc"
echo " * Copied /srv/config/subversion-servers to /home/vagrant/.subversion/servers"
echo " * rsync'd /srv/config/homebin to /home/vagrant/bin"
# If a bash_prompt file exists in the VVV config/ directory, copy to the VM.
if [[ -f "/srv/config/bash_prompt" ]]; then
cp "/srv/config/bash_prompt" "/home/vagrant/.bash_prompt"
echo " * Copied /srv/config/bash_prompt to /home/vagrant/.bash_prompt"
fi
}
package_check() {
  # Inspect each package in apt_package_check_list. Installed packages are
  # printed with a right-aligned version string; missing ones are appended to
  # the global apt_package_install_list for a single apt-get run later.
  local pkg
  local package_version
  local space_count pack_space_count real_space
  for pkg in "${apt_package_check_list[@]}"; do
    # dpkg -s exits non-zero for unknown packages, so an empty version string
    # means "not installed".
    package_version=$(dpkg -s "${pkg}" 2>&1 | grep 'Version:' | cut -d " " -f 2)
    if [[ -n "${package_version}" ]]; then
      # Shell arithmetic instead of the deprecated external `expr`.
      space_count=$(( 20 - ${#pkg} ))
      pack_space_count=$(( 30 - ${#package_version} ))
      real_space=$(( space_count + pack_space_count + ${#package_version} ))
      printf " * $pkg %${real_space}.${#package_version}s ${package_version}\n"
    else
      # Quoted so "[not installed]" can never glob-expand against files in
      # the current directory, and the package name is appended as one word.
      echo " * $pkg [not installed]"
      apt_package_install_list+=("$pkg")
    fi
  done
}
package_install() {
  # Preseed debconf answers, link our custom apt sources, then install every
  # package package_check() found missing in one apt-get transaction.
  package_check

  # MySQL
  #
  # Use debconf-set-selections to specify the default password for the root
  # MySQL account so installation is non-interactive. Runs on every provision;
  # harmless if MySQL is already installed.
  echo mariadb-server-10.0 mysql-server/root_password password "root" | debconf-set-selections
  echo mariadb-server-10.0 mysql-server/root_password_again password "root" | debconf-set-selections

  # Postfix
  #
  # Preseed postfix as an 'Internet Site' with the host name 'vvv'. Note that
  # if your current Internet connection does not allow communication over port
  # 25, you will not be able to send mail, even with postfix installed.
  echo postfix postfix/main_mailer_type select Internet Site | debconf-set-selections
  echo postfix postfix/mailname string vvv | debconf-set-selections

  # Disable ipv6 as some ISPs/mail servers have problems with it.
  # Guarded so repeated provisioning does not append a duplicate line each run.
  if ! grep -q "^inet_protocols = ipv4$" "/etc/postfix/main.cf"; then
    echo "inet_protocols = ipv4" >> "/etc/postfix/main.cf"
  fi

  # Provide our custom apt sources before running `apt-get update`
  ln -sf /srv/config/apt-source-append.list /etc/apt/sources.list.d/vvv-sources.list
  echo "Linked custom apt sources"

  if [[ ${#apt_package_install_list[@]} = 0 ]]; then
    echo -e "No apt packages to install.\n"
  else
    # Before running `apt-get update`, add the public keys for the packages
    # we install from non-standard sources via our appended source list.
    # Retrieve the Nginx signing key from nginx.org
    echo "Applying Nginx signing key..."
    wget --quiet "http://nginx.org/keys/nginx_signing.key" -O- | apt-key add -
    # Retrieve the MariaDB signing key from ubuntu
    apt-key adv --quiet --keyserver "hkp://keyserver.ubuntu.com:80" --recv-key 0xcbcb082a1bb943db 2>&1 | grep "gpg:"
    apt-key export 0xcbcb082a1bb943db | apt-key add -
    # Apply the nodejs signing key
    apt-key adv --quiet --keyserver "hkp://keyserver.ubuntu.com:80" --recv-key C7917B12 2>&1 | grep "gpg:"
    apt-key export C7917B12 | apt-key add -

    # Update all of the package references before installing anything
    echo "Running apt-get update..."
    apt-get update -y

    # Install required packages. Quoted expansion keeps each package name a
    # single word even if one ever contains unusual characters.
    echo "Installing apt-get packages..."
    apt-get install -y "${apt_package_install_list[@]}"

    # Clean up apt caches
    apt-get clean
  fi
}
tools_install() {
  # Install or update the global tooling not managed by apt: npm + helpers,
  # Xdebug (via PECL), ack, Composer and its global packages, and Grunt.

  # npm
  #
  # Make sure we have the latest npm version and the update checker module
  npm install -g npm
  npm install -g npm-check-updates

  # xdebug
  #
  # XDebug 2.2.3 is provided with the Ubuntu install by default. The PECL
  # installation allows us to use a later version. Not specifying a version
  # will load the latest stable.
  pecl install xdebug

  # ack-grep
  #
  # Install ack directly from the version hosted at beyondgrep.com as the
  # PPAs for Ubuntu Precise are not available yet.
  if [[ -f /usr/bin/ack ]]; then
    echo "ack-grep already installed"
  else
    echo "Installing ack-grep as ack"
    curl -s http://beyondgrep.com/ack-2.14-single-file > "/usr/bin/ack" && chmod +x "/usr/bin/ack"
  fi

  # COMPOSER
  #
  # Install Composer if it is not yet available.
  # (-z instead of the original double-negative `! -n`.)
  if [[ -z "$(composer --version --no-ansi | grep 'Composer version')" ]]; then
    echo "Installing Composer..."
    curl -sS "https://getcomposer.org/installer" | php
    chmod +x "composer.phar"
    mv "composer.phar" "/usr/local/bin/composer"
  fi

  # If a personal GitHub token was provided, register it with Composer so
  # GitHub API rate limits don't interrupt package installs.
  # ($(...) instead of legacy backticks; token quoted to survive odd chars.)
  if [[ -f /vagrant/provision/github.token ]]; then
    ghtoken="$(cat /vagrant/provision/github.token)"
    composer config --global github-oauth.github.com "$ghtoken"
    echo "Your personal GitHub token is set for Composer."
  fi

  # Update both Composer and any global packages. Updates to Composer are
  # direct from the master branch on its GitHub repository.
  if [[ -n "$(composer --version --no-ansi | grep 'Composer version')" ]]; then
    echo "Updating Composer..."
    COMPOSER_HOME=/usr/local/src/composer composer self-update
    COMPOSER_HOME=/usr/local/src/composer composer -q global require --no-update phpunit/phpunit:4.3.*
    COMPOSER_HOME=/usr/local/src/composer composer -q global require --no-update phpunit/php-invoker:1.1.*
    COMPOSER_HOME=/usr/local/src/composer composer -q global require --no-update mockery/mockery:0.9.*
    COMPOSER_HOME=/usr/local/src/composer composer -q global require --no-update d11wtq/boris:v1.0.8
    COMPOSER_HOME=/usr/local/src/composer composer -q global config bin-dir /usr/local/bin
    COMPOSER_HOME=/usr/local/src/composer composer global update
  fi

  # Grunt
  #
  # Install or update Grunt based on current state. Updates are direct
  # from NPM.
  if [[ "$(grunt --version)" ]]; then
    echo "Updating Grunt CLI"
    npm update -g grunt-cli &>/dev/null
    npm update -g grunt-sass &>/dev/null
    npm update -g grunt-cssjanus &>/dev/null
    npm update -g grunt-rtlcss &>/dev/null
  else
    echo "Installing Grunt CLI"
    npm install -g grunt-cli &>/dev/null
    npm install -g grunt-sass &>/dev/null
    npm install -g grunt-cssjanus &>/dev/null
    npm install -g grunt-rtlcss &>/dev/null
  fi

  # Graphviz
  #
  # Symlink the Graphviz `dot` binary to the path Webgrind's default config
  # expects.
  echo "Adding graphviz symlink for Webgrind..."
  ln -sf "/usr/bin/dot" "/usr/local/bin/dot"
}
nginx_setup() {
# Generate a self-signed TLS key/certificate (first run only) and install the
# Nginx, upstart and per-site configuration from /srv/config.
# Create an SSL key and certificate for HTTPS support.
if [[ ! -e /etc/nginx/server.key ]]; then
echo "Generate Nginx server private key..."
vvvgenrsa="$(openssl genrsa -out /etc/nginx/server.key 2048 2>&1)"
echo "$vvvgenrsa"
fi
if [[ ! -e /etc/nginx/server.crt ]]; then
echo "Sign the certificate using the above private key..."
vvvsigncert="$(openssl req -new -x509 \
-key /etc/nginx/server.key \
-out /etc/nginx/server.crt \
-days 3650 \
-subj /CN=*.wordpress-develop.dev/CN=*.wordpress.dev/CN=*.vvv.dev/CN=*.wordpress-trunk.dev 2>&1)"
echo "$vvvsigncert"
fi
echo -e "\nSetup configuration files..."
# Used to ensure proper services are started on `vagrant up`
cp "/srv/config/init/vvv-start.conf" "/etc/init/vvv-start.conf"
echo " * Copied /srv/config/init/vvv-start.conf to /etc/init/vvv-start.conf"
# Copy nginx configuration from local
cp "/srv/config/nginx-config/nginx.conf" "/etc/nginx/nginx.conf"
cp "/srv/config/nginx-config/nginx-wp-common.conf" "/etc/nginx/nginx-wp-common.conf"
if [[ ! -d "/etc/nginx/custom-sites" ]]; then
mkdir "/etc/nginx/custom-sites/"
fi
# --delete mirrors the sites directory, removing stale vhost configs
rsync -rvzh --delete "/srv/config/nginx-config/sites/" "/etc/nginx/custom-sites/"
echo " * Copied /srv/config/nginx-config/nginx.conf to /etc/nginx/nginx.conf"
echo " * Copied /srv/config/nginx-config/nginx-wp-common.conf to /etc/nginx/nginx-wp-common.conf"
echo " * Rsync'd /srv/config/nginx-config/sites/ to /etc/nginx/custom-sites"
}
phpfpm_setup() {
# Install the PHP-FPM pool/ini configuration and wire the PECL-built Xdebug
# shared object into the mods-available ini.
# Copy php-fpm configuration from local
cp "/srv/config/php5-fpm-config/php5-fpm.conf" "/etc/php5/fpm/php5-fpm.conf"
cp "/srv/config/php5-fpm-config/www.conf" "/etc/php5/fpm/pool.d/www.conf"
cp "/srv/config/php5-fpm-config/php-custom.ini" "/etc/php5/fpm/conf.d/php-custom.ini"
cp "/srv/config/php5-fpm-config/opcache.ini" "/etc/php5/fpm/conf.d/opcache.ini"
cp "/srv/config/php5-fpm-config/xdebug.ini" "/etc/php5/mods-available/xdebug.ini"
# Find the path to Xdebug and prepend it to xdebug.ini
XDEBUG_PATH=$( find /usr -name 'xdebug.so' | head -1 )
# sed's `1i` command inserts the zend_extension line above the first line
sed -i "1izend_extension=\"$XDEBUG_PATH\"" "/etc/php5/mods-available/xdebug.ini"
echo " * Copied /srv/config/php5-fpm-config/php5-fpm.conf to /etc/php5/fpm/php5-fpm.conf"
echo " * Copied /srv/config/php5-fpm-config/www.conf to /etc/php5/fpm/pool.d/www.conf"
echo " * Copied /srv/config/php5-fpm-config/php-custom.ini to /etc/php5/fpm/conf.d/php-custom.ini"
echo " * Copied /srv/config/php5-fpm-config/opcache.ini to /etc/php5/fpm/conf.d/opcache.ini"
echo " * Copied /srv/config/php5-fpm-config/xdebug.ini to /etc/php5/mods-available/xdebug.ini"
# Copy memcached configuration from local
# cp "/srv/config/memcached-config/memcached.conf" "/etc/memcached.conf"
# echo " * Copied /srv/config/memcached-config/memcached.conf to /etc/memcached.conf"
}
mysql_setup() {
# Configure MariaDB/MySQL (when installed), (re)start the service, then import
# the init SQL and any custom dumps from /srv/database.
# If MySQL is installed, go through the various imports and service tasks.
local exists_mysql
exists_mysql="$(service mysql status)"
if [[ "mysql: unrecognized service" != "${exists_mysql}" ]]; then
echo -e "\nSetup MySQL configuration file links..."
# Copy mysql configuration from local
cp "/srv/config/mysql-config/my.cnf" "/etc/mysql/my.cnf"
cp "/srv/config/mysql-config/root-my.cnf" "/home/vagrant/.my.cnf"
echo " * Copied /srv/config/mysql-config/my.cnf to /etc/mysql/my.cnf"
echo " * Copied /srv/config/mysql-config/root-my.cnf to /home/vagrant/.my.cnf"
# MySQL gives us an error if we restart a non running service, which
# happens after a `vagrant halt`. Check to see if it's running before
# deciding whether to start or restart.
if [[ "mysql stop/waiting" == "${exists_mysql}" ]]; then
echo "service mysql start"
service mysql start
else
echo "service mysql restart"
service mysql restart
fi
# IMPORT SQL
#
# Create the databases (unique to system) that will be imported with
# the mysqldump files located in database/backups/
if [[ -f "/srv/database/init-custom.sql" ]]; then
mysql -u "root" -p"root" < "/srv/database/init-custom.sql"
echo -e "\nInitial custom MySQL scripting..."
else
echo -e "\nNo custom MySQL scripting found in database/init-custom.sql, skipping..."
fi
# Setup MySQL by importing an init file that creates necessary
# users and databases that our vagrant setup relies on.
mysql -u "root" -p"root" < "/srv/database/init.sql"
echo "Initial MySQL prep..."
# Process each mysqldump SQL file in database/backups to import
# an initial data set for MySQL.
"/srv/database/import-sql.sh"
else
echo -e "\nMySQL is not installed. No databases imported."
fi
}
mailcatcher_setup() {
# Install RVM (when missing) and Mailcatcher in its own gemset, then install
# the upstart job and the PHP ini that routes mail through catchmail.
# Mailcatcher
#
# Installs mailcatcher using RVM. RVM allows us to install the
# current version of ruby and all mailcatcher dependencies reliably.
local pkg
rvm_version="$(/usr/bin/env rvm --silent --version 2>&1 | grep 'rvm ' | cut -d " " -f 2)"
if [[ -n "${rvm_version}" ]]; then
# Already installed: print a version line aligned like package_check output.
pkg="RVM"
space_count="$(( 20 - ${#pkg}))" #11
pack_space_count="$(( 30 - ${#rvm_version}))"
real_space="$(( ${space_count} + ${pack_space_count} + ${#rvm_version}))"
printf " * $pkg %${real_space}.${#rvm_version}s ${rvm_version}\n"
else
# RVM key D39DC0E3
# Signatures introduced in 1.26.0
gpg -q --no-tty --batch --keyserver "hkp://keyserver.ubuntu.com:80" --recv-keys D39DC0E3
gpg -q --no-tty --batch --keyserver "hkp://keyserver.ubuntu.com:80" --recv-keys BF04FF17
printf " * RVM [not installed]\n Installing from source"
curl --silent -L "https://get.rvm.io" | sudo bash -s stable --ruby
source "/usr/local/rvm/scripts/rvm"
fi
mailcatcher_version="$(/usr/bin/env mailcatcher --version 2>&1 | grep 'mailcatcher ' | cut -d " " -f 2)"
if [[ -n "${mailcatcher_version}" ]]; then
pkg="Mailcatcher"
space_count="$(( 20 - ${#pkg}))" #11
pack_space_count="$(( 30 - ${#mailcatcher_version}))"
real_space="$(( ${space_count} + ${pack_space_count} + ${#mailcatcher_version}))"
printf " * $pkg %${real_space}.${#mailcatcher_version}s ${mailcatcher_version}\n"
else
echo " * Mailcatcher [not installed]"
# A dedicated gemset isolates mailcatcher's dependencies from other gems.
/usr/bin/env rvm default@mailcatcher --create do gem install mailcatcher --no-rdoc --no-ri
/usr/bin/env rvm wrapper default@mailcatcher --no-prefix mailcatcher catchmail
fi
if [[ -f "/etc/init/mailcatcher.conf" ]]; then
echo " *" Mailcatcher upstart already configured.
else
cp "/srv/config/init/mailcatcher.conf" "/etc/init/mailcatcher.conf"
echo " * Copied /srv/config/init/mailcatcher.conf to /etc/init/mailcatcher.conf"
fi
if [[ -f "/etc/php5/mods-available/mailcatcher.ini" ]]; then
echo " *" Mailcatcher php5 fpm already configured.
else
cp "/srv/config/php5-fpm-config/mailcatcher.ini" "/etc/php5/mods-available/mailcatcher.ini"
echo " * Copied /srv/config/php5-fpm-config/mailcatcher.ini to /etc/php5/mods-available/mailcatcher.ini"
fi
}
services_restart() {
# Restart every service touched by provisioning and set the default PHP
# module state (Xdebug off; mcrypt and mailcatcher on).
# RESTART SERVICES
#
# Make sure the services we expect to be running are running.
echo -e "\nRestart services..."
service nginx restart
# service memcached restart
service mailcatcher restart
# Disable PHP Xdebug module by default
php5dismod xdebug
# Enable PHP mcrypt module by default
php5enmod mcrypt
# Enable PHP mailcatcher sendmail settings by default
php5enmod mailcatcher
service php5-fpm restart
# Add the vagrant user to the www-data group so that it has better access
# to PHP and Nginx related files.
usermod -a -G www-data vagrant
}
wp_cli() {
# Clone wp-cli on first provision, otherwise pull and update its
# dependencies; expose the binary system-wide as `wp`.
# WP-CLI Install
if [[ ! -d "/srv/www/wp-cli" ]]; then
echo -e "\nDownloading wp-cli, see http://wp-cli.org"
git clone "https://github.com/wp-cli/wp-cli.git" "/srv/www/wp-cli"
cd /srv/www/wp-cli
composer install
else
echo -e "\nUpdating wp-cli..."
cd /srv/www/wp-cli
git pull --rebase origin master
composer update
fi
# Link `wp` to the `/usr/local/bin` directory
ln -sf "/srv/www/wp-cli/bin/wp" "/usr/local/bin/wp"
}
memcached_admin() {
# Download and extract phpMemcachedAdmin to provide a dashboard view and
# admin interface to the goings on of memcached when running
if [[ ! -d "/srv/www/default/memcached-admin" ]]; then
echo -e "\nDownloading phpMemcachedAdmin, see https://github.com/wp-cloud/phpmemcacheadmin"
cd /srv/www/default
wget -q -O phpmemcachedadmin.tar.gz "https://github.com/wp-cloud/phpmemcacheadmin/archive/1.2.2.1.tar.gz"
tar -xf phpmemcachedadmin.tar.gz
# The tarball extracts to phpmemcacheadmin-<version>; normalize the dir name.
mv phpmemcacheadmin* memcached-admin
rm phpmemcachedadmin.tar.gz
else
echo "phpMemcachedAdmin already installed."
fi
}
opcached_status(){
# Clone (first run) or update the Opcache Status dashboard.
# Checkout Opcache Status to provide a dashboard for viewing statistics
# about PHP's built in opcache.
if [[ ! -d "/srv/www/default/opcache-status" ]]; then
echo -e "\nDownloading Opcache Status, see https://github.com/rlerdorf/opcache-status/"
cd /srv/www/default
git clone "https://github.com/rlerdorf/opcache-status.git" opcache-status
else
echo -e "\nUpdating Opcache Status"
cd /srv/www/default/opcache-status
git pull --rebase origin master
fi
}
webgrind_install() {
# Webgrind install (for viewing callgrind/cachegrind files produced by
# xdebug profiler). Clones on first run, pulls on later runs.
if [[ ! -d "/srv/www/default/webgrind" ]]; then
echo -e "\nDownloading webgrind, see https://github.com/michaelschiller/webgrind.git"
git clone "https://github.com/michaelschiller/webgrind.git" "/srv/www/default/webgrind"
else
echo -e "\nUpdating webgrind..."
cd /srv/www/default/webgrind
git pull --rebase origin master
fi
}
php_codesniff() {
# Install/update PHP_CodeSniffer and the WordPress Coding Standards sniffs,
# then register the standards with phpcs.
# PHP_CodeSniffer (for running WordPress-Coding-Standards)
if [[ ! -d "/srv/www/phpcs" ]]; then
echo -e "\nDownloading PHP_CodeSniffer (phpcs), see https://github.com/squizlabs/PHP_CodeSniffer"
git clone -b master "https://github.com/squizlabs/PHP_CodeSniffer.git" "/srv/www/phpcs"
else
cd /srv/www/phpcs
# Only auto-update when on master so local experiments aren't clobbered.
if [[ $(git rev-parse --abbrev-ref HEAD) == 'master' ]]; then
echo -e "\nUpdating PHP_CodeSniffer (phpcs)..."
git pull --no-edit origin master
else
echo -e "\nSkipped updating PHP_CodeSniffer since not on master branch"
fi
fi
# Sniffs WordPress Coding Standards
if [[ ! -d "/srv/www/phpcs/CodeSniffer/Standards/WordPress" ]]; then
echo -e "\nDownloading WordPress-Coding-Standards, sniffs for PHP_CodeSniffer, see https://github.com/WordPress-Coding-Standards/WordPress-Coding-Standards"
git clone -b master "https://github.com/WordPress-Coding-Standards/WordPress-Coding-Standards.git" "/srv/www/phpcs/CodeSniffer/Standards/WordPress"
else
cd /srv/www/phpcs/CodeSniffer/Standards/WordPress
if [[ $(git rev-parse --abbrev-ref HEAD) == 'master' ]]; then
echo -e "\nUpdating PHP_CodeSniffer WordPress Coding Standards..."
git pull --no-edit origin master
else
echo -e "\nSkipped updating PHPCS WordPress Coding Standards since not on master branch"
fi
fi
# Install the standards in PHPCS
/srv/www/phpcs/scripts/phpcs --config-set installed_paths ./CodeSniffer/Standards/WordPress/
/srv/www/phpcs/scripts/phpcs --config-set default_standard WordPress-Core
/srv/www/phpcs/scripts/phpcs -i
}
phpmyadmin_setup() {
# Download phpMyAdmin 4.4.10 on first provision; always (re)install our
# config over it afterwards.
# Download phpMyAdmin
if [[ ! -d /srv/www/default/database-admin ]]; then
echo "Downloading phpMyAdmin..."
cd /srv/www/default
wget -q -O phpmyadmin.tar.gz "https://files.phpmyadmin.net/phpMyAdmin/4.4.10/phpMyAdmin-4.4.10-all-languages.tar.gz"
tar -xf phpmyadmin.tar.gz
mv phpMyAdmin-4.4.10-all-languages database-admin
rm phpmyadmin.tar.gz
else
echo "PHPMyAdmin already installed."
fi
# Refresh our config even when phpMyAdmin itself was already present.
cp "/srv/config/phpmyadmin-config/config.inc.php" "/srv/www/default/database-admin/"
}
wordpress_default() {
# First run: download, configure and install the latest stable WordPress
# into /srv/www/wordpress-default. Later runs: `wp core upgrade`.
# Install and configure the latest stable version of WordPress
if [[ ! -d "/srv/www/wordpress-default" ]]; then
echo "Downloading WordPress Stable, see http://wordpress.org/"
cd /srv/www/
curl -L -O "https://wordpress.org/latest.tar.gz"
noroot tar -xvf latest.tar.gz
mv wordpress wordpress-default
rm latest.tar.gz
cd /srv/www/wordpress-default
echo "Configuring WordPress Stable..."
# The heredoc is written into wp-config.php; \$ keeps $_SERVER literal for
# PHP instead of being expanded by bash.
noroot wp core config --dbname=wordpress_default --dbuser=wp --dbpass=wp --quiet --extra-php <<PHP
// Match any requests made via xip.io.
if ( isset( \$_SERVER['HTTP_HOST'] ) && preg_match('/^(local.wordpress.)\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}(.xip.io)\z/', \$_SERVER['HTTP_HOST'] ) ) {
define( 'WP_HOME', 'http://' . \$_SERVER['HTTP_HOST'] );
define( 'WP_SITEURL', 'http://' . \$_SERVER['HTTP_HOST'] );
}
define( 'WP_DEBUG', true );
PHP
echo "Installing WordPress Stable..."
noroot wp core install --url=local.wordpress.dev --quiet --title="Local WordPress Dev" --admin_name=admin --admin_email="admin@local.dev" --admin_password="password"
else
echo "Updating WordPress Stable..."
cd /srv/www/wordpress-default
noroot wp core upgrade
fi
}
wpsvn_check() {
# Upgrade svn working copies when the installed svn client is newer than the
# checkout format (detected via `svn status` mentioning "svn upgrade").
# Test to see if an svn upgrade is needed
svn_test=$( svn status -u "/srv/www/wordpress-develop/" 2>&1 );
if [[ "$svn_test" == *"svn upgrade"* ]]; then
# If the wordpress-develop svn repo needed an upgrade, they probably all need it
for repo in $(find /srv/www -maxdepth 5 -type d -name '.svn'); do
# ${repo/%\.svn/} strips the trailing ".svn" to get the working-copy root.
svn upgrade "${repo/%\.svn/}"
done
fi;
}
wordpress_trunk() {
# First run: check out trunk from core.svn, write wp-config and run the
# installer. Later runs: just `svn up`.
# Checkout, install and configure WordPress trunk via core.svn
if [[ ! -d "/srv/www/wordpress-trunk" ]]; then
echo "Checking out WordPress trunk from core.svn, see https://core.svn.wordpress.org/trunk"
svn checkout "https://core.svn.wordpress.org/trunk/" "/srv/www/wordpress-trunk"
cd /srv/www/wordpress-trunk
echo "Configuring WordPress trunk..."
# Heredoc is written into wp-config.php; \$ keeps $_SERVER literal for PHP.
noroot wp core config --dbname=wordpress_trunk --dbuser=wp --dbpass=wp --quiet --extra-php <<PHP
// Match any requests made via xip.io.
if ( isset( \$_SERVER['HTTP_HOST'] ) && preg_match('/^(local.wordpress-trunk.)\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}(.xip.io)\z/', \$_SERVER['HTTP_HOST'] ) ) {
define( 'WP_HOME', 'http://' . \$_SERVER['HTTP_HOST'] );
define( 'WP_SITEURL', 'http://' . \$_SERVER['HTTP_HOST'] );
}
define( 'WP_DEBUG', true );
PHP
echo "Installing WordPress trunk..."
noroot wp core install --url=local.wordpress-trunk.dev --quiet --title="Local WordPress Trunk Dev" --admin_name=admin --admin_email="admin@local.dev" --admin_password="password"
else
echo "Updating WordPress trunk..."
cd /srv/www/wordpress-trunk
svn up
fi
}
wordpress_develop(){
# First run: check out develop.svn trunk, configure and install it, copy the
# unit-test config and run npm install. Later runs: update via svn or git
# (master branch only) and refresh npm packages. Finally run grunt once if
# the build/ directory is missing.
# Checkout, install and configure WordPress trunk via develop.svn
if [[ ! -d "/srv/www/wordpress-develop" ]]; then
echo "Checking out WordPress trunk from develop.svn, see https://develop.svn.wordpress.org/trunk"
svn checkout "https://develop.svn.wordpress.org/trunk/" "/srv/www/wordpress-develop"
cd /srv/www/wordpress-develop/src/
echo "Configuring WordPress develop..."
# Heredoc is written into wp-config.php; \$ keeps $_SERVER literal for PHP.
noroot wp core config --dbname=wordpress_develop --dbuser=wp --dbpass=wp --quiet --extra-php <<PHP
// Match any requests made via xip.io.
if ( isset( \$_SERVER['HTTP_HOST'] ) && preg_match('/^(src|build)(.wordpress-develop.)\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}(.xip.io)\z/', \$_SERVER['HTTP_HOST'] ) ) {
define( 'WP_HOME', 'http://' . \$_SERVER['HTTP_HOST'] );
define( 'WP_SITEURL', 'http://' . \$_SERVER['HTTP_HOST'] );
} else if ( 'build' === basename( dirname( __FILE__ ) ) ) {
// Allow (src|build).wordpress-develop.dev to share the same Database
define( 'WP_HOME', 'http://build.wordpress-develop.dev' );
define( 'WP_SITEURL', 'http://build.wordpress-develop.dev' );
}
define( 'WP_DEBUG', true );
PHP
echo "Installing WordPress develop..."
noroot wp core install --url=src.wordpress-develop.dev --quiet --title="WordPress Develop" --admin_name=admin --admin_email="admin@local.dev" --admin_password="password"
cp /srv/config/wordpress-config/wp-tests-config.php /srv/www/wordpress-develop/
cd /srv/www/wordpress-develop/
echo "Running npm install for the first time, this may take several minutes..."
noroot npm install &>/dev/null
else
echo "Updating WordPress develop..."
cd /srv/www/wordpress-develop/
# The checkout may be either svn or git depending on how it was created.
if [[ -e .svn ]]; then
svn up
else
if [[ $(git rev-parse --abbrev-ref HEAD) == 'master' ]]; then
git pull --no-edit git://develop.git.wordpress.org/ master
else
echo "Skip auto git pull on develop.git.wordpress.org since not on master branch"
fi
fi
echo "Updating npm packages..."
noroot npm install &>/dev/null
fi
if [[ ! -d "/srv/www/wordpress-develop/build" ]]; then
echo "Initializing grunt in WordPress develop... This may take a few moments."
cd /srv/www/wordpress-develop/
grunt
fi
}
custom_vvv() {
  # Discover per-site setup scripts and Nginx vhosts under /srv/www, then
  # register each site's domains in the VM's /etc/hosts.

  # Kill previously symlinked Nginx configs. We can't know which sites have
  # been removed, so we remove all auto-generated configs and add them back.
  find /etc/nginx/custom-sites -name 'vvv-auto-*.conf' -exec rm {} \;

  # Look for site setup scripts and run each inside its own directory. The
  # subshell keeps `cd` and any variables the script sets from leaking here.
  for SITE_CONFIG_FILE in $(find /srv/www -maxdepth 5 -name 'vvv-init.sh'); do
    DIR="$(dirname "$SITE_CONFIG_FILE")"
    (
    cd "$DIR"
    source vvv-init.sh
    )
  done

  # Look for Nginx vhost files and install them into the custom sites dir
  # under a unique, path-derived name (path + md5 of the full path).
  for SITE_CONFIG_FILE in $(find /srv/www -maxdepth 5 -name 'vvv-nginx.conf'); do
    DEST_CONFIG_FILE=${SITE_CONFIG_FILE//\/srv\/www\//}
    DEST_CONFIG_FILE=${DEST_CONFIG_FILE//\//\-}
    DEST_CONFIG_FILE=${DEST_CONFIG_FILE/%-vvv-nginx.conf/}
    DEST_CONFIG_FILE="vvv-auto-$DEST_CONFIG_FILE-$(md5sum <<< "$SITE_CONFIG_FILE" | cut -c1-32).conf"
    # We allow the replacement of the {vvv_path_to_folder} token with
    # whatever you want, allowing flexible placement of the site folder
    # while still having an Nginx config which works.
    DIR="$(dirname "$SITE_CONFIG_FILE")"
    sed "s#{vvv_path_to_folder}#$DIR#" "$SITE_CONFIG_FILE" > "/etc/nginx/custom-sites/""$DEST_CONFIG_FILE"
  done

  # Parse any vvv-hosts file located in www/ or subdirectories of www/
  # for domains to be added to the virtual machine's host file so that it is
  # self aware. Domains should be entered on new lines.
  echo "Cleaning the virtual machine's /etc/hosts file..."
  sed -n '/# vvv-auto$/!p' /etc/hosts > /tmp/hosts
  mv /tmp/hosts /etc/hosts
  echo "Adding domains to the virtual machine's /etc/hosts file..."
  find /srv/www/ -maxdepth 5 -name 'vvv-hosts' | \
  while read hostfile; do
    while IFS='' read -r line || [ -n "$line" ]; do
      if [[ "#" != ${line:0:1} ]]; then
        # Bug fix: the old `[[ -z "$(grep -q ...)" ]]` test was always true
        # because grep -q prints nothing; check grep's exit status instead so
        # duplicate host entries are actually skipped.
        if ! grep -q "^127.0.0.1 $line$" /etc/hosts; then
          echo "127.0.0.1 $line # vvv-auto" >> "/etc/hosts"
          echo " * Added $line from $hostfile"
        fi
      fi
    done < "$hostfile"
  done
}
### SCRIPT
# Main provisioning sequence. network_check is re-run before each phase that
# downloads anything so an offline `vagrant provision` exits early and cleanly.
#set -xv
network_check
# Profile_setup
echo "Bash profile setup and directories."
profile_setup
network_check
# Package and Tools Install
echo " "
echo "Main packages check and install."
package_install
tools_install
nginx_setup
mailcatcher_setup
phpfpm_setup
services_restart
mysql_setup
network_check
# WP-CLI and debugging tools
echo " "
echo "Installing/updating wp-cli and debugging tools"
wp_cli
# memcached_admin
opcached_status
webgrind_install
php_codesniff
phpmyadmin_setup
network_check
# Time for WordPress!
echo " "
echo "Installing/updating WordPress Stable, Trunk & Develop"
wordpress_default
wpsvn_check
wordpress_trunk
wordpress_develop
# VVV custom site import
echo " "
echo "VVV custom site import"
custom_vvv
#set +xv
# And it's done
# end_seconds minus start_seconds (captured at the top) is the total runtime.
end_seconds="$(date +%s)"
echo "-----------------------------"
echo "Provisioning complete in "$((${end_seconds} - ${start_seconds}))" seconds"
echo "For further setup instructions, visit http://vvv.dev"
|
#! /bin/bash -e
fn-push() {
  # Push the image tagged with the contents of ./current-tag, then the
  # 'latest' tag. The script runs under `bash -e`, so a failed push aborts.
  local registry=$1
  local image_name=$2
  # Declare and assign separately: `local x="$(cmd)"` would mask cmd's exit
  # status and defeat `-e`. Redirection also avoids the useless `cat | tr`.
  local current_tag
  current_tag="$(tr -d '\n' < ./current-tag)"
  # Quoted expansions keep the ref a single word even with odd characters.
  docker push "$registry/$image_name:$current_tag"
  docker push "$registry/$image_name:latest"
  # Give the registry a moment to settle before downstream steps query it.
  sleep 5
  exit 0
}
fn-push "$@"
// Worker entry point: registers each move-selection algorithm with
// workerpool so the main thread can invoke them by name without blocking.
const workerpool = require('workerpool');
const random = require('./algorithms/random');
const negamax = require('./algorithms/negamax');
const negamaxAB = require('./algorithms/negamax_a_b');
const negamaxABTable = require('./algorithms/negamax_a_b_table');
const negamaxABQuiescent = require('./algorithms/negamax_a_b_quiescent');
const iterativeDeepening = require('./algorithms/iterative_deepening');
const iterativeDeepeningTable = require('./algorithms/iterative_deepening_table');
const iterativeDeepeningOrder = require('./algorithms/iterative_deepening_order');
// The keys below are the public worker method names; they must match the
// names the pool's callers use when invoking `pool.exec(name, ...)`.
workerpool.worker({
random: random.chooseMove,
negamax: negamax.chooseMove,
negamax_a_b: negamaxAB.chooseMove,
negamax_a_b_table: negamaxABTable.chooseMove,
negamax_a_b_quiescent: negamaxABQuiescent.chooseMove,
iterative_deepening: iterativeDeepening.chooseMove,
iterative_deepening_table: iterativeDeepeningTable.chooseMove,
iterative_deepening_order: iterativeDeepeningOrder.chooseMove,
});
|
import { isEqual } from 'lodash'
import { from, Observable, of, throwError } from 'rxjs'
import { catchError, distinctUntilChanged, map, publishReplay, refCount, switchMap } from 'rxjs/operators'
import { gql, graphQLContent } from '../graphql/graphql'
import * as GQL from '../graphql/schema'
import { PlatformContext } from '../platform/context'
import { asError, createAggregateError } from '../util/errors'
import { ConfiguredRegistryExtension, extensionIDsFromSettings, toConfiguredRegistryExtension } from './extension'
/**
 * Observe the extensions configured in the viewer's final (merged) settings.
 *
 * @returns An observable that emits the list of extensions configured in the viewer's final settings upon
 * subscription and each time it changes.
 */
export function viewerConfiguredExtensions({
settings,
queryGraphQL,
}: Pick<PlatformContext, 'settings' | 'queryGraphQL'>): Observable<ConfiguredRegistryExtension[]> {
return from(settings).pipe(
// Extract the extension IDs enabled in the merged settings.
map(settings => extensionIDsFromSettings(settings)),
// Only re-query the registry when the set of IDs actually changes.
distinctUntilChanged((a, b) => isEqual(a, b)),
switchMap(extensionIDs => queryConfiguredRegistryExtensions({ queryGraphQL }, extensionIDs)),
// Normalize any thrown value into an Error before propagating it.
catchError(error => throwError(asError(error))),
// Share one subscription and replay the latest list to late subscribers.
publishReplay(),
refCount()
)
}
/**
 * Query the GraphQL API for registry metadata about the extensions given in {@link extensionIDs}.
 *
 * @returns An observable that emits once with the results, in the same order as {@link extensionIDs}.
 * Extension IDs not found in the registry yield a placeholder entry with a null manifest.
 */
export function queryConfiguredRegistryExtensions(
    { queryGraphQL }: Pick<PlatformContext, 'queryGraphQL'>,
    extensionIDs: string[]
): Observable<ConfiguredRegistryExtension[]> {
    // Nothing requested: avoid a round trip entirely.
    if (extensionIDs.length === 0) {
        return of([])
    }
    const variables: GQL.IExtensionsOnExtensionRegistryArguments = {
        first: extensionIDs.length,
        prioritizeExtensionIDs: extensionIDs,
    }
    return from(
        queryGraphQL<GQL.IQuery>(
            gql`
                query Extensions($first: Int!, $prioritizeExtensionIDs: [String!]!) {
                    extensionRegistry {
                        extensions(first: $first, prioritizeExtensionIDs: $prioritizeExtensionIDs) {
                            nodes {
                                id
                                extensionID
                                url
                                manifest {
                                    raw
                                }
                                viewerCanAdminister
                            }
                        }
                    }
                }
            `[graphQLContent],
            variables,
            false
        )
    ).pipe(
        map(({ data, errors }) => {
            // Any missing level in the response means the query failed; surface
            // the GraphQL errors as a single aggregate error.
            if (
                !data ||
                !data.extensionRegistry ||
                !data.extensionRegistry.extensions ||
                !data.extensionRegistry.extensions.nodes
            ) {
                throw createAggregateError(errors)
            }
            return data.extensionRegistry.extensions.nodes.map(
                ({ id, extensionID, url, manifest, viewerCanAdminister }) => ({
                    id,
                    extensionID,
                    url,
                    manifest: manifest ? { raw: manifest.raw } : null,
                    viewerCanAdminister,
                })
            )
        }),
        map(registryExtensions => {
            // Index results by extension ID so assembling the ordered output is
            // O(n + m) instead of a linear scan per requested ID.
            const registryExtensionsByID = new Map(
                registryExtensions.map(extension => [extension.extensionID, extension] as const)
            )
            return extensionIDs.map((extensionID): ConfiguredRegistryExtension => {
                const registryExtension = registryExtensionsByID.get(extensionID)
                return registryExtension
                    ? toConfiguredRegistryExtension(registryExtension)
                    : { id: extensionID, manifest: null, rawManifest: null, registryExtension: undefined }
            })
        })
    )
}
|
<gh_stars>10-100
//Security Service - MongoDB implementation -------
// Mongoose model for the permissions collection; set once via apply() and
// used by every query helper below.
var permissionModel;
// Wire the security service up to the application's models. Must be called
// before any other function in this module is used.
function apply(models) {
permissionModel = models.models._permissions.model;
}
// Get the permission document for a given <role, resource> pair, or null if
// not found. Matches the resource name as a case-insensitive prefix.
// cb is a node-style callback: cb(err, permissionOrNull).
function getPermissionsFor(role, resource, cb) {
    if (!role || !resource) {
        return cb(null, null);
    }
    // Escape regex metacharacters so a resource name such as "a.b" or "x(y)"
    // is matched literally instead of being interpreted as a pattern (the
    // old code interpolated the raw string into the RegExp).
    var escapedResource = resource.toLowerCase().replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
    permissionModel.findOne({
        role: role,
        resource: {$regex: new RegExp('^' + escapedResource, 'i')}
    }, function(err, perm) {
        if (err) {
            return cb(err, null);
        }
        return cb(null, perm);
    });
}
// Get every permission document for a role (across all resources).
// cb is a node-style callback: cb(err, permissionsOrNull).
function getAllPermissionsForRole(role, cb) {
    // No role means nothing to look up; report "no data" rather than an error.
    if (!role) {
        return cb(null, null);
    }
    permissionModel.find({role: role}, function(err, perms) {
        return err ? cb(err, null) : cb(null, perms);
    });
}
// Get the distinct role names configured in security.
// cb is a node-style callback: cb(err, roleNames).
function getRoles(cb) {
    permissionModel.distinct('role', {}, function(err, result) {
        return err ? cb(err, null) : cb(null, result);
    });
}
// Get the distinct resource names configured in security.
// cb is a node-style callback: cb(err, resourceNames).
function getResources(cb) {
    permissionModel.distinct('resource', {}, function(err, result) {
        return err ? cb(err, null) : cb(null, result);
    });
}
// Decide whether a permissions document allows the given HTTP operation.
// Operations with a null resource are always permitted; otherwise the
// operation's key must be in the allow list and absent from the deny list.
function canExecuteOperation(permissions, operation) {
    if (operation.resource === null) {
        return true;
    }
    var allowList = null;
    var denyList = null;
    if (permissions && permissions.operations) {
        allowList = permissions.operations.allow;
        denyList = permissions.operations.deny;
    }
    var operationKey = translateToOperationKey(operation);
    // Deny wins over allow; a missing list counts as "not listed".
    return isInAcl(allowList, operationKey, false) && !isInAcl(denyList, operationKey, false);
}
// Maps an HTTP verb to its CRUD ACL key; any other verb falls back to
// "<VERB> <url>" so custom endpoints can still be listed in ACLs.
function translateToOperationKey(operation) {
  var verb = operation.verb.toUpperCase();
  switch (verb) {
    case 'POST':   return 'create';
    case 'PUT':    return 'update';
    case 'GET':    return 'query';
    case 'DELETE': return 'delete';
    default:       return verb + ' ' + operation.url;
  }
}
// Returns the horizontal-security policy type for a permission document.
// Falls back to `defaultValueOnNoData`, and ultimately to 'public'.
function getHorizontalPolicy(permissions, defaultValueOnNoData) {
  var horizontal = permissions && permissions.horizontalSecurity;
  if (horizontal && horizontal.type) {
    return horizontal.type;
  }
  return defaultValueOnNoData || 'public';
}
// Fields the caller may see; ['*'] (all fields) when none are configured.
function getFieldsToInclude(permissions) {
  var fields = permissions && permissions.fields;
  return (fields && fields.allow) ? fields.allow : ['*'];
}
// Fields to strip from responses; [] (hide nothing) when none are configured.
function getFieldsToRemove(permissions) {
  var fields = permissions && permissions.fields;
  return (fields && fields.deny) ? fields.deny : [];
}
// Case-insensitive ACL membership test. '*' in the list matches any key.
// A missing list yields `defaultValueOnNoData`.
function isInAcl(list, key, defaultValueOnNoData) {
  if (!list) {
    return defaultValueOnNoData;
  }
  var wanted = key.toLowerCase();
  return list.some(function(entry) {
    return entry === '*' || entry.toLowerCase() === wanted;
  });
}
// True when `role` may read `resourceName` under `permission`: names must
// match (case-insensitive), 'Admin' always passes, and otherwise the policy
// must not be 'none' and a GET operation must be allowed.
function canAccessToResource(role, resourceName, permission) {
  if (!permission) {
    return false;
  }
  if (permission.resource.toLowerCase() !== resourceName.toLowerCase()) {
    return false;
  }
  if (role === 'Admin') {
    return true; //superAdmin
  }
  if (getHorizontalPolicy(permission, 'none') === 'none') {
    return false;
  }
  return canExecuteOperation(permission, {verb: 'GET'});
}
// Expands '*' in the allow/deny lists to the four CRUD operations, then
// returns the allowed operations minus the denied ones.
function getAllowedOperations(permission) {
  var ops = (permission && permission.operations) || {};
  var baseOperations = ['query', 'create', 'update', 'delete'];
  var allowed = expandList(ops.allow || [], baseOperations);
  var denied = expandList(ops.deny || [], baseOperations);
  return removeDenied(allowed, denied);
}
//If * is found as an item, expanded items will be added with no duplications to the list
// Returns a NEW array: the original implementation spliced/pushed on the
// caller's array, silently mutating the permission document it came from
// (via getAllowedOperations). Callers only use the return value.
function expandList(list, expandedItems) {
  if (!list) {
    return [];
  }
  var result = list.slice(); // work on a copy; never mutate the input
  var starIndex = result.indexOf('*');
  if (starIndex !== -1) {
    result.splice(starIndex, 1);
    // Add expanded items, skipping ones already present.
    for (var i = 0; i < expandedItems.length; i++) {
      if (result.indexOf(expandedItems[i]) === -1) {
        result.push(expandedItems[i]);
      }
    }
  }
  return result;
}
// Set difference: items of `included` that do not appear in `denied`.
function removeDenied(included, denied) {
  return included.filter(function(item) {
    return denied.indexOf(item) === -1;
  });
}
// Public API: wiring (apply) plus read-only permission queries and ACL helpers.
module.exports = {
  apply : apply,
  getRoles : getRoles,
  getResources : getResources,
  getPermissionsFor : getPermissionsFor,
  getAllPermissionsForRole : getAllPermissionsForRole,
  getAllowedOperations : getAllowedOperations,
  canAccessToResource : canAccessToResource,
  canExecuteOperation : canExecuteOperation,
  getHorizontalPolicy : getHorizontalPolicy,
  getFieldsToInclude : getFieldsToInclude,
  getFieldsToRemove : getFieldsToRemove
};
<gh_stars>0
from drf_yasg import openapi
from drf_yasg.views import get_schema_view
from rest_framework import permissions

# Swagger/OpenAPI schema view for the todo API, browsable without
# authentication (public=True, AllowAny).
core_api_schema = get_schema_view(
    openapi.Info(
        title="Chems Todo API",
        default_version='v1',
        description="Just to do list API",
        contact=openapi.Contact(email="<EMAIL>"),  # placeholder from repo anonymization
        license=openapi.License(name="MIT License"),
    ),
    public=True,
    permission_classes=[permissions.AllowAny],
)
|
/**
* Created by lvjianyao on 16/6/27.
*/
module.exports = {
mongo: {
databaseUrl: process.env.MONGODB_URI
},
redis: {
host: process.env.REDIS_HOST,
port: parseInt(process.env.REDIS_PORT),
password: process.env.REDIS_PASSWORD
}
};
|
<reponame>mission-apprentissage/prise-de-rdv
const { Schema } = require("mongoose");

// Audit/event-log schema. NOTE(review): `description` strings are runtime
// data (surfaced by schema tooling) and are therefore left in French.
module.exports = new Schema({
  // User performing the action (null for anonymous/system events).
  username: {
    type: String,
    default: null,
    description: "Le nom de l'utilisateur",
  },
  // Event timestamp; defaults to document-creation time.
  date: {
    type: Date,
    default: () => new Date(),
    description: "La date de l'evenement",
  },
  // Category of the action.
  type: {
    type: String,
    default: null,
    description: "Le type d'action",
  },
  // The action that occurred.
  action: {
    type: String,
    default: null,
    description: "L'action ayant eu lieu",
  },
  // Free-form payload attached to the action.
  data: {
    type: String,
    default: null,
    description: "La donnée liéé à l'action",
  },
});
|
<filename>website/drawquest/management/commands/coin_balance_histogram.py<gh_stars>10-100
import datetime
from django.core.management.base import BaseCommand
from matplotlib import pyplot
from drawquest.apps.drawquest_auth.models import User
from drawquest import knobs, economy
class Command(BaseCommand):
    """Plot histograms of coin balances for recently joined, active users.

    Saves one SVG per bin configuration under /home/ubuntu/graphs/.
    """
    args = ''
    help = ''

    def handle(self, *args, **options):
        # Collect balances for users who joined in the last 90 days and have
        # at least 2 comments (filters out inactive/throwaway accounts).
        balances = []
        for user in User.objects.filter(date_joined__gte=datetime.datetime.now() - datetime.timedelta(days=90)):
            if user.comments.count() < 2:
                continue
            balances.append(economy.balance(user))
        # Dict used as a mutable closure cell for the figure counter
        # (Python 2-compatible alternative to `nonlocal`).
        figures = {'count': 0}

        def hist(bins):
            # Render one histogram of `balances` into a fresh figure and
            # save it as /home/ubuntu/graphs/<n>.svg.
            figures['count'] += 1
            pyplot.figure(figures['count'])
            pyplot.hist(balances, bins=bins, facecolor='green', alpha=0.75)
            pyplot.xlabel('Coins')
            pyplot.ylabel('User count')
            pyplot.suptitle(r'Coin balances')
            pyplot.grid(True)
            pyplot.savefig('/home/ubuntu/graphs/{}.svg'.format(figures['count']), dpi=180)

        # Increasingly coarse views: fine-grained low balances, then the long tail.
        hist(range(0, 101, 1))
        hist(range(0, 301, 5))
        hist(range(0, 1001, 10))
        hist(range(1001, 10000, 100))
|
<reponame>hellochenms/m4-scroll-half-stop
//
//  M4TouchPenetrateScrollView.h
//  m4-scroll-half-stop
//
//  Created by Chen,Meisong on 2018/10/23.
//  Copyright © 2018年 xyz.chenms. All rights reserved.
//

#import <UIKit/UIKit.h>

NS_ASSUME_NONNULL_BEGIN

// UITableView subclass; the name suggests it forwards ("penetrates") touches
// to views beneath it — NOTE(review): behavior lives in the .m file, confirm there.
@interface M4TouchPenetrateScrollView : UITableView

@end

NS_ASSUME_NONNULL_END
|
package com.galfins.gnss_compare.DataViewers;
import android.content.pm.ApplicationInfo;
import android.content.pm.PackageItemInfo;
import android.content.pm.PackageManager;
import android.graphics.Bitmap;
import android.graphics.Canvas;
import android.graphics.drawable.Drawable;
import android.location.Location;
import android.os.Bundle;
import android.support.annotation.ColorInt;
import android.support.annotation.DrawableRes;
import android.support.annotation.Nullable;
import android.support.v4.app.Fragment;
import android.support.v4.content.res.ResourcesCompat;
import android.support.v4.graphics.drawable.DrawableCompat;
import android.text.method.LinkMovementMethod;
import android.util.Log;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.TextView;
import com.galfins.gnss_compare.CalculationModule;
import com.galfins.gnss_compare.MainActivity;
import com.galfins.gogpsextracts.Coordinates;
import com.galfins.gnss_compare.R;
import com.google.android.gms.maps.CameraUpdate;
import com.google.android.gms.maps.CameraUpdateFactory;
import com.google.android.gms.maps.GoogleMap;
import com.google.android.gms.maps.LocationSource;
import com.google.android.gms.maps.MapView;
import com.google.android.gms.maps.OnMapReadyCallback;
import com.google.android.gms.maps.model.BitmapDescriptor;
import com.google.android.gms.maps.model.BitmapDescriptorFactory;
import com.google.android.gms.maps.model.LatLng;
import com.google.android.gms.maps.model.Marker;
import com.google.android.gms.maps.model.MarkerOptions;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.Observable;
import java.util.Observer;
/**
* Created by <NAME> on 31/03/2018.
* This class is for...
*/
/**
 * Map-based {@link DataViewer}: plots each {@link CalculationModule}'s poses
 * as colored markers on a Google Map and feeds the module's location to the
 * map's my-location layer via {@link LocationSource}.
 */
public class MapFragment extends Fragment implements DataViewer, OnMapReadyCallback {

    MapView mapView;
    GoogleMap map;

    // Last known camera position/zoom, preserved across view destruction so
    // the map reopens where the user left it.
    LatLng mapCameraLocation = new LatLng(0.0, 0.0);
    boolean mapCameraLocationInitialized = false;
    float mapCameraZoom = 12;
    CameraUpdate mapCameraUpdateAnimation;

    // One series per registered calculation module.
    ArrayList<MapDataSeries> dataSeries = new ArrayList<>();

    // True while the map view is attached to the activity's layout.
    private boolean mapInActivity = false;

    /**
     * Marker series for a single calculation module. Keeps a bounded ring of
     * marker descriptions so markers survive map re-creation (resetMarkers).
     */
    private class MapDataSeries implements DataViewer.CalculationModuleDataSeries, LocationSource {

        private static final String TAG = "MapDataSeries";

        private CalculationModule calculationModuleReference;
        // Upper bound on markers kept/plotted; oldest are dropped first.
        private final int MAX_PLOTTED_POINTS;
        // Map-independent marker descriptions (source of truth)...
        ArrayList<SafeMarkerDescription> registeredMarkerOptions;
        // ...and the live Marker objects currently on the map.
        ArrayList<Marker> registeredMarkers;
        private Observer observer;
        private OnLocationChangedListener mListener;

        @Override
        public void activate(OnLocationChangedListener onLocationChangedListener) {
            mListener = onLocationChangedListener;
        }

        @Override
        public void deactivate() {
            mListener = null;
        }

        /**
         * Marker data held independently of the GoogleMap instance, so markers
         * can be re-created after the map view is destroyed and rebuilt.
         */
        private class SafeMarkerDescription {
            private Coordinates location;
            private int drawableReference;
            private int color;

            SafeMarkerDescription(Coordinates location, int drawableReference, int color) {
                this.location = location;
                this.drawableReference = drawableReference;
                this.color = color;
            }

            // Materializes the description into MarkerOptions (tinted icon).
            MarkerOptions getMarkerOptions() {
                return new MarkerOptions()
                        .position(new LatLng(
                                location.getGeodeticLatitude(),
                                location.getGeodeticLongitude()))
                        .icon(vectorToBitmap(
                                drawableReference,
                                color));
            }
        }

        MapDataSeries(final CalculationModule calculationModule, int plottedPoints) {
            MAX_PLOTTED_POINTS = plottedPoints;
            calculationModuleReference = calculationModule;

            registeredMarkerOptions = new ArrayList<>();
            registeredMarkers = new ArrayList<>();

            // Fired whenever the module publishes a new pose: append a marker,
            // trim to MAX_PLOTTED_POINTS, and forward the location to the map.
            observer = new Observer() {
                @Override
                public void update(Observable observable, Object o) {
                    Coordinates currentPose = calculationModuleReference.getPose();
                    if (map != null) {
                        registeredMarkerOptions.add(
                                new SafeMarkerDescription(
                                        currentPose,
                                        R.drawable.map_dot_black_24dp,
                                        calculationModuleReference.getDataColor()));

                        SafeMarkerDescription lastMarker =
                                registeredMarkerOptions.get(registeredMarkerOptions.size() - 1);

                        // Only touch the live map while it is attached.
                        if (mapInActivity) {
                            registeredMarkers.add(
                                    map.addMarker(lastMarker.getMarkerOptions()));
                        }

                        // Drop the oldest description (and its marker, if live).
                        if (registeredMarkerOptions.size() > MAX_PLOTTED_POINTS) {
                            registeredMarkerOptions.remove(0);
                            if (registeredMarkers.size() > 0) {
                                registeredMarkers.get(0).remove();
                                registeredMarkers.remove(0);
                            }
                        }

                        if (mListener != null && mapInActivity) {
                            mListener.onLocationChanged(calculationModuleReference.getLocationFromGoogleServices());
                        }
                    } else {
                        Log.w(TAG, "update: Map not yet initialized...");
                    }
                }
            };
        }

        public Observer getDataObserver() {
            return observer;
        }

        // Re-creates live markers from the stored descriptions (after map rebuild).
        public void resetMarkers() {
            registeredMarkers.clear();
            for (SafeMarkerDescription markerDescription : registeredMarkerOptions) {
                registeredMarkers.add(map.addMarker(markerDescription.getMarkerOptions()));
            }
        }

        public ArrayList<Marker> getMarkers() {
            return registeredMarkers;
        }

        @Override
        public CalculationModule getCalculationModuleReference() {
            return calculationModuleReference;
        }
    }

    @Nullable
    @Override
    public View onCreateView(LayoutInflater inflater, @Nullable ViewGroup container, @Nullable Bundle savedInstanceState) {
        ViewGroup rootView;
        // Without a real Maps API key, show the "map disabled" explainer layout
        // (with clickable links) instead of the map.
        if(MainActivity.getMetaDataString("com.google.android.geo.API_KEY").equals("YOUR_API_KEY")){
            rootView = (ViewGroup) inflater.inflate(
                    R.layout.map_disabled_layout, container, false);
            TextView t2 = rootView.findViewById(R.id.description);
            t2.setMovementMethod(LinkMovementMethod.getInstance());
        } else {
            rootView = (ViewGroup) inflater.inflate(
                    R.layout.map_page, container, false);
            mapView = rootView.findViewById(R.id.map);
            mapView.onCreate(null);
            mapView.getMapAsync(this);
            mapInActivity = true;
        }
        return rootView;
    }

    @Override
    public void onDestroyView() {
        super.onDestroyView();
        // Remember camera state for the next onMapReady, then clear markers.
        if(map != null) {
            if(mapCameraLocationInitialized) {
                mapCameraLocation = map.getCameraPosition().target;
                mapCameraZoom = map.getCameraPosition().zoom;
            }
            map.clear();
        }
        mapInActivity = false;
    }

    @Override
    public void addSeries(CalculationModule calculationModule) {
        registerSeries(calculationModule);
        addLocationSource();
        calculationModule.addObserver(getSeries(calculationModule).getDataObserver());
    }

    /**
     * Demonstrates converting a {@link Drawable} to a {@link BitmapDescriptor},
     * for use as a marker icon.
     */
    private BitmapDescriptor vectorToBitmap(@DrawableRes int id, @ColorInt int color) {
        Drawable vectorDrawable = ResourcesCompat.getDrawable(getResources(), id, null);
        Bitmap bitmap = Bitmap.createBitmap(vectorDrawable.getIntrinsicWidth(),
                vectorDrawable.getIntrinsicHeight(), Bitmap.Config.ARGB_8888);
        Canvas canvas = new Canvas(bitmap);
        vectorDrawable.setBounds(0, 0, canvas.getWidth(), canvas.getHeight());
        DrawableCompat.setTint(vectorDrawable, color);
        vectorDrawable.draw(canvas);
        return BitmapDescriptorFactory.fromBitmap(bitmap);
    }

    // Adds a series for the module unless one already exists (10-point ring).
    private void registerSeries(CalculationModule calculationModule){
        if(!seriesRegistered(calculationModule)){
            dataSeries.add(new MapDataSeries(calculationModule, 10));
        }
    }

    // True if a series already tracks this module (reference equality).
    private boolean seriesRegistered(CalculationModule calculationModule) {
        for(CalculationModuleDataSeries series : dataSeries){
            if(series.getCalculationModuleReference() == calculationModule)
                return true;
        }
        return false;
    }

    // Index of the module's series in dataSeries, or -1 if absent.
    private int getSeriesId(CalculationModule calculationModule){
        for(int i = 0; i< dataSeries.size(); i++){
            if(dataSeries.get(i).getCalculationModuleReference() == calculationModule)
                return i;
        }
        return -1;
    }

    private MapDataSeries getSeries(CalculationModule calculationModule){
        return dataSeries.get(getSeriesId(calculationModule));
    }

    /**
     * Removes the series associated with {@code calculationModule} from the plot
     * @param calculationModule reference to the calculation module
     */
    @Override
    public void removeSeries(CalculationModule calculationModule){
        if (map != null) {
            Iterator<MapDataSeries> itr = dataSeries.iterator();
            MapDataSeries reference;
            while (itr.hasNext()) {
                reference = itr.next();
                if (reference.getCalculationModuleReference() == calculationModule) {
                    for (Marker marker : reference.getMarkers())
                        marker.remove();
                    itr.remove();
                    calculationModule.removeObserver(reference.getDataObserver());
                }
            }
        }
    }

    @Override
    public void onLocationFromGoogleServicesResult(Location location) {
        // First fix: center the camera once on the device location.
        if(!mapCameraLocationInitialized) {
            mapCameraLocation = new LatLng(location.getLatitude(), location.getLongitude());
            mapCameraLocationInitialized = true;

            if(map!=null){
                mapCameraUpdateAnimation = CameraUpdateFactory.newLatLngZoom(
                        mapCameraLocation, mapCameraZoom);
                map.moveCamera(mapCameraUpdateAnimation);
            }
        }
    }

    // Installs the first registered series as the map's location source.
    private void addLocationSource(){
        if (dataSeries.size() == 1 && map!=null) {
            map.setLocationSource(dataSeries.get(0));
        }
    }

    @Override
    public void onMapReady(GoogleMap map) {
        this.map = map;
        // Restore camera, re-attach location source, and re-create markers.
        mapCameraUpdateAnimation = CameraUpdateFactory.newLatLngZoom(
                mapCameraLocation, mapCameraZoom);
        map.moveCamera(mapCameraUpdateAnimation);
        addLocationSource();
        for(MapDataSeries series: dataSeries){
            series.resetMarkers();
        }
    }

    // MapView requires manual forwarding of the fragment lifecycle callbacks.
    @Override
    public void onResume() {
        super.onResume();
        if(mapView != null)
            mapView.onResume();
    }

    @Override
    public void onStart() {
        super.onStart();
        if(mapView != null)
            mapView.onStart();
    }

    @Override
    public void onStop() {
        super.onStop();
        if(mapView != null)
            mapView.onStop();
    }

    @Override
    public void onPause() {
        if(mapView != null)
            mapView.onPause();
        super.onPause();
    }

    @Override
    public void onDestroy() {
        if(mapView != null)
            mapView.onDestroy();
        super.onDestroy();
    }

    @Override
    public void onLowMemory() {
        super.onLowMemory();
        if(mapView != null)
            mapView.onLowMemory();
    }
}
|
{
  // NOTE(review): this is Babel-transpiled build output of the
  // `cells-pdf-viewer` Polymer component — prefer editing its ES source.
  var _Polymer = Polymer,
      html = _Polymer.html;
  /**
  `<cells-pdf-viewer>` Description.
  Example:
  ```html
  <cells-pdf-viewer></cells-pdf-viewer>
  ```
  ## Styling
  The following custom properties and mixins are available for styling:
  ### Custom Properties
  | Custom Property     | Selector | CSS Property | Value       |
  | ------------------- | -------- | ------------ | ----------- |
  | --cells-fontDefault | :host    | font-family  | sans-serif  |
  ### @apply
  | Mixins    | Selector | Value |
  | --------- | -------- | ----- |
  | --cells-pdf-viewer | :host | {} |
  * @customElement
  * @polymer
  * @extends {Polymer.Element}
  * @demo demo/index.html
  */
  var CellsPdfViewer =
  /*#__PURE__*/
  function (_Polymer$mixinBehavio) {
    babelHelpers.inherits(CellsPdfViewer, _Polymer$mixinBehavio);

    function CellsPdfViewer() {
      babelHelpers.classCallCheck(this, CellsPdfViewer);
      return babelHelpers.possibleConstructorReturn(this, babelHelpers.getPrototypeOf(CellsPdfViewer).apply(this, arguments));
    }

    babelHelpers.createClass(CellsPdfViewer, [{
      key: "_srcChanged",

      /**
       * Observer called on src change
       * @param {String} newVal
       * @param {String} oldVal
       */
      value: function _srcChanged(newVal, oldVal) {
        // Pipeline: base64 -> byte array -> blob URL -> PDFJS viewer path.
        if (newVal && typeof newVal === 'string') {
          var bytes = this.createBytesArray(newVal);
          var url = this.createBlobUrl(bytes);
          this.setViewerPath(url);
        }
      }
      /**
       * converts base64 to bytes arry from it's binary data
       * @param {String} base64
       * @return {Array} bytes array from pdf binary data
       */

    }, {
      key: "createBytesArray",
      value: function createBytesArray(base64) {
        // atob yields a binary string; copy each char code into a Uint8Array.
        var binaryFromBase64 = atob(base64);
        var bytes = new Uint8Array(binaryFromBase64.length);
        bytes.forEach(function (byte, index) {
          return bytes[index] = binaryFromBase64.charCodeAt(index);
        });
        return bytes;
      }
      /**
       * creates blob url from bytes array
       * @param {Array} bytes
       * @return {String} url from Object Url
       */

    }, {
      key: "createBlobUrl",
      value: function createBlobUrl(bytes) {
        var blob = new Blob([bytes], {
          type: ''
        });
        return URL.createObjectURL(blob);
      }
      /**
       * Sets PDF viewer path with blob url
       * @param {String} url
       */

    }, {
      key: "setViewerPath",
      value: function setViewerPath(url) {
        this.path = this.resolveUrl(this.viewerPath) + encodeURIComponent(url);
      }
    }], [{
      key: "is",
      get: function get() {
        return 'cells-pdf-viewer';
      }
    }, {
      key: "properties",
      get: function get() {
        return {
          /**
           * takes base64 as a src for rendering PDF
           * @type {String}
           */
          src: {
            type: String,
            notify: true,
            observer: '_srcChanged'
          },

          /**
           * relative path to iframe to embed PDFJS viewer
           * @type {String}
           */
          path: {
            type: String
          },

          /**
           * pdf viewer path resolves to pdfjs/web/viewer.html
           * @type {String}
           */
          viewerPath: {
            type: String,
            notify: true
          }
        };
      }
    }]);
    return CellsPdfViewer;
  }(Polymer.mixinBehaviors([CellsBehaviors.i18nBehavior], Polymer.Element));

  customElements.define(CellsPdfViewer.is, CellsPdfViewer);
}
<gh_stars>1-10
/*
* Copyright (c) <NAME>. All rights reserved.
* License : Apache 2.0
* @author <NAME>
*
*/
package com.dhana.servicebus.saswrapper;
import java.io.IOException;
import java.io.StringWriter;
import java.nio.charset.Charset;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import javax.inject.Inject;
import javax.ws.rs.core.MediaType;
import org.apache.commons.io.IOUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.microsoft.windowsazure.services.core.ServiceException;
import com.microsoft.windowsazure.services.core.UserAgentFilter;
import com.microsoft.windowsazure.services.core.utils.ServiceExceptionFactory;
import com.microsoft.windowsazure.services.serviceBus.implementation.WrapAccessTokenResult;
import com.microsoft.windowsazure.services.serviceBus.implementation.WrapContract;
import com.sun.jersey.api.client.Client;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.UniformInterfaceException;
import com.sun.jersey.api.representation.Form;
/**
 * {@link WrapContract} implementation that acquires a Service Bus access token
 * via a form POST (authorization_code grant) and derives the token's remaining
 * lifetime from the "ExpiresOn=&lt;epoch-seconds&gt;" pair embedded in the raw token.
 */
public class SASRestProxy implements WrapContract {

    Client channel;
    static Log log = LogFactory.getLog(WrapContract.class);
    // Matches the epoch-seconds expiry embedded in the token string.
    private Pattern pattern = Pattern.compile("ExpiresOn=(\\d+)");

    @Inject
    public SASRestProxy(Client channel, UserAgentFilter userAgentFilter) {
        this.channel = channel;
        this.channel.addFilter(userAgentFilter);
    }

    /**
     * POSTs the credentials form to {@code uri} and wraps the raw response body
     * as the access token.
     *
     * @throws ServiceException on HTTP failure or when the body cannot be read
     */
    public WrapAccessTokenResult wrapAccessToken(String uri, String name,
            String password, String scope) throws ServiceException {
        Form requestForm = new Form();
        requestForm.add("grant_type", "authorization_code");
        requestForm.add("client_id", name);
        requestForm.add("client_secret", password);
        requestForm.add("scope", scope);
        //System.out.println("client Id:" + name);
        //System.out.println("scope :" + scope);
        ClientResponse cResponse;
        StringWriter writer = new StringWriter();
        try {
            cResponse = channel.resource(uri)
                    .accept(MediaType.APPLICATION_FORM_URLENCODED)
                    .type(MediaType.APPLICATION_FORM_URLENCODED)
                    .post(ClientResponse.class, requestForm);
            IOUtils.copy(cResponse.getEntityInputStream(), writer, Charset
                    .forName("UTF-8").toString());
        } catch (UniformInterfaceException e) {
            log.warn("WRAP server returned error acquiring access_token", e);
            throw ServiceExceptionFactory.process("SASWrapper", new ServiceException(
                    "WRAP server returned error acquiring access_token", e));
        } catch (IOException e) {
            log.warn("Error while reading access_token", e);
            throw ServiceExceptionFactory.process("SASWrapper", new ServiceException(
                    "Error while reading access_token", e));
        }
        WrapAccessTokenResult response = new WrapAccessTokenResult();
        String tokenString = writer.toString();
        //System.out.println("responseForm " + tokenString);
        // Take the LAST ExpiresOn match in the token.
        Matcher m = pattern.matcher(tokenString);
        String expiresOn = "";
        while (m.find()) {
            expiresOn = m.group().split("=")[1];
        }
        // NOTE(review): if the token contains no "ExpiresOn=<n>" pair,
        // expiresOn stays "" and parseLong throws NumberFormatException —
        // TODO confirm the token format always includes it, or add a guard.
        long expiresInMillis = Long.parseLong(expiresOn) * 1000
                - System.currentTimeMillis();
        response.setAccessToken(tokenString);
        //In Seconds
        response.setExpiresIn(expiresInMillis / 1000);
        return response;
    }
}
|
<reponame>taowu750/LeetCodeJourney
package training.array;
import org.junit.jupiter.api.Test;
import java.util.function.ToIntFunction;
import static org.junit.jupiter.api.Assertions.assertEquals;
/**
* 给定一个包含 [0, n] 中 n 个数的数组 nums ,找出 [0, n] 这个范围内没有出现在数组中的那个数。
*
* 你能否实现线性时间复杂度、仅使用额外常数空间的算法解决此问题?
*
* 例 1:
* 输入:nums = [3,0,1]
* 输出:2
* 解释:n = 3,因为有 3 个数字,所以所有的数字都在范围 [0,3] 内。2 是丢失的数字,因为它没有出现在 nums 中。
*
* 例 2:
* 输入:nums = [0,1]
* 输出:2
* 解释:n = 2,因为有 2 个数字,所以所有的数字都在范围 [0,2] 内。2 是丢失的数字,因为它没有出现在 nums 中。
*
* 例 3:
* 输入:nums = [9,6,4,2,3,5,7,0,1]
* 输出:8
* 解释:n = 9,因为有 9 个数字,所以所有的数字都在范围 [0,9] 内。8 是丢失的数字,因为它没有出现在 nums 中。
*
* 例 4:
* 输入:nums = [0]
* 输出:1
* 解释:n = 1,因为有 1 个数字,所以所有的数字都在范围 [0,1] 内。1 是丢失的数字,因为它没有出现在 nums 中。
*
* 约束:
* - n == nums.length
* - 1 <= n <= 10**4
* - 0 <= nums[i] <= n
* - nums 中的所有数字都「独一无二」
*/
public class E268_Easy_MissingNumber {

    // Shared fixture runner: checks the four examples from the problem statement.
    static void test(ToIntFunction<int[]> method) {
        assertEquals(method.applyAsInt(new int[]{3,0,1}), 2);
        assertEquals(method.applyAsInt(new int[]{0,1}), 2);
        assertEquals(method.applyAsInt(new int[]{9,6,4,2,3,5,7,0,1}), 8);
        assertEquals(method.applyAsInt(new int[]{0}), 1);
    }

    /**
     * Gauss-sum approach: `actual` accumulates 1+2+...+n, `sum` the array
     * contents; their difference is the missing value. O(n) time, O(1) space.
     *
     * LeetCode runtime: 0 ms - 100.00%
     * Memory usage: 39.3 MB - 5.20%
     */
    public int missingNumber(int[] nums) {
        int actual = 0, sum = 0;
        for (int i = 0; i < nums.length; i++) {
            actual += i + 1;
            sum += nums[i];
        }
        return actual - sum;
    }

    @Test
    public void testMissingNumber() {
        test(this::missingNumber);
    }
}
|
/**
*
* Experiment Table
*
*/
import React from 'react';
import PropTypes from 'prop-types';
import { connect } from 'react-redux';
import { createStructuredSelector } from 'reselect';
import { Table, Tag, Icon } from 'antd';
import LoadingIndicator from 'components/LoadingIndicator';
import { Link } from 'react-router-dom';
import {
makeSelectExperimentList,
makeSelectExperimentColumns,
makeSelectExperimentMetricColumns,
makeSelectExperimentColumnsPID,
makeSelectLoading,
} from './selectors';
import { getDataAction,
loadExperimentAction,
createExperimentLabelsAction,
deleteExperimentLabelsAction,
addDeleteExperimentIdAction,
removeDeleteExperimentIdAction,
clearDeleteExperimentsAction,
} from './actions';
import { onMenuSelectionAction } from './ExperimentMenu/actions';
import { MENU_EXPERIMENT } from './ExperimentMenu/constants';
import Label from 'components/Label/Loadable';
import { makeSelectDeleteVisible } from './ExperimentMenu/selectors';
/* eslint-disable react/prefer-stateless-function */
/**
 * Experiment list table: shows base columns (name/status/author/date/tags),
 * appends project-specific metric and parameter columns from the store, polls
 * the experiment list every 2s, and supports label CRUD plus checkbox
 * selection for bulk delete.
 */
export class ExperimentTable extends React.Component {
  constructor(props) {
    super(props);
    // Base (always-present) columns; optional columns are appended in
    // addOptionalColumns() at render time.
    this.columns = [
      {
        title: 'Name',
        dataIndex: 'name',
        key: 'name',
        render: (text, record) => {
          // Unnamed experiments fall back to a short (7-char) id prefix.
          let result = text;
          if (text === record.experiment_id) result = text.substring(0, 7);
          return <Link to={`/experiment-detail/${record.id}`}>{result}</Link>;
        },
      },
      {
        title: 'Status',
        dataIndex: 'status',
        key: 'status',
        // status === 1 means still running (spinner); anything else is done.
        render: text => {
          if (text === 1)
            return <Icon type="sync" spin style={{ color: 'orange' }} />;
          return (
            <Icon type="check-circle" theme="twoTone" twoToneColor="#52c41a" />
          );
        },
      },
      {
        title: 'Submitted By',
        dataIndex: 'submitted_by',
        key: 'submitted_by',
      },
      {
        title: 'Date Created',
        key: 'date_created_epoch',
        dataIndex: 'date_created_epoch',
        // Epoch seconds -> local date string without the "GMT..." suffix.
        render: dt => {
          if (!dt) return '';
          const t = new Date(dt * 1000);
          return t.toString().split('GMT')[0];
        },
        sorter: (a, b) => a.date_created_epoch - b.date_created_epoch,
      },
      {
        title: 'Tags',
        key: 'labels',
        dataIndex: 'labels',
        render: (tags,record) => {
          return (
            <span>
              <Label
                style={{ marginTop: '20px' }}
                buttonDisplay={false}
                modelId={record.id}
                labelData={tags}
                onLabelDelete={this.onLabelDelete}
                onLabelSubmit={this.onLabelSubmit}
              />
            </span>
          );
        },
      },
    ];
  }

  // Label create/delete handlers passed down to the <Label> cell component.
  onLabelSubmit = (modelId, values) => {
    const projectId = this.props.match.params.id;
    this.props.createExperimentLabelsAction(modelId, values, projectId);
  }

  onLabelDelete = (label, modelId, ) => {
    const projectId = this.props.match.params.id;
    this.props.deleteExperimentLabelsAction(modelId, label, projectId);
  }

  componentDidMount() {
    const projectId = this.props.match.params.id;
    this.props.menuSelection(MENU_EXPERIMENT);
    this.props.initiateDataFetch();
    this.props.getExperimentData(projectId);
    // Poll for experiment updates every 2 seconds while mounted.
    this.timer = setInterval(() => {
      this.props.getExperimentData(projectId);
    }, 2000);
  }

  componentWillUnmount() {
    clearInterval(this.timer);
    this.props.clearDeleteExperimentsAction();
  }

  // Tracks which experiment ids are checked for bulk delete.
  onDeleteCheckbox = (e) => {
    if(e.target.checked){
      this.props.addDeleteExperimentIdAction(e.target.value);
    } else {
      this.props.removeDeleteExperimentIdAction(e.target.value);
    }
  }

  /**
   * Builds the final column list: optional leading checkbox column (shown
   * while delete mode is OFF — note the inverted `!deleteVisible` flag), the
   * base columns, then store-driven metric and parameter columns.
   */
  addOptionalColumns(data, deleteVisible) {
    const opCol = this.props.optionalColumns;
    const opMCol = this.props.optionalMetricColumns;
    const result = [];
    const deleteCheckColumn = !deleteVisible ? [{ title: 'Sl',
      key: 'sl',
      render: (text, record) => {
        return <input type="checkbox" value={record.id} onChange={this.onDeleteCheckbox} />;
      }
    }] : [];
    // Optional columns belong to a specific project; skip them if the store's
    // columns were computed for a different project id.
    if (this.props.match.params.id !== this.props.optionalColumnsPID)
      return [...deleteCheckColumn , ...data];
    // Add metric columns
    if (opMCol && opMCol.length > 0) {
      for (let i = 0; i < opMCol.length; i += 1) {
        const metric = opMCol[i];
        // Metric ids are "<name>$<1|0>": 1 = maximize, 0 = minimize.
        let metricName = metric.split('$');
        metricName =
          metricName[1] === '1'
            ? `${metricName[0]}(max)`
            : `${metricName[0]}(min)`;
        result.push({
          title: metricName,
          dataIndex: `metric_fields.${metric}`,
          key: opMCol[i],
          render: text => (text ? Math.round(text * 100) / 100 : null),
          // TODO: Improve the logic later
          // Rows missing the metric sort to the extremes via sentinel values.
          sorter: (a, b) => {
            if (!(metric in a.metric_fields)) {
              return -999999999999;
            }
            if (!(metric in b.metric_fields)) {
              return 999999999999;
            }
            return a.metric_fields[metric] - b.metric_fields[metric];
          },
        });
      }
    }
    // Add parameter columns
    if (opCol && opCol.length > 0) {
      for (let i = 0; i < opCol.length; i += 1) {
        const param = opCol[i];
        result.push({
          title: param,
          dataIndex: `param_fields.${param}`,
          key: param,
          // TODO: Improve the logic later
          sorter: (a, b) => {
            if (!(param in a.param_fields)) {
              return -999999999999;
            }
            if (!(param in b.param_fields)) {
              return 999999999999;
            }
            return a.param_fields[param] - b.param_fields[param];
          },
        });
      }
    }
    return [...deleteCheckColumn, ...data, ...result, ];
  }

  render() {
    // NOTE(review): addDeleteExperimentIdAction is not a Table prop — antd
    // ignores it; the handler is already used via onDeleteCheckbox.
    return this.props.loading ? (
      <LoadingIndicator />
    ) : (
      <Table
        columns={this.addOptionalColumns(this.columns, this.props.deleteVisible)}
        dataSource={this.props.experimentList}
        addDeleteExperimentIdAction={this.props.addDeleteExperimentIdAction}
        rowKey="id"
      />
    );
  }
}
ExperimentTable.propTypes = {
  getExperimentData: PropTypes.func.isRequired,
  experimentList: PropTypes.array,
  optionalColumns: PropTypes.array,
  optionalColumnsPID: PropTypes.string,
  loading: PropTypes.bool,
  initiateDataFetch: PropTypes.func,
  optionalMetricColumns: PropTypes.array,
  match: PropTypes.object,
  // Bound action creator invoked in componentDidMount — it is a function,
  // not a string (the previous PropTypes.string triggered a console warning
  // on every mount).
  menuSelection: PropTypes.func,
  deleteVisible: PropTypes.bool,
  // Action creators injected by connect() that were previously undeclared.
  createExperimentLabelsAction: PropTypes.func,
  deleteExperimentLabelsAction: PropTypes.func,
  addDeleteExperimentIdAction: PropTypes.func,
  removeDeleteExperimentIdAction: PropTypes.func,
  clearDeleteExperimentsAction: PropTypes.func,
};
// Store selectors -> props (memoized via reselect's createStructuredSelector).
const mapStateToProps = createStructuredSelector({
  experimentList: makeSelectExperimentList(),
  loading: makeSelectLoading(),
  optionalColumns: makeSelectExperimentColumns(),
  optionalMetricColumns: makeSelectExperimentMetricColumns(),
  optionalColumnsPID: makeSelectExperimentColumnsPID(),
  deleteVisible: makeSelectDeleteVisible(),
});
// Dispatch -> bound action-creator props consumed by the table.
function mapDispatchToProps(dispatch) {
  return {
    getExperimentData: projectId => dispatch(loadExperimentAction(projectId)),
    initiateDataFetch: () => dispatch(getDataAction()),
    menuSelection: key => dispatch(onMenuSelectionAction(key)),
    deleteExperimentLabelsAction: (modelId, label, projectId) =>
      dispatch(deleteExperimentLabelsAction(modelId, label, projectId)),
    createExperimentLabelsAction: (modelId, values, projectId) =>
      dispatch(createExperimentLabelsAction(modelId, values, projectId)),
    addDeleteExperimentIdAction: (eid) =>
      dispatch(addDeleteExperimentIdAction(eid)),
    removeDeleteExperimentIdAction: (eid) =>
      dispatch(removeDeleteExperimentIdAction(eid)),
    clearDeleteExperimentsAction: () => dispatch(clearDeleteExperimentsAction()),
  };
}
// Store-connected component (default export).
export default connect(
  mapStateToProps,
  mapDispatchToProps,
)(ExperimentTable);
|
<gh_stars>0
require 'spec_helper'
# Spec fixture: a boat with string-indexed, searchable attributes.
class Boat < Record
  has_many :sails

  attribute :type, index: :string, search: true
  attribute :color, index: :string, search: true
  attribute :owner, index: :string, search: true
  # Unindexed attribute: present on records but not queryable via .where.
  attribute :size
end
# Test model on the many-side of the Boat association; both attributes are
# searchable so relation-scoped where() can be exercised.
class Sail < Record
  belongs_to :boat
  attribute :type, index: :string, search: true
  attribute :color, index: :string, search: true
end
# Exercises the Record query DSL — #where, #ids, #count, #first/#last,
# #random, #sort/#order — on model classes and on has_many relations.
# The Boat and Sail test models are declared at the top of this file.
RSpec.describe Record do
  describe "#where" do
    it "should find objects " do
      boat1 = Boat.build id:1, type:'ferry', color:'white', size:'big'
      boat2 = Boat.build id:2, type:'yacht', color: 'white', size:'small'
      boat3 = Boat.build id:3, type:'yacht', color: 'blue', size:'medium'
      objects = Boat.where(type:'yacht').result.map! { |t| t.id }
      expect(objects).to eq([2,3])
    end
    it "should find objects by single attribute" do
      boat1 = Boat.build id:1, type:'ferry', color:'white', size:'big'
      boat2 = Boat.build id:2, type:'yacht', color: 'white', size:'small'
      boat3 = Boat.build id:3, type:'yacht', color: 'blue', size:'medium'
      expect(Boat.where(type:'ferry').ids).to eq([1])
      expect(Boat.where(type:'yacht').ids).to eq([2,3])
      expect(Boat.where(type:'raft').ids).to eq([])
      expect(Boat.where(color:'white').ids).to eq([1,2])
      expect(Boat.where(color:'blue').ids).to eq([3])
      expect(Boat.where(color:'red').ids).to eq([])
    end
    it "should return on array-like object" do
      boat1 = Boat.build id:1, type:'ferry', color:'white', size:'big'
      boat2 = Boat.build id:2, type:'yacht', color: 'white', size:'small'
      boat3 = Boat.build id:3, type:'yacht', color: 'blue', size:'medium'
      expect(Boat.where(type:'yacht').ids).to eq([2,3])
      expect(Boat.where(color:'white').ids).to eq([1,2])
      expect(Boat.where(type:'yacht').ids.size).to eq(2)
    end
    it "should be chainable" do
      boat1 = Boat.build id:1, type:'ferry', color:'white', size:'big'
      boat2 = Boat.build id:2, type:'yacht', color: 'white', size:'small'
      boat3 = Boat.build id:3, type:'yacht', color: 'blue', size:'medium'
      expect(Boat.where(type:'ferry').where(color:'white').ids).to eq([1])
      expect(Boat.where(type:'ferry').where(color:'blue').ids).to eq([])
      expect(Boat.where(type:'yacht').where(size:'small').ids).to eq([])
    end
    it "should find objects by multiple attributes" do
      boat1 = Boat.build id:1, type:'ferry', color:'white', size:'big'
      boat2 = Boat.build id:2, type:'yacht', color: 'white', size:'small'
      boat3 = Boat.build id:3, type:'yacht', color: 'blue', size:'medium'
      expect(Boat.where(type:'ferry', color:'white').ids).to eq([1])
      expect(Boat.where(type:'ferry', color:'blue').ids).to eq([])
      expect(Boat.where(type:'yacht', color:'white').ids).to eq([2])
      expect(Boat.where(type:'yacht', color:'blue').ids).to eq([3])
    end
    it "should not find anything for attributes without search" do
      # :size is declared without `search: true`, so it is never indexed.
      expect(Boat.where(size:'big').ids).to eq([])
    end
  end
  describe "#where and #any?" do
    it "should return true if any objects" do
      boat1 = Boat.build id:1, type:'ferry', color:'white', size:'big'
      boat2 = Boat.build id:2, type:'yacht', color: 'white', size:'small'
      boat3 = Boat.build id:3, type:'yacht', color: 'blue', size:'medium'
      expect(Boat.where(type:'ferry').any?).to eq(true)
      expect(Boat.where(type:'yacht').any?).to eq(true)
      expect(Boat.where(type:'bike').any?).to eq(false)
      expect(Boat.where(type:'ferry', color:'white').any?).to eq(true)
      expect(Boat.where(type:'ferry', color:'blue').any?).to eq(false)
    end
  end
  describe "#count" do
    it "should return number of results" do
      boat1 = Boat.build id:1, type:'ferry', color:'white', size:'big'
      boat2 = Boat.build id:2, type:'yacht', color: 'white', size:'small'
      boat3 = Boat.build id:3, type:'yacht', color: 'blue', size:'medium'
      expect(Boat.count).to eq(3)
      # check compare methods:
      expect(Boat.count == 3).to eq(true)
      expect(Boat.count != 2).to eq(true)
      expect(Boat.count > 2).to eq(true)
      expect(Boat.count < 4).to eq(true)
      expect(Boat.count >= 3).to eq(true)
      expect(Boat.count <= 3).to eq(true)
    end
  end
  describe "#count and #where" do
    it "should be combinable" do
      boat1 = Boat.build id:1, type:'ferry', color:'white', size:'big'
      boat2 = Boat.build id:2, type:'yacht', color: 'white', size:'small'
      boat3 = Boat.build id:3, type:'yacht', color: 'blue', size:'medium'
      expect(Boat.where(type:'yacht').count).to eq(2)
      expect(Boat.where(type:'yacht').count).to eq(2)
      expect(Boat.where(color:'white').where(type:'yacht').count).to eq(1)
    end
  end
  describe "#random and #where" do
    it "should be combinable" do
      boat1 = Boat.build id:1, type:'ferry', color:'white', size:'big'
      boat2 = Boat.build id:2, type:'yacht', color: 'white', size:'small'
      boat3 = Boat.build id:3, type:'yacht', color: 'blue', size:'medium'
      boat = Boat.where(type:'yacht').ids.random
      expect([2,3]).to include(boat)
    end
  end
  describe "#where and #last/#first" do
    it "should be combinable" do
      boat1 = Boat.build id:1, type:'ferry', color:'white', size:'big'
      boat2 = Boat.build id:2, type:'yacht', color: 'white', size:'small'
      boat3 = Boat.build id:3, type:'yacht', color: 'blue', size:'medium'
      boat = Boat.where(type:'ferry').first
      expect(boat.id).to eq(1)
      boat = Boat.where(type:'ferry').last
      expect(boat.id).to eq(1)
      boat = Boat.where(type:'yacht').first
      expect(boat.id).to eq(2)
      boat = Boat.where(type:'yacht').last
      expect(boat.id).to eq(3)
      boat = Boat.where(type:'yacht',color:'blue').first
      expect(boat.id).to eq(3)
      boat = Boat.where(type:'yacht',color:'blue').last
      expect(boat.id).to eq(3)
    end
  end
  describe "#first/last" do
    it "should accept single attribute" do
      boat1 = Boat.build id:1, type:'ferry', color:'white', size:'big'
      boat2 = Boat.build id:2, type:'yacht', color: 'white', size:'small'
      boat3 = Boat.build id:3, type:'yacht', color: 'blue', size:'medium'
      boat = Boat.first(type:'ferry')
      expect(boat.id).to eq(1)
      boat = Boat.last(type:'ferry')
      expect(boat.id).to eq(1)
      boat = Boat.first(type:'yacht')
      expect(boat.id).to eq(2)
      boat = Boat.last(type:'yacht')
      expect(boat.id).to eq(3)
    end
    it "should accept multiple attribute" do
      boat1 = Boat.build id:1, type:'ferry', color:'white', size:'big'
      boat2 = Boat.build id:2, type:'yacht', color: 'white', size:'small'
      boat3 = Boat.build id:3, type:'yacht', color: 'blue', size:'medium'
      boat = Boat.first(type:'yacht',color:'blue')
      expect(boat.id).to eq(3)
      boat = Boat.last(type:'yacht',color:'blue')
      expect(boat.id).to eq(3)
    end
    it "should return nil if not found" do
      boat1 = Boat.build id:1, type:'ferry', color:'white', size:'big'
      boat2 = Boat.build id:2, type:'yacht', color: 'white', size:'small'
      boat3 = Boat.build id:3, type:'yacht', color: 'blue', size:'medium'
      boat = Boat.first(type:'raceboat')
      expect(boat).to eq(nil)
      boat = Boat.last(type:'raceboat')
      expect(boat).to eq(nil)
    end
  end
  describe "#destroy" do
    it "should update search" do
      boat1 = Boat.build id:1, type:'ferry', color:'white'
      boat2 = Boat.build id:2, type:'yacht', color:'white'
      expect(Boat.where(color:'white').ids).to eq([1,2])
      boat1.destroy
      expect(Boat.where(color:'white').ids).to eq([2])
      boat2.destroy
      expect(Boat.where(color:'white').ids).to eq([])
    end
  end
  describe "#update_attributes" do
    it "should update search" do
      boat1 = Boat.build id:1, type:'ferry', color:'white'
      boat2 = Boat.build id:2, type:'yacht', color:'white'
      expect(Boat.where(color:'white').ids).to eq([1,2])
      expect(Boat.where(color:'blue').ids).to eq([])
      boat1.update_attributes color:'blue'
      expect(Boat.where(color:'white').ids).to eq([2])
      expect(Boat.where(color:'blue').ids).to eq([1])
    end
  end
  describe "#update_attribute" do
    it "should update search" do
      boat1 = Boat.build id:1, type:'ferry', color:'white'
      boat2 = Boat.build id:2, type:'ferry', color:'blue'
      expect(Boat.where(color:'blue').ids).to eq([2])
      boat1.update_attribute :color, 'blue'
      expect(Boat.where(color:'blue').ids).to eq([1,2])
      boat2.update_attribute :color, 'green'
      expect(Boat.where(color:'blue').ids).to eq([1])
      boat1.update_attribute :color, 'green'
      expect(Boat.where(color:'blue').ids).to eq([])
    end
  end
  describe "#attribute=" do
    # Plain attribute assignment must NOT reindex; only #save commits the
    # change to the search index.
    it "should update search after a save" do
      boat1 = Boat.build id:1, color:'white'
      expect(Boat.where(color:'white').ids).to eq([1])
      expect(Boat.where(color:'black').ids).to eq([])
      boat1.attributes = { color: 'black' }
      expect(Boat.where(color:'white').ids).to eq([1])
      expect(Boat.where(color:'black').ids).to eq([])
      boat1.save
      expect(Boat.where(color:'white').ids).to eq([])
      expect(Boat.where(color:'black').ids).to eq([1])
    end
  end
  describe "#where and #order" do
    it "should be combinable" do
      boat1 = Boat.build id:1, type:'ferry', color:'white', size:'big'
      boat2 = Boat.build id:2, type:'yacht', color: 'white', size:'small'
      boat3 = Boat.build id:3, type:'yacht', color: 'blue', size:'medium'
      ids = Boat.where(type:'yacht').sort(:color).order(:asc).ids
      expect(ids).to eq([3,2])
      ids = Boat.where(type:'yacht').sort(:color).order(:desc).ids
      expect(ids).to eq([2,3])
      ids = Boat.where(type:'ferry').sort(:color).order(:asc).ids
      expect(ids).to eq([1])
      ids = Boat.where(color:'white').sort(:type).order(:asc).ids
      expect(ids).to eq([1,2])
      ids = Boat.where(color:'white').sort(:type).order(:desc).ids
      expect(ids).to eq([2,1])
    end
  end
  # NOTE(review): this describe duplicates the "#where and #order" description
  # above — consider renaming (e.g. "#where, #sort and #order on a subset").
  describe "#where and #order" do
    it "should be combinable" do
      boat1 = Boat.build id:1, type:'ferry', color:'white', owner:'Anne'
      boat2 = Boat.build id:2, type:'ferry', color: 'white', owner:'Betty'
      boat3 = Boat.build id:3, type:'ferry', color: 'blue', owner:'Tom'
      # NOTE(review): boat4 is reassigned three times below; the builds for
      # ids 5 and 6 still take effect, only the local binding is clobbered.
      boat4 = Boat.build id:4, type:'yacht', color: 'white', owner:'Sarah'
      boat4 = Boat.build id:5, type:'yacht', color: 'white', owner:'Clara'
      boat4 = Boat.build id:6, type:'yacht', color: 'blue', owner:'Ben'
      ids = Boat.where(color:'white', type:'yacht').sort(:owner).order(:asc).ids
      expect(ids).to eq([5,4])
    end
  end
  describe "#first" do
    it "should find the first item" do
      record1 = Record.build id:1
      record2 = Record.build id:2
      record3 = Record.build id:3
      item = Record.first
      expect(item).to be_a(Record)
      expect(item.id).to eq(record1.id)
    end
  end
  describe "#last" do
    it "should find the last item" do
      record1 = Record.build id:1
      record2 = Record.build id:2
      record3 = Record.build id:3
      item = Record.last
      expect(item).to be_a(Record)
      expect(item.id).to eq(record3.id)
    end
  end
  describe "#random" do
    # really testing for randomness is hard
    # we simply check that a valid id is returned
    it "should find a random item" do
      record1 = Record.build id:1
      record2 = Record.build id:2
      record3 = Record.build id:25
      ids = [record1.id, record2.id, record3.id]
      item = Record.ids.random
      expect(ids.include? item).to eq(true)
    end
  end
  describe "queries on relations" do
    # Two boats, three sails each, so relation-scoped queries can be checked
    # against both parents independently.
    before(:each) do
      @boat1 = Boat.build id:1, type: 'yacht', color: 'white'
      @sail1 = Sail.build id:1, type: 'small', color: 'white'
      @sail2 = Sail.build id:2, type: 'big', color: 'blue'
      @sail3 = Sail.build id:3, type: 'big', color: 'white'
      @boat1.sails.add @sail1
      @boat1.sails.add @sail2
      @boat1.sails.add @sail3
      @boat2 = Boat.build id:2, type: 'ferry', color: 'blue'
      @sail4 = Sail.build id:4, type: 'small', color: 'red'
      @sail5 = Sail.build id:5, type: 'big', color: 'red'
      @sail6 = Sail.build id:6, type: 'small', color: 'white'
      @boat2.sails.add @sail4
      @boat2.sails.add @sail5
      @boat2.sails.add @sail6
    end
    describe "#count on a relation" do
      it "should return number of relations" do
        expect(@boat1.sails.count).to eq(3)
        expect(@boat2.sails.count).to eq(3)
      end
    end
    describe "#ids on a relation" do
      it "should return object ids" do
        expect(@boat1.sails.ids).to eq([1,2,3])
        expect(@boat2.sails.ids).to eq([4,5,6])
      end
    end
    describe "#where and #ids on a relation" do
      it "should return correct ids" do
        expect(@boat1.sails.where(type:'big').ids).to eq([2,3])
        expect(@boat2.sails.where(type:'big').ids).to eq([5])
      end
    end
    describe "#where on a relation" do
      it "should return correct objects" do
        expect(@boat1.sails.where(type:'big').count).to eq(2)
        expect(@boat2.sails.where(type:'big').count).to eq(1)
      end
    end
    # NOTE(review): description says "#count" but the assertions exercise
    # #random over the filtered ids — consider renaming.
    describe "#where and #count on a relation" do
      it "should return correct number" do
        expect([2,3]).to include(@boat1.sails.where(type:'big').ids.random)
        expect([5]).to include(@boat2.sails.where(type:'big').ids.random)
      end
    end
    describe "#first on a relation" do
      it "should return first object" do
        expect(@boat1.sails.ids.first).to eq(1)
        expect(@boat2.sails.ids.first).to eq(4)
      end
    end
    describe "#last on a relation" do
      it "should return last object" do
        expect(@boat1.sails.ids.last).to eq(3)
        expect(@boat2.sails.ids.last).to eq(6)
      end
    end
    describe "#where and #first/#last on a relation" do
      it "should return first/last object" do
        expect(@boat1.sails.where(color:'white').ids.first).to eq(1)
        expect(@boat1.sails.where(color:'white').ids.last).to eq(3)
        expect(@boat2.sails.where(color:'white').ids.first).to eq(6)
        expect(@boat2.sails.where(color:'white').ids.last).to eq(6)
        expect(@boat1.sails.ids.first(color:'white')).to eq(1)
        expect(@boat1.sails.ids.last(color:'white')).to eq(3)
        expect(@boat2.sails.ids.first(color:'white')).to eq(6)
        expect(@boat2.sails.ids.last(color:'white')).to eq(6)
      end
    end
    describe "#sort and #order on a relation" do
      it "should return last object" do
        expect(@boat1.sails.sort(:color).ids).to eq([2,1,3])
        expect(@boat1.sails.sort(:type).ids).to eq([2,3,1])
        expect(@boat1.sails.sort(:color).order(:desc).ids).to eq([1,3,2])
        expect(@boat1.sails.sort(:type).order(:desc).ids).to eq([1,2,3])
      end
    end
    describe "#random on a relation" do
      it "should return random object" do
        expect([1,2,3]).to include(@boat1.sails.ids.random)
      end
    end
  end
end
#!/usr/bin/env bash
set -e # bail out early if any command fails
set -o pipefail # fail if any component of any pipe fails

# basht macro, shellcheck fix
export T_fail

# Helper assertions (expect_to_equal, expect_to_contain, ...).
# shellcheck disable=SC1091
. spec/bash/test_helpers
# Functions under test (main, create_cluster_args, configure_tls_*, ...).
# shellcheck disable=SC1091
. jobs/rabbitmq-server/templates/setup.bash
# End-to-end check: `main` must render the env file with clustering, SSL and
# cookie settings derived from the exported configuration variables.
T_setup_environment() {
  (
    local env
    DIR="$(mktemp -d)"
    VCAP_HOME="$(mktemp -d)"
    UPGRADE_PREPARATION_NODES_FILE="$(mktemp)"
    # Bash keeps only the most recent `trap ... EXIT`: the original installed
    # three separate traps, so only the last survived and DIR/VCAP_HOME were
    # leaked. Register a single handler covering every temp path.
    trap 'rm -rf "${DIR}" "${VCAP_HOME}" "${UPGRADE_PREPARATION_NODES_FILE}"' EXIT

    # Inputs consumed by main (via setup.bash).
    CLUSTER_PARTITION_HANDLING="autoheal"
    DISK_ALARM_THRESHOLD="{mem_relative,0.4}"
    SELF_NODE="my-node-name"
    RABBITMQ_NODES_STRING="node-1,node-2"
    LOAD_DEFINITIONS="my-definitions"
    SSL_KEY="ssk-key"
    SSL_VERIFY=false
    SSL_VERIFICATION_DEPTH="5"
    SSL_FAIL_IF_NO_PEER_CERT=true
    SSL_SUPPORTED_TLS_VERSIONS="['tlsv1.2','tlsv1.1']"
    SSL_SUPPORTED_TLS_CIPHERS=",{ciphers, ['cipher1','cipher2']}"
    ERLANG_COOKIE="my-awesome-cookie"
    VCAP_USER="$(id -u)"
    VCAP_GROUP="$(id -g)"

    main

    env="$(<"${DIR}/env")"
    expect_file_to_exist "${DIR}/env"
    expect_to_equal "$(<"${DIR}/env.backup")" ""
    expect_to_contain "$env" "'-rabbit cluster_nodes {[node-1,node-2],disc}"
    expect_to_contain "$env" " -rabbit cluster_partition_handling autoheal"
    expect_to_contain "$env" " -rabbit log_levels [{connection,info}]"
    expect_to_contain "$env" " -rabbit disk_free_limit {mem_relative,0.4}"
    expect_to_contain "$env" " -rabbit halt_on_upgrade_failure false"
    expect_to_contain "$env" " -rabbitmq_mqtt subscription_ttl 1800000"
    expect_to_contain "$env" " -rabbitmq_management http_log_dir \"${HTTP_ACCESS_LOG_DIR}\""
    expect_to_contain "$env" "RABBITMQ_MNESIA_DIR=/var/vcap/store/rabbitmq/mnesia/db"
    expect_to_contain "$env" "RABBITMQ_PLUGINS_EXPAND_DIR=/var/vcap/store/rabbitmq/mnesia/db-plugins-expand"
    expect_to_contain "$env" "NODENAME='my-node-name'"
    expect_to_contain "$env" "RABBITMQ_BOOT_MODULE=rabbit"
    expect_to_contain "$env" "CONFIG_FILE='"
    expect_to_contain "$env" " -rabbitmq_management load_definitions"
    expect_to_contain "$env" " -rabbit tcp_listeners []"
    expect_to_contain "$env" " -rabbit ssl_listeners [5671]"
    expect_to_contain "$env" "{verify,verify_none},"

    # SSL
    expect_to_contain "$env" " -rabbitmq_management listener [{port,15672},{ssl,false}]"
    expect_to_contain "$env" " -rabbitmq_mqtt ssl_listeners [8883]"
    expect_to_contain "$env" " -rabbitmq_stomp ssl_listeners [61614]"
    expect_to_contain "$env" " -rabbit ssl_options [{cacertfile,"
    expect_to_contain "$env" "{certfile,"
    expect_to_contain "$env" "{keyfile,"
    expect_to_contain "$env" "{verify,verify_none},"
    expect_to_contain "$env" "{depth,5},"
    expect_to_contain "$env" "{fail_if_no_peer_cert,true},"
    expect_to_contain "$env" "{versions,['\"'\"'tlsv1.2'\"'\"','\"'\"'tlsv1.1'\"'\"']}"
    expect_to_contain "$env" "{ciphers, ['\"'\"'cipher1'\"'\"','\"'\"'cipher2'\"'\"']}"

    # ERLANG COOKIE
    erlang_cookie_path="${DIR}/.erlang.cookie"
    expect_file_to_exist "$erlang_cookie_path"
    erlang_cookie="$(<"$erlang_cookie_path")"
    expect_to_contain "$erlang_cookie" "my-awesome-cookie"
  ) || $T_fail "Failed to configure environment"
}
# create_cluster_args must render the SERVER_START_ARGS clustering fragment.
T_create_cluster_args() {
  (
    local rabbitmq_nodes disk_alarm_threshold cluster_partition_handling http_access_log_dir cluster_args
    rabbitmq_nodes="node-1,node-2"
    disk_alarm_threshold="{mem_relative,0.4}"
    cluster_partition_handling="autoheal"
    http_access_log_dir="/path/to/http-access.log"

    # The original call had a stray trailing comma after
    # $cluster_partition_handling, so the function received "autoheal," as its
    # third argument. Arguments are also quoted so values can never word-split.
    cluster_args="$(create_cluster_args "$rabbitmq_nodes" "$disk_alarm_threshold" "$cluster_partition_handling" "$http_access_log_dir")"

    expect_to_contain "$cluster_args" "-rabbit cluster_nodes {[node-1,node-2],disc}"
    expect_to_contain "$cluster_args" " -rabbit log_levels [{connection,info}]"
    expect_to_contain "$cluster_args" " -rabbit disk_free_limit {mem_relative,0.4}"
    expect_to_contain "$cluster_args" " -rabbit cluster_partition_handling autoheal"
    expect_to_contain "$cluster_args" " -rabbit halt_on_upgrade_failure false"
    expect_to_contain "$cluster_args" " -rabbitmq_mqtt subscription_ttl 1800000"
    expect_to_contain "$cluster_args" " -rabbitmq_management http_log_dir \"/path/to/http-access.log\""
  ) || $T_fail "Failed to create cluster args to pass to SERVER_START_ARGS"
}
# configure_load_definitions must point the management plugin at the rendered
# definitions.json next to the job's etc directory.
T_configure_load_definitions() {
  (
    local load_definitions script_dir definitions
    load_definitions="my-definitions"
    script_dir="/path/to/script/dir"

    # Quote the arguments so paths containing whitespace survive intact
    # (the original passed them unquoted; an unused local was also dropped).
    definitions="$(configure_load_definitions "$load_definitions" "$script_dir")"

    expect_to_equal "$definitions" "-rabbitmq_management load_definitions \"/path/to/script/dir/../etc/definitions.json\""
  ) || $T_fail "Failed to load definitions"
}
# configure_tls_listeners must disable plain TCP and enable the SSL listeners.
T_configure_tls_listeners() {
  (
    local ssl_key listeners
    ssl_key="--this is my key--"

    # The original passed \"$ssl_key\" — unquoted, so the key word-split into
    # several arguments wrapped in literal double-quote characters. Quote it
    # properly so the function receives the key as one argument.
    listeners="$(configure_tls_listeners "$ssl_key")"

    expect_to_equal "$listeners" "-rabbit tcp_listeners [] -rabbit ssl_listeners [5671] -rabbitmq_management listener [{port,15672},{ssl,false}] -rabbitmq_mqtt ssl_listeners [8883] -rabbitmq_stomp ssl_listeners [61614]"
  ) || $T_fail "Failed to configure TLS listeners"
}
# configure_tls_options must assemble the -rabbit ssl_options fragment from
# the cert paths, verification settings, TLS versions and cipher suffix.
T_configure_tls_options() {
  (
    # ssl_supported_tls_ciphers was missing from the original `local` list
    # (it leaked out of the declaration); unused locals were dropped.
    local ssl_key ssl_verify ssl_verification_depth script_dir ssl_fail_if_no_peer_cert ssl_supported_tls_versions ssl_supported_tls_ciphers options
    ssl_key="--this is my key--"
    ssl_verify="true"
    ssl_verification_depth="5"
    ssl_fail_if_no_peer_cert=true
    ssl_supported_tls_versions="['tlsv1.2','tlsv1.1']"
    script_dir="/path/to/script/dir"
    ssl_supported_tls_ciphers=",{ciphers, ['DHE_AES128_GCM_SHA256','DHE_AES256_GCM_SHA256']}"

    options="$(configure_tls_options "${ssl_key}" "${ssl_verify}" "${ssl_verification_depth}" "${ssl_fail_if_no_peer_cert}" "${ssl_supported_tls_versions}" "${ssl_supported_tls_ciphers}" "${script_dir}")"

    expect_to_equal "$options" " -rabbit ssl_options [{cacertfile,\"${script_dir}/../etc/cacert.pem\"},{certfile,\"${script_dir}/../etc/cert.pem\"},{keyfile,\"${script_dir}/../etc/key.pem\"},{verify,verify_peer},{depth,$ssl_verification_depth},{fail_if_no_peer_cert,$ssl_fail_if_no_peer_cert},{versions,$ssl_supported_tls_versions}$ssl_supported_tls_ciphers]"
  ) || $T_fail "Failed to configure TLS options"
}
# create_config_file must write an env file (plus empty backup) containing the
# node name, config path, start args and mnesia directories.
T_create_config_file() {
  (
    local conf_env_file self_node dir nodename config script_dir prefix suffix server_start_args
    conf_env_file=""
    self_node="node-1"
    script_dir="/path/to/script/dir"
    server_start_args="SERVER_START_ARGS='this-is-my-config'"
    dir="$(mktemp -d)"
    trap 'rm -rf ${dir}' EXIT

    create_config_file "$conf_env_file" "$self_node" "$dir" "$script_dir" "$server_start_args"

    expect_file_to_exist "${dir}/env"
    expect_file_to_exist "${dir}/env.backup"

    # Read the rendered file once and assert on its contents.
    local rendered
    rendered="$(<"${dir}/env")"
    expect_to_contain "$rendered" "CONFIG_FILE='/path/to/script/dir/../etc/rabbitmq'"
    expect_to_contain "$rendered" "NODENAME='node-1'"
    expect_to_contain "$rendered" "SERVER_START_ARGS='this-is-my-config'"
    expect_to_contain "$rendered" "RABBITMQ_MNESIA_DIR=/var/vcap/store/rabbitmq/mnesia/db"
    expect_to_contain "$rendered" "RABBITMQ_PLUGINS_EXPAND_DIR=/var/vcap/store/rabbitmq/mnesia/db-plugins-expand"
  ) || $T_fail "Failed to create conf_env file"
}
# prepare_for_upgrade must expand the comma-separated node list into a file
# with one node per line.
T_creates_a_file_with_all_the_nodes_to_be_used_during_upgrades() {
  (
    local nodes_file expected
    nodes_file="$(mktemp)"
    trap 'rm -rf ${nodes_file}' EXIT

    prepare_for_upgrade "node1,node2" "$nodes_file"

    expected="$(echo -e node1\\nnode2)"
    expect_to_equal "$(<$nodes_file)" "$expected"
  ) || $T_fail "Failed to create file with nodes"
}
# prepare_for_upgrade must OVERWRITE a pre-existing nodes file rather than
# append to or merge with its stale content.
T_if_a_file_with_all_the_nodes_exist_should_ignore_its_content() {
  (
    local nodes_file
    nodes_file="$(mktemp)"
    trap 'rm -rf ${nodes_file}' EXIT

    # Seed the file with stale data from a "previous deployment".
    echo "some existing nodes from previous deployments" > "$nodes_file"

    prepare_for_upgrade "node1,node2" "$nodes_file"

    expect_to_equal "$(<$nodes_file)" "$(echo -e node1\\nnode2)"
  ) || $T_fail "Failed to check nodes file"
}
# create_erlang_cookie must write the cookie file, owned by the vcap user.
# Linux-only: relies on adduser/ownership semantics.
T_create_erlang_cookie() {
  (
    if [ "$(uname -s)" != "Linux" ]; then
      echo "WARNING: This test can only be run on Linux plaftorms... skipping!"
      exit 0
    fi

    # The vcap user must exist first. `adduser` exits non-zero when the user
    # already exists (e.g. on a re-run) which, under `set -e`, aborted the
    # whole test — so only create the user when it is missing.
    if ! id vcap > /dev/null 2>&1; then
      sudo adduser --disabled-password --gecos "" vcap
    fi

    local erlang_cookie dir
    erlang_cookie="this-is-my-cookie"
    dir="$(mktemp -d)"
    trap 'rm -rf ${dir}' EXIT

    create_erlang_cookie "$dir" "$erlang_cookie" "vcap"

    expect_file_to_exist "${dir}/.erlang.cookie"
    expect_to_equal "$(<"$dir/.erlang.cookie")" "$erlang_cookie"
  ) || $T_fail "Failed to create erlang cookie"
}
|
-- Employees earning strictly more than the company-wide average salary.
SELECT *
FROM Employee AS e
WHERE e.salary > (
    SELECT AVG(salary)
    FROM Employee
);
# Import necessary dependencies.
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.svm import LinearSVC
import numpy as np

# Load the iris dataset (150 samples, 4 features, 3 classes).
dataset = datasets.load_iris()

# Split data into training and test sets (80/20, fixed seed for reproducibility).
x_train, x_test, y_train, y_test = train_test_split(
    dataset.data, dataset.target, test_size=0.2, random_state=0
)

# Linear SVC model.
model = LinearSVC(C=1, tol=0.01, max_iter=1000)
model.fit(x_train, y_train)

# Predict the output for the test set.
predicted = model.predict(x_test)

# Generate predictions for class A (label 0) and class B (label 1).
# BUGFIX: the deprecated alias np.int was removed in NumPy 1.24, so
# .astype(np.int) raises AttributeError on modern NumPy; use the builtin int
# (identical dtype result) instead.
predicted_A = model.predict(x_test[predicted == 0]).astype(int)
predicted_B = model.predict(x_test[predicted == 1]).astype(int)
# Build the web app and publish web-app/build to the gh-pages branch.
cd web-app
yarn install
yarn build-css
NODE_ENV="production" PUBLIC_URL="https://osoc17.github.io/code9000/" REACT_APP_ROUTER="HASH" REACT_APP_API_URL="https://birds.today/api" yarn build
cd ..

# Recreate the gh-pages branch from scratch. Tolerate the branch not existing
# yet (first deploy) instead of emitting a hard error on delete.
git branch -D gh-pages || true
git push origin --delete gh-pages || true

git add web-app/build && git commit -m "Initial dist subtree commit"
git subtree split --prefix web-app/build -b gh-pages
git push -f origin gh-pages:gh-pages
import argparse
class PeopleManager:
    """Maintain an in-memory, insertion-ordered list of people's names."""

    def __init__(self):
        # Duplicates are allowed; nothing is persisted between runs.
        self.people = []

    def add_person(self, name):
        """Append *name* to the list."""
        self.people.append(name)

    def remove_person(self, name):
        """Remove the first occurrence of *name*, or report it missing."""
        if name not in self.people:
            print(f"{name} not found in the list.")
        else:
            self.people.remove(name)

    def display_people(self):
        """Print a header followed by one stored name per line."""
        print("People's Names:")
        for person in self.people:
            print(person)

    def search_person(self, name):
        """Return True when *name* is present in the list."""
        return name in self.people
def main():
    """Parse the CLI flags and run exactly one PeopleManager operation."""
    # Each flag maps onto one operation; priority order is add, remove,
    # display, search. Note the manager starts empty on every invocation,
    # so the list is not persisted between runs.
    parser = argparse.ArgumentParser(description="Manage a list of people's names")
    parser.add_argument('--add', help="Add a person's name to the list")
    parser.add_argument('--remove', help="Remove a person's name from the list")
    parser.add_argument('--display', action='store_true', help="Display all the names in the list")
    parser.add_argument('--search', help="Search for a person's name in the list")
    args = parser.parse_args()

    manager = PeopleManager()

    if args.add:
        manager.add_person(args.add)
    elif args.remove:
        manager.remove_person(args.remove)
    elif args.display:
        manager.display_people()
    elif args.search:
        found = manager.search_person(args.search)
        if found:
            print(f"{args.search} found in the list.")
        else:
            print(f"{args.search} not found in the list.")
# Run the CLI only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
<gh_stars>10-100
//
// This file was generated by the JavaTM Architecture for XML Binding(JAXB) Reference Implementation, vJAXB 2.1.10 in JDK 6
// See <a href="http://java.sun.com/xml/jaxb">http://java.sun.com/xml/jaxb</a>
// Any modifications to this file will be lost upon recompilation of the source schema.
// Generated on: 2010.01.22 at 02:23:57 PM MST
//
package net.opengis.cat.csw._202;
import java.util.ArrayList;
import java.util.List;
import javax.xml.bind.JAXBElement;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlElementRef;
import javax.xml.bind.annotation.XmlType;
import net.opengis.ows._100.BoundingBoxType;
import net.opengis.ows._100.WGS84BoundingBoxType;
/**
*
* This type extends DCMIRecordType to add ows:BoundingBox;
* it may be used to specify a spatial envelope for the
* catalogued resource.
*
*
* <p>Java class for RecordType complex type.
*
* <p>The following schema fragment specifies the expected content contained within this class.
*
* <pre>
* <complexType name="RecordType">
* <complexContent>
* <extension base="{http://www.opengis.net/cat/csw/2.0.2}DCMIRecordType">
* <sequence>
* <element name="AnyText" type="{http://www.opengis.net/cat/csw/2.0.2}EmptyType" maxOccurs="unbounded" minOccurs="0"/>
* <element ref="{http://www.opengis.net/ows}BoundingBox" maxOccurs="unbounded" minOccurs="0"/>
* </sequence>
* </extension>
* </complexContent>
* </complexType>
* </pre>
*
*
*/
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "RecordType", propOrder = {
    "anyText",
    "boundingBox"
})
public class RecordType
    extends DCMIRecordType
{

    // Lazily created by getAnyText(); may be null until first access.
    @XmlElement(name = "AnyText")
    protected List<EmptyType> anyText;
    // Lazily created by getBoundingBox(); may be null until first access.
    @XmlElementRef(name = "BoundingBox", namespace = "http://www.opengis.net/ows", type = JAXBElement.class)
    protected List<JAXBElement<? extends BoundingBoxType>> boundingBox;

    /**
     * Returns the live (mutable) list backing the anyText property, creating
     * it on first access. JAXB reads and populates this list directly, which
     * is why there is no setter; mutate the returned list instead, e.g.
     * {@code getAnyText().add(newItem)}.
     *
     * <p>
     * Objects of the following type(s) are allowed in the list
     * {@link EmptyType }
     *
     * @return the live list; never null
     */
    public List<EmptyType> getAnyText() {
        if (anyText == null) {
            anyText = new ArrayList<EmptyType>();
        }
        return this.anyText;
    }

    /**
     * Returns the live (mutable) list backing the boundingBox property,
     * creating it on first access. JAXB reads and populates this list
     * directly, which is why there is no setter; mutate the returned list
     * instead, e.g. {@code getBoundingBox().add(newItem)}.
     *
     * <p>
     * Objects of the following type(s) are allowed in the list
     * {@link JAXBElement }{@code <}{@link BoundingBoxType }{@code >}
     * {@link JAXBElement }{@code <}{@link WGS84BoundingBoxType }{@code >}
     *
     * @return the live list; never null
     */
    public List<JAXBElement<? extends BoundingBoxType>> getBoundingBox() {
        if (boundingBox == null) {
            boundingBox = new ArrayList<JAXBElement<? extends BoundingBoxType>>();
        }
        return this.boundingBox;
    }

}
|
<filename>fs/vfs/fs_rmdir.c
/****************************************************************************
* fs/vfs/fs_rmdir.c
*
* Copyright (C) 2007-2009, 2014, 2017 <NAME>. All rights reserved.
* Author: <NAME> <<EMAIL>>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name NuttX nor the names of its contributors may be
* used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include "vfs_config.h"
#include "unistd.h"
#include "errno.h"
#include "stdlib.h"
#include "vnode.h"
#include "sys/stat.h"
#include "string.h"
#include "limits.h"
#include "fs/mount.h"
/****************************************************************************
* Private Functions
****************************************************************************/
/****************************************************************************
 * Name: check_target
 *
 * Description:
 *   Validate that 'vnode' is a directory that may be removed.  Returns OK
 *   when removal may proceed, or a negated errno describing why it may not.
 *   NOTE(review): the 'name' parameter is currently unused here — confirm
 *   whether it is kept intentionally for the caller's symmetry.
 *
 ****************************************************************************/

static int check_target(struct Vnode *vnode, char *name)
{
  /* The lookup may have produced nothing: no such entry */

  if (vnode == NULL)
    {
      return -ENOENT;
    }

  /* Refuse to modify a read-only mounted filesystem */

  if ((vnode->originMount) && (vnode->originMount->mountFlags & MS_RDONLY))
    {
      return -EROFS;
    }

  /* rmdir() removes only directories */

  if (vnode->type != VNODE_TYPE_DIR)
    {
      return -ENOTDIR;
    }

  /* The directory is still referenced by someone else */

  if (vnode->useCount > 0)
    {
      return -EBUSY;
    }

  /* Never remove a mount point (either side of the mount) */

  if ((vnode->flag & VNODE_FLAG_MOUNT_ORIGIN)
      || (vnode->flag & VNODE_FLAG_MOUNT_NEW))
    {
      return -EBUSY;
    }

  /* Refuse to remove the current working directory.  When getcwd() fails
   * this check is skipped (best effort).
   */

  char cwd[PATH_MAX];
  char *pret = getcwd(cwd, PATH_MAX);
  if (pret != NULL)
    {
      struct Vnode *cwdnode = NULL;
      int ret = VnodeLookup(cwd, &cwdnode, 0);
      if (ret == OK && (cwdnode == vnode))
        {
          return -EBUSY;
        }
    }
  return OK;
}
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: do_rmdir
*
* Description: Remove a file managed a mountpoint
*
****************************************************************************/
/* Remove the directory named by 'pathname', resolved relative to 'dirfd'
 * (AT_FDCWD means the current working directory).  Returns OK on success or
 * VFS_ERROR with errno set on failure.
 */

int do_rmdir(int dirfd, const char *pathname)
{
  struct Vnode *vnode = NULL;
  char *fullpath = NULL;
  char *relativepath = NULL;
  char *name = NULL;
  int ret;

  /* Get relative path by dirfd */

  ret = get_path_from_fd(dirfd, &relativepath);
  if (ret < 0)
    {
      goto errout;
    }

  if (relativepath)
    {
      /* Normalize to an absolute path, then look it up */

      ret = vfs_normalize_path((const char *)relativepath, pathname, &fullpath);
      free(relativepath);
      if (ret < 0)
        {
          goto errout;
        }

      /* Extract the final path component.  BUGFIX: this branch previously
       * passed the component WITH its leading '/' to Rmdir while the branch
       * below strips it; strip it here too so both branches hand the driver
       * a bare entry name.
       */

      name = strrchr(fullpath, '/');
      if (name == NULL)
        {
          name = fullpath;
        }
      else
        {
          name++;
        }

      VnodeHold();
      ret = VnodeLookup(fullpath, &vnode, 0);
    }
  else
    {
      /* No dirfd-relative prefix: operate on 'pathname' directly */

      name = strrchr(pathname, '/');
      VnodeHold();
      if (name == NULL)
        {
          name = (char *)pathname;
        }
      else
        {
          name++;
        }
      ret = VnodeLookup(pathname, &vnode, 0);
    }

  if (ret != OK)
    {
      goto errout_with_lock;
    }

  /* Verify the target may be removed (exists, directory, not busy, not a
   * mount point, not the cwd).
   */

  ret = check_target(vnode, name);
  if (ret != OK)
    {
      PRINT_ERR("rmdir failed err = %d\n", ret);
      goto errout_with_lock;
    }

  /* Removal requires write+search permission on the parent directory */

  if (VfsVnodePermissionCheck(vnode->parent, (WRITE_OP | EXEC_OP)))
    {
      ret = -EACCES;
      goto errout_with_lock;
    }

  /* Delegate the removal to the filesystem driver, if it supports it */

  if (vnode && vnode->vop && vnode->vop->Rmdir)
    {
      ret = vnode->vop->Rmdir(vnode->parent, vnode, name);
    }
  else
    {
      ret = -ENOSYS;
    }

  if (ret < 0)
    {
      goto errout_with_lock;
    }

  VnodeFree(vnode);
  VnodeDrop();

  /* Successfully unlinked; free(NULL) is a no-op when no fullpath was built */

  free(fullpath);
  return OK;

errout_with_lock:
  VnodeDrop();
errout:
  free(fullpath);
  set_errno(-ret);
  return VFS_ERROR;
}
/****************************************************************************
* Name: rmdir
*
* Description: Remove a file managed a mountpoint
*
****************************************************************************/
/* POSIX rmdir(): resolve 'pathname' relative to the current working
 * directory and remove it.
 */

int rmdir(const char *pathname)
{
  return do_rmdir(AT_FDCWD, pathname);
}
|
const { referrers } = require("../../src/common/model/constants/referrers");
// Test fixtures for appointment / parameter CRUD tests.
// An appointment request as first created by a candidate (LBA referrer).
const sampleAppointment = {
  id_rco_formation: "21_114876|21_114876|106291",
  candidat_id: "91a370e6-3eb1-4e09-80f0-b7cc6be84fac",
  etablissement_id: "2828558M",
  formation_id: "68769673",
  motivations: "TEST MOTIVATION",
  referrer: referrers.LBA.name,
};
// Update payload for an appointment (different candidate/referrer values).
const sampleUpdateAppointment = {
  candidat_id: "77a370e6-3eb1-4e09-80f0-b7cc6be84fac",
  etablissement_id: "9998558M",
  formation_id: "68769999",
  motivations: "TEST MOTIVATION UPDATE",
  referrer: referrers.PARCOURSUP.name,
};
// A widget parameter enabled for both LBA and Parcoursup referrers.
const sampleParameter = {
  code_postal: "75000",
  etablissement_siret: "32922456200234",
  etablissement_raison_sociale: "TEST RAISON SOCIALE",
  formation_intitule: "Test Formation",
  formation_cfd: "26033206",
  email_rdv: "<EMAIL>",
  id_rco_formation: "14_AF_0000091719|14_SE_0000494236|18894",
  referrers: [referrers.LBA.code, referrers.PARCOURSUP.code],
};
// Update payload for a parameter (narrows referrers to Parcoursup only).
const sampleUpdateParameter = {
  code_postal: "75000",
  etablissement_siret: "32922456299999",
  etablissement_raison_sociale: "UPDATE RAISON SOCIALE",
  formation_intitule: "Update Formation",
  formation_cfd: "260999999",
  email_rdv: "<EMAIL>",
  id_rco_formation: "15_554095|15_1117617|106339",
  referrers: [referrers.PARCOURSUP.code],
};
// Bulk-insert fixture: two parameters with distinct SIRET/referrer values.
const sampleParameters = [
  {
    etablissement_siret: "32922456200234",
    etablissement_raison_sociale: "TEST RAISON SOCIALE",
    formation_intitule: "Test Formation",
    formation_cfd: "26033206",
    email_rdv: "<EMAIL>",
    id_rco_formation: "15_554095|15_1117617|106339",
    referrers: [referrers.LBA.code],
  },
  {
    etablissement_siret: "32922456200235",
    etablissement_raison_sociale: "TEST RAISON SOCIALE 2",
    formation_intitule: "Test Formation 2",
    formation_cfd: "26033205",
    email_rdv: "<EMAIL>",
    id_rco_formation: "15_554095|15_1117617|12345",
    referrers: [referrers.PARCOURSUP.code],
  },
];
module.exports = {
  sampleParameter,
  sampleUpdateParameter,
  sampleParameters,
  sampleAppointment,
  sampleUpdateAppointment,
};
|
# Datadog dashboards API example: list dashboards via the v1 "dash" endpoint.
# Replace both placeholders with real keys before running.
api_key=<YOUR_API_KEY>
app_key=<YOUR_APP_KEY>
# NOTE(review): passing keys as query-string parameters leaks them into shell
# history and server logs — Datadog also accepts them as request headers.
curl "https://api.datadoghq.com/api/v1/dash?api_key=${api_key}&application_key=${app_key}"
|
<gh_stars>1-10
from __future__ import print_function
import os
import base64
import logging
import sys
from six.moves import urllib, http_client
from six.moves.urllib.parse import urlparse, urlunparse
DEFAULT_CHUNK_SIZE = 10 * 1024 * 1024
TUS_VERSION = '1.0.0'
class MethodRequest(urllib.request.Request):
    """urllib Request that honours an explicit HTTP method keyword.

    Based on https://gist.github.com/logic/2715756: plain urllib derives the
    method from the presence of a body, so this wrapper lets callers force
    PATCH/HEAD/OPTIONS requests.
    """

    def __init__(self, *args, **kwargs):
        # Pop 'method' before delegating; the base class does not accept it.
        self._method = kwargs.pop('method', None)
        urllib.request.Request.__init__(self, *args, **kwargs)

    def get_method(self, *args, **kwargs):
        # Fall back to the base-class heuristic when no explicit method given.
        if self._method is None:
            return urllib.request.Request.get_method(self, *args, **kwargs)
        return self._method
class TusError(Exception):
    """Error raised for failed tus requests.

    Attributes:
        message: human-readable description of the failure.
        response: the response object that triggered the error, if any.
        code: HTTP status code associated with the failure, if known.
    """

    def __init__(self, message, response=None, code=None):
        self.message = message  # python2 compatibility
        super(TusError, self).__init__(message)
        self.response = response
        self.code = code

    def __str__(self):
        if self.response is not None:
            text = self.response.text
            return "TusError('%s', response=(%s, '%s'))" % (
                self.message,
                self.response.status_code,
                text.strip())
        # Bug fix: '%' binds tighter than 'or', so the original
        # "..." % self.message or self.code always produced a truthy
        # formatted string and self.code was unreachable.  Fall back to
        # the code only when no message was supplied.
        return "TusError('%s')" % (self.message or self.code)
class _RequestsResponse(object):
def __init__(self, resp, data):
self.status_code = int(resp.code)
self.headers = {k.title(): v for k, v in resp.info().items()}
self.data = data
def _request(req):
    """Send *req*, drain the body, and wrap the result for later inspection."""
    response = urllib.request.urlopen(req)
    payload = response.read()
    response.close()
    return _RequestsResponse(response, payload)
def _requests(endpoint, method, headers, data):
    """Issue one HTTP request and translate urllib failures into TusError."""
    request = MethodRequest(endpoint, data, method=method)
    for header_name, header_value in headers.items():
        request.add_header(header_name, header_value)
    try:
        return _request(request)
    except urllib.error.HTTPError as err:
        raise TusError(err, code=err.code)
    except urllib.error.URLError:
        raise TusError('connection error')
def requests_post(endpoint, headers=None, data=None):
    # POST: create a new upload resource on the tus server.
    return _requests(endpoint, 'POST', headers or {}, data)
def requests_patch(endpoint, headers=None, data=None):
    # PATCH: append a chunk of data at the current offset.
    return _requests(endpoint, 'PATCH', headers or {}, data)
def requests_head(endpoint, headers=None, data=None):
    # HEAD: query server-side state (e.g. Upload-Offset) of a resource.
    return _requests(endpoint, 'HEAD', headers or {}, data)
def requests_options(endpoint, headers=None, data=None):
    # OPTIONS: discover server capabilities/extensions.
    return _requests(endpoint, 'OPTIONS', headers or {}, data)
def upload(file_obj,
           tus_endpoint,
           chunk_size=DEFAULT_CHUNK_SIZE,
           file_name=None,
           headers=None,
           metadata=None):
    """Create an upload resource for *file_obj* and push its full contents.

    file_obj: binary file-like object; its size is probed when seekable.
    tus_endpoint: collection URL of the tus server.
    chunk_size: bytes sent per PATCH request.
    file_name: name advertised in Upload-Metadata (defaults to file_obj.name).
    headers: extra HTTP headers sent with every request.
    metadata: additional Upload-Metadata key/value pairs.
    """
    file_name = file_name or os.path.basename(file_obj.name)
    # None for unseekable streams -> create() requests a deferred length.
    file_size = _get_file_size(file_obj)
    file_endpoint = create(
        tus_endpoint,
        file_name,
        file_size,
        headers=headers,
        metadata=metadata)
    resume(
        file_obj,
        file_endpoint,
        chunk_size=chunk_size,
        headers=headers,
        offset=0)
def _get_file_size(f):
if not _is_seekable(f):
return
pos = f.tell()
f.seek(0, os.SEEK_END)
size = f.tell()
f.seek(pos)
return size
def _is_seekable(f):
if sys.version_info.major == 2:
return hasattr(f, 'seek')
else:
return f.seekable()
def _absolute_file_location(tus_endpoint, file_endpoint):
parsed_file_endpoint = urlparse(file_endpoint)
if parsed_file_endpoint.netloc:
return file_endpoint
parsed_tus_endpoint = urlparse(tus_endpoint)
return urlunparse((
parsed_tus_endpoint.scheme,
parsed_tus_endpoint.netloc,
) + parsed_file_endpoint[2:])
def create(tus_endpoint, file_name, file_size, headers=None, metadata=None, _log=None):
    """Create a new upload resource on the tus server.

    file_size of None requests a deferred-length upload (Upload-Defer-Length).
    Returns the absolute URL of the created file endpoint.
    Raises TusError when the server does not answer 201 Created.
    """
    # Bug fix: _log defaulted to None but was dereferenced unconditionally,
    # so every call without an explicit logger (e.g. from upload()) raised
    # AttributeError.
    if _log is None:
        _log = logging.getLogger(__name__)
    _log.info("Creating file endpoint: %s" % tus_endpoint)
    h = {"Tus-Resumable": TUS_VERSION}
    if file_size is None:
        h['Upload-Defer-Length'] = '1'
    else:
        h['Upload-Length'] = str(file_size)
        _log.info('Setting upload length')
    if headers:
        h.update(headers)
    # Copy the caller's metadata so the injected 'filename' entry does not
    # leak back into the caller's dict.
    metadata = dict(metadata) if metadata else {}
    metadata['filename'] = file_name
    pairs = [
        k + ' ' + base64.b64encode(v.encode('utf-8')).decode()
        for k, v in metadata.items()
    ]
    h["Upload-Metadata"] = ','.join(pairs)
    response = requests_post(tus_endpoint, headers=h)
    if response.status_code != 201:
        raise TusError("Create failed", response=response, code=response.status_code)
    location = response.headers["Location"]
    _log.info("Created: %s", location)
    return _absolute_file_location(tus_endpoint, location)
def resume(file_obj,
           file_endpoint,
           chunk_size=DEFAULT_CHUNK_SIZE,
           headers=None,
           offset=None,
           sent_cb=None,
           _log=None):
    """Upload the remaining contents of *file_obj* to *file_endpoint*.

    offset: byte position to resume from; queried from the server when None.
    sent_cb: optional callback invoked with the running byte total.
    Raises Exception when a non-zero offset is requested on an unseekable
    stream.
    """
    # Bug fix: _log defaulted to None and was dereferenced below; default to
    # a real logger (also keeps the helpers below crash-free when they
    # receive this _log).
    if _log is None:
        _log = logging.getLogger(__name__)
    if offset is None:
        offset = _get_offset(file_endpoint, headers=headers, _log=_log)
    if offset != 0:
        if not _is_seekable(file_obj):
            raise Exception("file is not seekable")
        file_obj.seek(offset)
    total_sent = 0
    data = file_obj.read(chunk_size)
    while data:
        upload_chunk(data, offset, file_endpoint, headers=headers, _log=_log)
        total_sent += len(data)
        if sent_cb is not None:
            sent_cb(total_sent)
        else:
            _log.info("Total bytes sent: %i", total_sent)
        offset += len(data)
        data = file_obj.read(chunk_size)
    # Unseekable streams uploaded with a deferred length; tell the server
    # the final size now that it is known.
    if not _is_seekable(file_obj):
        if headers is None:
            headers = {}
        else:
            headers = dict(headers)
        set_final_size(file_endpoint, offset, headers, _log=_log)
def set_final_size(file_endpoint, size, headers, _log=None):
    """Finalize a deferred-length upload by sending Upload-Length=*size*.

    Mutates *headers*; callers are expected to pass a private copy.
    """
    # Bug fix: guard against the default _log=None before calling .info.
    if _log is None:
        _log = logging.getLogger(__name__)
    _log.info('Setting upload length')
    headers['Upload-Length'] = str(size)
    # An empty-bodied PATCH at the final offset carries the length header.
    upload_chunk(bytes(), size, file_endpoint, headers=headers, _log=_log)
def _get_offset(file_endpoint, headers=None, _log=None):
    """Ask the server (HEAD) how many bytes it already has for this upload."""
    # Bug fix: the default _log=None previously crashed on the .info call.
    if _log is None:
        _log = logging.getLogger(__name__)
    _log.info("Getting offset")
    h = {"Tus-Resumable": TUS_VERSION}
    if headers:
        h.update(headers)
    response = requests_head(file_endpoint, headers=h)
    offset = int(response.headers["Upload-Offset"])
    _log.info("offset=%i", offset)
    return offset
def upload_chunk(data, offset, file_endpoint, headers=None, _log=None):
    """PATCH one chunk at *offset*; raises TusError unless the server says 204."""
    # Bug fix: the default _log=None previously crashed on the .info call.
    if _log is None:
        _log = logging.getLogger(__name__)
    _log.info("Uploading %d bytes chunk from offset: %i", len(data), offset)
    h = {
        'Content-Type': 'application/offset+octet-stream',
        'Upload-Offset': str(offset),
        'Tus-Resumable': TUS_VERSION,
    }
    if headers:
        h.update(headers)
    response = requests_patch(file_endpoint, headers=h, data=data)
    if response.status_code != 204:
        raise TusError("Upload chunk failed", response=response, code=response.status_code)
|
-- --------------------------------------------------------
-- Хост: 127.0.0.1
-- Версия сервера: 5.5.45 - MySQL Community Server (GPL)
-- ОС Сервера: Win32
-- HeidiSQL Версия: 9.3.0.4984
-- --------------------------------------------------------
/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
/*!40101 SET NAMES utf8mb4 */;
/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
-- Дамп структуры базы данных qiwi
CREATE DATABASE IF NOT EXISTS `qiwi` /*!40100 DEFAULT CHARACTER SET utf8 */;
USE `qiwi`;
-- Дамп структуры для таблица qiwi.auth_assignment
-- Maps RBAC auth_item entries (roles/permissions) to users; rows follow
-- renames/deletes of the referenced item via the CASCADE foreign key.
CREATE TABLE IF NOT EXISTS `auth_assignment` (
  `item_name` varchar(64) COLLATE utf8_unicode_ci NOT NULL,
  `user_id` varchar(64) COLLATE utf8_unicode_ci NOT NULL,
  `created_at` int(11) DEFAULT NULL,
  PRIMARY KEY (`item_name`,`user_id`),
  CONSTRAINT `auth_assignment_ibfk_1` FOREIGN KEY (`item_name`) REFERENCES `auth_item` (`name`) ON DELETE CASCADE ON UPDATE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci;
-- Дамп данных таблицы qiwi.auth_assignment: ~0 rows (приблизительно)
/*!40000 ALTER TABLE `auth_assignment` DISABLE KEYS */;
/*!40000 ALTER TABLE `auth_assignment` ENABLE KEYS */;
-- Дамп структуры для таблица qiwi.auth_item
-- RBAC items; the seed data below uses type=1 for roles (admin/user/guest)
-- and type=2 for per-route permissions, with rule_name pointing at an
-- optional auth_rule class.
CREATE TABLE IF NOT EXISTS `auth_item` (
  `name` varchar(64) COLLATE utf8_unicode_ci NOT NULL,
  `type` smallint(6) NOT NULL,
  `description` text COLLATE utf8_unicode_ci,
  `rule_name` varchar(64) COLLATE utf8_unicode_ci DEFAULT NULL,
  `data` blob,
  `created_at` int(11) DEFAULT NULL,
  `updated_at` int(11) DEFAULT NULL,
  PRIMARY KEY (`name`),
  KEY `rule_name` (`rule_name`),
  KEY `idx-auth_item-type` (`type`),
  CONSTRAINT `auth_item_ibfk_1` FOREIGN KEY (`rule_name`) REFERENCES `auth_rule` (`name`) ON DELETE SET NULL ON UPDATE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci;
-- Дамп данных таблицы qiwi.auth_item: ~14 rows (приблизительно)
/*!40000 ALTER TABLE `auth_item` DISABLE KEYS */;
INSERT INTO `auth_item` (`name`, `type`, `description`, `rule_name`, `data`, `created_at`, `updated_at`) VALUES
('admin', 1, NULL, 'app\\components\\UserRoleRule', NULL, 1500534387, 1500534387),
('guest', 1, NULL, 'app\\components\\UserRoleRule', NULL, 1500534387, 1500534387),
('request/close', 2, NULL, NULL, NULL, 1500534387, 1500534387),
('request/create', 2, NULL, NULL, NULL, 1500534387, 1500534387),
('request/delete', 2, NULL, NULL, NULL, 1500534387, 1500534387),
('request/getwork', 2, NULL, NULL, NULL, 1500534387, 1500534387),
('request/index', 2, NULL, NULL, NULL, 1500534387, 1500534387),
('request/sendtoreview', 2, NULL, NULL, NULL, 1500534387, 1500534387),
('request/update', 2, NULL, NULL, NULL, 1500534387, 1500534387),
('request/view', 2, NULL, NULL, NULL, 1500534387, 1500534387),
('site/error', 2, NULL, NULL, NULL, 1500534387, 1500534387),
('site/index', 2, NULL, NULL, NULL, 1500534387, 1500534387),
('site/login', 2, NULL, NULL, NULL, 1500534387, 1500534387),
('site/logout', 2, NULL, NULL, NULL, 1500534387, 1500534387),
('user', 1, NULL, 'app\\components\\UserRoleRule', NULL, 1500534387, 1500534387);
/*!40000 ALTER TABLE `auth_item` ENABLE KEYS */;
-- Дамп структуры для таблица qiwi.auth_item_child
-- RBAC hierarchy: which child items (permissions) each parent item (role)
-- grants; both sides cascade with auth_item changes.
CREATE TABLE IF NOT EXISTS `auth_item_child` (
  `parent` varchar(64) COLLATE utf8_unicode_ci NOT NULL,
  `child` varchar(64) COLLATE utf8_unicode_ci NOT NULL,
  PRIMARY KEY (`parent`,`child`),
  KEY `child` (`child`),
  CONSTRAINT `auth_item_child_ibfk_1` FOREIGN KEY (`parent`) REFERENCES `auth_item` (`name`) ON DELETE CASCADE ON UPDATE CASCADE,
  CONSTRAINT `auth_item_child_ibfk_2` FOREIGN KEY (`child`) REFERENCES `auth_item` (`name`) ON DELETE CASCADE ON UPDATE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci;
-- Дамп данных таблицы qiwi.auth_item_child: ~24 rows (приблизительно)
/*!40000 ALTER TABLE `auth_item_child` DISABLE KEYS */;
INSERT INTO `auth_item_child` (`parent`, `child`) VALUES
('admin', 'request/close'),
('admin', 'request/create'),
('user', 'request/create'),
('admin', 'request/delete'),
('admin', 'request/getwork'),
('user', 'request/getwork'),
('admin', 'request/index'),
('user', 'request/index'),
('admin', 'request/sendtoreview'),
('user', 'request/sendtoreview'),
('admin', 'request/update'),
('admin', 'request/view'),
('user', 'request/view'),
('admin', 'site/error'),
('guest', 'site/error'),
('user', 'site/error'),
('admin', 'site/index'),
('guest', 'site/index'),
('user', 'site/index'),
('admin', 'site/login'),
('guest', 'site/login'),
('user', 'site/login'),
('admin', 'site/logout'),
('guest', 'site/logout'),
('user', 'site/logout');
/*!40000 ALTER TABLE `auth_item_child` ENABLE KEYS */;
-- Дамп структуры для таблица qiwi.auth_rule
-- RBAC rule classes; `data` holds a serialized rule object (see the seed
-- row for app\components\UserRoleRule).
CREATE TABLE IF NOT EXISTS `auth_rule` (
  `name` varchar(64) COLLATE utf8_unicode_ci NOT NULL,
  `data` blob,
  `created_at` int(11) DEFAULT NULL,
  `updated_at` int(11) DEFAULT NULL,
  PRIMARY KEY (`name`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci;
-- Дамп данных таблицы qiwi.auth_rule: ~1 rows (приблизительно)
/*!40000 ALTER TABLE `auth_rule` DISABLE KEYS */;
INSERT INTO `auth_rule` (`name`, `data`, `created_at`, `updated_at`) VALUES
('app\\components\\UserRoleRule', _binary 0x4F3A32373A226170705C636F6D706F6E656E74735C55736572526F6C6552756C65223A333A7B733A343A226E616D65223B733A32373A226170705C636F6D706F6E656E74735C55736572526F6C6552756C65223B733A393A22637265617465644174223B693A313530303533343338373B733A393A22757064617465644174223B693A313530303533343338373B7D, 1500534387, 1500534387);
/*!40000 ALTER TABLE `auth_rule` ENABLE KEYS */;
-- Дамп структуры для таблица qiwi.migration
-- Framework migration bookkeeping: which schema migrations were applied
-- and when (unix timestamp).
CREATE TABLE IF NOT EXISTS `migration` (
  `version` varchar(180) NOT NULL,
  `apply_time` int(11) DEFAULT NULL,
  PRIMARY KEY (`version`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- Дамп данных таблицы qiwi.migration: ~3 rows (приблизительно)
/*!40000 ALTER TABLE `migration` DISABLE KEYS */;
INSERT INTO `migration` (`version`, `apply_time`) VALUES
('m000000_000000_base', 1500206639),
('m140506_102106_rbac_init', 1500206643),
('m170716_133902_add_users', 1500320299);
/*!40000 ALTER TABLE `migration` ENABLE KEYS */;
-- Дамп структуры для таблица qiwi.request
-- Work requests; `status` is a free-form state string ('new'/'inreview' in
-- the seed data), worked_by/created_by reference user ids (no FK declared).
CREATE TABLE IF NOT EXISTS `request` (
  `id` int(11) NOT NULL AUTO_INCREMENT,
  `name` varchar(255) DEFAULT NULL,
  `description` text,
  `status` varchar(50) DEFAULT '0',
  `result` text,
  `worked_by` int(11) DEFAULT NULL,
  `created_by` int(11) DEFAULT NULL,
  `created_at` timestamp NULL DEFAULT CURRENT_TIMESTAMP,
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=17 DEFAULT CHARSET=utf8;
-- Дамп данных таблицы qiwi.request: ~10 rows (приблизительно)
/*!40000 ALTER TABLE `request` DISABLE KEYS */;
INSERT INTO `request` (`id`, `name`, `description`, `status`, `result`, `worked_by`, `created_by`, `created_at`) VALUES
(7, 'test', 'test', 'inreview', 'заявка заврешнеа', 6, 3, '2017-07-19 20:35:09'),
(8, 'test2', 'test2', 'inreview', 'Сделал всё прекрасно', 4, 3, '2017-07-19 20:35:17'),
(9, 'test3', 'test3', 'inreview', 'Сделал', 3, 4, '2017-07-19 20:35:43'),
(10, 'test4', 'test4', 'inreview', 'ыфваываываыв', 4, 4, '2017-07-19 20:35:53'),
(11, 'test4', 'test4', 'new', NULL, NULL, 4, '2017-07-19 20:36:01'),
(12, 'sadfasd', 'fasdfsd', 'new', NULL, NULL, 4, '2017-07-19 20:40:19'),
(13, 'sadfsdf', 'asdfsd', 'inreview', '', 4, 4, '2017-07-19 20:40:25'),
(14, 'sadfsdfsda', 'fsdafsdfsd', 'new', NULL, NULL, 4, '2017-07-19 20:40:36'),
(15, 'asdfsd', 'fsdfsdf', 'new', NULL, NULL, 4, '2017-07-19 20:40:45'),
(16, 'sadfasd', 'fsdfasdfsdf', 'inreview', 'результат', 4, 4, '2017-07-19 20:40:51'),
(17, 'gfjhsfj', 'sfjs', 'new', NULL, NULL, 4, '2017-07-20 09:43:54'),
(18, 'fgh', 'sfhgjgfj', 'new', NULL, NULL, 4, '2017-07-20 09:47:35'),
(19, 'fdgaf', 'gdfgadfg', 'new', NULL, NULL, 4, '2017-07-20 09:49:47');
/*!40000 ALTER TABLE `request` ENABLE KEYS */;
-- Дамп структуры для таблица qiwi.request_history
-- Audit log of request status changes (free-text description per change).
CREATE TABLE IF NOT EXISTS `request_history` (
  `id` int(11) NOT NULL AUTO_INCREMENT,
  `request_id` int(11) NOT NULL DEFAULT '0',
  `changed_by` int(11) DEFAULT NULL,
  `description` varchar(255) NOT NULL DEFAULT '0',
  `created_at` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=8 DEFAULT CHARSET=utf8;
-- Дамп данных таблицы qiwi.request_history: ~7 rows (приблизительно)
/*!40000 ALTER TABLE `request_history` DISABLE KEYS */;
INSERT INTO `request_history` (`id`, `request_id`, `changed_by`, `description`, `created_at`) VALUES
(1, 9, 3, 'Заявка переведена в статус "В работе"', '2017-07-19 23:19:09'),
(2, 9, 3, 'Заявка переведена в статус "На проверке"', '2017-07-19 23:20:21'),
(3, 8, 4, 'Заявка переведена в статус "В работе"', '2017-07-20 01:13:11'),
(4, 10, 4, 'Заявка переведена в статус "В работе"', '2017-07-20 01:13:21'),
(5, 8, 4, 'Заявка переведена в статус "На проверке"', '2017-07-20 01:15:55'),
(6, 10, 4, 'Заявка переведена в статус "На проверке"', '2017-07-20 01:16:05'),
(7, 16, 4, 'Заявка переведена в статус "В работе"', '2017-07-20 01:16:27'),
(8, 13, 4, 'Заявка переведена в статус "В работе"', '2017-07-20 09:54:28'),
(9, 13, 4, 'Заявка переведена в статус "На проверке"', '2017-07-20 09:54:35'),
(10, 16, 4, 'Заявка переведена в статус "На проверке"', '2017-07-20 09:59:57');
/*!40000 ALTER TABLE `request_history` ENABLE KEYS */;
-- Дамп структуры для таблица qiwi.user
-- Application accounts; `password` stores a salted hash (per-row `salt`),
-- `role` matches the RBAC role names in auth_item ('admin'/'user').
CREATE TABLE IF NOT EXISTS `user` (
  `id` int(11) NOT NULL AUTO_INCREMENT,
  `login` varchar(255) DEFAULT NULL,
  `password` varchar(255) DEFAULT NULL,
  `name` varchar(255) DEFAULT NULL,
  `surname` varchar(255) DEFAULT NULL,
  `middle_name` varchar(255) DEFAULT NULL,
  `role` varchar(50) DEFAULT NULL,
  `salt` varchar(255) DEFAULT NULL,
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=7 DEFAULT CHARSET=utf8;
-- Дамп данных таблицы qiwi.user: ~4 rows (приблизительно)
/*!40000 ALTER TABLE `user` DISABLE KEYS */;
INSERT INTO `user` (`id`, `login`, `password`, `name`, `surname`, `middle_name`, `role`, `salt`) VALUES
(3, 'manager1', '<PASSWORD>', 'Андрей', 'Попов', 'Алексеевич', 'user', '596d122b57'),
(4, 'manager2', '<PASSWORD>', 'Виктор', 'Семёнов', 'Андреевич', 'user', '<PASSWORD>'),
(5, 'admin', '82902c42095c91510266bdd64a28<PASSWORD>', 'Роман', 'Пермитин', 'Сергеевич', 'admin', '5<PASSWORD>'),
(6, 'manager3', '82902c42095c91510266bdd64a2<PASSWORD>', 'Сергей', 'Петров', 'Олегович', 'user', '596d122b57');
/*!40000 ALTER TABLE `user` ENABLE KEYS */;
/*!40101 SET SQL_MODE=IFNULL(@OLD_SQL_MODE, '') */;
/*!40014 SET FOREIGN_KEY_CHECKS=IF(@OLD_FOREIGN_KEY_CHECKS IS NULL, 1, @OLD_FOREIGN_KEY_CHECKS) */;
/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
|
// Parses a JSON string and returns an array of its top-level values.
// Throws SyntaxError when jsonString is not valid JSON.
function parseJSONString(jsonString) {
  const jsonObj = JSON.parse(jsonString);
  // Object.values replaces the manual for..in accumulation.
  return Object.values(jsonObj);
}
// Example — bug fix: JSON requires double-quoted keys and strings; the
// original example used single quotes, which JSON.parse rejects.
const input = '{"name":"John", "age":20}';
const output = parseJSONString(input); // ['John', 20]
####################### chroot_mlfs.sh #########################################
#!/bin/bash
#
# Copyright 2018, 2019,2020,2021 J. E. Garrott Sr, Puyallup, WA, USA
# Copyright 2018, 2019,2020,2021 "nobodino", Bordeaux, FRANCE
# All rights reserved.
#
# Redistribution and use of this script, with or without modification, is
# permitted provided that the following conditions are met:
#
# 1. Redistributions of this script must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#--------------------------------------------------------------------------
#
# Note: Much of this script is inspired from the LFS manual chapter 5
# Copyright © 1999-2021 Gerard Beekmans and may be
# copied under the MIT License.
#
#--------------------------------------------------------------------------
#
# Above july 2018, revisions made through github project:
# https://github.com/nobodino/slackware-from-scratch
#
#**********************************
export GREEN="\\033[1;32m"
export NORMAL="\\033[0;39m"
export RED="\\033[1;31m"
export PINK="\\033[1;35m"
export BLUE="\\033[1;34m"
export YELLOW="\\033[1;33m"
#**********************************
# Safety guard: every path below is rooted at $MLFS. If the variable is
# unset or empty, the unprefixed commands would chown, mknod and mount
# over the HOST system — refuse to run in that case.
#**********************************
if [ -z "$MLFS" ]; then
	echo -e "$RED" "MLFS is not set or empty, aborting." "$NORMAL" >&2
	exit 1
fi
#**********************************
# chown from mlfs:mlfs to root:root
# (all $MLFS expansions below are quoted to survive paths with spaces)
#**********************************
cd .. && chown -R root:root "$MLFS/tools"
#**********************************
mkdir -pv "$MLFS"/{dev,proc,sys,run}
#**********************************
# When the kernel boots the system, it requires the presence of
# a few device nodes, in particular the console and null
# devices. The device nodes must be created on the hard
# disk so that they are available before udevd has been
# started, and additionally when Linux is started with
# init=/bin/bash. Create the devices by running the following commands:
#**********************************
mknod -m 600 "$MLFS/dev/console" c 5 1
mknod -m 666 "$MLFS/dev/null" c 1 3
#**********************************
# Mounting and Populating /dev ('cause udev ain't yet!)
#**********************************
mount -v --bind /dev "$MLFS/dev"
#**********************************
# Now mount the remaining virtual kernel filesystems:
#**********************************
mount -v --bind /dev/pts "$MLFS/dev/pts" -o gid=5,mode=620
mount -vt proc proc "$MLFS/proc"
mount -vt sysfs sysfs "$MLFS/sys"
mount -vt tmpfs tmpfs "$MLFS/run"
#**********************************
# In some host systems, /dev/shm is a symbolic link to /run/shm.
# The /run tmpfs was mounted above so in this case only a directory
# needs to be created.
#**********************************
if [ -h "$MLFS/dev/shm" ]; then
	mkdir -pv "$MLFS/$(readlink "$MLFS/dev/shm")"
fi
#**********************************
echo
echo "The MLFS directory is now ready for building."
echo
echo "From now, you are on the $MLFS side."
echo "Be sure you are ready before doing anything."
echo "You will enter the /sources directory, and establish links between:"
echo
echo " - /tools/bin and /bin"
echo " - /tools/lib and /usr/lib:"
echo
echo "You can execute now the following instructions."
echo
echo -e "$YELLOW" "cd /sources && ./sfsbuild1.sh link.list" "$NORMAL"
echo
#**********************************
# and finally, enter the chroot environment.
#**********************************
chroot "$MLFS" /tools/bin/env -i \
    HOME=/root \
    TERM="$TERM" \
    PS1='\u:\w\$ ' \
    PATH=/bin:/usr/bin:/sbin:/usr/sbin:/cross-tools/bin:/tools/bin \
    /tools/bin/bash --login +h
#**********************************
|
import logging
import os
def create_logger(module_name, log_file_path):
    """Return a DEBUG-level logger for *module_name* that writes to
    *log_file_path*.

    Bug fix: logging.getLogger returns the same logger object per name, but
    the original attached a fresh FileHandler on every call, so repeated
    calls duplicated every log record. A handler is now added at most once
    per (logger, file) pair.
    """
    logger = logging.getLogger(module_name)
    logger.setLevel(logging.DEBUG)
    target = os.path.abspath(log_file_path)
    # Skip handler creation when this logger already writes to that file.
    for handler in logger.handlers:
        if isinstance(handler, logging.FileHandler) and handler.baseFilename == target:
            return logger
    # Create a file handler and set the log level
    file_handler = logging.FileHandler(log_file_path)
    file_handler.setLevel(logging.DEBUG)
    # Create a formatter and set the format for log messages
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler.setFormatter(formatter)
    # Add the file handler to the logger
    logger.addHandler(file_handler)
    return logger
<gh_stars>1-10
// don't put in jp.skypencil.brainjack to test call from other package
package jp.skypencil.brainjack.interpreter;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import jp.skypencil.brainjack.AbstractTest;
import jp.skypencil.brainjack.Interpreter;
/**
 * Runs the shared Brainjack test suite against the {@link Interpreter}
 * implementation. Deliberately placed outside jp.skypencil.brainjack (see
 * the note above the package declaration) to exercise the public API from
 * a foreign package.
 */
public class InterpreterTest extends AbstractTest {
  /**
   * Executes {@code commands} with {@code input} and returns everything the
   * program wrote to its output stream, decoded as UTF-8.
   */
  @Override
  protected String execute(String commands, InputStream input) throws IOException {
    Interpreter interpreter = new Interpreter();
    ByteArrayOutputStream output = new ByteArrayOutputStream();
    interpreter.execute(commands, input, output);
    return output.toString("UTF-8");
  }
}
|
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
require 'date'
require 'google/apis/core/base_service'
require 'google/apis/core/json_representation'
require 'google/apis/core/hashable'
require 'google/apis/errors'
module Google
module Apis
module ClouddebuggerV2
# Generated forward declarations for the Cloud Debugger v2 JSON classes.
# Each class pairs an (initially empty) Representation with
# JsonObjectSupport; the concrete property mappings are added when the
# classes are reopened further down in this file.
class AliasContext
class Representation < Google::Apis::Core::JsonRepresentation; end
include Google::Apis::Core::JsonObjectSupport
end
class Breakpoint
class Representation < Google::Apis::Core::JsonRepresentation; end
include Google::Apis::Core::JsonObjectSupport
end
class CloudRepoSourceContext
class Representation < Google::Apis::Core::JsonRepresentation; end
include Google::Apis::Core::JsonObjectSupport
end
class CloudWorkspaceId
class Representation < Google::Apis::Core::JsonRepresentation; end
include Google::Apis::Core::JsonObjectSupport
end
class CloudWorkspaceSourceContext
class Representation < Google::Apis::Core::JsonRepresentation; end
include Google::Apis::Core::JsonObjectSupport
end
class Debuggee
class Representation < Google::Apis::Core::JsonRepresentation; end
include Google::Apis::Core::JsonObjectSupport
end
class Empty
class Representation < Google::Apis::Core::JsonRepresentation; end
include Google::Apis::Core::JsonObjectSupport
end
class ExtendedSourceContext
class Representation < Google::Apis::Core::JsonRepresentation; end
include Google::Apis::Core::JsonObjectSupport
end
class FormatMessage
class Representation < Google::Apis::Core::JsonRepresentation; end
include Google::Apis::Core::JsonObjectSupport
end
class GerritSourceContext
class Representation < Google::Apis::Core::JsonRepresentation; end
include Google::Apis::Core::JsonObjectSupport
end
class GetBreakpointResponse
class Representation < Google::Apis::Core::JsonRepresentation; end
include Google::Apis::Core::JsonObjectSupport
end
class GitSourceContext
class Representation < Google::Apis::Core::JsonRepresentation; end
include Google::Apis::Core::JsonObjectSupport
end
class ListActiveBreakpointsResponse
class Representation < Google::Apis::Core::JsonRepresentation; end
include Google::Apis::Core::JsonObjectSupport
end
class ListBreakpointsResponse
class Representation < Google::Apis::Core::JsonRepresentation; end
include Google::Apis::Core::JsonObjectSupport
end
class ListDebuggeesResponse
class Representation < Google::Apis::Core::JsonRepresentation; end
include Google::Apis::Core::JsonObjectSupport
end
class ProjectRepoId
class Representation < Google::Apis::Core::JsonRepresentation; end
include Google::Apis::Core::JsonObjectSupport
end
class RegisterDebuggeeRequest
class Representation < Google::Apis::Core::JsonRepresentation; end
include Google::Apis::Core::JsonObjectSupport
end
class RegisterDebuggeeResponse
class Representation < Google::Apis::Core::JsonRepresentation; end
include Google::Apis::Core::JsonObjectSupport
end
class RepoId
class Representation < Google::Apis::Core::JsonRepresentation; end
include Google::Apis::Core::JsonObjectSupport
end
class SetBreakpointResponse
class Representation < Google::Apis::Core::JsonRepresentation; end
include Google::Apis::Core::JsonObjectSupport
end
class SourceContext
class Representation < Google::Apis::Core::JsonRepresentation; end
include Google::Apis::Core::JsonObjectSupport
end
class SourceLocation
class Representation < Google::Apis::Core::JsonRepresentation; end
include Google::Apis::Core::JsonObjectSupport
end
class StackFrame
class Representation < Google::Apis::Core::JsonRepresentation; end
include Google::Apis::Core::JsonObjectSupport
end
class StatusMessage
class Representation < Google::Apis::Core::JsonRepresentation; end
include Google::Apis::Core::JsonObjectSupport
end
class UpdateActiveBreakpointRequest
class Representation < Google::Apis::Core::JsonRepresentation; end
include Google::Apis::Core::JsonObjectSupport
end
class UpdateActiveBreakpointResponse
class Representation < Google::Apis::Core::JsonRepresentation; end
include Google::Apis::Core::JsonObjectSupport
end
class Variable
class Representation < Google::Apis::Core::JsonRepresentation; end
include Google::Apis::Core::JsonObjectSupport
end
# Concrete JSON property mappings for the classes declared above.
# Generated code: property/collection/hash symbols mirror the wire-format
# field names given in the `as:` options.
class AliasContext
# @private
class Representation < Google::Apis::Core::JsonRepresentation
property :kind, as: 'kind'
property :name, as: 'name'
end
end
class Breakpoint
# @private
class Representation < Google::Apis::Core::JsonRepresentation
property :action, as: 'action'
property :condition, as: 'condition'
property :create_time, as: 'createTime'
collection :evaluated_expressions, as: 'evaluatedExpressions', class: Google::Apis::ClouddebuggerV2::Variable, decorator: Google::Apis::ClouddebuggerV2::Variable::Representation
collection :expressions, as: 'expressions'
property :final_time, as: 'finalTime'
property :id, as: 'id'
property :is_final_state, as: 'isFinalState'
hash :labels, as: 'labels'
property :location, as: 'location', class: Google::Apis::ClouddebuggerV2::SourceLocation, decorator: Google::Apis::ClouddebuggerV2::SourceLocation::Representation
property :log_level, as: 'logLevel'
property :log_message_format, as: 'logMessageFormat'
collection :stack_frames, as: 'stackFrames', class: Google::Apis::ClouddebuggerV2::StackFrame, decorator: Google::Apis::ClouddebuggerV2::StackFrame::Representation
property :status, as: 'status', class: Google::Apis::ClouddebuggerV2::StatusMessage, decorator: Google::Apis::ClouddebuggerV2::StatusMessage::Representation
property :user_email, as: 'userEmail'
collection :variable_table, as: 'variableTable', class: Google::Apis::ClouddebuggerV2::Variable, decorator: Google::Apis::ClouddebuggerV2::Variable::Representation
end
end
class CloudRepoSourceContext
# @private
class Representation < Google::Apis::Core::JsonRepresentation
property :alias_context, as: 'aliasContext', class: Google::Apis::ClouddebuggerV2::AliasContext, decorator: Google::Apis::ClouddebuggerV2::AliasContext::Representation
property :alias_name, as: 'aliasName'
property :repo_id, as: 'repoId', class: Google::Apis::ClouddebuggerV2::RepoId, decorator: Google::Apis::ClouddebuggerV2::RepoId::Representation
property :revision_id, as: 'revisionId'
end
end
class CloudWorkspaceId
# @private
class Representation < Google::Apis::Core::JsonRepresentation
property :name, as: 'name'
property :repo_id, as: 'repoId', class: Google::Apis::ClouddebuggerV2::RepoId, decorator: Google::Apis::ClouddebuggerV2::RepoId::Representation
end
end
class CloudWorkspaceSourceContext
# @private
class Representation < Google::Apis::Core::JsonRepresentation
property :snapshot_id, as: 'snapshotId'
property :workspace_id, as: 'workspaceId', class: Google::Apis::ClouddebuggerV2::CloudWorkspaceId, decorator: Google::Apis::ClouddebuggerV2::CloudWorkspaceId::Representation
end
end
class Debuggee
# @private
class Representation < Google::Apis::Core::JsonRepresentation
property :agent_version, as: 'agentVersion'
property :description, as: 'description'
collection :ext_source_contexts, as: 'extSourceContexts', class: Google::Apis::ClouddebuggerV2::ExtendedSourceContext, decorator: Google::Apis::ClouddebuggerV2::ExtendedSourceContext::Representation
property :id, as: 'id'
property :is_disabled, as: 'isDisabled'
property :is_inactive, as: 'isInactive'
hash :labels, as: 'labels'
property :project, as: 'project'
collection :source_contexts, as: 'sourceContexts', class: Google::Apis::ClouddebuggerV2::SourceContext, decorator: Google::Apis::ClouddebuggerV2::SourceContext::Representation
property :status, as: 'status', class: Google::Apis::ClouddebuggerV2::StatusMessage, decorator: Google::Apis::ClouddebuggerV2::StatusMessage::Representation
property :uniquifier, as: 'uniquifier'
end
end
class Empty
# @private
class Representation < Google::Apis::Core::JsonRepresentation
end
end
class ExtendedSourceContext
# @private
class Representation < Google::Apis::Core::JsonRepresentation
property :context, as: 'context', class: Google::Apis::ClouddebuggerV2::SourceContext, decorator: Google::Apis::ClouddebuggerV2::SourceContext::Representation
hash :labels, as: 'labels'
end
end
class FormatMessage
# @private
class Representation < Google::Apis::Core::JsonRepresentation
property :format, as: 'format'
collection :parameters, as: 'parameters'
end
end
class GerritSourceContext
# @private
class Representation < Google::Apis::Core::JsonRepresentation
property :alias_context, as: 'aliasContext', class: Google::Apis::ClouddebuggerV2::AliasContext, decorator: Google::Apis::ClouddebuggerV2::AliasContext::Representation
property :alias_name, as: 'aliasName'
property :gerrit_project, as: 'gerritProject'
property :host_uri, as: 'hostUri'
property :revision_id, as: 'revisionId'
end
end
class GetBreakpointResponse
# @private
class Representation < Google::Apis::Core::JsonRepresentation
property :breakpoint, as: 'breakpoint', class: Google::Apis::ClouddebuggerV2::Breakpoint, decorator: Google::Apis::ClouddebuggerV2::Breakpoint::Representation
end
end
class GitSourceContext
# @private
class Representation < Google::Apis::Core::JsonRepresentation
property :revision_id, as: 'revisionId'
property :url, as: 'url'
end
end
class ListActiveBreakpointsResponse
# @private
class Representation < Google::Apis::Core::JsonRepresentation
collection :breakpoints, as: 'breakpoints', class: Google::Apis::ClouddebuggerV2::Breakpoint, decorator: Google::Apis::ClouddebuggerV2::Breakpoint::Representation
property :next_wait_token, as: 'nextWaitToken'
property :wait_expired, as: 'waitExpired'
end
end
class ListBreakpointsResponse
# @private
class Representation < Google::Apis::Core::JsonRepresentation
collection :breakpoints, as: 'breakpoints', class: Google::Apis::ClouddebuggerV2::Breakpoint, decorator: Google::Apis::ClouddebuggerV2::Breakpoint::Representation
property :next_wait_token, as: 'nextWaitToken'
end
end
class ListDebuggeesResponse
# @private
class Representation < Google::Apis::Core::JsonRepresentation
collection :debuggees, as: 'debuggees', class: Google::Apis::ClouddebuggerV2::Debuggee, decorator: Google::Apis::ClouddebuggerV2::Debuggee::Representation
end
end
class ProjectRepoId
# @private
class Representation < Google::Apis::Core::JsonRepresentation
property :project_id, as: 'projectId'
property :repo_name, as: 'repoName'
end
end
class RegisterDebuggeeRequest
# @private
class Representation < Google::Apis::Core::JsonRepresentation
property :debuggee, as: 'debuggee', class: Google::Apis::ClouddebuggerV2::Debuggee, decorator: Google::Apis::ClouddebuggerV2::Debuggee::Representation
end
end
class RegisterDebuggeeResponse
# @private
class Representation < Google::Apis::Core::JsonRepresentation
property :debuggee, as: 'debuggee', class: Google::Apis::ClouddebuggerV2::Debuggee, decorator: Google::Apis::ClouddebuggerV2::Debuggee::Representation
end
end
class RepoId
# @private
class Representation < Google::Apis::Core::JsonRepresentation
property :project_repo_id, as: 'projectRepoId', class: Google::Apis::ClouddebuggerV2::ProjectRepoId, decorator: Google::Apis::ClouddebuggerV2::ProjectRepoId::Representation
property :uid, as: 'uid'
end
end
class SetBreakpointResponse
# @private
class Representation < Google::Apis::Core::JsonRepresentation
property :breakpoint, as: 'breakpoint', class: Google::Apis::ClouddebuggerV2::Breakpoint, decorator: Google::Apis::ClouddebuggerV2::Breakpoint::Representation
end
end
class SourceContext
# @private
class Representation < Google::Apis::Core::JsonRepresentation
property :cloud_repo, as: 'cloudRepo', class: Google::Apis::ClouddebuggerV2::CloudRepoSourceContext, decorator: Google::Apis::ClouddebuggerV2::CloudRepoSourceContext::Representation
property :cloud_workspace, as: 'cloudWorkspace', class: Google::Apis::ClouddebuggerV2::CloudWorkspaceSourceContext, decorator: Google::Apis::ClouddebuggerV2::CloudWorkspaceSourceContext::Representation
property :gerrit, as: 'gerrit', class: Google::Apis::ClouddebuggerV2::GerritSourceContext, decorator: Google::Apis::ClouddebuggerV2::GerritSourceContext::Representation
property :git, as: 'git', class: Google::Apis::ClouddebuggerV2::GitSourceContext, decorator: Google::Apis::ClouddebuggerV2::GitSourceContext::Representation
end
end
class SourceLocation
# @private
class Representation < Google::Apis::Core::JsonRepresentation
property :line, as: 'line'
property :path, as: 'path'
end
end
class StackFrame
# @private
class Representation < Google::Apis::Core::JsonRepresentation
collection :arguments, as: 'arguments', class: Google::Apis::ClouddebuggerV2::Variable, decorator: Google::Apis::ClouddebuggerV2::Variable::Representation
property :function, as: 'function'
collection :locals, as: 'locals', class: Google::Apis::ClouddebuggerV2::Variable, decorator: Google::Apis::ClouddebuggerV2::Variable::Representation
property :location, as: 'location', class: Google::Apis::ClouddebuggerV2::SourceLocation, decorator: Google::Apis::ClouddebuggerV2::SourceLocation::Representation
end
end
class StatusMessage
# @private
class Representation < Google::Apis::Core::JsonRepresentation
property :description, as: 'description', class: Google::Apis::ClouddebuggerV2::FormatMessage, decorator: Google::Apis::ClouddebuggerV2::FormatMessage::Representation
property :is_error, as: 'isError'
property :refers_to, as: 'refersTo'
end
end
class UpdateActiveBreakpointRequest
# @private
class Representation < Google::Apis::Core::JsonRepresentation
property :breakpoint, as: 'breakpoint', class: Google::Apis::ClouddebuggerV2::Breakpoint, decorator: Google::Apis::ClouddebuggerV2::Breakpoint::Representation
end
end
class UpdateActiveBreakpointResponse
# @private
class Representation < Google::Apis::Core::JsonRepresentation
end
end
class Variable
# @private
class Representation < Google::Apis::Core::JsonRepresentation
collection :members, as: 'members', class: Google::Apis::ClouddebuggerV2::Variable, decorator: Google::Apis::ClouddebuggerV2::Variable::Representation
property :name, as: 'name'
property :status, as: 'status', class: Google::Apis::ClouddebuggerV2::StatusMessage, decorator: Google::Apis::ClouddebuggerV2::StatusMessage::Representation
property :type, as: 'type'
property :value, as: 'value'
property :var_table_index, as: 'varTableIndex'
end
end
end
end
end
|
/// Coordinates loading of the current user's session and publishes loading
/// state through `onLoadStateChange`.
///
/// Fixes over the previous revision:
/// - removed `super.init()`: the class conforms to a protocol but has no
///   superclass, so the call did not compile;
/// - the initializer no longer defaults to `SessionManager.shared`: that
///   default recursed into `shared` while `shared` itself was still being
///   initialized. The delegate is now resolved lazily at call time.
class SessionManager: SessionManagerProtocol {

    /// Shared singleton instance; acts as its own loader.
    static let shared = SessionManager()

    /// Observer invoked whenever `isLoading` changes.
    private var onLoadStateChange: ((Bool) -> Void)?

    /// Loader to delegate to; `nil` means "resolve to the shared instance
    /// when needed" (safe because resolution happens after `shared` exists).
    private var sessionManager: SessionManagerProtocol?

    /// - Parameter sessionManager: loader used by `load()`. Omit to use the
    ///   shared instance.
    init(sessionManager: SessionManagerProtocol? = nil) {
        self.sessionManager = sessionManager
    }

    /// Current loading state; notifies the observer on every change.
    var isLoading: Bool = true {
        didSet {
            self.onLoadStateChange?(isLoading)
        }
    }

    /// Simulates loading the current user off the main queue, then invokes
    /// `success` on the main queue after a fixed 2-second delay.
    func loadCurrentUser(withSuccessClosure success: @escaping () -> Void) {
        DispatchQueue.global().async {
            // Simulated network/disk latency.
            Thread.sleep(forTimeInterval: 2)
            DispatchQueue.main.async {
                success() // Invoke the success closure when loading is complete
            }
        }
    }

    /// Flags the manager as loading and asks the configured loader (or the
    /// shared instance) to load the current user; clears the flag on success.
    func load() {
        self.isLoading = true
        let loader = self.sessionManager ?? SessionManager.shared
        loader.loadCurrentUser(withSuccessClosure: {
            self.isLoading = false
            // Add any additional logic here
        })
    }

    // Add any additional properties or methods here
}
import { StepInfo } from "@spikedpunch/forge"
import { RequestEntry } from "./Plugin"
import { toRestVerb } from "./Utils"
/**
 * Options controlling how a Rest step issues its HTTP requests.
 */
export class RestOptions {
   /** Base url for the step's requests, if any. */
   url: string | undefined
   /** Parse responses as JSON when true. */
   json: boolean = false
   /** Emit extra diagnostic output when true. */
   debug: boolean = false
   /** Requests this step will perform. */
   requests: RequestEntry[] = []
   /** Options forwarded verbatim to the `got` HTTP client. */
   gotOptions: any = {}

   /**
    * Builds a RestOptions from a raw step definition.
    *
    * A step may declare a single inline request (`verb` + `path`) and/or a
    * `requests` array; both are validated before being accepted.
    *
    * @param url Base url, or undefined when the step has none
    * @param info Raw step configuration
    * @param gotOptions Extra options for `got`; may be null/undefined
    * @returns The populated options object
    * @throws Error when `requests` is not an array or an entry is malformed
    */
   static fromStep(
      url: string | undefined,
      info: StepInfo,
      gotOptions: any,
   ): RestOptions {
      const options = new RestOptions()

      options.url = url ?? undefined
      options.json = info.json || false
      options.debug = info.debug || false

      if (info.verb || info.path) {
         // Validate the inline pair through the same checks as list entries
         // (avoids the previous `@ts-ignore` on passing `info` directly).
         const inline = { verb: info.verb, path: info.path } as RequestEntry
         validateRequestEntry(inline)
         options.requests.push({
            verb: toRestVerb(info.verb),
            path: info.path
         })
      }

      if (info.requests) {
         if (!Array.isArray(info.requests)) {
            throw new Error(`The 'requests' field in a Rest step is expected to be an Array, but instead got ${typeof info.requests}`)
         }
         for (const req of info.requests) {
            validateRequestEntry(req)
         }
         options.requests = info.requests
      }

      // Previously the caller's gotOptions were spread-merged into a freshly
      // constructed empty object (a no-op) and the parameter was reassigned.
      // A defensive copy of the caller's object is all that is needed.
      options.gotOptions = { ...(gotOptions ?? {}) }

      return options
   }
}
function validateRequestEntry(entry: RequestEntry): void {
if(entry.verb == null) {
throw new Error(`Missing the 'verb' field. The 'verb' field in a Rest step must exist if a 'path' is provided.`)
}
if(entry.path == null) {
throw new Error(`Missing the 'path' field. The 'path' field in a Rest step must exist if a 'verb' is provided.`)
}
if(typeof entry.verb !== 'string') {
throw new Error(`The 'verb' field in a Rest Step is incorrect. Expecting a 'string', but instead got ${typeof entry.verb}`)
}
if(typeof entry.path !== 'string') {
throw new Error(`The 'path' field in a Rest Step is incorrect. Expecting a 'string', but instead got ${typeof entry.path}`)
}
} |
# WSL development-machine bootstrap notes. Steps 1, 3, 4 are manual (links
# only); steps 2, 5 run the commands below.
# 1. Setup github ssh key
# git clone https://gitee.com/icyzeroice/ssh-save.git .ssh
# 2. Setup Tmux
# https://github.com/gpakosz/.tmux
cd ~
git clone https://github.com/gpakosz/.tmux.git
ln -s -f .tmux/.tmux.conf
cp .tmux/.tmux.conf.local .
# 3. Setup Windows Terminal starting directory
# https://superuser.com/questions/1185033/what-is-the-home-directory-on-windows-subsystem-for-linux
# \\wsl$\Ubuntu\home\hyouka
# 4. Setup Chinese
# https://docs.microsoft.com/en-us/windows/wsl/faq#how-do-i-change-the-display-language-of-wsl-
# 5. Spacemacs
cd ~
# cc is needed for emacSQL
# https://www.reddit.com/r/emacs/comments/adlmh2/emacswindows_emacsqlsqliteensurebinary_no_emacsql/
sudo apt install clang gcc g++
sudo apt install sqlite
git clone -b develop https://github.com/syl20bnr/spacemacs ~/.emacs.d
git clone git@github.com:icyzeroice/.spacemacs.d.git
# 6. WSLG Chinese
# Tried this before installing Chinese fonts — it did not work (green screen): https://blog.csdn.net/defrag257/article/details/104029924
# This one worked after installing Chinese fonts: https://blog.csdn.net/xie1xiao1jun/article/details/54377946
# End the file
|
"""
A bot to play the game of Battleships
"""
import random
class Bot:
    """Random-strategy bot for the game of Battleships.

    Picks a uniformly random untargeted cell (value ``0``) on each attack and
    marks it with ``'X'``.
    """

    def __init__(self):
        # Number of attacks made so far.
        self.shots_fired = 0

    def make_attack(self, board):
        """Fire at a uniformly random untargeted cell of ``board``.

        The cell is marked with ``'X'`` in place and the shot counter is
        incremented. Unlike the previous rejection-sampling loop, this cannot
        spin forever when the board is exhausted.

        Args:
            board: rectangular 2-D list; ``0`` marks an untargeted cell.

        Raises:
            ValueError: if the board has no untargeted cell left.
        """
        candidates = [
            (row, col)
            for row, cells in enumerate(board)
            for col, cell in enumerate(cells)
            if cell == 0
        ]
        if not candidates:
            raise ValueError("no untargeted cells left on the board")
        row, col = random.choice(candidates)
        self.shots_fired += 1
        board[row][col] = 'X'

    def get_shots_fired(self):
        """Return the total number of shots fired so far."""
        return self.shots_fired
import numpy as np
from sklearn.grid_search import GridSearchCV
def grid_estimation(model, data, target):
    """Grid-search the SVM-style hyperparameters ``gamma`` and ``C``.

    Fits ``model`` over the cross product of the candidate ``gamma`` and
    ``C`` values using cross-validated grid search.

    Args:
        model: estimator exposing ``gamma`` and ``C`` parameters (e.g. SVC).
        data: feature matrix.
        target: target vector aligned with ``data``.

    Returns:
        Tuple ``(best_score, best_gamma, best_C)``.
    """
    # sklearn.grid_search was removed in scikit-learn 0.20; prefer the
    # model_selection location and fall back only for very old installs.
    try:
        from sklearn.model_selection import GridSearchCV
    except ImportError:  # scikit-learn < 0.18
        from sklearn.grid_search import GridSearchCV

    # Candidate hyperparameter values (previous comments wrongly described
    # these as "alpha" values for a ridge regression).
    gammas = np.array([1, 0.1, 0.01, 0.001, 0.0001])
    cs = np.array([1, 10, 100, 1000])
    grid = GridSearchCV(estimator=model, param_grid=dict(gamma=gammas, C=cs))
    grid.fit(data, target)
    return grid.best_score_, grid.best_estimator_.gamma, grid.best_estimator_.C
|
# install pip
sudo apt-get install python-pip python-dev
# sudo apt-get install python3-pip
pip install -i https://pypi.tuna.tsinghua.edu.cn/simple pip -U
pip config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple
## If pip errors after the upgrade, fix it as follows:
If running pip fails with `ImportError: cannot import name main`, edit the pip launcher script:
>nano /usr/bin/pip
```
import sys
from pip import __main__
if __name__ == '__main__':
    sys.exit(__main__._main())
```
## centos
yum -y install epel-release
yum -y install python-pip
pip install -i https://pypi.tuna.tsinghua.edu.cn/simple pip -U
pip config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple
|
<gh_stars>0
package mg.finance.conrtoller;
import lombok.AllArgsConstructor;
import mg.finance.dto.CryptoMarketDto;
import mg.finance.service.crypto.CryptoMarketService;
import org.springframework.web.bind.annotation.CrossOrigin;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;
// REST endpoint exposing aggregate crypto-market information. Lombok's
// @AllArgsConstructor generates the constructor that Spring uses to inject
// the service. CORS is opened only for the local dev frontend.
// NOTE(review): the package name "conrtoller" (top of file) looks like a typo
// for "controller" — renaming means moving the file; confirm before changing.
@RestController
@CrossOrigin("http://localhost:3000")
@RequestMapping("/api/v1/finance/crypto/currencies/market")
@AllArgsConstructor
public class CryptoCurrencyMarketRestController {
private final CryptoMarketService cryptoMarketService;
// GET /api/v1/finance/crypto/currencies/market
// Delegates to the service layer and returns the current market snapshot.
@GetMapping
public CryptoMarketDto getCryptoMarketInfo() {
return cryptoMarketService.getCryptoMarketInfo();
}
}
|
import {MapLayer, Point} from '../../interfaces/cross-code-map';
import * as Phaser from 'phaser-ce';
import {Sortable} from '../../interfaces/sortable';
import {Helper} from '../helper';
import {Globals} from '../../globals';
// Renders one CrossCode map layer. Tile indices live in `details.data`
// (0 = empty); they are composited into an offscreen Phaser.BitmapData which
// this Phaser.Image displays. Editing operations (drawTile, fill, resize,
// offsetLayer) mutate `details.data` and redraw the bitmap.
export class CCMapLayer extends Phaser.Image implements Sortable {
public details: MapLayer;
// Optional solid fill painted behind the tiles by renderAll().
public backgroundColor: { r: number, g: number, b: number, a: number };
// Offscreen canvas the tiles are drawn into (used as this image's texture).
private bitmap: Phaser.BitmapData;
// Tileset source image, cropped to a single tile at a time via tileCrop.
private tilesetImage: Phaser.Image;
private tileCrop: Phaser.Rectangle;
// Tileset dimensions (in tiles), used to map a tile index to a crop offset.
private tilesetSize: Point;
zIndex: number;
// Normalizes `details` (string level names and distances from the map JSON
// become numbers), creates the backing bitmap and optional tileset, hides
// non-visual layer types, then renders the whole layer once.
constructor(game: Phaser.Game, details: MapLayer) {
super(game, 0, 0, '');
// this.backgroundColor = {r: 255, g: 128, b: 0, a: 1};
if (typeof details.level === 'string') {
// possible levels
// 'first'
// 'last'
// 'light'
// 'postlight'
// 'object1'
// 'object2'
// 'object3'
// Numeric strings parse directly; named levels keep their name in
// `levelName` and get a numeric stand-in for z-ordering.
if (!isNaN(<any>details.level)) {
details.level = parseInt(details.level, 10);
} else {
details.levelName = details.level;
if (details.level.startsWith('first')) {
details.level = 0;
} else {
// TODO: get actual max level;
details.level = 10;
}
}
}
if (typeof details.distance === 'string') {
details.distance = parseFloat(details.distance);
}
this.details = details;
this.bitmap = game.make.bitmapData(details.width * details.tilesize, details.height * details.tilesize);
this.loadTexture(this.bitmap);
game.add.existing(this);
if (details.tilesetName) {
this.tilesetImage = game.make.image(0, 0, details.tilesetName);
this.tilesetSize = Helper.getTilesetSize(game.cache.getImage(details.tilesetName));
this.tileCrop = new Phaser.Rectangle(0, 0, Globals.TILE_SIZE, Globals.TILE_SIZE);
this.tilesetImage.crop(this.tileCrop);
}
// Purely logical layer types are not rendered.
const skip = 'Navigation Collision HeightMap'.split(' ');
// const skip = 'Navigation Background HeightMap'.split(' ');
skip.forEach(type => {
if (type === details.type) {
this.visible = false;
}
});
this.zIndex = this.details.level * 10;
if (isNaN(this.zIndex)) {
this.zIndex = 999;
}
// this.visible = false;
this.renderAll();
}
// Clears the bitmap (optionally filling the background color) and redraws
// every non-empty tile of `details.data`.
renderAll() {
const bitmap = this.bitmap;
const tileset = this.tilesetImage;
const details = this.details;
const tileSize = details.tilesize;
bitmap.clear();
if (this.backgroundColor) {
const bg = this.backgroundColor;
bitmap.fill(bg.r, bg.g, bg.b, bg.a);
}
for (let y = 0; y < details.data.length; y++) {
for (let x = 0; x < details.data[y].length; x++) {
const tile = details.data[y][x];
if (tile === 0) {
continue;
}
// makeTile positions the tileset crop over the wanted tile.
this.makeTile(tile);
bitmap.draw(tileset, x * tileSize, y * tileSize, tileSize, tileSize);
}
}
}
// checks bounds before drawing
updateTileChecked(x: number, y: number, tile: number) {
if (x >= 0 && x < this.details.data[0].length) {
if (y >= 0 && y < this.details.data.length) {
this.details.data[y][x] = tile;
}
}
}
// Updates a single cell and repaints just that tile (no full re-render).
// No bounds check — use updateTileChecked for unchecked coordinates.
drawTile(x: number, y: number, tile: number) {
const bitmap = this.bitmap;
const tileset = this.tilesetImage;
const details = this.details;
const tileSize = details.tilesize;
const oldTile = details.data[y][x];
if (oldTile === tile) {
return;
}
details.data[y][x] = tile;
const tileX = x * tileSize;
const tileY = y * tileSize;
bitmap.clear(tileX, tileY, tileSize, tileSize);
if (tile !== 0) {
this.makeTile(tile);
// NOTE(review): unlike renderAll, no width/height is passed here —
// presumably the crop already limits the draw to one tile; confirm.
bitmap.draw(tileset, tileX, tileY);
}
}
// Moves the tileset crop rectangle over tile `index` (1-based; 0 = empty).
makeTile(index: number) {
const tilesize = this.details.tilesize;
const crop = this.tileCrop;
const p = Helper.getTilePos(this.tilesetSize, index);
crop.x = p.x * tilesize;
crop.y = p.y * tilesize;
this.tilesetImage.updateCrop();
}
// Converts a tileset-local (x, y) coordinate into the 1-based tile index
// stored in `details.data` (inverse of the makeTile lookup).
getTile(x: number, y: number) {
let index = x + 1;
index += y * this.tilesetSize.x;
return index;
}
// Empties the whole layer: zeroes the data and wipes the bitmap.
clear() {
this.bitmap.clear();
this.details.data.forEach(arr => arr.fill(0));
}
// Releases the bitmap and tileset before the Phaser.Image itself.
destroy() {
if (this.bitmap) {
this.bitmap.destroy();
}
if (this.tilesetImage) {
this.tilesetImage.destroy();
}
super.destroy();
}
// Grows/shrinks the data grid (new cells are 0), resizes the bitmap, and
// re-renders unless skipRender is set.
resize(width: number, height: number, skipRender = false) {
const data = this.details.data;
data.length = height;
for (let i = 0; i < data.length; i++) {
if (!data[i]) {
data[i] = new Array(width).fill(0);
} else {
if (width < this.details.width) {
data[i].length = width;
} else {
while (data[i].length < width) {
data[i].push(0);
}
}
}
}
this.details.width = width;
this.details.height = height;
this.bitmap.resize(width * Globals.TILE_SIZE, height * Globals.TILE_SIZE);
if (!skipRender) {
this.renderAll();
}
}
// Shifts the whole layer by `offset` tiles. With borderTiles, cells shifted
// in from outside repeat the nearest edge row/column instead of becoming 0.
offsetLayer(offset: Point, borderTiles = false, skipRender = false) {
const data = this.details.data;
// Deep copy so reads below always see the pre-shift data.
const newData: number[][] = JSON.parse(JSON.stringify(data));
for (let y = 0; y < data.length; y++) {
for (let x = 0; x < data[y].length; x++) {
let newTile = 0;
let row = data[y - offset.y];
if (!row && borderTiles) {
row = offset.y > 0 ? data[0] : data[data.length - 1];
}
if (row) {
newTile = row[x - offset.x];
if (borderTiles && newTile === undefined) {
newTile = offset.x > 0 ? row[0] : row[row.length - 1];
}
}
newData[y][x] = newTile || 0;
}
}
this.details.data = newData;
if (!skipRender) {
this.renderAll();
}
}
// Flood fill (4-connected, iterative with an explicit stack): replaces the
// connected region of the tile at `p` with `newTile`, then re-renders.
fill(newTile: number, p: Point) {
const data = this.details.data;
const prev = data[p.y][p.x];
if (newTile === prev) {
return;
}
let toCheck: Point[] = [p];
while (toCheck.length > 0) {
const currP = toCheck.pop();
const tile = data[currP.y][currP.x];
if (tile === prev) {
data[currP.y][currP.x] = newTile;
toCheck = toCheck.concat(this.getNeighbours(currP));
}
}
this.renderAll();
}
// In-bounds 4-neighbourhood of `p` (used by fill).
private getNeighbours(p: Point): Point[] {
const out: Point[] = [];
if (p.x > 0) {
out.push({x: p.x - 1, y: p.y});
}
if (p.x < this.details.width - 1) {
out.push({x: p.x + 1, y: p.y});
}
if (p.y > 0) {
out.push({x: p.x, y: p.y - 1});
}
if (p.y < this.details.height - 1) {
out.push({x: p.x, y: p.y + 1});
}
return out;
}
// Shallow-copies `details` for serialization, restoring the original string
// level name that the constructor replaced with a number.
exportLayer() {
const out: MapLayer = Object.assign({}, this.details);
if (out.levelName) {
out.level = out.levelName;
out.levelName = undefined;
}
return out;
}
}
|
#!/bin/bash --login
# CI bootstrap: load RVM, select the project Ruby via .rvmrc, install gems,
# then run the RSpec suite with coverage enabled.
[[ -s "$HOME/.rvm/scripts/rvm" ]] && . "$HOME/.rvm/scripts/rvm" # This loads RVM
source .rvmrc
bundle install
# ci:setup:rspec prepares CI-friendly report output before running the specs.
COVERAGE=on bundle exec rake ci:setup:rspec spec
|
/**
 * Returns true when $number is an exact multiple of 3.
 *
 * @param int $number value to test
 * @return bool
 */
function isDivisibleByThree($number)
{
    $remainder = $number % 3;
    return $remainder === 0;
}
#ifndef __SCENE_HPP__
#define __SCENE_HPP__
#include <vector>
#include <set>
#include <unordered_map>
#include "glm.hpp"
#include "primitives.hpp"
#include "texture.hpp"
struct UncompressedKdNode;
struct CompressedKdNode;
class aiScene;
class aiNode;
class aiMesh;
class aiMaterial;
// Container for all geometry, materials, lights and acceleration structures
// of a renderable scene. Data is gathered into the *_buffer vectors during
// loading and flattened into the raw contiguous arrays by Commit(), which
// also builds and compresses the kd-tree used by the FindIntersect* queries.
class Scene{
public:
Scene() {};
~Scene();
// Copying is forbidden
Scene(const Scene&) = delete;
Scene& operator=(const Scene&) = delete;
// Assimp importers: walk the aiScene hierarchy, appending meshes/materials
// to the load buffers (force_mat overrides per-mesh material names).
void LoadAiNode(const aiScene* scene, const aiNode* ainode, glm::mat4 transform, std::string force_mat = "");
void LoadAiMesh(const aiScene* scene, const aiMesh* mesh, glm::mat4 transform, std::string force_mat = "");
void LoadAiSceneMeshes(const aiScene* scene, glm::mat4 transform, std::string force_mat = "");
void LoadAiSceneMaterials(const aiScene* scene, std::string default_brdf, std::string working_directory, bool override_materials = false);
void RegisterMaterial(std::shared_ptr<Material> material, bool override = false);
void AddPrimitive(const primitive_data& primitive, glm::mat4 transform, std::string material, glm::mat3 texture_transform);
// Makes a set of thin glass material references that match any of given substrings.
void MakeThinglassSet(std::vector<std::string>);
// Copies the data from load buffers to optimized, contiguous structures.
void Commit();
// Compresses the kd-tree. Called automatically by Commit()
void Compress();
// Prints the entire buffer to stdout.
void Dump() const;
// Searches for the nearest intersection in the direction specified by ray.
Intersection FindIntersectKd (const Ray& r)
__restrict__ const __attribute__((hot));
// Searches for any intersection (not necessarily nearest). Slightly faster than FindIntersectKd.
const Triangle* FindIntersectKdAny(const Ray& r)
__restrict__ const __attribute__((hot));
// Searches for the nearest intersection, but ignores all intersections with the specified ignored triangle.
Intersection FindIntersectKdOtherThan(const Ray& r, const Triangle* ignored)
__restrict__ const __attribute__((hot));
// Searches for the nearest intersection, but ignores both the specified ignored triangle, as well as all triangles
// that use material specified in thinglass set. However, such materials are gathered into a list (ordered) in the
// returned value. Useful for simulating thin colored glass.
Intersection FindIntersectKdOtherThanWithThinglass(const Ray& r, const Triangle* ignored)
__restrict__ const __attribute__((hot));
// Returns true IFF the two points are visible from each other.
// Incorporates no cache of any kind.
bool Visibility(glm::vec3 a, glm::vec3 b) __restrict__ const __attribute__((hot));
bool VisibilityWithThinglass(glm::vec3 a, glm::vec3 b,
/*out*/ ThinglassIsections&)
__restrict__ const __attribute__((hot));
// Loads a texture from file, or returns a (scene-locally) cached version
std::shared_ptr<ReadableTexture> GetTexture(std::string path);
// Adds a new solid color texture owned by the scene, and returns a pointer to it
std::shared_ptr<ReadableTexture> CreateSolidTexture(Color c);
// TODO: have triangle access these, and keep these fields private
// Raw flattened geometry arrays, populated by Commit().
glm::vec3* vertices = nullptr;
unsigned int n_vertices = 0;
Triangle* triangles = nullptr;
unsigned int n_triangles = 0;
glm::vec3* normals = nullptr;
unsigned int n_normals = 0;
glm::vec3* tangents = nullptr;
unsigned int n_tangents = 0;
glm::vec2* texcoords = nullptr;
unsigned int n_texcoords = 0;
// Point lights
std::vector<Light> pointlights;
void AddPointLight(Light);
// Areal lights
struct ArealLight{
// TODO: Remember to sort this (descending order)
std::vector<std::pair<float,unsigned int>> triangles_with_areas;
mutable float total_area = 0.0f;
Radiance emission;
float power = 0.0f;
Light GetRandomLight(const Scene& parent, float light_sample, glm::vec2 triangle_sample, bool debug) const;
};
float total_areal_power;
float total_point_power;
std::vector<std::pair<float,ArealLight>> areal_lights;
// Samples one light source, weighting by power (see total_*_power above).
Light GetRandomLight(glm::vec2 choice_sample, float light_sample, glm::vec2 triangle_sample, bool debug) const;
// Indexed by triangles.
std::vector<float> xevents;
std::vector<float> yevents;
std::vector<float> zevents;
// The bounding box for the entire scene.
std::pair<float,float> xBB;
std::pair<float,float> yBB;
std::pair<float,float> zBB;
// Dynamically determined by examining scene's diameter
float epsilon = 0.0001f;
void SetSkyboxColor(Color c, float intensity){
skybox_mode = SimpleRadiance;
skybox_color = c;
skybox_intensity = intensity;
}
void SetSkyboxEnvmap(std::string path, float intensity, float rotate){
skybox_mode = Envmap;
skybox_texture = GetTexture(path);
skybox_intensity = intensity;
skybox_rotate = rotate;
}
Radiance GetSkyboxRay(glm::vec3 direction, bool debug=false) const;
std::set<std::shared_ptr<const Material>> thinglass;
std::shared_ptr<Material> GetMaterialByName(std::string name) const;
private:
// kd-tree storage: the build-time tree and its flattened, compressed form.
UncompressedKdNode* uncompressed_root = nullptr;
CompressedKdNode* compressed_array = nullptr;
unsigned int compressed_array_size = 0;
unsigned int* compressed_triangles = nullptr;
unsigned int compressed_triangles_size = 0;
void CompressRec(const UncompressedKdNode* node, unsigned int& array_pos, unsigned int& triangle_pos);
// Staging buffers filled during load; flattened by Commit().
mutable std::vector<glm::vec3> vertices_buffer;
mutable std::vector<Triangle> triangles_buffer;
mutable std::vector<glm::vec3> normals_buffer;
mutable std::vector<glm::vec3> tangents_buffer;
mutable std::vector<glm::vec2> texcoords_buffer;
std::vector<std::shared_ptr<Material>> materials; //TODO: This vector is redundant, the map below is enough
std::unordered_map<std::string, std::shared_ptr<Material>> materials_by_name;
// TODO: Material* default_material;
// This map is the owner of all file-based textures in this scene
std::unordered_map<std::string, std::shared_ptr<ReadableTexture>> textures;
// This vector owns all solid-color textures in this scene.
std::vector<std::shared_ptr<ReadableTexture>> aux_textures;
enum SkyboxMode : int{
SimpleRadiance,
Envmap,
};
SkyboxMode skybox_mode;
Color skybox_color;
std::shared_ptr<ReadableTexture> skybox_texture;
float skybox_intensity;
float skybox_rotate;
void FreeBuffers();
void FreeTextures();
void FreeMaterials();
void FreeCompressedTree();
};
// Cost-model tunables for the kd-tree build (SAH-style: traversal vs
// intersection cost, with a bonus for producing empty children).
#define EMPTY_BONUS 0.5f
#define ISECT_COST 80.0f
#define TRAV_COST 2.0f
// Build-time kd-tree node. Subdivide() splits recursively; the finished tree
// is later flattened into CompressedKdNode form via Scene::CompressRec.
struct UncompressedKdNode{
const Scene* parent_scene;
enum {LEAF, INTERNAL} type = LEAF;
unsigned int depth = 0;
// Per-axis bounding box of this node.
std::pair<float,float> xBB;
std::pair<float,float> yBB;
std::pair<float,float> zBB;
std::vector<unsigned int> triangle_indices;
void Subdivide(unsigned int max_depth);
UncompressedKdNode* ch0 = nullptr;
UncompressedKdNode* ch1 = nullptr;
// Probability weights for the two children — presumably surface-area
// ratios used by the cost model; confirm in Subdivide()'s definition.
float prob0, prob1;
// Number of triangles duplicated into both children by this split.
int dups = 0;
// Total triangles / leaf nodes / total nodes / total dups
std::tuple<int, int, int, int> GetTotals() const;
void FreeRecursivelly();
float GetCost() const;
int split_axis;
float split_pos;
};
// Flattened kd-tree node, packed into 8 bytes. The two low bits of the
// second word tag the node: 0x03 marks a leaf, any other value is the split
// axis of an internal node. The remaining payload is a union shared between
// the two node kinds; counts/indices are stored shifted left by 2 bits.
struct CompressedKdNode{
inline bool IsLeaf() const {return (kind & 0x03) == 0x03;}
inline short GetSplitAxis() const {return kind & 0x03;}
inline float GetSplitPlane() const {return split_plane;}
inline uint32_t GetTrianglesN() const {return triangles_num >> 2;}
inline uint32_t GetFirstTrianglePos() const {return triangles_start;}
inline uint32_t GetOtherChildIndex() const {return other_child >> 2;}
// Default constructor;
CompressedKdNode() {}
// Constructor for internal nodes
CompressedKdNode(short axis, float split) {
kind = axis;
split_plane = split;
}
// Constructor for leaf nodes
CompressedKdNode(unsigned int num, unsigned int start){
// Shift the count past the 2 tag bits and mark the node as a leaf.
triangles_num = (num << 2) | 0x03;
triangles_start = start;
}
// Once the other child is placed, its position has to be set in parent node
inline void SetOtherChild(unsigned int pos){
// Preserve the 2 tag bits; store the index in the upper 30 bits.
other_child = (other_child & 0x03) | (pos << 2);
}
private:
union{
float split_plane; // For internal nodes
uint32_t triangles_start; // For leaf nodes
};
union{
// For internal nodes (shifted right 2 bits). One child is
// just after this stuct in memory layout, other is at this
// location.
uint32_t other_child;
// For leaf nodes (shifted right 2 bits).
uint32_t triangles_num;
// For any kind of node, 2 LSB.
uint32_t kind;
};
} __attribute__((packed,aligned(4)));
#endif //__SCENE_HPP__
|
<reponame>milancermak/beam
// Licensed to the Apache Software Foundation (ASF) under one or more
// contributor license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright ownership.
// The ASF licenses this file to You under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package graphx
import (
"fmt"
"reflect"
"strings"
"time"
"github.com/apache/beam/sdks/go/pkg/beam/core/funcx"
"github.com/apache/beam/sdks/go/pkg/beam/core/graph"
"github.com/apache/beam/sdks/go/pkg/beam/core/graph/coder"
"github.com/apache/beam/sdks/go/pkg/beam/core/graph/window"
"github.com/apache/beam/sdks/go/pkg/beam/core/runtime"
v1pb "github.com/apache/beam/sdks/go/pkg/beam/core/runtime/graphx/v1"
"github.com/apache/beam/sdks/go/pkg/beam/core/typex"
"github.com/apache/beam/sdks/go/pkg/beam/core/util/jsonx"
"github.com/apache/beam/sdks/go/pkg/beam/core/util/reflectx"
"github.com/apache/beam/sdks/go/pkg/beam/internal/errors"
)
// genFnType is the reflect.Type of the generator-function signature
// func(name string, t reflect.Type, data []byte) reflectx.Func.
// NOTE(review): its use is not visible in this chunk — presumably it
// validates DynFn generators resolved by name during decoding; confirm.
var genFnType = reflect.TypeOf((*func(string, reflect.Type, []byte) reflectx.Func)(nil)).Elem()
// EncodeMultiEdge converts the preprocessed representation into the wire
// representation of the multiedge, capturing input and output type information.
// NOTE(review): when both DoFn and CombineFn are set, the CombineFn encoding
// overwrites ret.Fn — presumably the two are mutually exclusive; confirm.
func EncodeMultiEdge(edge *graph.MultiEdge) (*v1pb.MultiEdge, error) {
ret := &v1pb.MultiEdge{}
ret.Opcode = string(edge.Op)
if edge.DoFn != nil {
ref, err := encodeFn((*graph.Fn)(edge.DoFn))
if err != nil {
wrapped := errors.Wrap(err, "bad userfn")
return nil, errors.WithContextf(wrapped, "encoding userfn %v", edge)
}
ret.Fn = ref
}
if edge.CombineFn != nil {
ref, err := encodeFn((*graph.Fn)(edge.CombineFn))
if err != nil {
wrapped := errors.Wrap(err, "bad combinefn")
return nil, errors.WithContextf(wrapped, "encoding userfn %v", edge)
}
ret.Fn = ref
}
if edge.WindowFn != nil {
ret.WindowFn = encodeWindowFn(edge.WindowFn)
}
// Inbound edges carry both an input kind and a full type; outbound edges
// carry only the type.
for _, in := range edge.Input {
kind := encodeInputKind(in.Kind)
t, err := encodeFullType(in.Type)
if err != nil {
wrapped := errors.Wrap(err, "bad input type")
return nil, errors.WithContextf(wrapped, "encoding userfn %v", edge)
}
ret.Inbound = append(ret.Inbound, &v1pb.MultiEdge_Inbound{Kind: kind, Type: t})
}
for _, out := range edge.Output {
t, err := encodeFullType(out.Type)
if err != nil {
wrapped := errors.Wrap(err, "bad output type")
return nil, errors.WithContextf(wrapped, "encoding userfn %v", edge)
}
ret.Outbound = append(ret.Outbound, &v1pb.MultiEdge_Outbound{Type: t})
}
return ret, nil
}
// DecodeMultiEdge converts the wire representation into the preprocessed
// components representing that edge. We deserialize to components to avoid
// inserting the edge into a graph or creating a detached edge.
// Any failure returns zero values for every component plus the error.
func DecodeMultiEdge(edge *v1pb.MultiEdge) (graph.Opcode, *graph.Fn, *window.Fn, []*graph.Inbound, []*graph.Outbound, error) {
var u *graph.Fn
var wfn *window.Fn
var inbound []*graph.Inbound
var outbound []*graph.Outbound
opcode := graph.Opcode(edge.Opcode)
if edge.Fn != nil {
var err error
u, err = decodeFn(edge.Fn)
if err != nil {
wrapped := errors.Wrap(err, "bad function")
return "", nil, nil, nil, nil, errors.WithContextf(wrapped, "decoding userfn %v", edge)
}
}
if edge.WindowFn != nil {
wfn = decodeWindowFn(edge.WindowFn)
}
// Mirror of EncodeMultiEdge: inbound entries restore kind + type,
// outbound entries restore type only.
for _, in := range edge.Inbound {
kind, err := decodeInputKind(in.Kind)
if err != nil {
wrapped := errors.Wrap(err, "bad input kind")
return "", nil, nil, nil, nil, errors.WithContextf(wrapped, "decoding userfn %v", edge)
}
t, err := decodeFullType(in.Type)
if err != nil {
wrapped := errors.Wrap(err, "bad input type")
return "", nil, nil, nil, nil, errors.WithContextf(wrapped, "decoding userfn %v", edge)
}
inbound = append(inbound, &graph.Inbound{Kind: kind, Type: t})
}
for _, out := range edge.Outbound {
t, err := decodeFullType(out.Type)
if err != nil {
wrapped := errors.Wrap(err, "bad output type")
return "", nil, nil, nil, nil, errors.WithContextf(wrapped, "decoding userfn %v", edge)
}
outbound = append(outbound, &graph.Outbound{Type: t})
}
return opcode, u, wfn, inbound, outbound, nil
}
// encodeCustomCoder serializes a custom coder to its wire form: the element
// type plus the encoder and decoder userfns.
func encodeCustomCoder(c *coder.CustomCoder) (*v1pb.CustomCoder, error) {
t, err := encodeType(c.Type)
if err != nil {
return nil, errors.WithContextf(err, "encoding custom coder %v for type %v", c, c.Type)
}
enc, err := encodeUserFn(c.Enc)
if err != nil {
wrapped := errors.Wrap(err, "bad encoding function")
return nil, errors.WithContextf(wrapped, "encoding custom coder %v", c)
}
dec, err := encodeUserFn(c.Dec)
if err != nil {
wrapped := errors.Wrap(err, "bad decoding function")
return nil, errors.WithContextf(wrapped, "encoding custom coder %v", c)
}
ret := &v1pb.CustomCoder{
Name: c.Name,
Type: t,
Enc: enc,
Dec: dec,
}
return ret, nil
}
func decodeCustomCoder(c *v1pb.CustomCoder) (*coder.CustomCoder, error) {
t, err := decodeType(c.Type)
if err != nil {
return nil, errors.WithContextf(err, "decoding custom coder %v for type %v", c, c.Type)
}
enc, err := decodeUserFn(c.Enc)
if err != nil {
wrapped := errors.Wrap(err, "bad encoding function")
return nil, errors.WithContextf(wrapped, "decoding custom coder %v", c)
}
dec, err := decodeUserFn(c.Dec)
if err != nil {
wrapped := errors.Wrap(err, "bad decoding function")
return nil, errors.WithContextf(wrapped, "decoding custom coder %v", c)
}
ret, err := coder.NewCustomCoder(c.Name, t, enc, dec)
if err != nil {
return nil, errors.WithContextf(err, "decoding custom coder %v", c)
}
return ret, nil
}
// encodeWindowFn converts a window function to its wire form. Durations are
// flattened to integer milliseconds; not every field is meaningful for every
// window kind (e.g. Gap only applies to session windows).
func encodeWindowFn(w *window.Fn) *v1pb.WindowFn {
	return &v1pb.WindowFn{
		Kind:     string(w.Kind),
		SizeMs:   duration2ms(w.Size),
		PeriodMs: duration2ms(w.Period),
		GapMs:    duration2ms(w.Gap),
	}
}

// decodeWindowFn is the inverse of encodeWindowFn, restoring millisecond
// fields to time.Duration values.
func decodeWindowFn(w *v1pb.WindowFn) *window.Fn {
	return &window.Fn{
		Kind:   window.Kind(w.Kind),
		Size:   ms2duration(w.SizeMs),
		Period: ms2duration(w.PeriodMs),
		Gap:    ms2duration(w.GapMs),
	}
}
func duration2ms(d time.Duration) int64 {
return d.Nanoseconds() / 1e6
}
func ms2duration(d int64) time.Duration {
return time.Duration(d) * time.Millisecond
}
// encodeFn translates the preprocessed representation of a Beam user function
// into its wire representation. Exactly one of DynFn, Fn, or Recv is expected
// to be set; a Fn with none of them set is a programming error (panic).
func encodeFn(u *graph.Fn) (*v1pb.Fn, error) {
	switch {
	case u.DynFn != nil:
		// Dynamic function: serialize the generator symbol, type, and data.
		gen := reflectx.FunctionName(u.DynFn.Gen)
		t, err := encodeType(u.DynFn.T)
		if err != nil {
			wrapped := errors.Wrap(err, "bad function type")
			return nil, errors.WithContextf(wrapped, "encoding dynamic DoFn %v", u)
		}
		return &v1pb.Fn{Dynfn: &v1pb.DynFn{
			Name: u.DynFn.Name,
			Type: t,
			Data: u.DynFn.Data,
			Gen:  gen,
		}}, nil
	case u.Fn != nil:
		fn, err := encodeUserFn(u.Fn)
		if err != nil {
			wrapped := errors.Wrap(err, "bad userfn")
			return nil, errors.WithContextf(wrapped, "encoding DoFn %v", u)
		}
		return &v1pb.Fn{Fn: fn}, nil
	case u.Recv != nil:
		// Structural DoFn: the receiver type must be registered so the
		// decoding side can look it up by key.
		t := reflect.TypeOf(u.Recv)
		k, ok := runtime.TypeKey(reflectx.SkipPtr(t))
		if !ok {
			err := errors.Errorf("failed to create TypeKey for receiver type %T", u.Recv)
			return nil, errors.WithContextf(err, "encoding structural DoFn %v", u)
		}
		if _, ok := runtime.LookupType(k); !ok {
			err := errors.Errorf("receiver type %v must be registered", t)
			return nil, errors.WithContextf(err, "encoding structural DoFn %v", u)
		}
		typ, err := encodeType(t)
		if err != nil {
			// Bug fix: return the error like every other failure path in this
			// function instead of panicking.
			wrapped := errors.Wrapf(err, "failed to encode receiver type %T", u.Recv)
			return nil, errors.WithContextf(wrapped, "encoding structural DoFn %v", u)
		}
		data, err := jsonx.Marshal(u.Recv)
		if err != nil {
			wrapped := errors.Wrapf(err, "failed to marshal receiver %v", u.Recv)
			return nil, errors.WithContextf(wrapped, "encoding structural DoFn %v", u)
		}
		return &v1pb.Fn{Type: typ, Opt: string(data)}, nil
	default:
		panic(fmt.Sprintf("Failed to encode DoFn %v, missing fn", u))
	}
}
// decodeFn is the inverse of encodeFn. It tries, in order: dynamic function,
// named user function, and finally structural DoFn (type + JSON options).
func decodeFn(u *v1pb.Fn) (*graph.Fn, error) {
	if u.Dynfn != nil {
		// Resolve the registered generator symbol, then rebuild the DynFn.
		gen, err := runtime.ResolveFunction(u.Dynfn.Gen, genFnType)
		if err != nil {
			wrapped := errors.Wrapf(err, "bad symbol %v", u.Dynfn.Gen)
			return nil, errors.WithContextf(wrapped, "decoding dynamic DoFn %v", u)
		}
		t, err := decodeType(u.Dynfn.Type)
		if err != nil {
			wrapped := errors.Wrap(err, "bad type")
			return nil, errors.WithContextf(wrapped, "failed to decode dynamic DoFn %v", u)
		}
		return graph.NewFn(&graph.DynFn{
			Name: u.Dynfn.Name,
			T:    t,
			Data: u.Dynfn.Data,
			Gen:  gen.(func(string, reflect.Type, []byte) reflectx.Func),
		})
	}
	if u.Fn != nil {
		fn, err := decodeUserFn(u.Fn)
		if err != nil {
			wrapped := errors.Wrap(err, "failed to decode userfn")
			return nil, errors.WithContextf(wrapped, "decoding DoFn %v", u)
		}
		fx, err := funcx.New(reflectx.MakeFunc(fn))
		if err != nil {
			wrapped := errors.Wrap(err, "failed to construct userfn")
			return nil, errors.WithContextf(wrapped, "decoding DoFn %v", u)
		}
		return &graph.Fn{Fn: fx}, nil
	}
	// Structural DoFn: reconstruct the receiver value from its encoded type
	// and JSON-serialized options, then let graph.NewFn re-analyze it.
	t, err := decodeType(u.Type)
	if err != nil {
		wrapped := errors.Wrap(err, "bad type")
		return nil, errors.WithContextf(wrapped, "decoding structural DoFn %v", u)
	}
	elem := reflect.New(t)
	if err := jsonx.UnmarshalFrom(elem.Interface(), strings.NewReader(u.Opt)); err != nil {
		wrapped := errors.Wrap(err, "bad struct encoding")
		return nil, errors.WithContextf(wrapped, "decoding structural DoFn %v", u)
	}
	fn := elem.Elem().Interface()
	return graph.NewFn(fn)
}
// encodeUserFn translates the preprocessed representation of a Beam user function
// into the wire representation, capturing all the inputs and outputs needed.
// Only the function's symbol name and type go over the wire; the decoding side
// resolves the symbol again via the runtime registry.
func encodeUserFn(u *funcx.Fn) (*v1pb.UserFn, error) {
	// TODO(herohde) 5/23/2017: reject closures and dynamic functions. They can't
	// be serialized.
	symbol := u.Fn.Name()
	t, err := encodeType(u.Fn.Type())
	if err != nil {
		wrapped := errors.Wrap(err, "bad function type")
		return nil, errors.WithContextf(wrapped, "encoding userfn %v", u)
	}
	return &v1pb.UserFn{Name: symbol, Type: t}, nil
}
// decodeUserFn receives the wire representation of a Beam user function,
// extracting the preprocessed representation, expanding all inputs and outputs
// of the function.
func decodeUserFn(ref *v1pb.UserFn) (interface{}, error) {
	t, err := decodeType(ref.GetType())
	if err != nil {
		// Wrap with context like the sibling decode functions do, so failures
		// identify which userfn could not be decoded.
		wrapped := errors.Wrap(err, "bad function type")
		return nil, errors.WithContextf(wrapped, "decoding userfn %v", ref)
	}
	return runtime.ResolveFunction(ref.Name, t)
}
// encodeFullType recursively converts a FullType (primitive type plus
// component types for composites) into its wire representation.
func encodeFullType(t typex.FullType) (*v1pb.FullType, error) {
	var components []*v1pb.FullType
	if t.Class() == typex.Composite {
		// Drop the Aggregate convenience component.
		for _, comp := range t.Components() {
			c, err := encodeFullType(comp)
			if err != nil {
				return nil, err
			}
			components = append(components, c)
		}
	}
	prim, err := encodeType(t.Type())
	if err != nil {
		wrapped := errors.Wrap(err, "bad type")
		return nil, errors.WithContextf(wrapped, "encoding full type %v", t)
	}
	return &v1pb.FullType{Type: prim, Components: components}, nil
}

// decodeFullType is the inverse of encodeFullType, recursively decoding
// component types and reassembling the FullType via typex.New.
func decodeFullType(t *v1pb.FullType) (typex.FullType, error) {
	var components []typex.FullType
	for _, comp := range t.Components {
		c, err := decodeFullType(comp)
		if err != nil {
			return nil, err
		}
		components = append(components, c)
	}
	prim, err := decodeType(t.Type)
	if err != nil {
		wrapped := errors.Wrap(err, "bad type")
		return nil, errors.WithContextf(wrapped, "decoding full type %v", t)
	}
	return typex.New(prim, components...), nil
}
// encodeType serializes a reflect.Type to the wire. Resolution order:
// special well-known types first, then registered external types (by key),
// then a structural encoding of the standard Go kinds. Types outside that
// set (maps, interfaces, unexported struct fields, ...) are rejected.
func encodeType(t reflect.Type) (*v1pb.Type, error) {
	if s, ok := tryEncodeSpecial(t); ok {
		return &v1pb.Type{Kind: v1pb.Type_SPECIAL, Special: s}, nil
	}
	if k, ok := runtime.TypeKey(t); ok {
		if _, present := runtime.LookupType(k); present {
			// External type. Serialize by key and lookup in registry
			// on decoding side.
			return &v1pb.Type{Kind: v1pb.Type_EXTERNAL, ExternalKey: k}, nil
		}
	}
	// The supplied type isn't special, so apply the standard encodings.
	switch t.Kind() {
	case reflect.Bool:
		return &v1pb.Type{Kind: v1pb.Type_BOOL}, nil
	case reflect.Int:
		return &v1pb.Type{Kind: v1pb.Type_INT}, nil
	case reflect.Int8:
		return &v1pb.Type{Kind: v1pb.Type_INT8}, nil
	case reflect.Int16:
		return &v1pb.Type{Kind: v1pb.Type_INT16}, nil
	case reflect.Int32:
		return &v1pb.Type{Kind: v1pb.Type_INT32}, nil
	case reflect.Int64:
		return &v1pb.Type{Kind: v1pb.Type_INT64}, nil
	case reflect.Uint:
		return &v1pb.Type{Kind: v1pb.Type_UINT}, nil
	case reflect.Uint8:
		return &v1pb.Type{Kind: v1pb.Type_UINT8}, nil
	case reflect.Uint16:
		return &v1pb.Type{Kind: v1pb.Type_UINT16}, nil
	case reflect.Uint32:
		return &v1pb.Type{Kind: v1pb.Type_UINT32}, nil
	case reflect.Uint64:
		return &v1pb.Type{Kind: v1pb.Type_UINT64}, nil
	case reflect.Float32:
		return &v1pb.Type{Kind: v1pb.Type_FLOAT32}, nil
	case reflect.Float64:
		return &v1pb.Type{Kind: v1pb.Type_FLOAT64}, nil
	case reflect.String:
		return &v1pb.Type{Kind: v1pb.Type_STRING}, nil
	case reflect.Slice:
		elm, err := encodeType(t.Elem())
		if err != nil {
			wrapped := errors.Wrap(err, "bad element type")
			return nil, errors.WithContextf(wrapped, "encoding slice %v", t)
		}
		return &v1pb.Type{Kind: v1pb.Type_SLICE, Element: elm}, nil
	case reflect.Struct:
		// Structs are encoded field by field; unexported fields cannot be
		// reconstructed with reflect.StructOf on the other side, so reject.
		var fields []*v1pb.Type_StructField
		for i := 0; i < t.NumField(); i++ {
			f := t.Field(i)
			if f.PkgPath != "" {
				wrapped := errors.Errorf("type has unexported field: %v", f.Name)
				return nil, errors.WithContextf(wrapped, "encoding struct %v", t)
			}
			fType, err := encodeType(f.Type)
			if err != nil {
				wrapped := errors.Wrap(err, "bad field type")
				return nil, errors.WithContextf(wrapped, "encoding struct %v", t)
			}
			field := &v1pb.Type_StructField{
				Name:      f.Name,
				PkgPath:   f.PkgPath,
				Type:      fType,
				Tag:       string(f.Tag),
				Offset:    int64(f.Offset),
				Index:     encodeInts(f.Index),
				Anonymous: f.Anonymous,
			}
			fields = append(fields, field)
		}
		return &v1pb.Type{Kind: v1pb.Type_STRUCT, Fields: fields}, nil
	case reflect.Func:
		// Functions are encoded as parameter types + return types + variadic flag.
		var in []*v1pb.Type
		for i := 0; i < t.NumIn(); i++ {
			param, err := encodeType(t.In(i))
			if err != nil {
				wrapped := errors.Wrap(err, "bad parameter type")
				return nil, errors.WithContextf(wrapped, "encoding function %v", t)
			}
			in = append(in, param)
		}
		var out []*v1pb.Type
		for i := 0; i < t.NumOut(); i++ {
			ret, err := encodeType(t.Out(i))
			if err != nil {
				wrapped := errors.Wrap(err, "bad return type")
				return nil, errors.WithContextf(wrapped, "encoding function %v", t)
			}
			out = append(out, ret)
		}
		return &v1pb.Type{Kind: v1pb.Type_FUNC, ParameterTypes: in, ReturnTypes: out, IsVariadic: t.IsVariadic()}, nil
	case reflect.Chan:
		elm, err := encodeType(t.Elem())
		if err != nil {
			wrapped := errors.Wrap(err, "bad element type")
			return nil, errors.WithContextf(wrapped, "encoding channel %v", t)
		}
		dir := encodeChanDir(t.ChanDir())
		return &v1pb.Type{Kind: v1pb.Type_CHAN, Element: elm, ChanDir: dir}, nil
	case reflect.Ptr:
		elm, err := encodeType(t.Elem())
		if err != nil {
			wrapped := errors.Wrap(err, "bad base type")
			return nil, errors.WithContextf(wrapped, "encoding pointer %v", t)
		}
		return &v1pb.Type{Kind: v1pb.Type_PTR, Element: elm}, nil
	default:
		return nil, errors.Errorf("unencodable type %v", t)
	}
}
// tryEncodeSpecial maps a handful of well-known framework types (error,
// context, the typex universal/window types, ...) to their dedicated wire
// enums. Returns (Type_ILLEGAL, false) if t is not one of them.
func tryEncodeSpecial(t reflect.Type) (v1pb.Type_Special, bool) {
	switch t {
	case reflectx.Error:
		return v1pb.Type_ERROR, true
	case reflectx.Context:
		return v1pb.Type_CONTEXT, true
	case reflectx.Type:
		return v1pb.Type_TYPE, true
	case typex.EventTimeType:
		return v1pb.Type_EVENTTIME, true
	case typex.WindowType:
		return v1pb.Type_WINDOW, true
	case typex.KVType:
		return v1pb.Type_KV, true
	case typex.CoGBKType:
		return v1pb.Type_COGBK, true
	case typex.WindowedValueType:
		return v1pb.Type_WINDOWEDVALUE, true
	case typex.TType:
		return v1pb.Type_T, true
	case typex.UType:
		return v1pb.Type_U, true
	case typex.VType:
		return v1pb.Type_V, true
	case typex.WType:
		return v1pb.Type_W, true
	case typex.XType:
		return v1pb.Type_X, true
	case typex.YType:
		return v1pb.Type_Y, true
	case typex.ZType:
		return v1pb.Type_Z, true
	default:
		return v1pb.Type_ILLEGAL, false
	}
}
// decodeType reconstructs a reflect.Type from the wire, the inverse of
// encodeType. Composite kinds recurse; EXTERNAL kinds are resolved through
// the runtime type registry and fail if the key was never registered here.
func decodeType(t *v1pb.Type) (reflect.Type, error) {
	if t == nil {
		err := errors.New("empty type")
		return nil, errors.WithContextf(err, "decoding type %v", t)
	}
	switch t.Kind {
	case v1pb.Type_BOOL:
		return reflectx.Bool, nil
	case v1pb.Type_INT:
		return reflectx.Int, nil
	case v1pb.Type_INT8:
		return reflectx.Int8, nil
	case v1pb.Type_INT16:
		return reflectx.Int16, nil
	case v1pb.Type_INT32:
		return reflectx.Int32, nil
	case v1pb.Type_INT64:
		return reflectx.Int64, nil
	case v1pb.Type_UINT:
		return reflectx.Uint, nil
	case v1pb.Type_UINT8:
		return reflectx.Uint8, nil
	case v1pb.Type_UINT16:
		return reflectx.Uint16, nil
	case v1pb.Type_UINT32:
		return reflectx.Uint32, nil
	case v1pb.Type_UINT64:
		return reflectx.Uint64, nil
	case v1pb.Type_FLOAT32:
		return reflectx.Float32, nil
	case v1pb.Type_FLOAT64:
		return reflectx.Float64, nil
	case v1pb.Type_STRING:
		return reflectx.String, nil
	case v1pb.Type_SLICE:
		elm, err := decodeType(t.GetElement())
		if err != nil {
			wrapped := errors.Wrap(err, "bad element")
			return nil, errors.WithContextf(wrapped, "failed to decode type %v, bad element", t)
		}
		return reflect.SliceOf(elm), nil
	case v1pb.Type_STRUCT:
		// Rebuild the struct with reflect.StructOf; encodeType guarantees all
		// fields were exported.
		var fields []reflect.StructField
		for _, f := range t.Fields {
			fType, err := decodeType(f.Type)
			if err != nil {
				wrapped := errors.Wrap(err, "bad field type")
				return nil, errors.WithContextf(wrapped, "failed to decode type %v, bad field type", t)
			}
			field := reflect.StructField{
				Name:      f.GetName(),
				PkgPath:   f.GetPkgPath(),
				Type:      fType,
				Tag:       reflect.StructTag(f.GetTag()),
				Offset:    uintptr(f.GetOffset()),
				Index:     decodeInts(f.GetIndex()),
				Anonymous: f.GetAnonymous(),
			}
			fields = append(fields, field)
		}
		return reflect.StructOf(fields), nil
	case v1pb.Type_FUNC:
		in, err := decodeTypes(t.GetParameterTypes())
		if err != nil {
			wrapped := errors.Wrap(err, "bad parameter type")
			return nil, errors.WithContextf(wrapped, "decoding type %v", t)
		}
		out, err := decodeTypes(t.GetReturnTypes())
		if err != nil {
			wrapped := errors.Wrap(err, "bad return type")
			return nil, errors.WithContextf(wrapped, "decoding type %v", t)
		}
		return reflect.FuncOf(in, out, t.GetIsVariadic()), nil
	case v1pb.Type_CHAN:
		elm, err := decodeType(t.GetElement())
		if err != nil {
			wrapped := errors.Wrap(err, "bad element")
			return nil, errors.WithContextf(wrapped, "decoding type %v", t)
		}
		dir, err := decodeChanDir(t.GetChanDir())
		if err != nil {
			wrapped := errors.Wrap(err, "bad channel direction")
			return nil, errors.WithContextf(wrapped, "decoding type %v", t)
		}
		return reflect.ChanOf(dir, elm), nil
	case v1pb.Type_PTR:
		elm, err := decodeType(t.GetElement())
		if err != nil {
			wrapped := errors.Wrap(err, "bad element")
			return nil, errors.WithContextf(wrapped, "decoding type %v", t)
		}
		return reflect.PtrTo(elm), nil
	case v1pb.Type_SPECIAL:
		ret, err := decodeSpecial(t.Special)
		if err != nil {
			wrapped := errors.Wrap(err, "bad element")
			return nil, errors.WithContextf(wrapped, "decoding type %v", t)
		}
		return ret, nil
	case v1pb.Type_EXTERNAL:
		ret, ok := runtime.LookupType(t.ExternalKey)
		if !ok {
			err := errors.Errorf("external key not found %v", t.ExternalKey)
			return nil, errors.WithContextf(err, "decoding type %v", t)
		}
		return ret, nil
	default:
		err := errors.Errorf("unexpected type kind %v", t.Kind)
		return nil, errors.WithContextf(err, "failed to decode type %v", t)
	}
}
// decodeSpecial is the inverse of tryEncodeSpecial, mapping a special wire
// enum back to the framework's reflect.Type singleton.
func decodeSpecial(s v1pb.Type_Special) (reflect.Type, error) {
	switch s {
	case v1pb.Type_ERROR:
		return reflectx.Error, nil
	case v1pb.Type_CONTEXT:
		return reflectx.Context, nil
	case v1pb.Type_TYPE:
		return reflectx.Type, nil
	case v1pb.Type_EVENTTIME:
		return typex.EventTimeType, nil
	case v1pb.Type_WINDOW:
		return typex.WindowType, nil
	case v1pb.Type_KV:
		return typex.KVType, nil
	case v1pb.Type_COGBK:
		return typex.CoGBKType, nil
	case v1pb.Type_WINDOWEDVALUE:
		return typex.WindowedValueType, nil
	case v1pb.Type_T:
		return typex.TType, nil
	case v1pb.Type_U:
		return typex.UType, nil
	case v1pb.Type_V:
		return typex.VType, nil
	case v1pb.Type_W:
		return typex.WType, nil
	case v1pb.Type_X:
		return typex.XType, nil
	case v1pb.Type_Y:
		return typex.YType, nil
	case v1pb.Type_Z:
		return typex.ZType, nil
	default:
		return nil, errors.Errorf("failed to decode special type, unknown type %v", s)
	}
}
// decodeTypes decodes each wire type in list, failing fast on the first error.
func decodeTypes(list []*v1pb.Type) ([]reflect.Type, error) {
	var decoded []reflect.Type
	for _, enc := range list {
		next, err := decodeType(enc)
		if err != nil {
			return nil, err
		}
		decoded = append(decoded, next)
	}
	return decoded, nil
}
// encodeInts widens a struct-field index path to the int32 wire form.
func encodeInts(offsets []int) []int32 {
	var converted []int32
	for i := 0; i < len(offsets); i++ {
		converted = append(converted, int32(offsets[i]))
	}
	return converted
}
// decodeInts narrows a wire-form index path back to the native int slice.
func decodeInts(offsets []int32) []int {
	var converted []int
	for i := 0; i < len(offsets); i++ {
		converted = append(converted, int(offsets[i]))
	}
	return converted
}
// encodeChanDir maps a channel direction to its wire enum. Panics on an
// impossible direction value (programming error, not input error).
func encodeChanDir(dir reflect.ChanDir) v1pb.Type_ChanDir {
	switch dir {
	case reflect.RecvDir:
		return v1pb.Type_RECV
	case reflect.SendDir:
		return v1pb.Type_SEND
	case reflect.BothDir:
		return v1pb.Type_BOTH
	default:
		panic(fmt.Sprintf("Failed to encode channel direction, invalid value: %v", dir))
	}
}

// decodeChanDir is the inverse of encodeChanDir; wire data may be corrupt,
// so invalid values produce an error rather than a panic.
func decodeChanDir(dir v1pb.Type_ChanDir) (reflect.ChanDir, error) {
	switch dir {
	case v1pb.Type_RECV:
		return reflect.RecvDir, nil
	case v1pb.Type_SEND:
		return reflect.SendDir, nil
	case v1pb.Type_BOTH:
		return reflect.BothDir, nil
	default:
		err := errors.Errorf("invalid value: %v", dir)
		return reflect.BothDir, errors.WithContext(err, "decoding channel direction")
	}
}
// encodeInputKind maps an inbound link kind to its wire enum. Panics on an
// unknown kind (programming error in the SDK, not bad input data).
func encodeInputKind(k graph.InputKind) v1pb.MultiEdge_Inbound_InputKind {
	switch k {
	case graph.Main:
		return v1pb.MultiEdge_Inbound_MAIN
	case graph.Singleton:
		return v1pb.MultiEdge_Inbound_SINGLETON
	case graph.Slice:
		return v1pb.MultiEdge_Inbound_SLICE
	case graph.Map:
		return v1pb.MultiEdge_Inbound_MAP
	case graph.MultiMap:
		return v1pb.MultiEdge_Inbound_MULTIMAP
	case graph.Iter:
		return v1pb.MultiEdge_Inbound_ITER
	case graph.ReIter:
		return v1pb.MultiEdge_Inbound_REITER
	default:
		panic(fmt.Sprintf("Failed to encode input kind, invalid value: %v", k))
	}
}

// decodeInputKind is the inverse of encodeInputKind; wire data may be corrupt,
// so unknown values produce an error rather than a panic.
func decodeInputKind(k v1pb.MultiEdge_Inbound_InputKind) (graph.InputKind, error) {
	switch k {
	case v1pb.MultiEdge_Inbound_MAIN:
		return graph.Main, nil
	case v1pb.MultiEdge_Inbound_SINGLETON:
		return graph.Singleton, nil
	case v1pb.MultiEdge_Inbound_SLICE:
		return graph.Slice, nil
	case v1pb.MultiEdge_Inbound_MAP:
		return graph.Map, nil
	case v1pb.MultiEdge_Inbound_MULTIMAP:
		return graph.MultiMap, nil
	case v1pb.MultiEdge_Inbound_ITER:
		return graph.Iter, nil
	case v1pb.MultiEdge_Inbound_REITER:
		return graph.ReIter, nil
	default:
		err := errors.Errorf("invalid value: %v", k)
		return graph.Main, errors.WithContext(err, "decoding input kind")
	}
}
|
import numpy as np
import sympy
class Muon:
    """A straight-line muon track.

    The muon originates at (x, y, z) — z defaults to 0, the interaction
    point — and travels in the direction given by spherical angles
    ``theta`` (polar) and ``phi`` (azimuthal). A second point is placed at
    unit distance ``r`` along that direction, and a sympy ``Line3D``
    through both points represents the trajectory.
    """

    def __init__(self, x, y, theta, phi, time=0, z=0):
        # Track origin. The new optional ``z`` generalizes the previously
        # hard-coded z=0 origin while keeping the old call signature valid.
        self.org_x = x
        self.org_y = y
        self.org_z = z
        # Unit step length along the direction of flight.
        self.r = 1.
        self.theta = theta
        self.phi = phi
        self.end_x = self.r*np.cos(self.phi)*np.sin(self.theta) + self.org_x
        self.end_y = self.r*np.sin(self.phi)*np.sin(self.theta) + self.org_y
        # Bug fix: offset by org_z too, so a non-default origin shifts the
        # end point consistently with x and y (identical result when z=0).
        self.end_z = self.r*np.cos(self.theta) + self.org_z
        # Use org_z here as well instead of a hard-coded 0.
        self.point_org = sympy.Point3D((self.org_x, self.org_y, self.org_z))
        self.point_end = sympy.Point3D((self.end_x, self.end_y, self.end_z))
        self.line = sympy.Line3D(self.point_org, self.point_end)
        self.time = time
|
The dataset consists of 100 sentences, divided equally into 50 positive and 50 negative sentences. The positive sentences include:
"I love spending time with my family."
"I'm excited to start my new project."
"I'm feeling inspired today."
"I achieved so much today."
"I'm feeling optimistic about the future."
The negative sentences include:
"I'm feeling overwhelmed."
"I'm not sure what to do next."
"I feel like I'm wasting my time."
"I'm not good enough."
"I'm scared of making the wrong decision." |
<gh_stars>1-10
/* eslint-env jest */
'use strict'
const log = require('../lib/log')
// const logLevels = ['INFO', 'WARN', 'ERROR', 'DEBUG']
// NOTE(review): console.log() returns undefined, so each toEqual() below
// effectively asserts that the log helpers also return undefined while the
// level-prefixed message is printed as a side effect — confirm this is the
// intended assertion (the expected text also differs in case from the input).
test(`Appends log level to the message`, () => {
  expect(log.info(`Just an info`)).toEqual(console.log(`INFO just an info`))
  expect(log.warn(`Just a warn`)).toEqual(console.log(`WARN just a warn`))
  expect(log.error(`Just an error`)).toEqual(console.log(`ERROR just an error`))
  expect(log.debug(`Just a debug`)).toEqual(console.log(`DEBUG just a debug`))
})
// Enabling DEBUG_LOGGING should make log.debug emit output.
test(`Debug mode is enabled`, () => {
  process.env.DEBUG_LOGGING = 'true'
  expect(log.debug(``)).toEqual(console.log('DEBUG LOGGING ENABLED'))
})
|
# Page through the accumulated benchmark result files.
more ../../results/frats_add*.json
|
from machina.apps.forum_conversation.admin import * # noqa
|
package rbd
import (
"errors"
)
// Pool is an rbd pool
type Pool struct {
	name string
}

// Name is the pool name
func (pool *Pool) Name() string {
	return pool.name
}

// GetPool gets a pool object (does not verify pool exists)
func GetPool(name string) *Pool {
	return &Pool{name}
}

// ErrDoesNotExist is returned if the pool, image or snapshot does not exist
var ErrDoesNotExist = errors.New("does not exist")

// cmdArgs prefixes the given rbd CLI arguments with this pool's --pool flag.
func (pool *Pool) cmdArgs(args ...string) []string {
	return append([]string{"--pool", pool.name}, args...)
}

// getImage builds an Image handle in this pool without checking existence.
func (pool *Pool) getImage(name string) *Image {
	return getImage(pool, name)
}

// poolErrs maps rbd exit code 2 to ErrDoesNotExist for pool-level commands.
var poolErrs = exitCodeToErrMap(map[int]error{2: ErrDoesNotExist})
// Images returns the rbd images
// Note: on error the partially-built image list is returned alongside err,
// matching cmdJSON's behavior of possibly decoding partial output.
func (pool *Pool) Images() ([]*Image, error) {
	imgNames := []string{}
	err := cmdJSON(&imgNames, poolErrs, pool.cmdArgs("list")...)
	images := make([]*Image, 0, len(imgNames))
	for _, n := range imgNames {
		images = append(images, pool.getImage(n))
	}
	return images, err
}
// MappedImages returns the images of this pool that are currently mapped via
// NBD. Entries whose Snapshot field is "-" are image mappings (as opposed to
// snapshot mappings) and are the only ones included.
func (pool *Pool) MappedImages() ([]*Image, error) {
	mappedNBDs, err := mappedNBDs()
	if err != nil {
		return nil, err
	}
	mappedImages := []*Image{}
	for _, nbd := range mappedNBDs {
		if nbd.Pool == pool.Name() && nbd.Snapshot == "-" {
			mappedImages = append(mappedImages, pool.getImage(nbd.Name))
			/*
				var mountTime time.Time
				if blkDevStat, err := os.Stat(nbd.Device); err != nil {
					mountTime = blkDevStat.ModTime()
				}
				mappedImages = append(mappedImages, &MappedImage{
					pool.getImage(nbd.Name), nbd.Device, nbd.Pid, mountTime})
			*/
		}
	}
	return mappedImages, nil
}
// devList mirrors one row of `rbd list --long` JSON output.
type devList struct {
	Image    string `json:"image"`
	Snapshot string `json:"snapshot"`
}

// Devices returns all rbd devices including images and snapshots
// NOTE(review): a snapshot row assumes its base image also appears with an
// empty Snapshot field in the same listing; if it did not, images[d.Image]
// would be nil and getSnapshot would dereference nil — verify rbd guarantees
// the base row is always present.
func (pool *Pool) Devices() ([]Dev, error) {
	devs := []*devList{}
	err := cmdJSON(&devs, poolErrs, pool.cmdArgs("list", "--long")...)
	// First pass: collect base images (rows without a snapshot name).
	images := make(map[string]*Image)
	for _, d := range devs {
		if d.Snapshot == "" {
			images[d.Image] = pool.getImage(d.Image)
		}
	}
	// Second pass: emit images directly and snapshots via their base image.
	retDevs := make([]Dev, 0, len(devs))
	for _, d := range devs {
		image := images[d.Image]
		if d.Snapshot == "" {
			retDevs = append(retDevs, image)
		} else {
			retDevs = append(retDevs, image.getSnapshot(d.Snapshot))
		}
	}
	return retDevs, err
}
// imageErrs maps rbd exit code 2 to ErrDoesNotExist for image-level commands.
var imageErrs = exitCodeToErrMap(map[int]error{2: ErrDoesNotExist})

// GetImage gets an image in the pool
// Unlike getImage, this verifies existence by querying the image's Info.
func (pool *Pool) GetImage(name string) (*Image, error) {
	img := pool.getImage(name)
	_, err := img.Info()
	return img, err
}
// ErrAlreadyExists is returned if creating an image that already exists
var ErrAlreadyExists = errors.New("image already exists")

// createErrs maps rbd exit code 17 (EEXIST) to ErrAlreadyExists.
var createErrs = exitCodeToErrMap(map[int]error{
	17: ErrAlreadyExists,
})

// CreateImage creates an image in the pool
// If the image already exists, the handle is still returned together with
// ErrAlreadyExists so callers can treat creation as idempotent.
// NOTE(review): no explicit "create" subcommand is passed here — presumably
// cmdRun supplies it; verify against cmdRun's implementation.
func (pool *Pool) CreateImage(name string, size string, args ...string) (*Image, error) {
	args = append([]string{"--image", name, "--size", size}, args...)
	err := cmdRun(createErrs, pool.cmdArgs(args...)...)
	if err != nil && !errors.Is(err, ErrAlreadyExists) {
		return nil, err
	}
	return pool.getImage(name), err
}
// CreateImageWithFileSystem creates and formats an image
// Flow: create the image, map it to a block device, mkfs it, then unmap.
// Note: if mkfs fails, the function returns early and the block device is
// left mapped — callers may need to unmap it themselves.
func (pool *Pool) CreateImageWithFileSystem(name, size, fileSystem string, args ...string) (*Image, error) {
	img, err := pool.CreateImage(name, size, args...)
	if err != nil {
		return img, err
	}
	blk, err := img.Map()
	if err != nil {
		return img, err
	}
	err = mkfs(blk, fileSystem)
	if err != nil {
		return img, err
	}
	// Unmap errors are reported, but the created image handle is still returned.
	return img, img.Unmap()
}
|
import java.util.Arrays;
/** Sorts a fixed integer list in place and prints it space-separated. */
class SortList
{
    public static void main(String[] args)
    {
        int[] values = {1, 4, 5, 6, 7, 8};
        Arrays.sort(values);
        System.out.print("Sorted list: ");
        // Enhanced for loop; output is identical to the index-based version.
        for (int value : values)
            System.out.print(value + " ");
    }
}
#!/bin/sh
# (C) Copyright 2005- ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
#
# In applying this licence, ECMWF does not waive the privileges and immunities granted to it by
# virtue of its status as an intergovernmental organisation nor does it submit to any jurisdiction.
#
# Test harness: run the Fortran tropical-cyclone example and verify its output.
. ./include.sh
#Define a common label for all the tmp files
label="bufr_read_tropical_cyclone_f"
#Define tmp file
fTmp=${label}".tmp.txt"
rm -f $fTmp
#We check "tropical_cyclone.bufr". The path is hardcoded in the example
REDIRECT=/dev/null
#Write the values into a file and compare with reference
${examples_dir}/eccodes_f_bufr_read_tropical_cyclone 2> $REDIRECT > $fTmp
# Check the output
# NOTE(review): these greps only fail the test if include.sh enables `set -e`
# (or equivalent) — confirm the harness aborts on a failed grep.
grep -q "Date and time: 18.11.2015 0:0" $fTmp
grep -q "== Member 52" $fTmp
#Clean up
rm -f $fTmp
#!/bin/bash
#SBATCH --time=2:00:00   # walltime
#SBATCH --ntasks=3   # number of processor cores (i.e. tasks)
#SBATCH --nodes=1   # number of nodes
#SBATCH --mem-per-cpu=8000M   # memory per CPU core
#SBATCH --gres=gpu:1
#SBATCH --output="runBWv3.slurm"
#SBATCH --constraint rhel7
echo $USER
# Bug fix: '[ $USER=="tarch" ]' was a one-word string test that is always
# true; compare with separate operands instead.
if [ "$USER" = "tarch" ]; then
    email="taylor.archibald@byu.edu"
else
    email="masonfp@byu.edu"
fi
# NOTE(review): sbatch only honors #SBATCH directives that appear before the
# first executable line, and it does not expand $email — the four directives
# below are ignored as written; pass --mail-user on the sbatch command line
# instead. Kept here for reference.
#SBATCH --mail-user=$email
#SBATCH --mail-type=BEGIN
#SBATCH --mail-type=END
#SBATCH --mail-type=FAIL

# Set the max number of threads to use for programs using OpenMP. Should be <= ppn. Does nothing if the program doesn't use OpenMP.
#export OMP_NUM_THREADS=$SLURM_CPUS_ON_NODE

# LOAD MODULES, INSERT CODE, AND RUN YOUR PROGRAMS HERE
#%Module
cat /etc/os-release
cat /etc/redhat-release
module purge
activate "/fslhome/tarch/anaconda3/envs/cycleGAN"
export PATH="/fslhome/$USER/anaconda3/envs/cycleGAN/bin:$PATH"
which python
cd "/fslhome/$USER/fsl_groups/fslg_hwr/compute/pytorch-CycleGAN"

# Has trainA and trainB folder in it
# TrainA = online
# TrainB = offline
# --num_test
#python -u test.py --dataroot ./datasets/hwr --name handwriting_cyclegan_BW --model cycle_gan --input_nc 1 --output_nc 1 --loadSizeY 64 --fineSizeY 64 --fineSizeX 1280 --loadSizeX 1280 --results_dir "./results" --step 200
#python -u test.py --dataroot ./datasets/hwr --name handwriting_cyclegan_BW_GT --model cycle_gan2 --input_nc 1 --output_nc 1 --loadSizeY 64 --fineSizeY 64 --fineSizeX 1280 --loadSizeX 1280 --results_dir "./results" --step 5800 --num_test 12149 --image_type fake_B

# No GT
python -u test.py --dataroot ./datasets/hwr --name handwriting_cyclegan_BW --model cycle_gan --input_nc 1 --output_nc 1 --loadSizeY 64 --fineSizeY 64 --fineSizeX 1280 --loadSizeX 1280 --results_dir "./results" --step 4500 --num_test 12149 --image_type fake_B
#--continue_train

# To run:
#sbatch ./run.sh
#sbatch /fslhome/tarch/compute/handwriting/run.sh
#squeue -u tarch

#pip install git+https://github.com/Tahlor/utils.git
#!/bin/bash
# Copy the built neuron-ui assets into neuron-wallet's distribution folder.
./node_modules/.bin/ncp ./packages/neuron-ui/build ./packages/neuron-wallet/dist/neuron-ui
-- Fetch the id and customer name of every order placed from the US.
SELECT order_id, customer_name
FROM orders
WHERE country = 'US'
package graph
import (
"code.google.com/p/plotinum/plot"
"code.google.com/p/plotinum/plotter"
"code.google.com/p/plotinum/vg"
"fmt"
"github.com/THUNDERGROOVE/SDETool2/log"
"github.com/THUNDERGROOVE/SDETool2/market"
"github.com/lucasb-eyer/go-colorful"
"math"
"math/rand"
"sort"
"time"
)
// Cache accumulates market data keyed by item name, then by month number.
type Cache struct {
	Data map[string]map[int]*Data
}

// Data is one month's aggregated sales for a single item.
type Data struct {
	UnitsSold int
	Month     int
	Year      int
}

// MarketCache is the package-global aggregation filled by doCache.
var MarketCache Cache

func init() {
	log.Info("Graph imported. Generating MarketCache.")
	MarketCache = Cache{make(map[string]map[int]*Data, 0)}
}
// save writes the plot to graph.png at a 16:9 base size scaled by the number
// of plotted series (see saveScale). isBar is only used for logging.
func save(p *plot.Plot, count int, isBar bool) {
	baseW := float64(8)
	baseH := float64(4.5)
	log.Info("Saving graph with options count", count, "isBar", isBar, "using size ", saveScale(baseW, count), saveScale(baseH, count))
	if err := p.Save(saveScale(baseW, count), saveScale(baseH, count), "graph.png"); err != nil {
		log.LogError("Error saving graph as png.")
	}
}
// saveScale grows dimension f with the square root of the series count, so
// the canvas area scales roughly linearly with the number of plotted series.
func saveScale(f float64, c int) float64 {
	return 2 * math.Sqrt(f*float64(c))
}
// doCache folds raw market history into the global MarketCache, accumulating
// units sold per item name per calendar month. Entries older than half a
// year are skipped.
func doCache(data map[string]map[string]market.MarketData) {
	for name, set := range data {
		for _, v := range set {
			for _, vv := range v.Items {
				// Bug fix: the layout previously used "03" (12-hour clock),
				// which fails to parse ISO timestamps with hours > 12.
				// "15" is the 24-hour reference hour.
				d, err := time.Parse("2006-01-02T15:04:05", vv.Date)
				if err != nil {
					fmt.Println("Error parsing date from market data", err.Error())
					continue
				}
				if time.Now().Sub(d).Hours() >= 4380 { // Ignore data set. 4380 is half of a year
					//log.Info("Ignoring old data set.", time.Now().Sub(d).Hours(), "hours old.", d.String())
					continue
				}
				// Lazily create the nested maps and the month bucket.
				if MarketCache.Data == nil {
					MarketCache.Data = make(map[string]map[int]*Data, 0)
				}
				if MarketCache.Data[name] == nil {
					MarketCache.Data[name] = make(map[int]*Data, 0)
				}
				month := int(d.Month())
				if MarketCache.Data[name][month] == nil {
					MarketCache.Data[name][month] = &Data{}
				}
				MarketCache.Data[name][month].UnitsSold += int(vv.Volume)
				MarketCache.Data[name][month].Month = month
				// Bug fix: Year was previously assigned the month value.
				MarketCache.Data[name][month].Year = d.Year()
			}
		}
	}
}
// PlotSuitData aggregates the given market data into MarketCache and renders
// one colored line per item (month on X, units sold on Y) to graph.png.
func PlotSuitData(data map[string]map[string]market.MarketData) { //ew lol
	log.Info("Proccessing Market data")
	doCache(data)
	p, err := plot.New()
	if err != nil {
		log.LogError("Error creating log", err.Error())
		return
	}
	p.Title.Text = "Suit Market Data"
	p.X.Label.Text = "Month"
	p.Y.Label.Text = "Suits Sold"
	rand.Seed(time.Now().UnixNano())
	// One distinguishable color per item.
	c, _ := colorful.SoftPalette(len(MarketCache.Data))
	var ci int
	for name, v := range MarketCache.Data {
		var i int
		points := make(plotter.XYs, len(v))
		// Sort keys so we can have the lines in order
		var keys []int
		for k := range v {
			keys = append(keys, k)
		}
		sort.Ints(keys)
		for _, k := range keys {
			points[i].X = float64(k)
			points[i].Y = float64(MarketCache.Data[name][k].UnitsSold)
			i++
		}
		line, err := plotter.NewLine(points)
		if err != nil {
			log.LogError("Error creating line in plot", err.Error())
			continue
		}
		line.Color = c[ci]
		line.Width = vg.Points(5)
		p.Add(line)
		p.Legend.Add(name, line)
		ci++
	}
	save(p, len(MarketCache.Data), false)
}
// BarSuitData aggregates the given market data into MarketCache and renders
// one offset bar series per item to graph.png.
// NOTE(review): unlike PlotSuitData, the month keys are not sorted here, so
// bar order within a series follows Go's randomized map iteration — confirm
// whether that is intended.
func BarSuitData(data map[string]map[string]market.MarketData) { //ew lol
	log.Info("Proccessing Market data")
	doCache(data)
	p, err := plot.New()
	if err != nil {
		log.LogError("Error creating log", err.Error())
		return
	}
	rand.Seed(time.Now().UnixNano())
	c, _ := colorful.SoftPalette(len(MarketCache.Data))
	var ci int
	for name, v := range MarketCache.Data {
		val := plotter.Values{}
		for _, vv := range v {
			val = append(val, float64(vv.UnitsSold))
		}
		bar, err := plotter.NewBarChart(val, vg.Points(5))
		if err != nil {
			log.LogError("Error creating bar for chart")
		}
		bar.Color = c[ci]
		// Shift each series sideways so the per-item bars sit next to each other.
		bar.Offset = vg.Points(float64(5 * ci))
		p.Add(bar)
		p.Legend.Add(name, bar)
		ci++
	}
	p.Legend.Top = true
	p.NominalX("January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December")
	save(p, len(MarketCache.Data), true)
}
|
# sandy PROG [ARGS...]: run PROG against the nearest cabal sandbox package DB.
# Walks up from $PWD looking for cabal.sandbox.config; if found, passes the
# sandbox package-db to PROG, otherwise runs PROG unchanged.
sandy () {
    local DIR=$PWD
    local TARGET="cabal.sandbox.config"
    local PROG=$1
    shift
    # Bug fix: quote expansions and use two tests joined by && instead of the
    # obsolescent -a, so directories with spaces do not break the loop.
    while [ ! -e "$DIR/$TARGET" ] && [ "$DIR" != "/" ]
    do
        DIR=$(dirname "$DIR")
    done
    if test "$DIR" != "/"
    then
        # Extract the first package-db: line from the sandbox config.
        local DB=$(sed -ne '/^package-db: */{s///p;q;}' "$DIR/$TARGET")
        $PROG -no-user-package-db -package-db="$DB" "$@"
    else
        $PROG "$@"
    fi
}
|
// Package errors defines errors that can occur in command/input validation and
// execution and ways to handle those.
package errors
import (
"github.com/giantswarm/gsctl/client/clienterror"
"github.com/giantswarm/microerror"
)
// UnknownError should be thrown if we have no idea what went wrong.
var UnknownError = µerror.Error{
Kind: "UnknownError",
}
// IsUnknownError asserts UnknownError.
func IsUnknownError(err error) bool {
return microerror.Cause(err) == UnknownError
}
// CouldNotCreateClientError means that a client could not be created
var CouldNotCreateClientError = µerror.Error{
Kind: "CouldNotCreateClientError",
}
// IsCouldNotCreateClientError asserts CouldNotCreateClientError.
func IsCouldNotCreateClientError(err error) bool {
return microerror.Cause(err) == CouldNotCreateClientError
}
// InvalidReleaseError means that the customer entered an invalid release.
var InvalidReleaseError = µerror.Error{
Kind: "InvalidReleaseError",
}
// IsInvalidReleaseError asserts InvalidReleaseError.
func IsInvalidReleaseError(err error) bool {
return microerror.Cause(err) == InvalidReleaseError
}
// NotLoggedInError means that the user is currently not authenticated
var NotLoggedInError = µerror.Error{
Kind: "NotLoggedInError",
}
// IsNotLoggedInError asserts NotLoggedInError.
func IsNotLoggedInError(err error) bool {
return microerror.Cause(err) == NotLoggedInError
}
// UserAccountInactiveError means that the user account is marked as inative by the API
var UserAccountInactiveError = µerror.Error{
Kind: "UserAccountInactiveError",
}
// IsUserAccountInactiveError asserts UserAccountInactiveError.
func IsUserAccountInactiveError(err error) bool {
return microerror.Cause(err) == UserAccountInactiveError
}
// CommandAbortedError means that the user has aborted a command or input
var CommandAbortedError = µerror.Error{
Kind: "CommandAbortedError",
}
// IsCommandAbortedError asserts CommandAbortedError
func IsCommandAbortedError(err error) bool {
return microerror.Cause(err) == CommandAbortedError
}
// ConflictingFlagsError means that the user combined command line options
// that are incompatible
var ConflictingFlagsError = µerror.Error{
Desc: "Some of the command line flags used cannot be combined.",
Kind: "ConflictingFlagsError",
}
// IsConflictingFlagsError asserts ConflictingFlagsError.
func IsConflictingFlagsError(err error) bool {
return microerror.Cause(err) == ConflictingFlagsError
}
// DesiredEqualsCurrentStateError means that the user described a desired
// state which is equal to the current state.
var DesiredEqualsCurrentStateError = µerror.Error{
Kind: "DesiredEqualsCurrentStateError",
}
// IsDesiredEqualsCurrentStateError asserts DesiredEqualsCurrentStateError.
func IsDesiredEqualsCurrentStateError(err error) bool {
return microerror.Cause(err) == DesiredEqualsCurrentStateError
}
// ClusterNameOrIDMissingError means a required cluster ID has not been given as input
var ClusterNameOrIDMissingError = µerror.Error{
Kind: "ClusterNameOrIDMissingError",
}
// IsClusterNameOrIDMissingError asserts ClusterNameOrIDMissingError.
func IsClusterNameOrIDMissingError(err error) bool {
return microerror.Cause(err) == ClusterNameOrIDMissingError
}
// NodePoolIDMissingError means a required node pool ID has not been given as input
var NodePoolIDMissingError = µerror.Error{
Kind: "NodePoolIDMissingError",
}
// IsNodePoolIDMissingError asserts NodePoolIDMissingError.
func IsNodePoolIDMissingError(err error) bool {
return microerror.Cause(err) == NodePoolIDMissingError
}
// NodePoolIDMalformedError means a cluster/nodepool ID tuple has been formatted the wrong way.
var NodePoolIDMalformedError = µerror.Error{
Kind: "NodePoolIDMalformedError",
}
// IsNodePoolIDMalformedError asserts IsNodePoolIDMalformedError.
func IsNodePoolIDMalformedError(err error) bool {
return microerror.Cause(err) == NodePoolIDMalformedError
}
// ClusterNotFoundError means that a given cluster does not exist
var ClusterNotFoundError = µerror.Error{
Kind: "ClusterNotFoundError",
}
// IsClusterNotFoundError asserts ClusterNotFoundError.
func IsClusterNotFoundError(err error) bool {
c := microerror.Cause(err)
if clienterror.IsNotFoundError(err) {
return true
}
if c == ClusterNotFoundError {
return true
}
return false
}
// NodePoolNotFoundError means that a node pool the user wants to interact with does not exist.
var NodePoolNotFoundError = µerror.Error{
Kind: "NodePoolNotFoundError",
}
// IsNodePoolNotFound asserts NodePoolNotFoundError.
func IsNodePoolNotFound(err error) bool {
return microerror.Cause(err) == NodePoolNotFoundError
}
// ReleaseVersionMissingError means the required release version argument is missing
var ReleaseVersionMissingError = µerror.Error{
Kind: "ReleaseVersionMissingError",
}
// IsReleaseVersionMissingError asserts ReleaseVersionMissingError.
func IsReleaseVersionMissingError(err error) bool {
return microerror.Cause(err) == ReleaseVersionMissingError
}
// ReleaseNotFoundError means that a given release does not exist.
var ReleaseNotFoundError = µerror.Error{
Kind: "ReleaseNotFoundError",
}
// IsReleaseNotFoundError asserts ReleaseNotFoundError.
func IsReleaseNotFoundError(err error) bool {
return microerror.Cause(err) == ReleaseNotFoundError
}
// InternalServerError should only be used in case of server communication
// being responded to with a response status >= 500.
// See also: unknownError
var InternalServerError = µerror.Error{
Kind: "InternalServerError",
}
// IsInternalServerError asserts InternalServerError.
func IsInternalServerError(err error) bool {
c := microerror.Cause(err)
if clienterror.IsInternalServerError(err) {
return true
}
if c == InternalServerError {
return true
}
return false
}
// NoResponseError means the server side has not returned a response.
var NoResponseError = µerror.Error{
Kind: "NoResponseError",
}
// IsNoResponseError asserts NoResponseError.
func IsNoResponseError(err error) bool {
return microerror.Cause(err) == NoResponseError
}
// NotAuthorizedError means that an API action could not be performed due to
// an authorization problem (usually a HTTP 401 error)
var NotAuthorizedError = µerror.Error{
Kind: "NotAuthorizedError",
}
// IsNotAuthorizedError asserts NotAuthorizedError.
func IsNotAuthorizedError(err error) bool {
c := microerror.Cause(err)
if clienterror.IsUnauthorizedError(err) {
return true
}
if c == NotAuthorizedError {
return true
}
return false
}
// Errors for cluster creation
// NotEnoughWorkerNodesError means that the user has specified a too low
// number of worker nodes for a cluster
var NotEnoughWorkerNodesError = µerror.Error{
Kind: "NotEnoughWorkerNodesError",
}
// IsNotEnoughWorkerNodesError asserts NotEnoughWorkerNodesError.
func IsNotEnoughWorkerNodesError(err error) bool {
return microerror.Cause(err) == NotEnoughWorkerNodesError
}
// ClusterOwnerMissingError means that the user has not specified an owner organization
// for a new cluster
var ClusterOwnerMissingError = µerror.Error{
Kind: "ClusterOwnerMissingError",
}
// IsClusterOwnerMissingError asserts ClusterOwnerMissingError.
func IsClusterOwnerMissingError(err error) bool {
return microerror.Cause(err) == ClusterOwnerMissingError
}
// OrganizationNotFoundError means that the specified organization could not be found
var OrganizationNotFoundError = µerror.Error{
Kind: "OrganizationNotFoundError",
}
// IsOrganizationNotFoundError asserts OrganizationNotFoundError
func IsOrganizationNotFoundError(err error) bool {
c := microerror.Cause(err)
if clienterror.IsNotFoundError(err) {
return true
}
if c == OrganizationNotFoundError {
return true
}
return false
}
// OrganizationNotSpecifiedError means that the user has not specified an organization to work with
var OrganizationNotSpecifiedError = µerror.Error{
Kind: "OrganizationNotSpecifiedError",
}
// IsOrganizationNotSpecifiedError asserts OrganizationNotSpecifiedError
func IsOrganizationNotSpecifiedError(err error) bool {
return microerror.Cause(err) == OrganizationNotSpecifiedError
}
// CredentialNotFoundError means that the specified credential could not be found
var CredentialNotFoundError = µerror.Error{
Kind: "CredentialNotFoundError",
}
// IsCredentialNotFoundError asserts CredentialNotFoundError
func IsCredentialNotFoundError(err error) bool {
return microerror.Cause(err) == CredentialNotFoundError
}
// YAMLFileNotReadableError means a YAML file was not readable
var YAMLFileNotReadableError = µerror.Error{
Kind: "YAMLFileNotReadableError",
}
// IsYAMLFileNotReadable asserts YAMLFileNotReadableError.
func IsYAMLFileNotReadable(err error) bool {
return microerror.Cause(err) == YAMLFileNotReadableError
}
// YAMLNotParseableError means a YAML file was not readable
var YAMLNotParseableError = µerror.Error{
Kind: "YAMLNotParseableError",
}
// IsYAMLNotParseable asserts YAMLNotParseableError.
func IsYAMLNotParseable(err error) bool {
return microerror.Cause(err) == YAMLNotParseableError
}
// CouldNotCreateJSONRequestBodyError occurs when we could not create a JSON
// request body based on the input we have, so something in out input attributes
// is wrong.
var CouldNotCreateJSONRequestBodyError = µerror.Error{
Kind: "CouldNotCreateJSONRequestBodyError",
}
// IsCouldNotCreateJSONRequestBodyError asserts CouldNotCreateJSONRequestBodyError.
func IsCouldNotCreateJSONRequestBodyError(err error) bool {
return microerror.Cause(err) == CouldNotCreateJSONRequestBodyError
}
// CouldNotCreateClusterError should be used if the API call to create a
// cluster has been responded with status >= 400 and none of the other
// more specific errors apply.
var CouldNotCreateClusterError = µerror.Error{
Kind: "CouldNotCreateClusterError",
}
// IsCouldNotCreateClusterError asserts CouldNotCreateClusterError.
func IsCouldNotCreateClusterError(err error) bool {
return microerror.Cause(err) == CouldNotCreateClusterError
}
// BadRequestError should be used when the server returns status 400 on cluster creation.
var BadRequestError = µerror.Error{
Kind: "BadRequestError",
}
// IsBadRequestError asserts BadRequestError
func IsBadRequestError(err error) bool {
return microerror.Cause(err) == BadRequestError
}
// errors for cluster deletion
// CouldNotDeleteClusterError should be used if the API call to delete a
// cluster has been responded with status >= 400
var CouldNotDeleteClusterError = µerror.Error{
Kind: "CouldNotDeleteClusterError",
}
// IsCouldNotDeleteClusterError asserts CouldNotDeleteClusterError.
func IsCouldNotDeleteClusterError(err error) bool {
return microerror.Cause(err) == CouldNotDeleteClusterError
}
// Errors for scaling a cluster
// CouldNotScaleClusterError should be used if the API call to scale a cluster
// has been responded with status >= 400
var CouldNotScaleClusterError = µerror.Error{
Kind: "CouldNotScaleClusterError",
}
// IsCouldNotScaleClusterError asserts CouldNotScaleClusterError.
func IsCouldNotScaleClusterError(err error) bool {
return microerror.Cause(err) == CouldNotScaleClusterError
}
// APIError is happening when an error occurs in the API
var APIError = µerror.Error{
Kind: "APIError",
}
// IsAPIError asserts APIError.
func IsAPIError(err error) bool {
c := microerror.Cause(err)
_, ok := c.(*clienterror.APIError)
if ok {
return true
}
if c == APIError {
return true
}
return false
}
// CannotScaleBelowMinimumWorkersError means the user tries to scale to less
// nodes than allowed
var CannotScaleBelowMinimumWorkersError = µerror.Error{
Kind: "CannotScaleBelowMinimumWorkersError",
}
// IsCannotScaleBelowMinimumWorkersError asserts CannotScaleBelowMinimumWorkersError.
func IsCannotScaleBelowMinimumWorkersError(err error) bool {
return microerror.Cause(err) == CannotScaleBelowMinimumWorkersError
}
// CannotScaleClusterError means the user tries to scale a cluster that does not support
// scaling, e. g. because it is a v5 cluster (node pools).
var CannotScaleClusterError = µerror.Error{
Kind: "CannotScaleClusterError",
}
// IsCannotScaleCluster asserts CannotScaleClusterError.
func IsCannotScaleCluster(err error) bool {
return microerror.Cause(err) == CannotScaleClusterError
}
// IncompatibleSettingsError means user has mixed incompatible settings related to different providers.
var IncompatibleSettingsError = µerror.Error{
Kind: "IncompatibleSettingsError",
}
// IsIncompatibleSettings asserts IncompatibleSettingsError.
func IsIncompatibleSettings(err error) bool {
return microerror.Cause(err) == IncompatibleSettingsError
}
// EndpointMissingError means the user has not given an endpoint where expected
var EndpointMissingError = µerror.Error{
Kind: "EndpointMissingError",
}
// IsEndpointMissingError asserts EndpointMissingError.
func IsEndpointMissingError(err error) bool {
return microerror.Cause(err) == EndpointMissingError
}
// EmptyPasswordError means the password supplied by the user was empty
var EmptyPasswordError = µerror.Error{
Kind: "EmptyPasswordError",
}
// IsEmptyPasswordError asserts EmptyPasswordError.
func IsEmptyPasswordError(err error) bool {
return microerror.Cause(err) == EmptyPasswordError
}
// TokenArgumentNotApplicableError means the user used --auth-token argument
// but it wasn't permitted for that command
var TokenArgumentNotApplicableError = µerror.Error{
Kind: "TokenArgumentNotApplicableError",
}
// IsTokenArgumentNotApplicableError asserts TokenArgumentNotApplicableError.
func IsTokenArgumentNotApplicableError(err error) bool {
return microerror.Cause(err) == TokenArgumentNotApplicableError
}
// PasswordArgumentNotApplicableError means the user used --password argument
// but it wasn't permitted for that command
var PasswordArgumentNotApplicableError = µerror.Error{
Kind: "PasswordArgumentNotApplicableError",
}
// IsPasswordArgumentNotApplicableError asserts PasswordArgumentNotApplicableError.
func IsPasswordArgumentNotApplicableError(err error) bool {
return microerror.Cause(err) == PasswordArgumentNotApplicableError
}
// NoEmailArgumentGivenError means the email argument was required
// but not given/empty
var NoEmailArgumentGivenError = µerror.Error{
Kind: "NoEmailArgumentGivenError",
}
// IsNoEmailArgumentGivenError asserts NoEmailArgumentGivenError
func IsNoEmailArgumentGivenError(err error) bool {
return microerror.Cause(err) == NoEmailArgumentGivenError
}
// AccessForbiddenError means the client has been denied access to the API endpoint
// with a HTTP 403 error
var AccessForbiddenError = µerror.Error{
Kind: "AccessForbiddenError",
}
// IsAccessForbiddenError asserts AccessForbiddenError
func IsAccessForbiddenError(err error) bool {
c := microerror.Cause(err)
if clienterror.IsAccessForbiddenError(err) {
return true
}
if c == AccessForbiddenError {
return true
}
return false
}
// InvalidCredentialsError means the user's credentials could not be verified
// by the API
var InvalidCredentialsError = µerror.Error{
Kind: "InvalidCredentialsError",
}
// IsInvalidCredentialsError asserts InvalidCredentialsError
func IsInvalidCredentialsError(err error) bool {
return microerror.Cause(err) == InvalidCredentialsError
}
// KubectlMissingError means that the 'kubectl' executable is not available
var KubectlMissingError = µerror.Error{
Kind: "KubectlMissingError",
}
// IsKubectlMissingError asserts KubectlMissingError
func IsKubectlMissingError(err error) bool {
return microerror.Cause(err) == KubectlMissingError
}
// CouldNotWriteFileError is used when an attempt to write some file fails
var CouldNotWriteFileError = µerror.Error{
Kind: "CouldNotWriteFileError",
}
// IsCouldNotWriteFileError asserts CouldNotWriteFileError
func IsCouldNotWriteFileError(err error) bool {
return microerror.Cause(err) == CouldNotWriteFileError
}
// UnspecifiedAPIError means an API error has occurred which we can't or don't
// need to categorize any further.
var UnspecifiedAPIError = µerror.Error{
Kind: "UnspecifiedAPIError",
}
// IsUnspecifiedAPIError asserts UnspecifiedAPIError
func IsUnspecifiedAPIError(err error) bool {
return microerror.Cause(err) == UnspecifiedAPIError
}
// NoUpgradeAvailableError means that the user wanted to start an upgrade, but
// there is no newer version available for the given cluster
var NoUpgradeAvailableError = µerror.Error{
Kind: "NoUpgradeAvailableError",
Desc: "no upgrade available for the current version",
}
// IsNoUpgradeAvailableError asserts NoUpgradeAvailableError
func IsNoUpgradeAvailableError(err error) bool {
return microerror.Cause(err) == NoUpgradeAvailableError
}
// CouldNotUpgradeClusterError is thrown when a cluster upgrade failed.
var CouldNotUpgradeClusterError = µerror.Error{
Kind: "CouldNotUpgradeClusterError",
Desc: "could not upgrade cluster",
}
// IsCouldNotUpgradeClusterError asserts CouldNotUpgradeClusterError
func IsCouldNotUpgradeClusterError(err error) bool {
return microerror.Cause(err) == CouldNotUpgradeClusterError
}
// InvalidCNPrefixError means the user has used bad characters in the CN prefix argument
var InvalidCNPrefixError = µerror.Error{
Kind: "InvalidCNPrefixError",
}
// IsInvalidCNPrefixError asserts InvalidCNPrefixError
func IsInvalidCNPrefixError(err error) bool {
return microerror.Cause(err) == InvalidCNPrefixError
}
// InvalidDurationError means that a user-provided duration string could not be parsed
var InvalidDurationError = µerror.Error{
Kind: "InvalidDurationError",
}
// IsInvalidDurationError asserts InvalidDurationError
func IsInvalidDurationError(err error) bool {
return microerror.Cause(err) == InvalidDurationError
}
// DurationExceededError is thrown when a duration value is larger than can be represented internally
var DurationExceededError = µerror.Error{
Kind: "DurationExceededError",
}
// IsDurationExceededError asserts DurationExceededError
func IsDurationExceededError(err error) bool {
return microerror.Cause(err) == DurationExceededError
}
// SSOError means something went wrong during the SSO process
var SSOError = µerror.Error{
Kind: "SSOError",
}
// IsSSOError asserts SSOError
func IsSSOError(err error) bool {
return microerror.Cause(err) == SSOError
}
// ProviderNotSupportedError means that the intended action is not possible with
// the installation's provider.
var ProviderNotSupportedError = µerror.Error{
Kind: "ProviderNotSupportedError",
}
// IsProviderNotSupportedError asserts ProviderNotSupportedError.
func IsProviderNotSupportedError(err error) bool {
return microerror.Cause(err) == ProviderNotSupportedError
}
// RequiredFlagMissingError means that a required flag has not been set by the user.
var RequiredFlagMissingError = µerror.Error{
Kind: "RequiredFlagMissingError",
}
// IsRequiredFlagMissingError asserts RequiredFlagMissingError.
func IsRequiredFlagMissingError(err error) bool {
return microerror.Cause(err) == RequiredFlagMissingError
}
// CredentialsAlreadySetError means the user tried setting credential to an org
// that has credentials already.
var CredentialsAlreadySetError = µerror.Error{
Kind: "CredentialsAlreadySetError",
}
// IsCredentialsAlreadySetError asserts CredentialsAlreadySetError.
func IsCredentialsAlreadySetError(err error) bool {
return microerror.Cause(err) == CredentialsAlreadySetError
}
// UpdateCheckFailed means that checking for a newer gsctl version failed.
var UpdateCheckFailed = µerror.Error{
Kind: "UpdateCheckFailed",
}
// IsUpdateCheckFailed asserts UpdateCheckFailed.
func IsUpdateCheckFailed(err error) bool {
return microerror.Cause(err) == UpdateCheckFailed
}
// ConflictingWorkerFlagsUsedError is raised when the deprecated --num-workers
// flag is used together with the new node count flags --workers-min and
// --workers-max.
var ConflictingWorkerFlagsUsedError = µerror.Error{
Kind: "ConflictingWorkerFlagsUsedError",
}
// IsConflictingWorkerFlagsUsed asserts ConflictingWorkerFlagsUsedError.
func IsConflictingWorkerFlagsUsed(err error) bool {
return microerror.Cause(err) == ConflictingWorkerFlagsUsedError
}
// WorkersMinMaxInvalidError is raised when the value of the node count flag
// --workers-min is higher than the value of the node count flag --workers-max.
var WorkersMinMaxInvalidError = µerror.Error{
Kind: "WorkersMinMaxInvalidError",
Desc: "min must not be higher than max",
}
// IsWorkersMinMaxInvalid asserts WorkersMinMaxInvalidError.
func IsWorkersMinMaxInvalid(err error) bool {
return microerror.Cause(err) == WorkersMinMaxInvalidError
}
// OutputFormatInvalidError is raised when the user specifies an unsupported output format.
var OutputFormatInvalidError = µerror.Error{
Kind: "OutputFormatInvalidError",
}
// IsOutputFormatInvalid asserts OutputFormatInvalidError.
func IsOutputFormatInvalid(err error) bool {
return microerror.Cause(err) == OutputFormatInvalidError
}
// ClusterDoesNotSupportNodePoolsError is raised when the user tries to do something with node pools
// on a cluster that does not support node pools.
var ClusterDoesNotSupportNodePoolsError = µerror.Error{
Kind: "ClusterDoesNotSupportNodePoolsError",
}
// IsClusterDoesNotSupportNodePools asserts ClusterDoesNotSupportNodePoolsError.
func IsClusterDoesNotSupportNodePools(err error) bool {
return microerror.Cause(err) == ClusterDoesNotSupportNodePoolsError
}
// NoOpError is raised when the user calls a command without any meaningful
// parameters, resulting in no change/nothing done.
var NoOpError = µerror.Error{
Kind: "NoOpError",
Desc: "Nothing to be done",
}
// IsNoOpError asserts NoOpError.
func IsNoOpError(err error) bool {
return microerror.Cause(err) == NoOpError
}
// InvalidNodePoolIDArgumentError should be raised when the user gives a "clusterID/nodepoolID"
// argument that is syntactically incorrect.
var InvalidNodePoolIDArgumentError = µerror.Error{
Kind: "InvalidNodePoolIDArgumentError",
}
// IsInvalidNodePoolIDArgument asserts InvalidNodePoolIDArgumentError.
func IsInvalidNodePoolIDArgument(err error) bool {
return microerror.Cause(err) == InvalidNodePoolIDArgumentError
}
// Endpoint deletion errors
// CouldNotDeleteEndpointError should be used when the
// "API endpoint" could not be deleted
var CouldNotDeleteEndpointError = µerror.Error{
Kind: "CouldNotDeleteEndpointError",
}
// IsCouldNotDeleteEndpointError asserts CouldNotDeleteEndpointError.
func IsCouldNotDeleteEndpointError(err error) bool {
return microerror.Cause(err) == CouldNotDeleteEndpointError
}
// EndpointNotFoundError should be used when the user
// tries to delete an "API endpoint" that does not exist
var EndpointNotFoundError = µerror.Error{
Kind: "EndpointNotFoundError",
}
// IsEndpointNotFoundError asserts EndpointNotFoundError.
func IsEndpointNotFoundError(err error) bool {
return microerror.Cause(err) == EndpointNotFoundError
}
// NotPercentage should be used when the user
// tries to set a percentage value outside 0-100
var NotPercentage = µerror.Error{
Kind: "NotPercentage",
Desc: "Value should be in the range between 0 and 100.",
}
|
#include <iostream>
#include <vector>
#include <string>
#include <sstream>
using namespace std;
// Joins the given strings into one comma-separated string,
// e.g. {"apple", "banana"} -> "apple, banana". An empty list yields "".
// Takes the vector by const reference to avoid copying it on every call,
// and uses size_t for the index to avoid a signed/unsigned comparison.
std::string convertListToString(const std::vector<std::string>& list) {
    std::stringstream ss;
    for (std::size_t i = 0; i < list.size(); i++) {
        ss << list[i];
        if (i != list.size() - 1)
            ss << ", ";  // separator between elements, but not after the last
    }
    return ss.str();
}
int main()
{
vector<string> list = {"apple", "banana", "cherry"};
string result = convertListToString(list);
cout << "Result: " << result;
return 0;
}
// Output: Result: apple, banana, cherry |
// Converts a JS array of byte values into a Java byte[] via Frida's
// Java.array bridge, so it can be passed to Java methods.
function bytelist_js_to_java(bytelist) {
    return Java.array('byte', bytelist)
    // Earlier approach kept for reference: round-trip through Base64.
    // var base64Str = base64Encode(bytelist)
    // var androidBase64 = Java.use('android.util.Base64')
    // var bytesInJava = androidBase64.decode(base64Str, 0)
    // return bytesInJava;
}
// Converts a Java byte container back into a plain JS array by serializing
// it through org.json.JSONArray and parsing the resulting JSON string.
function bytelist_java_to_js(bytelist) {
    var jsonString = Java.use('org.json.JSONArray').$new(bytelist).toString();
    return JSON.parse(jsonString);
}
// Decrypts an encrypted script (given as a JS byte array) using the target
// app's own ScriptEncryption.Companion.decrypt(). Resolves with the
// decrypted bytes as a JS array, rejects on any bridge/Java error.
// Fix: the Promise constructor must be invoked with `new` — calling
// `Promise(...)` as a plain function throws a TypeError at runtime.
function decrypt(byteListInJs) {
    return new Promise(function (resolve, reject) {
        try {
            Java.perform(function () {
                try {
                    // Grab the Kotlin companion object that owns encrypt/decrypt.
                    var objCompanion = Java.use('com.stardust.autojs.engine.encryption.ScriptEncryption').class.getField("Companion").get(null)
                    objCompanion = Java.cast(objCompanion, Java.use('com.stardust.autojs.engine.encryption.ScriptEncryption$Companion'))
                    var byteListInJava = bytelist_js_to_java(byteListInJs)
                    // decrypt(bytes, 8, length): presumably skips an 8-byte header — TODO confirm.
                    var byteListRtnInJava = objCompanion.decrypt(byteListInJava, 8, byteListInJs.length)
                    var byteListRtnInJS = bytelist_java_to_js(byteListRtnInJava)
                    resolve(byteListRtnInJS);
                } catch (e) {
                    reject(e)
                }
            })
        } catch (e) {
            reject(e)
        }
    });
}
// Encrypts a script (given as a JS byte array) using the target app's own
// ScriptEncryption.Companion.encrypt(). Resolves with the encrypted bytes
// as a JS array, rejects on any bridge/Java error.
// Fix: the Promise constructor must be invoked with `new` — calling
// `Promise(...)` as a plain function throws a TypeError at runtime.
function encrypt(byteListInJs) {
    return new Promise(function (resolve, reject) {
        try {
            Java.perform(function () {
                try {
                    // Grab the Kotlin companion object that owns encrypt/decrypt.
                    var objCompanion = Java.use('com.stardust.autojs.engine.encryption.ScriptEncryption').class.getField("Companion").get(null)
                    objCompanion = Java.cast(objCompanion, Java.use('com.stardust.autojs.engine.encryption.ScriptEncryption$Companion'))
                    var byteListInJava = bytelist_js_to_java(byteListInJs)
                    var byteListRtnInJava = objCompanion.encrypt(byteListInJava)
                    var byteListRtnInJS = bytelist_java_to_js(byteListRtnInJava)
                    resolve(byteListRtnInJS);
                } catch (e) {
                    reject(e)
                }
            })
        } catch (e) {
            reject(e)
        }
    });
}
// Resolves with the app's private files directory (absolute path), found by
// scanning live android.app.ContextImpl instances. Rejects if none is found.
// Fix: the Promise constructor must be invoked with `new` — calling
// `Promise(...)` as a plain function throws a TypeError at runtime.
function get_project_path() {
    return new Promise(function (resolve, reject) {
        try {
            Java.perform(function () {
                try {
                    var got = false;
                    Java.choose('android.app.ContextImpl', {
                        onMatch: function (instance) {
                            // Use only the first matching context instance.
                            if (!got) {
                                got = true;
                                var filesDir = Java.cast(instance, Java.use('android.app.ContextImpl')).getFilesDir().getAbsolutePath();
                                resolve(filesDir);
                            }
                            return "stop";
                        },
                        onComplete: function () {
                            if(!got){
                                console.log("bug, unable to found any 'android.app.ContextImpl' instance!!!")
                                reject();
                            }
                        }
                    })
                } catch (e) {
                    reject(e)
                }
            })
        } catch (e) {
            reject(e)
        }
    });
}
// Writes `data` to `full_path` using Frida's File API, truncating any
// existing file ('w' mode), and closes the handle.
function write_file(full_path, data) {
    var f = new File(full_path, 'w')
    f.write(data)
    f.close()
}
// Frida RPC surface: exposes the helpers above to the host-side script.
rpc.exports = {
    "decrypt": decrypt,
    "encrypt": encrypt,
    "getprojectpath": get_project_path,
    "writefile": write_file,
}
<gh_stars>0
package etf.dotsandboxes.me170117d;
import java.io.*;
import java.util.ArrayList;
import java.util.Iterator;
/**
 * Complete state of a dots-and-boxes match: which lines are drawn, which
 * boxes ("centers") are owned by which player, whose turn it is, and the
 * full move history. Moves can be applied and undone, the state can be
 * cloned by replaying the history, and saved to a text file.
 */
public class GameState implements Cloneable {

    // GRAY marks an unowned box; BLUE and RED are the two players.
    enum Turn{
        BLUE, RED, GRAY
    }

    private Turn currentTurn;          // player whose move is next
    private GameScreen gameScreen;     // optional UI mirror; may be null (e.g. in clones)
    private ArrayList<Move> moveList;  // history of applied moves, oldest first
    private boolean[][][] lines;       // [orientation][row][col] -> true if line drawn
    private Turn centers[][];          // ownership of each box; null until occupied
    private GameConfig gc;             // board dimensions and starting player
    private int progressCnt = 0;       // number of boxes claimed so far
    private int blueCenters = 0, redCenters = 0;  // score per player
    private boolean gameOver = false;
    // Count of boxes currently surrounded by exactly two lines.
    // NOTE(review): incremented in apply(), decremented in undo(); consumed
    // only by isTunnelPhase() — confirm intended semantics with its caller.
    private int criticalCenters = 0;

    /** Creates an empty board for the given configuration. */
    public GameState(GameConfig gc) {
        this.gc = gc;
        currentTurn = gc.firstTurn;
        moveList = new ArrayList<Move>();
        // +1: an R x C board has R+1 rows of horizontal lines and C+1
        // columns of vertical lines.
        lines = new boolean[2][gc.rowCnt+1][gc.colCnt+1];
        centers = new Turn[gc.rowCnt][gc.colCnt];
    }

    /**
     * Deep copy built by replaying the move history onto a fresh board.
     * The clone has no game screen attached.
     */
    @Override
    public GameState clone() {
        GameState ret = new GameState(gc);
        for(Move m: moveList) {
            ret.apply(m);
        }
        return ret;
    }

    /** Returns whether the line at (orientation, row, col) is drawn. */
    public boolean getLine(int ort, int row, int col) {
        return lines[ort][row][col];
    }

    /** True once every box on the board has been claimed. */
    public boolean gameOver() {
        return gameOver;
    }

    public Turn getCurrentTurn() {
        return currentTurn;
    }

    /**
     * Applies a move: records it, draws the line, updates box ownership,
     * mirrors the change to the UI (if attached), and advances the turn.
     * Completing at least one box keeps the turn with the current player.
     */
    public void apply(Move move) {
        moveList.add(move);
        lines[move.getOrt()][move.getRow()][move.getCol()] = true;
        if(gameScreen != null) {
            gameScreen.setLine(move);
            gameScreen.addMove(move);
        }
        boolean changeTurn = true;
        if(move.getOrt() == Move.HORIZONTAL) {
            // Box above the line (unless the line lies on the top edge).
            if(move.getRow()>0) {
                int row = getCenterRow(move.getOrt(), move.getRow(), move.getCol(), true);
                int col = getCenterCol(move.getOrt(), move.getRow(), move.getCol(), true);
                int linCnt = linesAround(row, col);
                if(linCnt == 4) {
                    // Fourth side drawn: current player claims the box and moves again.
                    changeTurn = false;
                    occupy(row,col);
                }else if(linCnt == 2) {
                    criticalCenters++;
                }
            }
            // Box below the line (unless the line lies on the bottom edge).
            if(move.getRow()<gc.rowCnt) {
                int row = getCenterRow(move.getOrt(), move.getRow(), move.getCol(), false);
                int col = getCenterCol(move.getOrt(), move.getRow(), move.getCol(), false);
                int linCnt = linesAround(row, col);
                if(linCnt == 4) {
                    changeTurn = false;
                    occupy(row,col);
                }else if(linCnt == 2) {
                    criticalCenters++;
                }
            }
        }
        else {
            // Box left of the line (unless the line lies on the left edge).
            if(move.getCol()>0) {
                int row = getCenterRow(move.getOrt(), move.getRow(), move.getCol(), true);
                int col = getCenterCol(move.getOrt(), move.getRow(), move.getCol(), true);
                int linCnt = linesAround(row, col);
                if(linCnt == 4) {
                    changeTurn = false;
                    occupy(row,col);
                }else if(linCnt == 2) {
                    criticalCenters++;
                }
            }
            // Box right of the line (unless the line lies on the right edge).
            if(move.getCol()<gc.colCnt) {
                int row = getCenterRow(move.getOrt(), move.getRow(), move.getCol(), false);
                int col = getCenterCol(move.getOrt(), move.getRow(), move.getCol(), false);
                int linCnt = linesAround(row, col);
                if(linCnt == 4) {
                    changeTurn = false;
                    occupy(row,col);
                }else if(linCnt == 2) {
                    criticalCenters++;
                }
            }
        }
        if(changeTurn) {
            currentTurn = (currentTurn==Turn.BLUE)?Turn.RED:Turn.BLUE;
            if(gameScreen != null)
                gameScreen.setTurn(currentTurn);
        }
    }

    /**
     * Reverses the most recent move, restoring ownership, counters, turn and
     * UI. The adjacency checks run BEFORE the line flag is cleared, so the
     * counts mirror the post-apply state exactly.
     *
     * @return true if the undone move had completed a box (i.e. the turn had
     *         not passed when it was applied)
     */
    public boolean undo() {
        boolean changeTurn = true;
        Move move = moveList.remove(moveList.size()-1);
        if(move.getOrt() == Move.HORIZONTAL) {
            if(move.getRow()>0) {
                int row = getCenterRow(move.getOrt(), move.getRow(), move.getCol(), true);
                int col = getCenterCol(move.getOrt(), move.getRow(), move.getCol(), true);
                int linCnt = linesAround(row, col);
                if(linCnt == 4) {
                    // This move had claimed the box: release it again.
                    changeTurn = false;
                    unoccupy(row,col);
                }else if(linCnt == 2) {
                    criticalCenters--;
                }
            }
            if(move.getRow()<gc.rowCnt) {
                int row = getCenterRow(move.getOrt(), move.getRow(), move.getCol(), false);
                int col = getCenterCol(move.getOrt(), move.getRow(), move.getCol(), false);
                int linCnt = linesAround(row, col);
                if(linCnt == 4) {
                    changeTurn = false;
                    unoccupy(row,col);
                }else if(linCnt == 2) {
                    criticalCenters--;
                }
            }
        }
        else {
            if(move.getCol()>0) {
                int row = getCenterRow(move.getOrt(), move.getRow(), move.getCol(), true);
                int col = getCenterCol(move.getOrt(), move.getRow(), move.getCol(), true);
                int linCnt = linesAround(row, col);
                if(linCnt == 4) {
                    changeTurn = false;
                    unoccupy(row,col);
                }else if(linCnt == 2) {
                    criticalCenters--;
                }
            }
            if(move.getCol()<gc.colCnt) {
                int row = getCenterRow(move.getOrt(), move.getRow(), move.getCol(), false);
                int col = getCenterCol(move.getOrt(), move.getRow(), move.getCol(), false);
                int linCnt = linesAround(row, col);
                if(linCnt == 4) {
                    changeTurn = false;
                    unoccupy(row,col);
                }else if(linCnt == 2) {
                    criticalCenters--;
                }
            }
        }
        if(changeTurn) {
            // The applied move had passed the turn, so pass it back.
            currentTurn = (currentTurn==Turn.BLUE)?Turn.RED:Turn.BLUE;
            if(gameScreen != null)
                gameScreen.setTurn(currentTurn);
        }
        // Only now erase the line, after all linesAround() checks above.
        lines[move.getOrt()][move.getRow()][move.getCol()] = false;
        if(gameScreen != null) {
            gameScreen.resetLine(move);
            gameScreen.removeMove();
        }
        return !changeTurn;
    }

    /** Releases a box during undo: resets ownership, counters and UI. */
    private void unoccupy(int row, int col) {
        centers[row][col]= Turn.GRAY;
        if(gameScreen != null)
            gameScreen.setCenter(row, col, Turn.GRAY);
        progressCnt--;
        // currentTurn is still the player who claimed the box, because
        // occupying never changed the turn.
        if(currentTurn == Turn.BLUE) {
            blueCenters--;
        }else {
            redCenters--;
        }
        gameOver = false;
    }

    /** Claims a box for the current player and updates counters and UI. */
    private void occupy(int row, int col) {
        centers[row][col]=currentTurn;
        if(gameScreen != null)
            gameScreen.setCenter(row, col, currentTurn);
        progressCnt++;
        if(currentTurn == Turn.BLUE) {
            blueCenters++;
        }else {
            redCenters++;
        }
        if(progressCnt == gc.colCnt*gc.rowCnt)
            gameOver = true;
    }

    /**
     * Row of the box adjacent to line (ort, i, j); upOrLeft selects the box
     * above a horizontal line (otherwise the one below / at the same row).
     */
    public static int getCenterRow(int ort, int i, int j, boolean upOrLeft) {
        if(ort == Move.HORIZONTAL && upOrLeft)
            return i-1;
        return i;
    }

    /**
     * Column of the box adjacent to line (ort, i, j); upOrLeft selects the
     * box left of a vertical line (otherwise the one right / same column).
     */
    public static int getCenterCol(int ort, int i, int j, boolean upOrLeft) {
        if(ort == Move.VERTICAL && upOrLeft)
            return j-1;
        return j;
    }

    /** Number of drawn lines (0-4) surrounding the box at (i, j). */
    public int linesAround(int i, int j) {
        return (lines[Move.HORIZONTAL][i][j]?1:0) +
                (lines[Move.HORIZONTAL][i+1][j]?1:0) +
                (lines[Move.VERTICAL][i][j]?1:0) +
                (lines[Move.VERTICAL][i][j+1]?1:0);
    }

    /** Attaches (or detaches, with null) the UI that mirrors state changes. */
    public void setGameScreen(GameScreen gameScreen) {
        this.gameScreen = gameScreen;
    }

    /**
     * Saves the game as text: a "rowCnt colCnt" header line followed by one
     * move per line (no trailing newline after the last move).
     */
    public void saveGameState(String path) {
        File file = new File(path);
        try {
            BufferedWriter writer = new BufferedWriter(new FileWriter(file));
            String head = "" + gc.rowCnt + " " + gc.colCnt;
            writer.write(head);
            writer.newLine();
            for(Move m : moveList) {
                writer.write(m.toString());
                if(moveList.get(moveList.size()-1) != m)
                    writer.newLine();
            }
            writer.close();
        } catch (IOException e) {
            // Saving is best-effort: report the failure and continue.
            e.printStackTrace();
        }
    }

    /** Replays a list of moves onto this state (e.g. when loading a game). */
    public void setState(ArrayList<Move> moves) {
        for(Move m: moves) {
            apply(m);
        }
    }

    public GameConfig getConfig() {
        return gc;
    }

    public int getBlueCenters() {
        return blueCenters;
    }

    public int getRedCenters() {
        return redCenters;
    }

    /**
     * True when every box has exactly two surrounding lines.
     * NOTE(review): relies on criticalCenters bookkeeping above — confirm
     * against the AI code that consumes this.
     */
    public boolean isTunnelPhase() {
        return criticalCenters == gc.colCnt*gc.rowCnt;
    }

    /** Number of moves played so far. */
    public int getTurnCount() {
        return moveList.size();
    }
}
|
def separate_even_odd(nums):
    """Return a new list with every even number first, then every odd one.

    The relative order within each parity group is preserved (stable).
    """
    evens = [value for value in nums if value % 2 == 0]
    odds = [value for value in nums if value % 2 != 0]
    return evens + odds


print(separate_even_odd([1, 2, 3, 4, 5, 6, 7, 8]))
import * as moment from 'moment-timezone';
import * as path from 'path';
import { BOT_DESCRIPTION, GITHUB_LINK } from '../helpers/consts';
import { embedFactory } from '../services/EmbedFactoryService';
import { redisCollectorService } from '../services/RedisCollectorService';
import { Command } from '../types';
// Command group name derived from this file's basename (without extension).
const group = path.parse(__filename).name;
/**
 * `help` command: lists every non-hidden command, grouped by command group
 * and sorted alphabetically, in a single embed.
 */
const help: Command = {
  name: 'help',
  group,
  aliases: ['commands', 'h'],
  description: 'Prints out this message.',
  execute(message) {
    const { user } = message.client;

    // Bucket every registered command under its group name.
    const grouped: { [key: string]: Array<Command> } = { uncategorized: [] };
    message.client.commands!.forEach((cmd) => {
      if (!cmd.group) {
        grouped.uncategorized.push(cmd);
        return;
      }
      if (cmd.group in grouped) {
        grouped[cmd.group].push(cmd);
      } else {
        grouped[cmd.group] = [cmd];
      }
    });

    // Capitalize each group heading, drop hidden commands, sort by name.
    const ordered: { [key: string]: Array<Command> } = {};
    Object.keys(grouped).sort().forEach((key) => {
      const heading = key.charAt(0).toUpperCase() + key.slice(1);
      ordered[heading] = grouped[key]
        .filter((cmd) => !cmd.hidden)
        .sort((a, b) => (a.name > b.name ? 1 : -1));
    });

    const embed = embedFactory.getEmbedBase(user, `${user?.username} commands list`)
      .setThumbnail(user?.avatarURL({ dynamic: true }) || '')
      .setDescription(BOT_DESCRIPTION);

    const prefix = process.env.BOT_PREFIX;
    Object.keys(ordered).forEach((heading) => {
      if (!ordered[heading].length) {
        return; // skip groups whose commands are all hidden
      }
      embed.addField('Group', `**${heading}**`);
      ordered[heading].forEach((cmd) => {
        const label = cmd.aliases
          ? `${prefix}${cmd.name} [${cmd.aliases.join(', ')}]`
          : `${prefix}${cmd.name}`;
        embed.addField(label, cmd.description, true);
      });
    });

    return message.channel.send(embed);
  },
};
/**
 * `ping` command: replies with an embed, then edits it to report the
 * message round-trip time and the websocket heartbeat.
 */
const ping: Command = {
  name: 'ping',
  group,
  description: 'Ping!',
  async execute(message) {
    // Send a first reply; the round-trip is the timestamp difference.
    const sent = await message.channel.send(
      embedFactory.getEmbedBase(message.client.user, 'Pong!').setColor('GREEN'),
    );
    const latency = moment(sent.createdTimestamp).diff(moment(message.createdTimestamp));
    const updated = embedFactory.getEmbedBase(message.client.user, 'Pong!')
      .setDescription(
        `:hourglass: Message ping: ${latency}ms\n`
        + `:heartbeat: Websocket ping: ${message.client.ws.ping}ms`,
      )
      .setColor('GREEN')
      .setTimestamp();
    return sent.edit(updated);
  },
};
// `invite` command: replies with the bot's OAuth invite URL, wrapped in <>
// to suppress Discord's link preview.
const invite: Command = {
  name: 'invite',
  group,
  description: 'Retrieves bot invite.',
  async execute(message) {
    // Returns old API link since the lib has not been updated yet
    return message.channel.send(`<${await message.client.generateInvite()}>`);
  },
};
const info: Command = {
  name: 'info',
  aliases: ['about'],
  group,
  description: 'Prints bot info.',
  async execute(message) {
    const { client } = message;
    const { user } = client;
    // Resolve the async pieces up front, then build the embed in one go.
    const inviteLink = await client.generateInvite();
    const commandsExecuted = await redisCollectorService.getKeyValue('commands');
    const infoEmbed = embedFactory.getEmbedBase(user, 'Source code')
      .setURL(GITHUB_LINK)
      .setThumbnail(user?.avatarURL({ dynamic: true }) || '')
      .setDescription(
        `${BOT_DESCRIPTION}\n`
        + 'Check out real time counter [here](https://ayamedespair.com).\n'
        + `[Invite this bot to your server](${inviteLink}).`,
      )
      .addField('Users known', `${client.users.cache.size}`, true)
      .addField('Guilds known', `${client.guilds.cache.size}`, true)
      .addField('Commands executed', `${commandsExecuted}`, true);
    return message.channel.send(infoEmbed);
  },
};
export default [help, ping, invite, info];
|
<filename>src/main/java/org/infinispan/persistence/cloud/logging/Log.java
/*
* JBoss, Home of Professional Open Source.
* Copyright 2000 - 2011, Red Hat Middleware LLC, and individual contributors
* as indicated by the @author tags. See the copyright.txt file in the
* distribution for a full listing of individual contributors.
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this software; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA, or see the FSF site: http://www.fsf.org.
*/
package org.infinispan.persistence.cloud.logging;
import org.infinispan.commons.CacheConfigurationException;
import org.jboss.logging.BasicLogger;
import org.jboss.logging.Logger.Level;
import org.jboss.logging.annotations.LogMessage;
import org.jboss.logging.annotations.Message;
import org.jboss.logging.annotations.MessageLogger;
/**
 * Log abstraction for the cloud cache store. For this module, message ids
 * ranging from 7001 to 8000 inclusively have been reserved.
 *
 * @author <NAME>
 * @author <NAME>
 * @since 7.2
 */
@MessageLogger(projectCode = "ISPN")
public interface Log extends BasicLogger {

   // Configuration validation errors (ids 7001-7005): each factory method
   // builds the exception so callers can throw it with a stable ISPN id.

   @Message(value = "Provider not specified", id = 7001)
   CacheConfigurationException providerNotSpecified();

   @Message(value = "Location not specified", id = 7002)
   CacheConfigurationException locationNotSpecified();

   @Message(value = "Identity not specified", id = 7003)
   CacheConfigurationException identityNotSpecified();

   @Message(value = "Credential not specified", id = 7004)
   CacheConfigurationException credentialNotSpecified();

   @Message(value = "Container not specified", id = 7005)
   CacheConfigurationException containerNotSpecified();

   /** Logged while waiting for a storage container that is still being created. */
   @LogMessage(level = Level.INFO)
   @Message(value = "Container hasn't been created yet, waiting...", id = 7006)
   void waitingForContainer();

   /** Warns users whose configuration still sets the removed 'overrides' attribute. */
   @LogMessage(level = Level.WARN)
   @Message(value = "The 'overrides' attribute is no longer supported. Use <property> elements instead", id = 7007)
   void overridesRemoved();
}
|
package contract;
import tx.Data;
/**
 * A {@link Message} assembled from a transaction's {@code Data} payload:
 * it captures the caller's identity plus the reflective call description
 * (method name, parameter types) and the actual argument values.
 */
public class DataMessage implements Message{

    // Raw identifier of the caller (byte form; exact encoding defined by the
    // transaction layer — not visible here).
    private byte[] sender;
    // Name of the method this message asks to invoke.
    private String methodName;
    // Parameter types of the target method; raw Class[] mirrors Data's API.
    private Class[] parameterTypes;
    // Argument values, positionally parallel to parameterTypes.
    private Object[] inputs;

    /**
     * Builds a message for the given sender from a transaction payload.
     *
     * @param sender raw identity of the caller
     * @param data   transaction data carrying the method call details
     */
    public DataMessage(byte[] sender, Data data) {
        this.sender = sender;
        this.methodName = data.getMethodName();
        this.parameterTypes = data.getParameterTypes();
        this.inputs = data.getInputs();
    }

    @Override
    public byte[] getSender() {
        return sender;
    }

    @Override
    public String getMethodName() {
        return methodName;
    }

    @Override
    public Class[] getParameterTypes() {
        return parameterTypes;
    }

    @Override
    public Object[] getInputs() {
        return inputs;
    }
}
|
set -e

# Simulate RNA-seq reads from a haplotype-specific transcript graph, then
# de-interleave and upload them.
# NOTE(review): expects SEED, CPU and NREADS to be exported by the caller
# (e.g. the surrounding workflow) — confirm with the invoking pipeline.

# Set file name prefixes
TRANSCRIPTS_PREFIX="1kg_NA12878_gencode100"
GRAPHS_PREFIX="1kg_NA12878_exons_gencode100_allpaths"
READS_PREFIX="ENCSR000AED_rep1"
EXPRESSION_PREFIX="${READS_PREFIX}_uni"
PROFILE_PREFIX="${TRANSCRIPTS_PREFIX}_${EXPRESSION_PREFIX}"
OUT_PREFIX="sim_${PROFILE_PREFIX}_vg_r1_h1"

# Download graph
aws s3 cp s3://vg-k8s/users/jsibbesen/vgrna/benchmark/whole_genome/data/graphs/${GRAPHS_PREFIX}/${GRAPHS_PREFIX}.xg . --no-progress

# Download haplotype-specific transcript summary
aws s3 cp s3://vg-k8s/users/jsibbesen/vgrna/benchmark/whole_genome/data/graphs/${GRAPHS_PREFIX}/ . --recursive --exclude "*" --include "*.txt.gz" --no-progress

# Download reads
aws s3 cp s3://encode-public/2013/06/13/c653a32e-e618-42b1-b8b8-b3b838847b97/ENCFF001REK.fastq.gz . --no-sign-request --no-progress
aws s3 cp s3://encode-public/2013/06/13/efa1a02d-6b43-4635-9ef8-d2d78c527839/ENCFF001REJ.fastq.gz . --no-sign-request --no-progress

# Download expression profile
aws s3 cp s3://vg-k8s/users/jsibbesen/vgrna/benchmark/whole_genome/data/reads/sim/${TRANSCRIPTS_PREFIX}/${EXPRESSION_PREFIX}/${PROFILE_PREFIX}.isoforms.results . --no-progress

# Extract haplotype-specific expression: keep NA12878 transcript names
# matching "_0_" (dropping the header via 'grep -v Name'), then subset the
# isoform results to just those transcripts.
/usr/bin/time -v bash -c "zcat */*.txt.gz | grep NA12878 | grep _0_ | cut -f1 | grep -v Name > haplotype_transcripts.txt; echo transcript_id >> haplotype_transcripts.txt; wc -l haplotype_transcripts.txt; grep -F -f haplotype_transcripts.txt ${PROFILE_PREFIX}.isoforms.results > ${PROFILE_PREFIX}_haplotype.isoforms.results; wc -l ${PROFILE_PREFIX}_haplotype.isoforms.results"

# Subset read pairs: 10M reads per mate; reusing the same seed keeps the
# two mate files in sync.
/usr/bin/time -v bash -c "seqtk sample -s ${SEED} ENCFF001REK.fastq.gz 10000000 > ${READS_PREFIX}_subset_1.fq; seqtk sample -s ${SEED} ENCFF001REJ.fastq.gz 10000000 > ${READS_PREFIX}_subset_2.fq; wc -l ${READS_PREFIX}_subset_1.fq; wc -l ${READS_PREFIX}_subset_2.fq"

# Simulate reads
/usr/bin/time -v bash -c "vg sim -r -t ${CPU} -x ${GRAPHS_PREFIX}.xg -F ${READS_PREFIX}_subset_1.fq -F ${READS_PREFIX}_subset_2.fq -T ${PROFILE_PREFIX}_haplotype.isoforms.results -n ${NREADS} -s ${SEED} -d 0.001 -p 216 -v 24 -a -E ${OUT_PREFIX}.txt > ${OUT_PREFIX}.gam"

# Extract simulated reads from alignments
/usr/bin/time -v bash -c "vg view -a -X ${OUT_PREFIX}.gam > ${OUT_PREFIX}.fq; wc -l ${OUT_PREFIX}.fq"

# De-interleave simulated reads (https://gist.github.com/nathanhaigh/3521724)
/usr/bin/time -v bash -c 'cat '"${OUT_PREFIX}"'.fq | paste - - - - - - - - | tee >(cut -f 1-4 | tr "\t" "\n" > '"${OUT_PREFIX}"'_1.fq) | cut -f 5-8 | tr "\t" "\n" > '"${OUT_PREFIX}"'_2.fq; wc -l '"${OUT_PREFIX}"'_1.fq; wc -l '"${OUT_PREFIX}"'_2.fq'

# Compress simulated reads and read info
/usr/bin/time -v bash -c "gzip ${OUT_PREFIX}_1.fq; gzip ${OUT_PREFIX}_2.fq; gzip ${OUT_PREFIX}.txt"

# Upload simulated reads and alignments (the interleaved .fq is excluded)
aws s3 sync . s3://vg-k8s/users/jsibbesen/vgrna/benchmark/whole_genome/data/reads/sim/${TRANSCRIPTS_PREFIX}/${EXPRESSION_PREFIX}/vg_r1/ --exclude "*" --include "${OUT_PREFIX}*" --exclude "${OUT_PREFIX}.fq" --no-progress
|
#!/bin/bash

# Fetch the benchmark datasets and generate one synthetic dataset.
./download_wikipedia_sparse.sh
./download_wikipedia_lsi128.sh
./download_cayton.sh
./download_colors.sh

# 500k uniformly random points in 64 dimensions, written to unif64.txt.
./genunif.py -d 64 -n 500000 -o unif64.txt
|
#!/bin/sh
# Build the test suite against the GHC API (-dynamic is needed when linking
# with -package ghc) and, only if compilation succeeds, run it while
# printing the user CPU time consumed ("%U").
ghc Tests.hs -package ghc -dynamic && /usr/bin/time -f "%U" ./Tests
|
package io.trillo.example;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import io.trillo.example.datasource.MemoryDataSource;
@SpringBootApplication
public class TrilloExampleServer {

    private static final Logger logger = LoggerFactory.getLogger(TrilloExampleServer.class);

    /**
     * Boots the in-memory data source and then starts the Spring context.
     *
     * @param args command-line arguments forwarded to Spring Boot
     */
    public static void main(String[] args) {
        // The data source singleton must exist before the application starts.
        MemoryDataSource.createInstance();
        SpringApplication app = new SpringApplication(TrilloExampleServer.class);
        app.run(args);
        String workingDir = System.getProperty("user.dir");
        // SLF4J parameterized logging instead of string concatenation.
        logger.info("Current working directory : {}", workingDir);
    }
}
|
/*
* 深圳小旺网络科技有限公司
*/
package com.base.utils;
import android.annotation.SuppressLint;
import android.content.ClipData;
import android.content.Context;
import android.os.Build;
/**
 * Clipboard helper utilities.
 */
@SuppressLint("NewApi")
public class ClipboardUtil {

    /**
     * Puts the given text on the system clipboard, using the framework API
     * that matches the current platform version.
     *
     * @param context any context, used to look up the clipboard service
     * @param content text to place on the clipboard
     */
    public static void setText(Context context, String content) {
        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.HONEYCOMB) {
            android.content.ClipboardManager cm = (android.content.ClipboardManager) context
                    .getSystemService(Context.CLIPBOARD_SERVICE);
            cm.setPrimaryClip(ClipData.newPlainText(null, content));
        } else {
            android.text.ClipboardManager cm = (android.text.ClipboardManager) context
                    .getSystemService(Context.CLIPBOARD_SERVICE);
            cm.setText(content);
        }
    }

    /**
     * Reads the first text item currently on the clipboard.
     *
     * @param context any context, used to look up the clipboard service
     * @param content unused; kept to preserve the existing call signature
     * @return the clipboard text, or {@code null} if the clipboard is empty
     *         or its first item carries no text
     */
    public static String getText(Context context, String content) {
        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.HONEYCOMB) {
            android.content.ClipboardManager cm = (android.content.ClipboardManager) context
                    .getSystemService(Context.CLIPBOARD_SERVICE);
            if (cm.hasPrimaryClip()) {
                // Fetch the clip once; repeated getPrimaryClip() calls could
                // observe different clipboard states between checks.
                ClipData clip = cm.getPrimaryClip();
                if (clip != null && clip.getItemCount() > 0) {
                    // getText() may be null for non-text items (e.g. URIs),
                    // which previously caused a NullPointerException.
                    CharSequence text = clip.getItemAt(0).getText();
                    return text != null ? text.toString() : null;
                }
            }
        } else {
            android.text.ClipboardManager cm = (android.text.ClipboardManager) context
                    .getSystemService(Context.CLIPBOARD_SERVICE);
            if (cm.hasText()) {
                return cm.getText().toString();
            }
        }
        return null;
    }
}
|
#!/bin/bash

echo "Starting verification of Slack example"

# Prep
failed=0 # Any failure will set this to 1
targetdir=${0%/*} # get script's own directory
cd $targetdir
source ../../scripts/example-test-utils.sh

# Prep
cleanLogs

# Check for the Slack token file. Without it, no point in proceeding.
if [ ! -f .slack.token ]; then
    echo "Slack token not found, cannot run Slack demo"
    failed=1
fi

# Build
npm -s uninstall slack # spurious errors happen if we don't force this
quark install slack.q --java
quark install slack.q --javascript
quark install slack.q --python
mvn -q compile

# Run each generated client for a few seconds and look for a marker string
# in its log. testProcessOutput is defined in example-test-utils.sh;
# presumably it sets $failed when the marker is missing — confirm there.
# Args: label, command, runtime seconds, log file, expected marker.
testProcessOutput \
    "Python Slack client" \
    "python -u bot.py" \
    3 \
    log/py-slack.log \
    "slack.event.SlackEvent object at"

testProcessOutput \
    "JavaScript Slack client" \
    "node bot.js" \
    3 \
    log/js-slack.log \
    "Hello {"

testProcessOutput \
    "Java Slack client" \
    "mvn exec:java -Dexec.mainClass=bot.SlackBot" \
    8 \
    log/j-slack.log \
    "WebSocket Client connected!"

echo "*********************"
if [ $failed == "1" ]
then
    echo Slack example: FAILED
else
    echo Slack example: PASSED
fi
echo "*********************"

# Exit with status so outer scripts can interpret the
# overall example result
exit $failed
|
#!/usr/bin/env bash
#
# Postprocess scaffold
#

# Include ./functions.bash
source .anax/scaffold/functions.bash

# Install using composer (production dependencies only)
composer install --no-dev

# Get scaffolding scripts from Anax Lite, Anax Flat, Anax design me
rsync -a vendor/anax/anax-lite/.anax/scaffold/postprocess.d .anax/scaffold/
rsync -a vendor/anax/anax-flat/.anax/scaffold/postprocess.d .anax/scaffold/
rsync -a vendor/anax/anax-design-me/.anax/scaffold/postprocess.d .anax/scaffold/

# Run scaffolding scripts: each collected postprocess.d/*.bash script runs
# in its own bash process; one dot is printed per script as progress.
echo -n "Processing scaffolding scripts: "
for file in .anax/scaffold/postprocess.d/*.bash; do
    echo -n "."
    bash "$file"
done
echo " done."
|
<filename>src/views/sonar/SonarLogin.js
import React from 'react';
import { connect } from 'react-redux';
import * as Yup from 'yup';
import { Formik } from 'formik';
import {
Box,
Button,
Container,
InputAdornment,
TextField,
Typography,
makeStyles
} from '@material-ui/core';
import FingerprintIcon from '@material-ui/icons/Fingerprint';
import SonarQubeIcon from 'src/assets/icons/SonarQubeIcon';
import { sonarCreator } from "src/redux/actions/Sonar/creator";
import { config } from "src/services/config.js";
// Card-like container style for the login form: themed background,
// thick primary-color border and rounded corners.
const useStyles = makeStyles((theme) => ({
  root: {
    height: '100%',
    overflow: 'hidden',
    backgroundColor: theme.palette.background.dark,
    border: "4px solid " + theme.palette.primary.main,
    borderRadius: theme.spacing(3),
    paddingBottom: theme.spacing(3),
    paddingTop: theme.spacing(3)
  }
}));
function SonarLogin(props){
const classes = useStyles();
return (
<Container className={classes.root} maxWidth="sm">
<Formik
initialValues={{
token: ''
}}
validationSchema={Yup.object().shape({
token: Yup.string().max(40).required('Personal Token is required'),
})}
onSubmit={(values,act) => {
props.login(values).then(act.setSubmitting(false));
}}
>
{({
errors,
handleBlur,
handleChange,
handleSubmit,
isSubmitting,
touched,
values
}) => (
<form onSubmit={handleSubmit}>
<Box mb={3}>
<Typography
color="textPrimary"
variant="h2"
>
SonarQube Sign in
</Typography>
</Box>
<TextField
error={Boolean(touched.token && errors.token)}
fullWidth
label="Token"
margin="normal"
name="token"
onBlur={handleBlur}
onChange={handleChange}
value={values.token}
variant="outlined"
InputProps={{
startAdornment: (
<InputAdornment position="start">
<FingerprintIcon />
</InputAdornment>
),
}}
/>
<Box my={2}>
<Button
color="primary"
disabled={isSubmitting}
fullWidth
size="large"
type="submit"
variant="contained"
>
Sign in now
</Button>
</Box>
<Typography
color="textSecondary"
variant="body1"
>
Don't have a Token? Request it on
{' '}
<Button
variant="contained"
color="secondary"
onClick={() => window.location = `${config.URL}/sonarqube`}
startIcon={<SonarQubeIcon/>}
>
SonarQube
</Button>
</Typography>
</form>
)}
</Formik>
</Container>
);
}
// Action creators bound to dispatch and injected into the component as props.
const actions = {
  login: sonarCreator.login
}
export default connect(null, actions)(SonarLogin);
|
#!/usr/bin/env bash
# Copyright 2020 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# GCS utility functions
#
# This is intended to be very general-purpose and "low-level". Higher-level
# policy does not belong here.
#
# This MUST NOT be used directly. Source it via lib.sh instead.
# Ensure that a bucket exists
# $1: The GCP project
# $2: The bucket (e.g. gs://bucket-name)
function _ensure_gcs_bucket() {
    if [ $# -lt 2 ] || [ -z "$1" ] || [ -z "$2" ]; then
        echo "_ensure_gcs_bucket(project, bucket) requires 2 arguments" >&2
        return 1
    fi

    local project="$1"
    local bucket="$2"
    # All buckets are created in the "us" multi-region.
    local location="us"

    # Create the bucket only if it does not already exist.
    if ! gsutil ls "${bucket}" >/dev/null 2>&1; then
        gsutil mb -p "${project}" -l "${location}" "${bucket}"
    fi
    # Enforce uniform bucket-level access ("bucket policy only") so access is
    # controlled solely via IAM rather than per-object ACLs.
    if ! gsutil bucketpolicyonly get "${bucket}" | grep -q "Enabled: True"; then
        gsutil bucketpolicyonly set on "${bucket}"
    fi
}
# Ensure the bucket exists and is world-readable
# $1: The GCP project
# $2: The bucket (e.g. gs://bucket-name)
function ensure_public_gcs_bucket() {
    if [[ $# -lt 2 || -z "$1" || -z "$2" ]]; then
        echo "ensure_public_gcs_bucket(project, bucket) requires 2 arguments" >&2
        return 1
    fi
    local project="$1" bucket="$2"
    # Create the bucket if needed, then let anyone read its objects.
    _ensure_gcs_bucket "${project}" "${bucket}"
    ensure_gcs_role_binding "${bucket}" "allUsers" "objectViewer"
}
# Set up logging
# $1: The GCS bucket (e.g. gs://k8s-infra-foo)
function ensure_gcs_bucket_logging() {
    if [ $# != 1 ] || [ -z "$1" ]; then
        echo "ensure_gcs_bucket_logging(bucket) requires 1 argument" >&2
        return 1
    fi

    local bucket="$1"

    local intent="${TMPDIR}/gcs-bucket-logging.intent.yaml"
    local before="${TMPDIR}/gcs-bucket-logging.before.yaml"
    local after="${TMPDIR}/gcs-bucket-logging.after.yaml"

    # Desired config: access logs go to the shared K8S_INFRA_GCSLOGS_BUCKET,
    # with this bucket's name as the object prefix.
    echo "{\"logBucket\": \"${K8S_INFRA_GCSLOGS_BUCKET}\", \"logObjectPrefix\": \"$bucket\"}" > "${intent}"

    # Only write (and show a diff) when the live config differs from intent.
    gsutil logging get "${bucket}"> "${before}"
    if ! diff "${intent}" "${before}" >/dev/null; then
        gsutil logging set on -b "${K8S_INFRA_GCSLOGS_BUCKET}" -o "${bucket#gs://}" "${bucket}"
        gsutil logging get "${bucket}" > "${after}"
        diff_colorized "${before}" "${after}"
    fi
}
# Ensure the bucket exists and is NOT world-accessible
# $1: The GCP project
# $2: The bucket (e.g. gs://bucket-name)
function ensure_private_gcs_bucket() {
    if [ $# -lt 2 ] || [ -z "$1" ] || [ -z "$2" ]; then
        echo "ensure_private_gcs_bucket(project, bucket) requires 2 arguments" >&2
        return 1
    fi

    local project="$1"
    local bucket="$2"

    _ensure_gcs_bucket "${project}" "${bucket}"
    # Revoke public read access in case the bucket was ever made public.
    ensure_removed_gcs_role_binding "${bucket}" "allUsers" "objectViewer"

    # roles/storage.legacyBucketOwner contains storage.buckets.setIamPolicy,
    # but not storage.objects.get. This means someone with roles/editor on the
    # project could grant themselves access to read bucket contents that they
    # aren't supposed to be able to read.
    #
    # Given that roles/editor has no *.setIamPolicy permissions for other
    # service resources, this seems like a security gap that should be closed.
    #
    # Ideally we would do this in _ensure_gcs_bucket. However, removing this
    # role means removing other (possibly needed) permissions that may be used
    # by GCP service agent service accounts (e.g. App Engine, GCR, GCE):
    # - storage.buckets.get
    # - storage.multipartUploads.(abort|create|list|listParts)
    # - storage.objects.(create|delete|list)
    #
    # So until we have time to research what buckets those service agents
    # specifically need access to, we'll leave them alone and constrain this
    # policy to "private" gcs buckets that are currently only used by humans
    # to store terraform state containing potentially sensitive info
    ensure_removed_gcs_role_binding "${bucket}" "projectEditor:${project}" "legacyBucketOwner"
}
# Sets the web policy on the bucket, including a default index.html page
# $1: The bucket (e.g. gs://bucket-name)
function ensure_gcs_web_policy() {
    if [[ $# -lt 1 || -z "$1" ]]; then
        echo "ensure_gcs_web_policy(bucket) requires 1 argument" >&2
        return 1
    fi
    # Serve index.html as the main (default) page for the bucket website.
    gsutil web set -m index.html "$1"
}
# Copies any static content into the bucket
# $1: The bucket (e.g. gs://bucket-name)
# $2: The source directory
function upload_gcs_static_content() {
    if [[ $# -lt 2 || -z "$1" || -z "$2" ]]; then
        echo "upload_gcs_static_content(bucket, dir) requires 2 arguments" >&2
        return 1
    fi
    local bucket="$1" srcdir="$2"
    # -c compares checksums rather than mtimes, avoiding no-op re-uploads.
    gsutil rsync -c "${srcdir}" "${bucket}"
}
# Ensure the bucket retention policy is set
# $1: The GCS bucket (e.g. gs://bucket-name)
# $2: The retention
function ensure_gcs_bucket_retention() {
    if [[ $# -lt 2 || -z "$1" || -z "$2" ]]; then
        echo "ensure_gcs_bucket_retention(bucket, retention) requires 2 arguments" >&2
        return 1
    fi
    # Apply the requested retention period ($2) to the bucket ($1).
    gsutil retention set "$2" "$1"
}
# Ensure the bucket auto-deletion policy is set
# $1: The GCS bucket (e.g. gs://bucket-name)
# $2: The auto-deletion policy (age in days)
function ensure_gcs_bucket_auto_deletion() {
    if [ $# -lt 2 ] || [ -z "$1" ] || [ -z "$2" ]; then
        # Fix: usage message previously misspelled "auto_delettion_days".
        echo "ensure_gcs_bucket_auto_deletion(bucket, auto_deletion_days) requires 2 arguments" >&2
        return 1
    fi

    local bucket="$1"
    local auto_deletion_days="$2"

    local intent="${TMPDIR}/gcs-lifecycle.intent.yaml"
    local before="${TMPDIR}/gcs-lifecycle.before.yaml"
    local after="${TMPDIR}/gcs-lifecycle.after.yaml"

    # Desired lifecycle: delete objects older than the given number of days.
    echo "{\"rule\": [{\"action\": {\"type\": \"Delete\"}, \"condition\": {\"age\": ${auto_deletion_days}}}]}" > "${intent}"

    # Only write (and show a diff) when the live policy differs from intent.
    gsutil lifecycle get "${bucket}"> "${before}"
    if ! diff "${intent}" "${before}" >/dev/null; then
        gsutil lifecycle set "${intent}" "${bucket}"
        gsutil lifecycle get "${bucket}" > "${after}"
        diff_colorized "${before}" "${after}"
    fi
}
# Grant write privileges on a bucket to a principal
# $1: The principal (group:<g> or serviceAccount:<s> or ...)
# $2: The bucket (e.g. gs://bucket-name)
function _empower_principal_to_write_gcs_bucket() {
    if [[ $# -lt 2 || -z "$1" || -z "$2" ]]; then
        echo "_empower_principal_to_write_gcs_bucket(principal, bucket) requires 2 arguments" >&2
        return 1
    fi
    local principal="$1" bucket="$2"
    # Writers need object admin plus the legacy bucket-writer role.
    ensure_gcs_role_binding "${bucket}" "${principal}" "objectAdmin"
    ensure_gcs_role_binding "${bucket}" "${principal}" "legacyBucketWriter"
}
# Grant admin privileges on a bucket to a principal
# $1: The principal (group:<g> or serviceAccount:<s> or ...)
# $2: The bucket (e.g. gs://bucket-name)
function _empower_principal_to_admin_gcs_bucket() {
    if [[ $# -lt 2 || -z "$1" || -z "$2" ]]; then
        echo "_empower_principal_to_admin_gcs_bucket(principal, bucket) requires 2 arguments" >&2
        return 1
    fi
    local principal="$1" bucket="$2"
    # Admins need object admin plus the legacy bucket-owner role.
    ensure_gcs_role_binding "${bucket}" "${principal}" "objectAdmin"
    ensure_gcs_role_binding "${bucket}" "${principal}" "legacyBucketOwner"
}
# Ensure that IAM binding is present for the given gcs bucket
# Arguments:
#   $1: The bucket (e.g. "gs://k8s-infra-foo")
#   $2: The principal (e.g. "group:k8s-infra-foo@kubernetes.io", "allUsers", etc.)
#   $3: The role name (e.g. "objectAdmin", "legacyBucketOwner", etc.)
ensure_gcs_role_binding() {
    if [ ! $# -eq 3 ] || [ -z "$1" ] || [ -z "$2" ] || [ -z "$3" ]; then
        echo "ensure_gcs_role_binding(bucket, principal, role) requires 3 arguments" >&2
        return 1
    fi

    local bucket="${1}"
    local principal="${2}"
    local role="${3}"

    # use json for gsutil iam set, yaml for easier diffing
    local before_json="${TMPDIR}/gsutil-iam-get.before.json"
    local after_json="${TMPDIR}/gsutil-iam-get.after.json"
    local before_yaml="${TMPDIR}/gcs-role-binding.before.yaml"
    local after_yaml="${TMPDIR}/gcs-role-binding.after.yaml"

    gsutil iam get "${bucket}" > "${before_json}"
    <"${before_json}" yq -y | _format_iam_policy > "${before_yaml}"

    # Avoid calling `gsutil iam set` if we can, to reduce output noise
    if ! <"${before_yaml}" yq --exit-status \
        ".[] | select(contains({role: \"${role}\", member: \"${principal}\"}))" \
        >/dev/null; then

        # add the binding, then merge with existing bindings
        # (group_by/map collapses duplicate role entries and dedupes members)
        # shellcheck disable=SC2016 # jq uses $foo for variables
        <"${before_json}" yq --arg role "${role}" --arg principal "${principal}" \
            '.bindings |= (
                . + [{
                    members: [$principal],
                    role: ("roles/storage." + $role)
                }]
                | group_by(.role)
                | map({
                    members: map(.members) | flatten | unique,
                    role: .[0].role
                })
            )' > "${after_json}"

        gsutil iam set "${after_json}" "${bucket}"
        <"${after_json}" yq -y | _format_iam_policy > "${after_yaml}"
        diff_colorized "${before_yaml}" "${after_yaml}"
    fi
}
# Ensure that IAM binding is removed for the given gcs bucket
# Arguments:
#   $1: The bucket (e.g. "gs://k8s-infra-foo")
#   $2: The principal (e.g. "group:k8s-infra-foo@kubernetes.io", "allUsers", etc.)
#   $3: The role name (e.g. "objectAdmin", "legacyBucketOwner", etc.)
ensure_removed_gcs_role_binding() {
    if [ ! $# -eq 3 ] || [ -z "$1" ] || [ -z "$2" ] || [ -z "$3" ]; then
        echo "ensure_removed_gcs_role_binding(bucket, principal, role) requires 3 arguments" >&2
        return 1
    fi

    local bucket="${1}"
    local principal="${2}"
    local role="${3}"

    # use json for gsutil iam set, yaml for easier diffing
    local before_json="${TMPDIR}/gsutil-iam-get.before.json"
    local after_json="${TMPDIR}/gsutil-iam-get.after.json"
    local before_yaml="${TMPDIR}/gcs-role-binding.before.yaml"
    local after_yaml="${TMPDIR}/gcs-role-binding.after.yaml"

    gsutil iam get "${bucket}" > "${before_json}"
    <"${before_json}" yq -y | _format_iam_policy > "${before_yaml}"

    # Avoid calling `gsutil iam set` if we can, to reduce output noise
    # (note: condition is inverted vs ensure_gcs_role_binding — we only act
    # when the binding IS currently present)
    if <"${before_yaml}" yq --exit-status \
        ".[] | select(contains({role: \"${role}\", member: \"${principal}\"}))" \
        >/dev/null; then

        # remove member from role if it exists; gcs deletes bindings with no
        # members, so we don't need to bother pruning them here
        # shellcheck disable=SC2016 # jq uses $foo for variables
        <"${before_json}" yq --arg role "${role}" --arg principal "${principal}" \
            '.bindings |= map(
                if .role == ("roles/storage." + $role) then
                    .members -= [$principal]
                else
                    .
                end
            )' > "${after_json}"

        gsutil iam set "${after_json}" "${bucket}"
        <"${after_json}" yq -y | _format_iam_policy > "${after_yaml}"
        diff_colorized "${before_yaml}" "${after_yaml}"
    fi
}
# Grant write privileges on a bucket to a group
# $1: The googlegroups group email
# $2: The bucket (e.g. gs://bucket-name)
function empower_group_to_write_gcs_bucket() {
    if [[ $# -lt 2 || -z "$1" || -z "$2" ]]; then
        echo "empower_group_to_write_gcs_bucket(group_email, bucket) requires 2 arguments" >&2
        return 1
    fi
    # Delegate with the "group:" principal prefix.
    _empower_principal_to_write_gcs_bucket "group:${1}" "${2}"
}
# Grant admin privileges on a bucket to a group
# $1: The googlegroups group email
# $2: The bucket (e.g. gs://bucket-name)
function empower_group_to_admin_gcs_bucket() {
    if [[ $# -lt 2 || -z "$1" || -z "$2" ]]; then
        echo "empower_group_to_admin_gcs_bucket(group_email, bucket) requires 2 arguments" >&2
        return 1
    fi
    # Delegate with the "group:" principal prefix.
    _empower_principal_to_admin_gcs_bucket "group:${1}" "${2}"
}
# Grant write privileges on a bucket to a service account
# $1: The service account email
# $2: The bucket (e.g. gs://bucket-name)
function empower_svcacct_to_write_gcs_bucket() {
    if [[ $# -lt 2 || -z "$1" || -z "$2" ]]; then
        echo "empower_svcacct_to_write_gcs_bucket(svcacct_email, bucket) requires 2 arguments" >&2
        return 1
    fi
    # Delegate with the "serviceAccount:" principal prefix.
    _empower_principal_to_write_gcs_bucket "serviceAccount:${1}" "${2}"
}
# Grant admin privileges on a bucket to a service account
# $1: The service account email
# $2: The bucket (e.g. gs://bucket-name)
function empower_svcacct_to_admin_gcs_bucket() {
    if [[ $# -lt 2 || -z "$1" || -z "$2" ]]; then
        echo "empower_svcacct_to_admin_gcs_bucket(svcacct_email, bucket) requires 2 arguments" >&2
        return 1
    fi
    # Delegate with the "serviceAccount:" principal prefix.
    _empower_principal_to_admin_gcs_bucket "serviceAccount:${1}" "${2}"
}
|
<gh_stars>0
// Check which properties an object has.
let object1={
  x:0,
  y:3,
  z:2
};
console.log(Object.keys(object1));

// Object.assign copies (and overwrites) properties onto the target:
// here it adds a:7 and replaces the existing x with 3.
Object.assign(object1,{a:7,x:3});
console.log(object1);

// jacques objects
let journal = [
  {events: ["work", "touched tree", "pizza",
            "running", "television"],
   squirrel: false},
  {events: ["work", "ice cream", "cauliflower",
            "lasagna", "touched tree", "brushed teeth"],
   squirrel: false},
  {events: ["weekend", "cycling", "break", "peanuts",
            "beer"],
   squirrel: true},
  /* and so on... */
];
// Arrays are objects, so typeof reports "object".
console.log(typeof journal);
console.log(journal);
console.log("journal 0 element:",journal[0]);

const score = {visitors: 0, home: 0};
// This is okay — const freezes the binding, not the object's contents
score.visitors = 1;
// This isn't allowed — reassigning a const binding throws a TypeError
// at runtime (intentionally left in to demonstrate the error)
score = {visitors: 1, home: 1};
<reponame>rovedit/Fort-Candle
#pragma once
#include <clean-core/assert.hh>
#include <clean-core/cursor.hh>
namespace cc
{
/// Copies every element of `from` into `to`, in order.
/// CC_CONTRACT checks that `to` can still receive an element on each step.
template <class RangeFrom, class RangeTo>
constexpr void copy(RangeFrom const& from, RangeTo& to)
{
    auto c_from = cc::to_cursor(from);
    auto c_to = cc::to_cursor(to);
    while (c_from)
    {
        CC_CONTRACT(c_to);
        *c_to = *c_from;
        ++c_to, ++c_from;
    }
}

/// Assigns `value` to every element of the range.
template <class Range, class T>
constexpr void fill(Range& range, T const& value)
{
    auto c = cc::to_cursor(range);
    while (c)
    {
        *c = value;
        ++c;
    }
}

/// True iff both ranges have the same size and element-wise equal contents.
template <class RangeA, class RangeB>
[[nodiscard]] constexpr bool are_ranges_equal(RangeA const& a, RangeB const& b)
{
    if (a.size() != b.size())
        return false;

    auto ca = cc::to_cursor(a);
    auto cb = cc::to_cursor(b);
    while (ca)
    {
        if (*ca != *cb)
            return false;
        ++ca, ++cb;
    }
    return true;
}

/// Negation of are_ranges_equal.
/// Delegating keeps the two predicates consistent by construction instead
/// of maintaining a duplicated comparison loop (same result: uses the same
/// size check and the same operator!= on elements).
template <class RangeA, class RangeB>
[[nodiscard]] constexpr bool are_ranges_unequal(RangeA const& a, RangeB const& b)
{
    return !are_ranges_equal(a, b);
}
}
|
///////////////////////////////////////////////////////////////////////////////
/// \file switch.hpp
/// A generalization of the conditional_compiler. Given N different compilers
/// in a MPL-style map and a lambda that generates a key from an expression,
/// find the compiler in the map corresponding to the key and use that compiler
/// to compile the expression.
//
// Copyright 2007 <NAME>. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_PROTO_V1_COMPILER_SWITCH_HPP_EAN_04_01_2005
#define BOOST_PROTO_V1_COMPILER_SWITCH_HPP_EAN_04_01_2005
#include <boost/mpl/at.hpp>
#include <boost/xpressive/proto/v1_/proto_fwd.hpp>
namespace boost { namespace proto1
{
///////////////////////////////////////////////////////////////////////////////
// switch_compiler
//  applies a transform, then looks up the appropriate compiler in a map
template<typename Lambda, typename Map>
struct switch_compiler
{
    // Metafunction: Lambda computes a key from (Op, State, Visitor); the key
    // selects a compiler from the MPL-style Map, whose own nested apply
    // yields the final compiled type.
    template<typename Op, typename State, typename Visitor>
    struct apply
    {
        // compiler chosen from Map by the Lambda-computed key
        typedef typename boost::mpl::at
        <
            Map
          , typename Lambda::BOOST_NESTED_TEMPLATE apply<Op, State, Visitor>::type
        >::type compiler_type;

        typedef typename compiler_type::BOOST_NESTED_TEMPLATE apply
        <
            Op
          , State
          , Visitor
        >::type type;
    };

    // Dispatches the runtime call to the compiler selected by apply<>.
    template<typename Op, typename State, typename Visitor>
    static typename apply<Op, State, Visitor>::type
    call(Op const &op, State const &state, Visitor &visitor)
    {
        typedef typename apply<Op, State, Visitor>::compiler_type compiler_type;
        return compiler_type::call(op, state, visitor);
    }
};
}}
#endif
|
#!/bin/sh

# Prints (but does not run) the manual steps for cutting a new
# perftracker-cp-crawler release: bump versions, tag, build, upload.

# Extract the current version string (the text between the double quotes
# on the __version__ line of __init__.py).
VER=`grep "__version__ = " perftracker_cp_crawler/__init__.py | cut -d "\"" -f 2`

# NOTE(review): 'echo "\n..."' only expands \n under some /bin/sh
# implementations (e.g. dash); under bash the backslash sequence prints
# literally — confirm which shell this is intended for.
echo "\n###### run the following commands manually ######\n"

echo "vim perftracker_cp_crawler/__init__.py # update version"
echo "vim python2-perftracker-cp-crawler.spec # update version"
echo "vim python36-perftracker-cp-crawler.spec # update version"
echo "git commit -m \"bump version to $VER\" perftracker_cp_crawler/__init__.py *.spec && git tag \"v$VER\" && git push origin --tags"
echo python3 setup.py sdist bdist_wheel
echo twine upload dist/perftracker-cp-crawler-$VER.tar.gz
echo git push
echo
|
<reponame>dmepilot/denver_zoo_cli
class DenverZooCli::Animal
  attr_accessor :url, :name, :klass, :order, :family, :genus, :species, :subspecies, :range, :habitat, :fun_facts

  @@all = []

  # Every animal created so far, in creation order.
  def self.all
    @@all
  end

  # Create an animal with its display name and source URL, then register it
  # in the class-level collection.
  def initialize(name, url)
    @name = name
    @url = url
    @fun_facts = []
    save
  end

  # Register this instance in the class-level collection.
  def save
    @@all << self
  end
end
#!/bin/bash

# Usage: ./<script> [push]
# Builds the service image, tags it with a timestamp and "latest", and
# (optionally) pushes both registry tags.

PUSH=$1
DATE="$(date "+%Y%m%d%H%M")" # build timestamp used as the image tag
REPOSITORY_PREFIX="latonaio"
SERVICE_NAME="sap-api-integrations-bank-master-reads-rmq-kube"

DOCKER_BUILDKIT=1 docker build --progress=plain -t ${SERVICE_NAME}:"${DATE}" . --no-cache

# tagging: local "latest" plus registry-prefixed timestamp and "latest" tags
docker tag ${SERVICE_NAME}:"${DATE}" ${SERVICE_NAME}:latest
docker tag ${SERVICE_NAME}:"${DATE}" ${REPOSITORY_PREFIX}/${SERVICE_NAME}:"${DATE}"
docker tag ${REPOSITORY_PREFIX}/${SERVICE_NAME}:"${DATE}" ${REPOSITORY_PREFIX}/${SERVICE_NAME}:latest

# Push only when explicitly requested with the "push" argument.
if [[ $PUSH == "push" ]]; then
    docker push ${REPOSITORY_PREFIX}/${SERVICE_NAME}:"${DATE}"
    docker push ${REPOSITORY_PREFIX}/${SERVICE_NAME}:latest
fi
|
#!/bin/bash

# This script clones the current repo to a temp dir
# and then builds and uploads the documentation.
# The cloned repo is left in the build/ dir as I am allergic to
# unguarded "rm -rf" commands in scripts. The documentation
# then lives in the github repo in the "gh-pages" branch.
#
# The documentation on Github uses the awesome mkdocs + Material facility
# kindly provided by the awesome Martin Donath (squidfunk)
# FMI:
# https://www.mkdocs.org/
# https://squidfunk.github.io/mkdocs-material/
#
# NOTES:
# for live updating, run: mkdocs serve
# aim browser at: http://127.0.0.1:8000/stepikBioinformaticsCourse
#
repotarget="stepikBioinformaticsCourse"

# Posting changes
# 1) commit and push your changes to master!!
# script assumes you run this with the current dir in the "docs" folder
# Prepare the script - use vim to fix line endings:
# 2) set ff=unix
#
# Then cd to the docs dir and run:
# 3) bash -x build_and_upload_docs.sh
# Check the "gh-pages" branch to see the changes.
# 4) Press "Shift-F5" to reload the browser and see your changes (Chrome).

REPO="https://github.com/jimandreas/$repotarget"
# Unique temp dir per run ($$ is this shell's PID) under ../../build/.
DIR=../../build/temp-$$

# Guard: the relative paths below only work from the docs/ directory.
if [ ! -f build_and_upload_docs.sh ]; then
    echo "this script must be run from the docs dir"
    exit
fi

cd ..
if [ ! -d build ]; then
    mkdir build
fi

if [ -d $DIR ]; then
    echo "build dir already exists!! EXITING."
    exit
fi

# Clone the current repo into temp folder
git clone $REPO $DIR

# Move working directory into temp folder
cd $DIR

# Build the site and push the new files up to GitHub
# NOTE(review): requires TOKEN_MADNESS to be exported with a GitHub access
# token — confirm it is set before running.
mkdocs gh-deploy --force -r https://jimandreas:${TOKEN_MADNESS}@github.com/jimandreas/$repotarget
|
# frozen_string_literal: true
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Auto-generated by gapic-generator-ruby. DO NOT EDIT!
require "google/cloud/errors"
require "google/chromeos/moblab/v1beta1/build_service_pb"
module Google
module Chromeos
module Moblab
module V1beta1
module BuildService
##
# Client for the BuildService service.
#
# Manages Chrome OS build services.
#
class Client
include Paths
# @private
attr_reader :build_service_stub
##
# Configure the BuildService Client class.
#
# See {::Google::Chromeos::Moblab::V1beta1::BuildService::Client::Configuration}
# for a description of the configuration fields.
#
# @example
#
# # Modify the configuration for all BuildService clients
# ::Google::Chromeos::Moblab::V1beta1::BuildService::Client.configure do |config|
# config.timeout = 10.0
# end
#
# @yield [config] Configure the Client client.
# @yieldparam config [Client::Configuration]
#
# @return [Client::Configuration]
#
# Builds (once) and returns the memoized class-level configuration,
# yielding it first when a block is given.
def self.configure
  @configure ||= begin
    namespace = ["Google", "Chromeos", "Moblab", "V1beta1"]
    # Walk up the namespace chain looking for an enclosing module that
    # exposes a configuration to inherit defaults from.
    parent_config = while namespace.any?
                      parent_name = namespace.join "::"
                      parent_const = const_get parent_name
                      break parent_const.configure if parent_const.respond_to? :configure
                      namespace.pop
                    end
    default_config = Client::Configuration.new parent_config

    # Per-RPC defaults: 60s timeouts; retry on gRPC code 14 (UNAVAILABLE)
    # with exponential backoff for the idempotent calls.
    default_config.rpcs.list_builds.timeout = 60.0
    default_config.rpcs.list_builds.retry_policy = {
      initial_delay: 1.0, max_delay: 10.0, multiplier: 1.3, retry_codes: [14]
    }

    default_config.rpcs.check_build_stage_status.timeout = 60.0
    default_config.rpcs.check_build_stage_status.retry_policy = {
      initial_delay: 1.0, max_delay: 10.0, multiplier: 1.3, retry_codes: [14]
    }

    # stage_build is not retried by default (it copies artifacts).
    default_config.rpcs.stage_build.timeout = 60.0

    default_config
  end
  yield @configure if block_given?
  @configure
end
##
# Configure the BuildService Client instance.
#
# The configuration is set to the derived mode, meaning that values can be changed,
# but structural changes (adding new fields, etc.) are not allowed. Structural changes
# should be made on {Client.configure}.
#
# See {::Google::Chromeos::Moblab::V1beta1::BuildService::Client::Configuration}
# for a description of the configuration fields.
#
# @yield [config] Configure the Client client.
# @yieldparam config [Client::Configuration]
#
# @return [Client::Configuration]
#
# Returns this instance's configuration (derived from the class-level
# defaults at construction), yielding it first when a block is given.
def configure
  yield @config if block_given?
  @config
end
##
# Create a new BuildService client object.
#
# @example
#
# # Create a client using the default configuration
# client = ::Google::Chromeos::Moblab::V1beta1::BuildService::Client.new
#
# # Create a client using a custom configuration
# client = ::Google::Chromeos::Moblab::V1beta1::BuildService::Client.new do |config|
# config.timeout = 10.0
# end
#
# @yield [config] Configure the BuildService client.
# @yieldparam config [Client::Configuration]
#
# Builds the gRPC stub, credentials, and long-running-operations client
# from the (optionally block-customized) configuration.
def initialize
  # These require statements are intentionally placed here to initialize
  # the gRPC module only when it's required.
  # See https://github.com/googleapis/toolkit/issues/446
  require "gapic/grpc"
  require "google/chromeos/moblab/v1beta1/build_service_services_pb"

  # Create the configuration object
  @config = Configuration.new Client.configure

  # Yield the configuration if needed
  yield @config if block_given?

  # Create credentials
  credentials = @config.credentials
  # Use self-signed JWT if the endpoint is unchanged from default,
  # but only if the default endpoint does not have a region prefix.
  enable_self_signed_jwt = @config.endpoint == Client.configure.endpoint &&
                           !@config.endpoint.split(".").first.include?("-")
  credentials ||= Credentials.default scope: @config.scope,
                                      enable_self_signed_jwt: enable_self_signed_jwt
  # A key-file path (String) or key Hash is upgraded to a credentials object.
  if credentials.is_a?(::String) || credentials.is_a?(::Hash)
    credentials = Credentials.new credentials, scope: @config.scope
  end
  # An explicitly configured quota project wins over one carried by the
  # credentials.
  @quota_project_id = @config.quota_project
  @quota_project_id ||= credentials.quota_project_id if credentials.respond_to? :quota_project_id

  # The LRO client shares this client's credentials and endpoint.
  @operations_client = Operations.new do |config|
    config.credentials = credentials
    config.endpoint = @config.endpoint
  end

  @build_service_stub = ::Gapic::ServiceStub.new(
    ::Google::Chromeos::Moblab::V1beta1::BuildService::Stub,
    credentials: credentials,
    endpoint: @config.endpoint,
    channel_args: @config.channel_args,
    interceptors: @config.interceptors
  )
end
##
# Get the associated client for long-running operations.
#
# @return [::Google::Chromeos::Moblab::V1beta1::BuildService::Operations]
#
attr_reader :operations_client
# Service calls
##
# Lists all build targets that a user has access to.
#
# @overload list_build_targets(request, options = nil)
# Pass arguments to `list_build_targets` via a request object, either of type
# {::Google::Chromeos::Moblab::V1beta1::ListBuildTargetsRequest} or an equivalent Hash.
#
# @param request [::Google::Chromeos::Moblab::V1beta1::ListBuildTargetsRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
# Overrides the default settings for this call, e.g, timeout, retries, etc. Optional.
#
# @overload list_build_targets(page_size: nil, page_token: nil)
# Pass arguments to `list_build_targets` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param page_size [::Integer]
# Optional. The number of build targets to return in a page.
# @param page_token [::String]
# Optional. A page token, received from a previous `ListBuildTargets` call. Provide
# this to retrieve the subsequent page.
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Gapic::PagedEnumerable<::Google::Chromeos::Moblab::V1beta1::BuildTarget>]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Gapic::PagedEnumerable<::Google::Chromeos::Moblab::V1beta1::BuildTarget>]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
# Calls the ListBuildTargets RPC; see the YARD block above for the
# request/response contract.
def list_build_targets request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  # Coerce a Hash (or pass a request object through) into the proto type.
  request = ::Gapic::Protobuf.coerce request, to: ::Google::Chromeos::Moblab::V1beta1::ListBuildTargetsRequest

  # Converts hash and nil to an options object
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Customize the options with defaults
  metadata = @config.rpcs.list_build_targets.metadata.to_h

  # Set x-goog-api-client and x-goog-user-project headers
  metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::Chromeos::Moblab::V1beta1::VERSION
  metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Per-RPC defaults are applied first, then client-wide defaults.
  options.apply_defaults timeout:      @config.rpcs.list_build_targets.timeout,
                         metadata:     metadata,
                         retry_policy: @config.rpcs.list_build_targets.retry_policy

  options.apply_defaults timeout:      @config.timeout,
                         metadata:     @config.metadata,
                         retry_policy: @config.retry_policy

  @build_service_stub.call_rpc :list_build_targets, request, options: options do |response, operation|
    # Wrap the raw response so callers can lazily enumerate across pages.
    response = ::Gapic::PagedEnumerable.new @build_service_stub, :list_build_targets, request, response, operation, options
    yield response, operation if block_given?
    return response
  end
rescue ::GRPC::BadStatus => e
  raise ::Google::Cloud::Error.from_error(e)
end
##
# Lists all models for the given build target.
#
# @overload list_models(request, options = nil)
# Pass arguments to `list_models` via a request object, either of type
# {::Google::Chromeos::Moblab::V1beta1::ListModelsRequest} or an equivalent Hash.
#
# @param request [::Google::Chromeos::Moblab::V1beta1::ListModelsRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
# Overrides the default settings for this call, e.g, timeout, retries, etc. Optional.
#
# @overload list_models(parent: nil, page_size: nil, page_token: nil)
# Pass arguments to `list_models` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param parent [::String]
# Required. The full resource name of build target.
# @param page_size [::Integer]
# Optional. The number of models to return in a page.
# @param page_token [::String]
# Optional. A page token, received from a previous `ListModels` call. Provide
# this to retrieve the subsequent page.
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Gapic::PagedEnumerable<::Google::Chromeos::Moblab::V1beta1::Model>]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Gapic::PagedEnumerable<::Google::Chromeos::Moblab::V1beta1::Model>]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
# Calls the ListModels RPC; see the YARD block above for the contract.
def list_models request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  request = ::Gapic::Protobuf.coerce request, to: ::Google::Chromeos::Moblab::V1beta1::ListModelsRequest

  # Converts hash and nil to an options object
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Customize the options with defaults
  metadata = @config.rpcs.list_models.metadata.to_h

  # Set x-goog-api-client and x-goog-user-project headers
  metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::Chromeos::Moblab::V1beta1::VERSION
  metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Implicit routing header: lets the backend route by the parent resource.
  header_params = {
    "parent" => request.parent
  }
  request_params_header = header_params.map { |k, v| "#{k}=#{v}" }.join("&")
  metadata[:"x-goog-request-params"] ||= request_params_header

  options.apply_defaults timeout:      @config.rpcs.list_models.timeout,
                         metadata:     metadata,
                         retry_policy: @config.rpcs.list_models.retry_policy

  options.apply_defaults timeout:      @config.timeout,
                         metadata:     @config.metadata,
                         retry_policy: @config.retry_policy

  @build_service_stub.call_rpc :list_models, request, options: options do |response, operation|
    # Wrap the raw response so callers can lazily enumerate across pages.
    response = ::Gapic::PagedEnumerable.new @build_service_stub, :list_models, request, response, operation, options
    yield response, operation if block_given?
    return response
  end
rescue ::GRPC::BadStatus => e
  raise ::Google::Cloud::Error.from_error(e)
end
##
# Lists all builds for the given build target and model in descending order
# for the milestones and build versions.
#
# @overload list_builds(request, options = nil)
# Pass arguments to `list_builds` via a request object, either of type
# {::Google::Chromeos::Moblab::V1beta1::ListBuildsRequest} or an equivalent Hash.
#
# @param request [::Google::Chromeos::Moblab::V1beta1::ListBuildsRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
# Overrides the default settings for this call, e.g, timeout, retries, etc. Optional.
#
# @overload list_builds(parent: nil, page_size: nil, page_token: nil, filter: nil, read_mask: nil, group_by: nil)
# Pass arguments to `list_builds` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param parent [::String]
# Required. The full resource name of the model. The model id is the same as
# the build target id for non-unified builds.
# For example,
# 'buildTargets/octopus/models/bobba'.
# @param page_size [::Integer]
# Optional. The number of builds to return in a page.
# @param page_token [::String]
# Optional. A page token, received from a previous `ListBuilds` call. Provide this to
# retrieve the subsequent page.
# @param filter [::String]
# Optional. Filter that specifies value constraints of fields. For example, the
# filter can be set as "filter='milestone=milestones/80'" to only select
# builds in milestone 80.
# @param read_mask [::Google::Protobuf::FieldMask, ::Hash]
# Optional. Read mask that specifies which Build fields to return. If empty, all Build
# fields will be returned.
# Valid fields: name, milestone, build_version.
# For example, if the read_mask is set as "read_mask='milestone'", the
# ListBuilds will return a list of Builds object with only the milestone
# field.
# @param group_by [::Google::Protobuf::FieldMask, ::Hash]
# Optional. The operation that groups by all the Build fields specified in the read
# mask. The group_by field should be the same as the read_mask field in
# convention of SQL.
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Gapic::PagedEnumerable<::Google::Chromeos::Moblab::V1beta1::Build>]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Gapic::PagedEnumerable<::Google::Chromeos::Moblab::V1beta1::Build>]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
# Calls the ListBuilds RPC; see the YARD block above for the contract.
def list_builds request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  request = ::Gapic::Protobuf.coerce request, to: ::Google::Chromeos::Moblab::V1beta1::ListBuildsRequest

  # Converts hash and nil to an options object
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Customize the options with defaults
  metadata = @config.rpcs.list_builds.metadata.to_h

  # Set x-goog-api-client and x-goog-user-project headers
  metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::Chromeos::Moblab::V1beta1::VERSION
  metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Implicit routing header: lets the backend route by the parent resource.
  header_params = {
    "parent" => request.parent
  }
  request_params_header = header_params.map { |k, v| "#{k}=#{v}" }.join("&")
  metadata[:"x-goog-request-params"] ||= request_params_header

  options.apply_defaults timeout:      @config.rpcs.list_builds.timeout,
                         metadata:     metadata,
                         retry_policy: @config.rpcs.list_builds.retry_policy

  options.apply_defaults timeout:      @config.timeout,
                         metadata:     @config.metadata,
                         retry_policy: @config.retry_policy

  @build_service_stub.call_rpc :list_builds, request, options: options do |response, operation|
    # Wrap the raw response so callers can lazily enumerate across pages.
    response = ::Gapic::PagedEnumerable.new @build_service_stub, :list_builds, request, response, operation, options
    yield response, operation if block_given?
    return response
  end
rescue ::GRPC::BadStatus => e
  raise ::Google::Cloud::Error.from_error(e)
end
##
# Checks the stage status for a given build artifact in a partner Google
# Cloud Storage bucket.
#
# @overload check_build_stage_status(request, options = nil)
# Pass arguments to `check_build_stage_status` via a request object, either of type
# {::Google::Chromeos::Moblab::V1beta1::CheckBuildStageStatusRequest} or an equivalent Hash.
#
# @param request [::Google::Chromeos::Moblab::V1beta1::CheckBuildStageStatusRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
# Overrides the default settings for this call, e.g, timeout, retries, etc. Optional.
#
# @overload check_build_stage_status(name: nil, filter: nil)
# Pass arguments to `check_build_stage_status` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param name [::String]
# Required. The full resource name of the build artifact.
# For example,
# 'buildTargets/octopus/models/bobba/builds/12607.6.0/artifacts/chromeos-moblab-peng-staging'.
# @param filter [::String]
# Optional. Filter that specifies value constraints of fields. For example, the
# filter can be set as "filter='type=release'" to only check the release
# builds.
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Google::Chromeos::Moblab::V1beta1::CheckBuildStageStatusResponse]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Google::Chromeos::Moblab::V1beta1::CheckBuildStageStatusResponse]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
# Calls the CheckBuildStageStatus RPC (unary, not paged); see the YARD
# block above for the contract.
def check_build_stage_status request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  request = ::Gapic::Protobuf.coerce request, to: ::Google::Chromeos::Moblab::V1beta1::CheckBuildStageStatusRequest

  # Converts hash and nil to an options object
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Customize the options with defaults
  metadata = @config.rpcs.check_build_stage_status.metadata.to_h

  # Set x-goog-api-client and x-goog-user-project headers
  metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::Chromeos::Moblab::V1beta1::VERSION
  metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Implicit routing header: lets the backend route by the artifact name.
  header_params = {
    "name" => request.name
  }
  request_params_header = header_params.map { |k, v| "#{k}=#{v}" }.join("&")
  metadata[:"x-goog-request-params"] ||= request_params_header

  options.apply_defaults timeout:      @config.rpcs.check_build_stage_status.timeout,
                         metadata:     metadata,
                         retry_policy: @config.rpcs.check_build_stage_status.retry_policy

  options.apply_defaults timeout:      @config.timeout,
                         metadata:     @config.metadata,
                         retry_policy: @config.retry_policy

  @build_service_stub.call_rpc :check_build_stage_status, request, options: options do |response, operation|
    yield response, operation if block_given?
    return response
  end
rescue ::GRPC::BadStatus => e
  raise ::Google::Cloud::Error.from_error(e)
end
##
# Stages a given build artifact from a internal Google Cloud Storage bucket
# to a partner Google Cloud Storage bucket. If any of objects has already
# been copied, it will overwrite the previous objects. Operation <response:
# {::Google::Chromeos::Moblab::V1beta1::StageBuildResponse StageBuildResponse},
# metadata: {::Google::Chromeos::Moblab::V1beta1::StageBuildMetadata StageBuildMetadata}>
#
# @overload stage_build(request, options = nil)
# Pass arguments to `stage_build` via a request object, either of type
# {::Google::Chromeos::Moblab::V1beta1::StageBuildRequest} or an equivalent Hash.
#
# @param request [::Google::Chromeos::Moblab::V1beta1::StageBuildRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
# Overrides the default settings for this call, e.g, timeout, retries, etc. Optional.
#
# @overload stage_build(name: nil, filter: nil)
# Pass arguments to `stage_build` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param name [::String]
# Required. The full resource name of the build artifact.
# For example,
# 'buildTargets/octopus/models/bobba/builds/12607.6.0/artifacts/chromeos-moblab-peng-staging'.
# @param filter [::String]
# Optional. Filter that specifies value constraints of fields. For example, the
# filter can be set as "filter='type=release'" to only check the release
# builds.
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Gapic::Operation]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Gapic::Operation]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
# Calls the StageBuild RPC, returning a long-running operation wrapper;
# see the YARD block above for the contract.
def stage_build request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  request = ::Gapic::Protobuf.coerce request, to: ::Google::Chromeos::Moblab::V1beta1::StageBuildRequest

  # Converts hash and nil to an options object
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Customize the options with defaults
  metadata = @config.rpcs.stage_build.metadata.to_h

  # Set x-goog-api-client and x-goog-user-project headers
  metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::Chromeos::Moblab::V1beta1::VERSION
  metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Implicit routing header: lets the backend route by the artifact name.
  header_params = {
    "name" => request.name
  }
  request_params_header = header_params.map { |k, v| "#{k}=#{v}" }.join("&")
  metadata[:"x-goog-request-params"] ||= request_params_header

  options.apply_defaults timeout:      @config.rpcs.stage_build.timeout,
                         metadata:     metadata,
                         retry_policy: @config.rpcs.stage_build.retry_policy

  options.apply_defaults timeout:      @config.timeout,
                         metadata:     @config.metadata,
                         retry_policy: @config.retry_policy

  @build_service_stub.call_rpc :stage_build, request, options: options do |response, operation|
    # Wrap the raw LRO in a Gapic::Operation tied to the operations client
    # so callers can poll / await completion.
    response = ::Gapic::Operation.new response, @operations_client, options: options
    yield response, operation if block_given?
    return response
  end
rescue ::GRPC::BadStatus => e
  raise ::Google::Cloud::Error.from_error(e)
end
##
# Finds the most stable build for the given build target. The definition of
# the most stable build is determined by evaluating the following rules in
# order until one is true. If none are true, then there is no stable build
# and it will return an empty response.
#
# Evaluation rules:
# 1. Stable channel build with label “Live”
# 2. Beta channel build with label “Live”
# 3. Dev channel build with label “Live”
# 4. Most recent stable channel build with build status Pass
# 5. Most recent beta channel build with build status Pass
# 6. Most recent dev channel build with build status Pass
#
# @overload find_most_stable_build(request, options = nil)
# Pass arguments to `find_most_stable_build` via a request object, either of type
# {::Google::Chromeos::Moblab::V1beta1::FindMostStableBuildRequest} or an equivalent Hash.
#
# @param request [::Google::Chromeos::Moblab::V1beta1::FindMostStableBuildRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
# Overrides the default settings for this call, e.g, timeout, retries, etc. Optional.
#
# @overload find_most_stable_build(build_target: nil)
# Pass arguments to `find_most_stable_build` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param build_target [::String]
# Required. The full resource name of the build target.
# For example,
# 'buildTargets/octopus'.
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Google::Chromeos::Moblab::V1beta1::FindMostStableBuildResponse]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Google::Chromeos::Moblab::V1beta1::FindMostStableBuildResponse]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
# Calls the FindMostStableBuild RPC (unary); see the YARD block above
# for the evaluation rules and contract.
def find_most_stable_build request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  request = ::Gapic::Protobuf.coerce request, to: ::Google::Chromeos::Moblab::V1beta1::FindMostStableBuildRequest

  # Converts hash and nil to an options object
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Customize the options with defaults
  metadata = @config.rpcs.find_most_stable_build.metadata.to_h

  # Set x-goog-api-client and x-goog-user-project headers
  metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::Chromeos::Moblab::V1beta1::VERSION
  metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Implicit routing header: lets the backend route by the build target.
  header_params = {
    "build_target" => request.build_target
  }
  request_params_header = header_params.map { |k, v| "#{k}=#{v}" }.join("&")
  metadata[:"x-goog-request-params"] ||= request_params_header

  options.apply_defaults timeout:      @config.rpcs.find_most_stable_build.timeout,
                         metadata:     metadata,
                         retry_policy: @config.rpcs.find_most_stable_build.retry_policy

  options.apply_defaults timeout:      @config.timeout,
                         metadata:     @config.metadata,
                         retry_policy: @config.retry_policy

  @build_service_stub.call_rpc :find_most_stable_build, request, options: options do |response, operation|
    yield response, operation if block_given?
    return response
  end
rescue ::GRPC::BadStatus => e
  raise ::Google::Cloud::Error.from_error(e)
end
##
# Configuration class for the BuildService API.
#
# This class represents the configuration for BuildService,
# providing control over timeouts, retry behavior, logging, transport
# parameters, and other low-level controls. Certain parameters can also be
# applied individually to specific RPCs. See
# {::Google::Chromeos::Moblab::V1beta1::BuildService::Client::Configuration::Rpcs}
# for a list of RPCs that can be configured independently.
#
# Configuration can be applied globally to all clients, or to a single client
# on construction.
#
# @example
#
# # Modify the global config, setting the timeout for
# # list_build_targets to 20 seconds,
# # and all remaining timeouts to 10 seconds.
# ::Google::Chromeos::Moblab::V1beta1::BuildService::Client.configure do |config|
# config.timeout = 10.0
# config.rpcs.list_build_targets.timeout = 20.0
# end
#
# # Apply the above configuration only to a new client.
# client = ::Google::Chromeos::Moblab::V1beta1::BuildService::Client.new do |config|
# config.timeout = 10.0
# config.rpcs.list_build_targets.timeout = 20.0
# end
#
# @!attribute [rw] endpoint
# The hostname or hostname:port of the service endpoint.
# Defaults to `"chromeosmoblab.googleapis.com"`.
# @return [::String]
# @!attribute [rw] credentials
# Credentials to send with calls. You may provide any of the following types:
# * (`String`) The path to a service account key file in JSON format
# * (`Hash`) A service account key as a Hash
# * (`Google::Auth::Credentials`) A googleauth credentials object
# (see the [googleauth docs](https://googleapis.dev/ruby/googleauth/latest/index.html))
# * (`Signet::OAuth2::Client`) A signet oauth2 client object
# (see the [signet docs](https://googleapis.dev/ruby/signet/latest/Signet/OAuth2/Client.html))
# * (`GRPC::Core::Channel`) a gRPC channel with included credentials
# * (`GRPC::Core::ChannelCredentials`) a gRPC credentails object
# * (`nil`) indicating no credentials
# @return [::Object]
# @!attribute [rw] scope
# The OAuth scopes
# @return [::Array<::String>]
# @!attribute [rw] lib_name
# The library name as recorded in instrumentation and logging
# @return [::String]
# @!attribute [rw] lib_version
# The library version as recorded in instrumentation and logging
# @return [::String]
# @!attribute [rw] channel_args
# Extra parameters passed to the gRPC channel. Note: this is ignored if a
# `GRPC::Core::Channel` object is provided as the credential.
# @return [::Hash]
# @!attribute [rw] interceptors
# An array of interceptors that are run before calls are executed.
# @return [::Array<::GRPC::ClientInterceptor>]
# @!attribute [rw] timeout
# The call timeout in seconds.
# @return [::Numeric]
# @!attribute [rw] metadata
# Additional gRPC headers to be sent with the call.
# @return [::Hash{::Symbol=>::String}]
# @!attribute [rw] retry_policy
# The retry policy. The value is a hash with the following keys:
# * `:initial_delay` (*type:* `Numeric`) - The initial delay in seconds.
# * `:max_delay` (*type:* `Numeric`) - The max delay in seconds.
# * `:multiplier` (*type:* `Numeric`) - The incremental backoff multiplier.
# * `:retry_codes` (*type:* `Array<String>`) - The error codes that should
# trigger a retry.
# @return [::Hash]
# @!attribute [rw] quota_project
# A separate project against which to charge quota.
# @return [::String]
#
# Configuration value store for the client. Unset values fall back to
# the parent configuration supplied at construction (see Client.configure).
class Configuration
  extend ::Gapic::Config

  config_attr :endpoint,      "chromeosmoblab.googleapis.com", ::String
  # Credentials accept several representations; the validator below
  # enumerates the allowed classes (gRPC ones only when gRPC is loaded).
  config_attr :credentials,   nil do |value|
    allowed = [::String, ::Hash, ::Proc, ::Symbol, ::Google::Auth::Credentials, ::Signet::OAuth2::Client, nil]
    allowed += [::GRPC::Core::Channel, ::GRPC::Core::ChannelCredentials] if defined? ::GRPC
    allowed.any? { |klass| klass === value }
  end
  config_attr :scope,         nil, ::String, ::Array, nil
  config_attr :lib_name,      nil, ::String, nil
  config_attr :lib_version,   nil, ::String, nil
  config_attr(:channel_args,  { "grpc.service_config_disable_resolution" => 1 }, ::Hash, nil)
  config_attr :interceptors,  nil, ::Array, nil
  config_attr :timeout,       nil, ::Numeric, nil
  config_attr :metadata,      nil, ::Hash, nil
  config_attr :retry_policy,  nil, ::Hash, ::Proc, nil
  config_attr :quota_project, nil, ::String, nil

  # @private
  def initialize parent_config = nil
    @parent_config = parent_config unless parent_config.nil?

    yield self if block_given?
  end

  ##
  # Configurations for individual RPCs
  # @return [Rpcs]
  #
  def rpcs
    @rpcs ||= begin
      parent_rpcs = nil
      parent_rpcs = @parent_config.rpcs if defined?(@parent_config) && @parent_config.respond_to?(:rpcs)
      Rpcs.new parent_rpcs
    end
  end

  ##
  # Configuration RPC class for the BuildService API.
  #
  # Includes fields providing the configuration for each RPC in this service.
  # Each configuration object is of type `Gapic::Config::Method` and includes
  # the following configuration fields:
  #
  #  *  `timeout` (*type:* `Numeric`) - The call timeout in seconds
  #  *  `metadata` (*type:* `Hash{Symbol=>String}`) - Additional gRPC headers
  #  *  `retry_policy` (*type:* `Hash`) - The retry policy. The policy fields
  #     include the following keys:
  #      *  `:initial_delay` (*type:* `Numeric`) - The initial delay in seconds.
  #      *  `:max_delay` (*type:* `Numeric`) - The max delay in seconds.
  #      *  `:multiplier` (*type:* `Numeric`) - The incremental backoff multiplier.
  #      *  `:retry_codes` (*type:* `Array<String>`) - The error codes that should
  #         trigger a retry.
  #
  class Rpcs
    ##
    # RPC-specific configuration for `list_build_targets`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :list_build_targets
    ##
    # RPC-specific configuration for `list_models`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :list_models
    ##
    # RPC-specific configuration for `list_builds`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :list_builds
    ##
    # RPC-specific configuration for `check_build_stage_status`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :check_build_stage_status
    ##
    # RPC-specific configuration for `stage_build`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :stage_build
    ##
    # RPC-specific configuration for `find_most_stable_build`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :find_most_stable_build

    # @private
    # Each per-RPC config chains to the corresponding parent RPC config
    # when one is available.
    def initialize parent_rpcs = nil
      list_build_targets_config = parent_rpcs.list_build_targets if parent_rpcs.respond_to? :list_build_targets
      @list_build_targets = ::Gapic::Config::Method.new list_build_targets_config
      list_models_config = parent_rpcs.list_models if parent_rpcs.respond_to? :list_models
      @list_models = ::Gapic::Config::Method.new list_models_config
      list_builds_config = parent_rpcs.list_builds if parent_rpcs.respond_to? :list_builds
      @list_builds = ::Gapic::Config::Method.new list_builds_config
      check_build_stage_status_config = parent_rpcs.check_build_stage_status if parent_rpcs.respond_to? :check_build_stage_status
      @check_build_stage_status = ::Gapic::Config::Method.new check_build_stage_status_config
      stage_build_config = parent_rpcs.stage_build if parent_rpcs.respond_to? :stage_build
      @stage_build = ::Gapic::Config::Method.new stage_build_config
      find_most_stable_build_config = parent_rpcs.find_most_stable_build if parent_rpcs.respond_to? :find_most_stable_build
      @find_most_stable_build = ::Gapic::Config::Method.new find_most_stable_build_config

      yield self if block_given?
    end
  end
end
end
end
end
end
end
end
|
import React, { useState } from 'react';
function App() {
const [items, setItems] = useState([]);
const addItem = () => {
const newItems = [
...items
];
newItems.push({
id: Math.random().toString(36).substr(2, 9) ,
text: ''
});
setItems(newItems);
}
const deleteItem = (id) => {
const newItems = items.filter((item) => item.id !== id);
setItems(newItems);
}
return (
<div className="App">
<h1>Checklist</h1>
<ul>
{items.map((item) => (
<li key={item.id}>
<input
placeholder="New task"
value={item.text}
onChange={(e) => console.log(e.target.value)}
/>
<button onClick={() => deleteItem(item.id)}>Delete</button>
</li>
))}
</ul>
<button onClick={addItem}>New Item</button>
</div>
);
}
export default App; |
<reponame>tssga-arch/myotc
#!/usr/bin/env python3
#
# Handling Nuking of resources
#
# ~ import openstack
###################################################################
#
# Nuking
#
###################################################################
def cname_match(prefix, rs):
    """Return True if any record value in ``rs`` starts with ``prefix``."""
    return any(record.startswith(prefix) for record in rs)
def nuke(c, sid, doIt=False, def_priv_zone='localnet', def_public_zone='public.zone'):
    """Tear down every resource belonging to the stack named ``sid``.

    Removes, in dependency order: automatic DNS records, floating IPs,
    servers, left-over volumes, subnets, networks, the private DNS zone,
    routers (VPCs) and security groups.  Resources are matched by the name
    prefix ``"<sid>-"`` (public-zone DNS records also match on ``sid``).

    :param c: an openstacksdk ``Connection``-like client.
    :param sid: stack identifier used as the resource-name prefix.
    :param doIt: unless True, only print what WOULD be deleted (dry run).
    :param def_priv_zone: suffix of the private DNS zone (``<sid>.<zone>``).
    :param def_public_zone: name of the shared public DNS zone.
    """
    dryRun = not doIt
    prefix = sid + '-'

    # --- DNS: private zone --------------------------------------------------
    zone_name = '{}.{}'.format(sid, def_priv_zone)
    zn = c.dns.find_zone(zone_name, zone_type='private')
    if zn:
        if dryRun:
            print('WONT remove DNS internal zone {}'.format(zone_name))
        else:
            c.dns.delete_zone(zn)
            print('Removed DNS internal zone {}'.format(zone_name))

    # --- DNS: records in the shared public zone -----------------------------
    zone_name = def_public_zone
    zn = c.dns.find_zone(zone_name)
    if zn is not None:
        for rs in c.dns.recordsets(zn):
            # Records named after the stack itself.
            if 'name' in rs and rs['name'].startswith(sid):
                if dryRun:
                    print('WONT remove DNS record for {}'.format(rs['name']))
                else:
                    c.dns.delete_recordset(rs, zn)
                    print('Removed DNS record for {}'.format(rs['name']))
                continue
            # CNAMEs whose targets point at stack resources.
            if 'type' in rs and rs['type'] == 'CNAME' and cname_match(prefix, rs['records']):
                if dryRun:
                    print('WONT remove DNS record for {}'.format(rs['name']))
                else:
                    c.dns.delete_recordset(rs, zn)
                    print('Removed DNS record for {}'.format(rs['name']))

    # --- Floating IPs -------------------------------------------------------
    # Collect the port ids of the stack's server interfaces so the floating
    # IPs attached to them can be released first.
    port_ids = {}
    for s in c.compute.servers():
        if 'name' in s and s['name'].startswith(sid):
            i = 0
            for interface in c.compute.server_interfaces(s):
                port_ids[interface['port_id']] = '{server}-if{port}'.format(server=s['name'], port=i)
                i += 1  # BUG FIX: was "++i", a no-op in Python; every port got labelled if0
    for ip in c.network.ips():
        if ip.port_id in port_ids:
            if dryRun:
                print('WONT release IP {ip} from server port {port}'.format(ip=ip.floating_ip_address, port=port_ids[ip.port_id]))
            else:
                c.network.delete_ip(ip)
                print('Released IP {ip} from server port {port}'.format(ip=ip.floating_ip_address, port=port_ids[ip.port_id]))

    # --- Servers ------------------------------------------------------------
    for s in c.compute.servers():
        if 'name' in s and s['name'].startswith(prefix):
            if dryRun:
                print('WONT delete vm {}'.format(s['name']))
            else:
                c.compute.delete_server(s['id'])
                # Wait so dependent volumes/networks below can be removed.
                c.compute.wait_for_delete(s)
                print('Deleted vm {}'.format(s['name']))

    # --- Left-over volumes --------------------------------------------------
    for v in c.block_store.volumes():
        if 'name' in v and v['name'].startswith(prefix):
            if dryRun:
                print('WONT delete volume {}'.format(v['name']))
            else:
                c.block_store.delete_volume(v)
                print('Delete volume {}'.format(v['name']))

    # --- Subnets (detach from the stack's routers first) --------------------
    routers = []
    for r in c.network.routers():
        if 'name' in r and r['name'].startswith(prefix):
            routers.append(r)
    for sn in c.network.subnets():
        if 'name' in sn and sn['name'].startswith(prefix):
            if dryRun:
                print('WONT delete subnet {}'.format(sn['name']))
            else:
                for r in routers:
                    # NOTE(review): raises if the subnet is not attached to
                    # this router (a try/except was commented out in the
                    # original) -- confirm whether that case can occur.
                    c.network.remove_interface_from_router(r.id, sn.id)
                    print('Disconnected vpc {vpc} from subnet {sn}'.format(vpc=r.name, sn=sn.name))
                c.network.delete_subnet(sn['id'])
                print('Deleted subnet {}'.format(sn['name']))

    # --- Networks -----------------------------------------------------------
    for n in c.network.networks():
        if 'name' in n and n['name'].startswith(prefix):
            if dryRun:
                print('WONT delete net {}'.format(n['name']))
            else:
                c.network.delete_network(n['id'])
                print('Deleted net {}'.format(n['name']))

    # --- Private DNS zone ---------------------------------------------------
    # BUG FIX: the format string was '{}.' with two arguments, producing
    # "<sid>." instead of "<sid>.<def_priv_zone>" so the zone was never found.
    zname = '{}.{}'.format(sid, def_priv_zone)
    idnsz = c.dns.find_zone(zname, zone_type='private')
    if idnsz:
        if dryRun:
            print('WONT delete internal DNS zone {}'.format(zname))
        else:
            c.dns.delete_zone(idnsz)
            print('Deleted internal DNS zone {}'.format(zname))

    # --- Routers (VPCs) -----------------------------------------------------
    for r in c.network.routers():
        if 'name' in r and r['name'].startswith(prefix):
            if dryRun:
                print('WONT delete vpc {}'.format(r['name']))
            else:
                c.network.delete_router(r['id'])
                print('Deleted vpc {}'.format(r['name']))

    # --- Security groups ----------------------------------------------------
    for g in c.network.security_groups():
        if 'name' in g and g['name'].startswith(prefix):
            if dryRun:
                print('WONT delete sg {}'.format(g['name']))
            else:
                c.network.delete_security_group(g['id'])
                print('Deleted sg {}'.format(g['name']))

    print('DONE')
|
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <openssl/rand.h>

/*
 * Read a count from stdin and print that many random ints produced by
 * OpenSSL's cryptographic RAND_bytes().
 */
int main(void) {
    int numRandomNumbers;

    printf("Enter the number of random numbers to generate: ");
    /* Reject non-numeric input and nonsensical counts before sizing the
     * buffer: the original fed an unchecked value into a VLA, which is
     * undefined for n <= 0 and can overflow the stack for large n. */
    if (scanf("%d", &numRandomNumbers) != 1 || numRandomNumbers <= 0) {
        fprintf(stderr, "Invalid count\n");
        return 1;
    }

    unsigned char *randomBuffer = malloc((size_t)numRandomNumbers * sizeof(int));
    if (randomBuffer == NULL) {
        fprintf(stderr, "Out of memory\n");
        return 1;
    }

    /* RAND_bytes returns 1 on success. */
    if (RAND_bytes(randomBuffer, (int)((size_t)numRandomNumbers * sizeof(int))) != 1) {
        fprintf(stderr, "Error generating random numbers\n");
        free(randomBuffer);
        return 1;
    }

    printf("Random numbers generated: ");
    for (int i = 0; i < numRandomNumbers; i++) {
        int randomNumber;
        /* memcpy instead of the original pointer cast: the cast violated
         * strict-aliasing and alignment rules. */
        memcpy(&randomNumber, randomBuffer + (size_t)i * sizeof(int), sizeof(int));
        printf("%d ", randomNumber);
    }
    printf("\n");

    free(randomBuffer);
    return 0;
}
<gh_stars>0
/*
* Copyright 2018 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.webauthn4j.data;
import com.webauthn4j.data.attestation.authenticator.AuthenticatorData;
import com.webauthn4j.data.client.CollectedClientData;
import com.webauthn4j.data.extension.authenticator.AuthenticationExtensionAuthenticatorOutput;
import com.webauthn4j.data.extension.client.AuthenticationExtensionClientOutput;
import com.webauthn4j.data.extension.client.AuthenticationExtensionsClientOutputs;
import org.junit.jupiter.api.Test;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.mock;
@SuppressWarnings("ConstantConditions")
class AuthenticationDataTest {
@Test
void getter_test() {
byte[] credentialId = new byte[32];
byte[] userHandle = new byte[32];
AuthenticatorData<AuthenticationExtensionAuthenticatorOutput> authenticatorData = null;
byte[] authenticatorDataBytes = new byte[64];
CollectedClientData collectedClientData = mock(CollectedClientData.class);
byte[] collectedClientDataBytes = new byte[128];
AuthenticationExtensionsClientOutputs<AuthenticationExtensionClientOutput> clientExtensions = null;
byte[] signature = new byte[32];
AuthenticationData instance = new AuthenticationData(
credentialId,
userHandle,
authenticatorData,
authenticatorDataBytes,
collectedClientData,
collectedClientDataBytes,
clientExtensions,
signature
);
assertThat(instance.getCredentialId()).isEqualTo(credentialId);
assertThat(instance.getUserHandle()).isEqualTo(userHandle);
assertThat(instance.getAuthenticatorData()).isEqualTo(authenticatorData);
assertThat(instance.getAuthenticatorDataBytes()).isEqualTo(authenticatorDataBytes);
assertThat(instance.getCollectedClientData()).isEqualTo(collectedClientData);
assertThat(instance.getCollectedClientDataBytes()).isEqualTo(collectedClientDataBytes);
assertThat(instance.getClientExtensions()).isEqualTo(clientExtensions);
assertThat(instance.getSignature()).isEqualTo(signature);
}
@Test
void equals_hashCode_test() {
byte[] credentialId = new byte[32];
byte[] userHandle = new byte[32];
AuthenticatorData<AuthenticationExtensionAuthenticatorOutput> authenticatorData = null;
byte[] authenticatorDataBytes = new byte[64];
CollectedClientData collectedClientData = mock(CollectedClientData.class);
byte[] collectedClientDataBytes = new byte[128];
AuthenticationExtensionsClientOutputs<AuthenticationExtensionClientOutput> authenticationExtensionsClientOutputs = null;
byte[] signature = new byte[32];
AuthenticationData instanceA = new AuthenticationData(
credentialId,
userHandle,
authenticatorData,
authenticatorDataBytes,
collectedClientData,
collectedClientDataBytes,
authenticationExtensionsClientOutputs,
signature
);
AuthenticationData instanceB = new AuthenticationData(
credentialId,
userHandle,
authenticatorData,
authenticatorDataBytes,
collectedClientData,
collectedClientDataBytes,
authenticationExtensionsClientOutputs,
signature
);
assertThat(instanceA)
.isEqualTo(instanceB)
.hasSameHashCodeAs(instanceB);
}
} |
package file
import (
"errors"
"fmt"
"os"
"os/exec"
"strconv"
"strings"
)
var (
IsDirectoryError = errors.New("IsDirectoryError: must open a file,not a dir")
ParamDimensionsError = errors.New("param `dimensions` is invalid")
)
const (
GrepContain = iota
GrepNotContain
)
// Open 打开一个文件,如果打开时报错或者读取的是一个目录而不是文件,均会返回error
// Open 打开一个文件 — opens the file at path for reading. It returns
// IsDirectoryError when path refers to a directory, and propagates any
// stat/open failure unchanged.
func Open(path string) (*os.File, error) {
	info, statErr := os.Stat(path)
	if statErr != nil {
		return nil, statErr
	}
	// A directory is not an acceptable target.
	if info.IsDir() {
		return nil, IsDirectoryError
	}
	return os.Open(path)
}
// FindNotExistKeys 在文件中查找不存在于维度列表中的维度
func FindNotExistKeys(path string, dataSource string, keys []string) (result []string, err error) {
if keys == nil || len(keys) == 0 {
// 可以使用斜杠对引号进行转义
// return nil, errors.New("param \"dimensions\" is invalid")
// 反引号表示原生字符串,不可转义,可包含多行
// return nil, errors.New(`param "dimensions" is invalid`)
return nil, ParamDimensionsError
}
// 根据数据源加工出对应的表名关键字
source := "gongshang"
if dataSource != "mini" {
source += "_" + dataSource
}
// 通过grep命令查找文件中是否包含关键字, 如果原始命中中包含空格,则必须分成多个参数传入
// 如果有多条命令,则必须每个命令使用一个exec
// bash -c `command string....` command string最好是外层用单引号,内层字符串用引号,
// 因为如果内层用单引号,Linux命令可能会把字符串识别成命令
for _, key := range keys {
cmd := exec.Command(
"bash", "-c",
fmt.Sprintf(`grep -q "dwd_ei_basic_%s_%s_ds/p_date" %s && echo %d || echo %d`,
source, strings.ToLower(key), path, GrepContain, GrepNotContain),
)
outputByte, err := cmd.Output()
if err != nil {
return nil, err
}
output := strings.TrimSpace(string(outputByte))
if output == strconv.Itoa(GrepNotContain) {
result = append(result, key)
}
}
return result, nil
}
|
export const FORM_UPDATE = '@BILLING_ADDRESS/FORM_UPDATE'
export const FORM_RESET = '@BILLING_ADDRESS/FORM_RESET'
export const FORM_SUBMITTING = '@BILLING_ADDRESS/FORM_SUBMITTING'
export const FORM_ERROR = '@BILLING_ADDRESS/FORM_ERROR'
export const FORM_SUBMITTING_FAILED = '@BILLING_ADDRESS/FORM_SUBMITTING_FAILED'
export const FORM_SUBMITTED = '@BILLING_ADDRESS/FORM_SUBMITTED'
export const updateForm = (name, value, reducerName) => ({
type: FORM_UPDATE,
name,
value,
reducerName
})
export const resetForm = (reducerName) => ({
type: FORM_RESET,
reducerName
})
export const submitForm = (reducerName) => ({
type: FORM_SUBMITTING,
reducerName
})
export const submittedForm = (reducerName) => ({
type: FORM_SUBMITTED,
reducerName
})
export const failedToSubmitForm = (error, reducerName) => ({
type: FORM_SUBMITTING_FAILED,
error,
reducerName
})
export const updateFormError = (errors, name, reducerName) => ({
type: FORM_ERROR,
errors,
name,
reducerName
})
export const submitFormData = (data, reducerName) => {
return (dispatch, getState) => {
return dispatch(submitForm())
}
}
|
<reponame>chqu1012/QueryDsl
/**
* generated by Xtext 2.19.0
*/
package de.dc.sql.lang.sqlQueryDsl;
import org.eclipse.emf.common.util.EList;
import org.eclipse.emf.ecore.EObject;
import org.eclipse.xtext.common.types.JvmFormalParameter;
/**
 * A named SQL query: it declares formal {@link JvmFormalParameter parameters}
 * and carries its SQL either inline ({@code statement}) or in an external
 * file ({@code filePath}, selected via {@code statementFromFile}).
 *
 * <p>The following features are supported:</p>
 * <ul>
 *   <li>{@link de.dc.sql.lang.sqlQueryDsl.Query#getName <em>Name</em>}</li>
 *   <li>{@link de.dc.sql.lang.sqlQueryDsl.Query#getParameters <em>Parameters</em>}</li>
 *   <li>{@link de.dc.sql.lang.sqlQueryDsl.Query#getStatement <em>Statement</em>}</li>
 *   <li>{@link de.dc.sql.lang.sqlQueryDsl.Query#isStatementFromFile <em>Statement From File</em>}</li>
 *   <li>{@link de.dc.sql.lang.sqlQueryDsl.Query#getFilePath <em>File Path</em>}</li>
 * </ul>
 *
 * @see de.dc.sql.lang.sqlQueryDsl.SqlQueryDslPackage#getQuery()
 * @model
 * @generated
 */
public interface Query extends EObject
{
  /**
   * The query's identifier.
   * @return the value of the '<em>Name</em>' attribute.
   * @see #setName(String)
   * @see de.dc.sql.lang.sqlQueryDsl.SqlQueryDslPackage#getQuery_Name()
   * @model
   * @generated
   */
  String getName();

  /**
   * Sets the query's identifier.
   * @param value the new value of the '<em>Name</em>' attribute.
   * @see #getName()
   * @generated
   */
  void setName(String value);

  /**
   * The formal parameters of the query, as a containment list of
   * {@link org.eclipse.xtext.common.types.JvmFormalParameter}.
   * @return the value of the '<em>Parameters</em>' containment reference list.
   * @see de.dc.sql.lang.sqlQueryDsl.SqlQueryDslPackage#getQuery_Parameters()
   * @model containment="true"
   * @generated
   */
  EList<JvmFormalParameter> getParameters();

  /**
   * The inline SQL text.
   * NOTE(review): presumably ignored when {@link #isStatementFromFile()} is
   * true -- confirm against the grammar/generator.
   * @return the value of the '<em>Statement</em>' attribute.
   * @see #setStatement(String)
   * @see de.dc.sql.lang.sqlQueryDsl.SqlQueryDslPackage#getQuery_Statement()
   * @model
   * @generated
   */
  String getStatement();

  /**
   * Sets the inline SQL text.
   * @param value the new value of the '<em>Statement</em>' attribute.
   * @see #getStatement()
   * @generated
   */
  void setStatement(String value);

  /**
   * Whether the SQL should be read from {@link #getFilePath()} instead of
   * the inline statement.
   * @return the value of the '<em>Statement From File</em>' attribute.
   * @see #setStatementFromFile(boolean)
   * @see de.dc.sql.lang.sqlQueryDsl.SqlQueryDslPackage#getQuery_StatementFromFile()
   * @model
   * @generated
   */
  boolean isStatementFromFile();

  /**
   * Sets whether the SQL is loaded from a file.
   * @param value the new value of the '<em>Statement From File</em>' attribute.
   * @see #isStatementFromFile()
   * @generated
   */
  void setStatementFromFile(boolean value);

  /**
   * Path of the file holding the SQL, used when
   * {@link #isStatementFromFile()} is set.
   * @return the value of the '<em>File Path</em>' attribute.
   * @see #setFilePath(String)
   * @see de.dc.sql.lang.sqlQueryDsl.SqlQueryDslPackage#getQuery_FilePath()
   * @model
   * @generated
   */
  String getFilePath();

  /**
   * Sets the path of the SQL file.
   * @param value the new value of the '<em>File Path</em>' attribute.
   * @see #getFilePath()
   * @generated
   */
  void setFilePath(String value);

} // Query
|
#!/usr/bin/env bash
# Launch the auth-server: run the prebuilt jar from deploy/build if present,
# otherwise build it with Maven first.
#
# Options:
#   --ssl   append the server-ssl.properties config location to the app args.

# Run relative to the repository root (one level above this script's dir).
# BUG FIX: quoted the command substitution so paths with spaces work.
cd "$(dirname "$0")" && cd ../

APP_NAME="auth-server"
APP_PROD_JAR="deploy/build/$APP_NAME-*.jar"
APP_ARGS=

while (( "$#" )); do
  case "$1" in
    --ssl)
      APP_ARGS="$APP_ARGS --spring.config.additional-location=classpath:server-ssl.properties"
      shift
      ;;
    *)
      # Ignore unrecognised arguments.
      shift
      ;;
  esac
done

# $APP_PROD_JAR is intentionally left unquoted below so the glob expands.
if ls $APP_PROD_JAR 1> /dev/null 2>&1; then
  java -jar $APP_PROD_JAR $APP_ARGS
else
  # NOTE(review): the final `mvn clean` assumes the jar was copied into
  # deploy/build by the package phase (outside target/) -- confirm.
  mvn clean && mvn package -DskipTests && mvn clean && \
  java -jar $APP_PROD_JAR $APP_ARGS
fi
|
package filters;
import db.DBManager;
import db.User;
import db.User.Role;
import java.io.IOException;
import java.sql.SQLException;
import java.util.List;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import utilities.Constants;
/**
 * Servlet filter guarding direct access to PDF resources: the request is
 * forwarded down the chain only when the session user has the BUYER role
 * and the requested PDF's file name is among the URLs the database
 * associates with that buyer; otherwise the client is redirected to the
 * login page.
 */
public class PdfAccessControl implements Filter {

    private FilterConfig filterConfig = null;
    // Resolved from the servlet context in init(); used to look up the
    // PDFs a buyer is entitled to.
    private DBManager manager;

    public PdfAccessControl() {
    }

    /**
     *
     * @param request The servlet request we are processing
     * @param response The servlet response we are creating
     * @param chain The filter chain we are processing
     *
     * @exception IOException if an input/output error occurs
     * @exception ServletException if a servlet error occurs
     */
    @Override
    public void doFilter(ServletRequest request, ServletResponse response,FilterChain chain)
            throws IOException, ServletException {
        try {
            if (request instanceof HttpServletRequest) {
                HttpServletRequest httpRequest = (HttpServletRequest)request;
                User user = (User)httpRequest.getSession().getAttribute(Constants.USER_ATTRIBUTE_NAME);
                if (user!=null && user.getRole() == Role.BUYER) {
                    List<String> urls = manager.getPdfUrlsByBuyer(user.getId());
                    // The requested PDF is identified by the last path
                    // segment of the request URI.
                    String requestedPdf = httpRequest.getRequestURI();
                    String [] list = requestedPdf.split("/");
                    requestedPdf = list[list.length-1];
                    if (urls.contains(requestedPdf)) {
                        chain.doFilter(request, response);
                        return;
                    }
                }
                // Not logged in, not a buyer, or not entitled to this PDF:
                // send the client back to the login page.
                HttpServletResponse httpResponse = (HttpServletResponse)response;
                httpResponse.sendRedirect("../"+Constants.SM_LOGIN);
            }
        } catch (SQLException ex) {
            // NOTE(review): on a database error the request is neither
            // forwarded nor redirected -- the response is left untouched.
            // Confirm whether an error page should be sent here instead.
            Logger.getLogger(PdfAccessControl.class.getName()).log(Level.SEVERE, null, ex);
        }
    }

    /**
     * Return the filter configuration object for this filter.
     */
    public FilterConfig getFilterConfig() {
        return (this.filterConfig);
    }

    /**
     * Set the filter configuration object for this filter.
     *
     * @param filterConfig The filter configuration object
     */
    public void setFilterConfig(FilterConfig filterConfig) {
        this.filterConfig = filterConfig;
    }

    /**
     * Destroy method for this filter
     */
    @Override
    public void destroy() {
    }

    /**
     * Init method for this filter: stores the config and resolves the
     * shared DBManager from the servlet context.
     */
    @Override
    public void init(FilterConfig filterConfig) {
        this.filterConfig = filterConfig;
        this.manager = (DBManager)filterConfig.getServletContext().getAttribute(Constants.DB_ATTRIBUTE_NAME);
    }
}
|
/*
Get a byte or bytes for one character based on UTF-8 out of the key board.
Remarks:
Call fn. rl() later.
Call fn. _getch twice to read <Ctrl-@>.
The first call returns (0x00) and the second call returns (0x03)..
Return the number of bytes for one character.
Return (~0x00) if a meta key starting from (0x00) is detected.
Refer at fn. cli_i.
*/
# define CAR
# include <conio.h>
# include <stdio.h>
# include "../../../incl/config.h"
/* Read one UTF-8 character from the keyboard into a freshly built buffer.
   Returns the number of bytes read for the character, 0 on error, or the
   result of cli_i_except() for meta keys (lead byte 0x00).
   NOTE(review): AND/OR/EQ/cat_bb/nbytechar/embed/rl/cli_i_except come from
   ../../../incl/config.h (not visible here) -- semantics below are inferred
   from usage and should be confirmed. */
signed(__cdecl cli_i_b(signed char(**argp))) {
    auto signed char *b;  /* NOTE(review): declared but never used here */
    auto signed r;
    auto signed short flag;
    auto signed SEQ_FLAG = (0x80);  /* presumably marks a continuation byte -- confirm */
    /* Require a valid pointer-to-pointer whose target is still null
       (the buffer is allocated below, presumably by cat_bb). */
    if(!argp) return(0x00);
    if(*argp) return(0x00);
    r = _getch();
    /* Append the raw byte to the buffer; on failure report and bail. */
    if(!(cat_bb(argp,r))) {
        printf("%s \n","<< Error at fn. cat_bb()");
        return(0x00);
    }
    /* A leading 0x00 signals a meta key: hand off to the exception path. */
    if(!(**argp)) return(cli_i_except(argp));
    // for a meta key starting from (0x00).
    AND(flag,0x00);
    /* nbytechar presumably maps the lead byte to the UTF-8 sequence
       length -- TODO confirm against config.h. */
    r = nbytechar(r);
    if(!r) OR(flag,0x01);
    if(EQ(SEQ_FLAG,r)) OR(flag,0x01);
    if(flag) {
        /* Invalid lead byte: terminate, release the buffer, reset *argp. */
        embed(0x00,*argp);
        rl(*argp);
        *argp = (0x00);
        return(0x00);
    }
    /* Read the remaining (r-1) continuation bytes; total = 1 + that. */
    return(0x01+(cli_i_b_r(--r,argp)));
}
|
#!/bin/sh
# Install PCMark 10 under Windows/Cygwin and generate the wrapper script
# used to launch benchmark runs.

# Unpack the benchmark distribution and run its installer.
unzip -o PCMark10-*.zip
/cygdrive/c/Windows/system32/cmd.exe /c pcmark10-setup.exe /install

# Remind the user that command-line automation needs a Professional license.
echo "This test profile requires a PCMark 10 Professional license for command-line automation. Before running this test make sure you activate your installation." > ~/install-message

# Emit the wrapper: it forwards its arguments to PCMark10Cmd.exe and exports
# the XML result to $LOG_FILE ($@ and $LOG_FILE are escaped so they expand
# when the wrapper runs, not now).
echo "#!/bin/bash
cd \"C:\Program Files\UL\PCMark 10\"
/cygdrive/c/Windows/system32/cmd.exe /c PCMark10Cmd.exe \$@ --out=out.pcm10-result --export-xml=\$LOG_FILE" > pcmark10
chmod +x pcmark10
|
"use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.ic_format_bold = void 0;
var ic_format_bold = {
"viewBox": "0 0 24 24",
"children": [{
"name": "path",
"attribs": {
"d": "M0 0h24v24H0z",
"fill": "none"
},
"children": []
}, {
"name": "path",
"attribs": {
"d": "M15.6 10.79c.97-.67 1.65-1.77 1.65-2.79 0-2.26-1.75-4-4-4H7v14h7.04c2.09 0 3.71-1.7 3.71-3.79 0-1.52-.86-2.82-2.15-3.42zM10 6.5h3c.83 0 1.5.67 1.5 1.5s-.67 1.5-1.5 1.5h-3v-3zm3.5 9H10v-3h3.5c.83 0 1.5.67 1.5 1.5s-.67 1.5-1.5 1.5z"
},
"children": []
}]
};
exports.ic_format_bold = ic_format_bold; |
import sys
import os
import subprocess
def apply_formatting_style(style, files):
    """Format each file in-place with clang-format-7 using a preset style.

    Prints a status line per file; invalid styles and missing files are
    reported and skipped rather than raised.
    """
    style_options = {
        "Google": "-style=Google",
        "LLVM": "-style=LLVM",
        "Chromium": "-style=Chromium",
        "Mozilla": "-style=Mozilla"
    }
    option = style_options.get(style)
    if option is None:
        print("Invalid style specified. Available styles: Google, LLVM, Chromium, Mozilla")
        return
    for path in files:
        if not os.path.isfile(path):
            print(f"File '{path}' not found.")
            continue
        try:
            subprocess.run(["clang-format-7", "-i", option, path], check=True)
        except subprocess.CalledProcessError as exc:
            print(f"Error formatting file '{path}': {exc}")
        else:
            print(f"File '{path}' formatted with {style} style.")
# CLI entry: format_files.py -style=<style> <file1> ... <fileN>
if len(sys.argv) < 3:
    print("Usage: python format_files.py -style=<style> <file1> <file2> ... <fileN>")
else:
    style_arg = sys.argv[1]
    files_arg = sys.argv[2:]
    # BUG FIX: a malformed first argument (no '=') used to crash with
    # IndexError on split("=")[1]; show the usage message instead.
    if "=" not in style_arg:
        print("Usage: python format_files.py -style=<style> <file1> <file2> ... <fileN>")
    else:
        style = style_arg.split("=")[1]
        # Drop anything that looks like a flag; the rest are file paths.
        files = [file for file in files_arg if not file.startswith("-")]
        apply_formatting_style(style, files)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.