text
stringlengths 27
775k
|
|---|
#!/bin/bash
# Install TLP power management (tlp + tlp-rdw) from the linrunner PPA.
# Run as a regular user: sudo is invoked internally only where needed.
set -e

# Refuse to run as root; exit non-zero so callers see the failure.
# (Previously exited with status 0, signalling success.)
if [[ "$EUID" == 0 ]]; then
    echo "Please run as normal user (w/o sudo)"
    exit 1
fi

echo
# Fixed typo: "Surfece" -> "Surface".
echo "Microsoft Surface devices can have issues with this"
echo "Do you want to install? (y/n)"
echo
read -n 1 ans
echo

if [[ $ans == "y" ]]; then
    sudo add-apt-repository ppa:linrunner/tlp
    sudo apt update
    sudo apt install -y tlp tlp-rdw
fi

echo
echo
echo "Do this manually if this is a THINKPAD:"
echo "- sudo apt install -y tp-smapi-dkms acpi-call-dkms"
echo
|
require 'json'
require 'yaml'
require_relative './../utils/kubectl'
module Kerbi
  # Persists kerbi variable state in a Kubernetes ConfigMap named "state".
  # Reads, patches, and lazily creates that ConfigMap via kubectl.
  class StateManager
    # Deep-merges newly supplied values into the stored variables and
    # applies the resulting ConfigMap manifest with kubectl.
    def patch
      create_configmap_if_missing
      patch_values = compile_patch
      config_map = get_configmap
      current_values = get_configmap_values(config_map)
      merged_vars = current_values.deep_merge(patch_values)
      updated_map = { **config_map, data: { variables: JSON.dump(merged_vars) } }
      manifest = YAML.dump(updated_map.deep_stringify_keys)
      Utils::Kubectl.apply_tmpfile(manifest, args_manager.get_kmd_arg_str)
    end

    # Builds the patch hash from value files and inline `k=v` assignments;
    # later sources override earlier ones on key collisions.
    def compile_patch
      from_files = args_manager.get_fnames.reduce({}) do |acc, fname|
        acc.deep_merge!(YAML.load_file(fname).deep_symbolize_keys)
      end
      args_manager.get_inlines.reduce(from_files) do |acc, assignment_str|
        acc.deep_merge!(Utils::Utils.str_assign_to_h(assignment_str))
      end
    end

    # Returns the currently stored variables, creating the ConfigMap
    # first if it does not exist yet.
    def get_crt_vars
      create_configmap_if_missing
      get_configmap_values(get_configmap)
    end

    # Decodes the JSON-encoded :variables entry of a ConfigMap hash,
    # defaulting to an empty hash when absent.
    def get_configmap_values(configmap)
      raw = configmap.dig(:data, :variables) || '{}'
      JSON.parse(raw).deep_symbolize_keys
    end

    # Fetches the "state" ConfigMap. Returns nil (raise_on_er: false) or
    # raises (raise_on_er: true) when it cannot be read.
    def get_configmap(raise_on_er: true)
      kmd = "get cm state #{args_manager.get_kmd_arg_str}"
      configmap = Utils::Kubectl.jkmd(kmd, print_err: true)
      return configmap if configmap
      raise if raise_on_er
    end

    # Creates the "state" ConfigMap when it is not already present.
    def create_configmap_if_missing
      return if get_configmap(raise_on_er: false)
      Utils::Kubectl.kmd("create cm state #{args_manager.get_kmd_arg_str}")
    end
  end
end
|
import type { IFile, IUploadRequest, IUploadTask } from "./types";
import { EUploadStatus } from './enums';
import * as api from './api';
import { updateTask, pushWorker, removeWorker, terminateWorkers, pushFile } from "./store";
import { inferFileType } from "./util";
export async function upload(task: IUploadTask) {
const worker = new Worker("/js/md5_worker.js");
pushWorker(worker);
worker.postMessage(task.file);
worker.onmessage = async (e: MessageEvent<string>) => {
const hash = e.data;
worker.terminate();
removeWorker(worker);
task.hash = hash;
const payload = buildUploadRequest(task);
try {
await preUpload(task, payload);
await uploadFile(task, hash);
} catch (e) {
console.error(e);
}
}
}
// Snapshot the fields the server needs to prepare an upload session.
// The target directory is URI-encoded as a single path string.
function buildUploadRequest(task: IUploadTask): IUploadRequest {
  const { file, targetDir, hash } = task;
  return {
    filename: file.name,
    size: file.size,
    target: encodeURIComponent(targetDir.join("/")),
    hash,
  };
}
// Asks the server for an upload session uuid and moves the task to
// `uploading`; on any failure the task is marked `failed` and the
// error is rethrown to the caller.
async function preUpload(task: IUploadTask, payload: IUploadRequest) {
  try {
    const uuid = (await api.post("/api/pre-upload", payload, false)) as string;
    task.uuid = uuid;
    updateTask(task.file, EUploadStatus.uploading, 0);
  } catch (e) {
    updateTask(task.file, EUploadStatus.failed, task.progress);
    throw e;
  }
}
// Streams the file chunk-by-chunk through an upload worker. The worker
// reports the next offset after each chunk; when the offset reaches the
// file size we finalize the upload server-side.
// NOTE: `hash` is kept for interface compatibility; the guard reads task.hash.
async function uploadFile(task: IUploadTask, hash: string) {
  // uuid/hash are set by preUpload and the md5 worker; bail out otherwise.
  if (!task.uuid || !task.hash) return;
  const file = task.file;
  let start = 0;
  const worker = new Worker("/js/upload_worker.js");
  pushWorker(worker);
  worker.postMessage({ task, start });

  worker.onmessage = async (e) => {
    start = e.data;
    const progress = start / file.size;
    if (progress >= 1) {
      updateTask(task.file, EUploadStatus.finishing, task.progress);
      worker.terminate();
      removeWorker(worker);
      try {
        await finishUpload(task);
      } catch (e) {
        // Bug fix: rethrowing here only produced an unhandled promise
        // rejection (uploadFile's own promise resolved long ago), and
        // finishUpload already marks the task failed — just log.
        console.error(e);
      }
    } else {
      updateTask(task.file, EUploadStatus.uploading, progress);
      worker.postMessage({ task, start });
    }
  };

  worker.onerror = (e) => {
    updateTask(task.file, EUploadStatus.failed, task.progress);
    worker.terminate();
    removeWorker(worker);
    // Bug fix: throwing from an onerror handler never reaches any caller;
    // surface the error in the console instead.
    console.error(e);
  };
}
// Tells the server all chunks are in, marks the task successful, and
// registers the newly uploaded file in the local store. Marks the task
// failed and rethrows on any error.
async function finishUpload(task: IUploadTask) {
  try {
    await api.post(`/api/finish-upload/${task.uuid}`, null, false);
    updateTask(task.file, EUploadStatus.success, task.progress);
    const uploaded: IFile = {
      dir: task.targetDir.join("/") || "/",
      file_type: inferFileType(task.file.name),
      size: task.file.size,
      filename: task.file.name,
      least_permission: 0,
    };
    pushFile(uploaded);
  } catch (e) {
    updateTask(task.file, EUploadStatus.failed, task.progress);
    throw e;
  }
}
// Cancels the given upload tasks: kills all in-flight workers if any task
// is still active, then tells the server to drop the upload sessions.
export async function cancelUploads(tasks: Array<IUploadTask>) {
  // Only tasks that completed pre-upload have a server-side session (uuid).
  const tasksToRemove = tasks.filter((t) => !!t.uuid);
  // Idiom fix: `.some(...)` instead of `.filter(...).length > 0`.
  const anyActive = tasksToRemove.some(
    (t) =>
      t.status === EUploadStatus.preparing ||
      t.status === EUploadStatus.uploading ||
      t.status === EUploadStatus.finishing
  );
  if (anyActive) {
    terminateWorkers();
  }
  const uuids = tasksToRemove.map((t) => t.uuid);
  // Removed a pointless `try { } catch (e) { throw e }` wrapper.
  await api.post(`/api/cancel-upload`, { uuids }, false);
}
|
require 'httparty'
require 'ampsy/version'
require 'ampsy/response'
require 'ampsy/client'
|
// Copyright 2018 Grove Enterprises LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! SQL Parser
use log::debug;
use super::dialect::keywords;
use super::dialect::Dialect;
use super::sqlast::*;
use super::sqltokenizer::*;
use std::error::Error;
/// Errors produced while parsing SQL, from either the tokenizer or the parser.
#[derive(Debug, Clone, PartialEq)]
pub enum ParserError {
    /// Error forwarded from the tokenizer (see `From<TokenizerError>` below).
    TokenizerError(String),
    /// Grammar-level error raised by the parser itself.
    ParserError(String),
}
// Use `Parser::expected` instead, if possible
// Wraps a message into `Err(ParserError::ParserError(..))`.
macro_rules! parser_err {
    ($MSG:expr) => {
        Err(ParserError::ParserError($MSG.to_string()))
    };
}
/// Whether a syntactic element (e.g. a parenthesized column list) may be
/// omitted (`Optional`) or is required (`Mandatory`).
#[derive(PartialEq)]
pub enum IsOptional {
    Optional,
    Mandatory,
}
use IsOptional::*;
// Allow `?` on tokenizer results inside the parser by converting the
// tokenizer's error into a `ParserError`, keeping its debug text.
impl From<TokenizerError> for ParserError {
    fn from(e: TokenizerError) -> Self {
        let message = format!("{:?}", e);
        ParserError::TokenizerError(message)
    }
}
// Human-readable rendering: both variants carry a plain message string,
// prefixed uniformly with "sql parser error:".
impl std::fmt::Display for ParserError {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        let msg = match self {
            ParserError::TokenizerError(s) | ParserError::ParserError(s) => s,
        };
        write!(f, "sql parser error: {}", msg)
    }
}
impl Error for ParserError {}
/// SQL Parser
///
/// Operates over a token stream produced by the tokenizer. Whitespace
/// tokens are retained in `tokens` and skipped by the accessor methods.
pub struct Parser {
    tokens: Vec<Token>,
    /// The index of the first unprocessed token in `self.tokens`
    index: usize,
}
impl Parser {
/// Parse the specified tokens
pub fn new(tokens: Vec<Token>) -> Self {
    // Cursor starts before the first token.
    Parser { index: 0, tokens }
}
/// Parse a SQL statement and produce an Abstract Syntax Tree (AST)
///
/// Tokenizes `sql` with the given `dialect`, then parses statement after
/// statement, requiring `;` between statements but tolerating leading,
/// trailing, and repeated delimiters.
pub fn parse_sql(dialect: &dyn Dialect, sql: String) -> Result<Vec<SQLStatement>, ParserError> {
    let mut tokenizer = Tokenizer::new(dialect, &sql);
    let tokens = tokenizer.tokenize()?;
    let mut parser = Parser::new(tokens);
    let mut stmts = Vec::new();
    // True once a statement was parsed and a `;` is required before the next.
    let mut expecting_statement_delimiter = false;
    debug!("Parsing sql '{}'...", sql);
    loop {
        // ignore empty statements (between successive statement delimiters)
        while parser.consume_token(&Token::SemiColon) {
            expecting_statement_delimiter = false;
        }
        if parser.peek_token().is_none() {
            // End of input: EOF acts as the final delimiter.
            break;
        } else if expecting_statement_delimiter {
            return parser.expected("end of statement", parser.peek_token());
        }
        let statement = parser.parse_statement()?;
        stmts.push(statement);
        expecting_statement_delimiter = true;
    }
    Ok(stmts)
}
/// Parse a single top-level statement (such as SELECT, INSERT, CREATE, etc.),
/// stopping before the statement separator, if any.
pub fn parse_statement(&mut self) -> Result<SQLStatement, ParserError> {
    match self.next_token() {
        Some(t) => match t {
            // A statement must begin with a recognized keyword.
            Token::SQLWord(ref w) if w.keyword != "" => match w.keyword.as_ref() {
                "SELECT" | "WITH" => {
                    // Queries are parsed whole; push the keyword back first.
                    self.prev_token();
                    Ok(SQLStatement::SQLQuery(Box::new(self.parse_query()?)))
                }
                "CREATE" => Ok(self.parse_create()?),
                "DROP" => Ok(self.parse_drop()?),
                "DELETE" => Ok(self.parse_delete()?),
                "INSERT" => Ok(self.parse_insert()?),
                "UPDATE" => Ok(self.parse_update()?),
                "ALTER" => Ok(self.parse_alter()?),
                "COPY" => Ok(self.parse_copy()?),
                "BEGIN" => Ok(self.parse_transaction()?),
                _ => parser_err!(format!(
                    "Unexpected keyword {:?} at the beginning of a statement",
                    w.to_string()
                )),
            },
            unexpected => self.expected(
                "a keyword at the beginning of a statement",
                Some(unexpected),
            ),
        },
        None => self.expected("SQL statement", None),
    }
}
/// Parse a new expression
pub fn parse_expr(&mut self) -> Result<ASTNode, ParserError> {
    // Precedence 0 admits every operator, so the full expression is consumed.
    let lowest = 0;
    self.parse_subexpr(lowest)
}
/// Parse tokens until the precedence changes
///
/// Precedence-climbing loop: parses a prefix expression, then repeatedly
/// folds in infix operators while they bind tighter than `precedence`.
pub fn parse_subexpr(&mut self, precedence: u8) -> Result<ASTNode, ParserError> {
    debug!("parsing expr");
    let mut expr = self.parse_prefix()?;
    debug!("prefix: {:?}", expr);
    loop {
        let next_precedence = self.get_next_precedence()?;
        debug!("next precedence: {:?}", next_precedence);
        if precedence >= next_precedence {
            break;
        }
        expr = self.parse_infix(expr, next_precedence)?;
    }
    Ok(expr)
}
/// Parse expression for DEFAULT clause in CREATE TABLE
///
/// Same precedence-climbing loop as `parse_subexpr`, except it also stops
/// before `NULL` / `NOT NULL`, which in a column definition belong to the
/// column's nullability clause, not the default expression.
pub fn parse_default_expr(&mut self, precedence: u8) -> Result<ASTNode, ParserError> {
    debug!("parsing expr");
    let mut expr = self.parse_prefix()?;
    debug!("prefix: {:?}", expr);
    loop {
        // stop parsing on `NULL` | `NOT NULL`
        match self.peek_token() {
            Some(Token::SQLWord(ref k)) if k.keyword == "NOT" || k.keyword == "NULL" => break,
            _ => {}
        }
        let next_precedence = self.get_next_precedence()?;
        debug!("next precedence: {:?}", next_precedence);
        if precedence >= next_precedence {
            break;
        }
        expr = self.parse_infix(expr, next_precedence)?;
    }
    Ok(expr)
}
/// Parse an expression prefix
///
/// Handles literals, unary operators, CASE/CAST/EXISTS/EXTRACT, identifiers
/// (simple, compound, qualified wildcard, or function call), and
/// parenthesized expressions/subqueries, plus an optional trailing COLLATE.
pub fn parse_prefix(&mut self) -> Result<ASTNode, ParserError> {
    let tok = self
        .next_token()
        .ok_or_else(|| ParserError::ParserError("Unexpected EOF".to_string()))?;
    let expr = match tok {
        Token::SQLWord(w) => match w.keyword.as_ref() {
            "TRUE" | "FALSE" | "NULL" => {
                // Boolean/NULL literals are handled by the value parser.
                self.prev_token();
                self.parse_sql_value()
            }
            "CASE" => self.parse_case_expression(),
            "CAST" => self.parse_cast_expression(),
            "DATE" => Ok(ASTNode::SQLValue(Value::Date(self.parse_literal_string()?))),
            "EXISTS" => self.parse_exists_expression(),
            "EXTRACT" => self.parse_extract_expression(),
            "NOT" => Ok(ASTNode::SQLUnary {
                operator: SQLOperator::Not,
                expr: Box::new(self.parse_subexpr(Self::UNARY_NOT_PREC)?),
            }),
            "TIME" => Ok(ASTNode::SQLValue(Value::Time(self.parse_literal_string()?))),
            "TIMESTAMP" => Ok(ASTNode::SQLValue(Value::Timestamp(
                self.parse_literal_string()?,
            ))),
            // Here `w` is a word, check if it's a part of a multi-part
            // identifier, a function call, or a simple identifier:
            _ => match self.peek_token() {
                Some(Token::LParen) | Some(Token::Period) => {
                    let mut id_parts: Vec<SQLIdent> = vec![w.as_sql_ident()];
                    let mut ends_with_wildcard = false;
                    while self.consume_token(&Token::Period) {
                        match self.next_token() {
                            Some(Token::SQLWord(w)) => id_parts.push(w.as_sql_ident()),
                            Some(Token::Mult) => {
                                // e.g. `alias.*` or `schema.table.*`
                                ends_with_wildcard = true;
                                break;
                            }
                            unexpected => {
                                return self
                                    .expected("an identifier or a '*' after '.'", unexpected);
                            }
                        }
                    }
                    if ends_with_wildcard {
                        Ok(ASTNode::SQLQualifiedWildcard(id_parts))
                    } else if self.consume_token(&Token::LParen) {
                        // Put the `(` back; `parse_function` expects to consume it.
                        self.prev_token();
                        self.parse_function(SQLObjectName(id_parts))
                    } else {
                        Ok(ASTNode::SQLCompoundIdentifier(id_parts))
                    }
                }
                _ => Ok(ASTNode::SQLIdentifier(w.as_sql_ident())),
            },
        }, // End of Token::SQLWord
        Token::Mult => Ok(ASTNode::SQLWildcard),
        tok @ Token::Minus | tok @ Token::Plus => {
            // Unary plus/minus.
            let operator = if tok == Token::Plus {
                SQLOperator::Plus
            } else {
                SQLOperator::Minus
            };
            Ok(ASTNode::SQLUnary {
                operator,
                expr: Box::new(self.parse_subexpr(Self::PLUS_MINUS_PREC)?),
            })
        }
        Token::Number(_)
        | Token::SingleQuotedString(_)
        | Token::NationalStringLiteral(_)
        | Token::HexStringLiteral(_) => {
            // Defer literal handling to the value parser.
            self.prev_token();
            self.parse_sql_value()
        }
        Token::LParen => {
            // Either a parenthesized subquery or a nested expression.
            let expr = if self.parse_keyword("SELECT") || self.parse_keyword("WITH") {
                self.prev_token();
                ASTNode::SQLSubquery(Box::new(self.parse_query()?))
            } else {
                ASTNode::SQLNested(Box::new(self.parse_expr()?))
            };
            self.expect_token(&Token::RParen)?;
            Ok(expr)
        }
        unexpected => self.expected("an expression", Some(unexpected)),
    }?;
    // Any prefix expression may be followed by a COLLATE clause.
    if self.parse_keyword("COLLATE") {
        Ok(ASTNode::SQLCollate {
            expr: Box::new(expr),
            collation: self.parse_object_name()?,
        })
    } else {
        Ok(expr)
    }
}
/// Parse a function call whose name was already consumed, starting at the
/// opening parenthesis: `name(` [ALL | DISTINCT] args `)` [OVER (...)].
pub fn parse_function(&mut self, name: SQLObjectName) -> Result<ASTNode, ParserError> {
    self.expect_token(&Token::LParen)?;
    let all = self.parse_keyword("ALL");
    let distinct = self.parse_keyword("DISTINCT");
    // ALL and DISTINCT are mutually exclusive quantifiers.
    if all && distinct {
        return parser_err!(format!(
            "Cannot specify both ALL and DISTINCT in function: {}",
            name.to_string(),
        ));
    }
    let args = self.parse_optional_args()?;
    let over = if self.parse_keyword("OVER") {
        // TBD: support window names (`OVER mywin`) in place of inline specification
        self.expect_token(&Token::LParen)?;
        let partition_by = if self.parse_keywords(vec!["PARTITION", "BY"]) {
            // a list of possibly-qualified column names
            self.parse_expr_list()?
        } else {
            vec![]
        };
        let order_by = if self.parse_keywords(vec!["ORDER", "BY"]) {
            self.parse_order_by_expr_list()?
        } else {
            vec![]
        };
        // `parse_window_frame` also consumes the closing `)` of the OVER clause.
        let window_frame = self.parse_window_frame()?;
        Some(SQLWindowSpec {
            partition_by,
            order_by,
            window_frame,
        })
    } else {
        None
    };
    Ok(ASTNode::SQLFunction(SQLFunction {
        name,
        args,
        over,
        distinct,
    }))
}
/// Parse an optional window frame (`ROWS`/`RANGE`/`GROUPS` spec) inside an
/// OVER clause; also consumes the clause's closing `)`.
pub fn parse_window_frame(&mut self) -> Result<Option<SQLWindowFrame>, ParserError> {
    let window_frame = match self.peek_token() {
        Some(Token::SQLWord(w)) => {
            let units = w.keyword.parse::<SQLWindowFrameUnits>()?;
            self.next_token();
            // `BETWEEN lo AND hi` yields both bounds; a bare bound is the start.
            let (start_bound, end_bound) = if self.parse_keyword("BETWEEN") {
                let start = self.parse_window_frame_bound()?;
                self.expect_keyword("AND")?;
                (start, Some(self.parse_window_frame_bound()?))
            } else {
                (self.parse_window_frame_bound()?, None)
            };
            Some(SQLWindowFrame {
                units,
                start_bound,
                end_bound,
            })
        }
        // No frame spec: only the closing paren of the OVER clause remains.
        Some(Token::RParen) => None,
        unexpected => return self.expected("'ROWS', 'RANGE', 'GROUPS', or ')'", unexpected),
    };
    self.expect_token(&Token::RParen)?;
    Ok(window_frame)
}
/// "CURRENT ROW" | ( (<positive number> | "UNBOUNDED") ("PRECEDING" | FOLLOWING) )
pub fn parse_window_frame_bound(&mut self) -> Result<SQLWindowFrameBound, ParserError> {
    if self.parse_keywords(vec!["CURRENT", "ROW"]) {
        return Ok(SQLWindowFrameBound::CurrentRow);
    }
    // `UNBOUNDED` maps to None; otherwise a literal row count is required.
    let rows = if self.parse_keyword("UNBOUNDED") {
        None
    } else {
        Some(self.parse_literal_uint()?)
    };
    if self.parse_keyword("PRECEDING") {
        Ok(SQLWindowFrameBound::Preceding(rows))
    } else if self.parse_keyword("FOLLOWING") {
        Ok(SQLWindowFrameBound::Following(rows))
    } else {
        self.expected("PRECEDING or FOLLOWING", self.peek_token())
    }
}
/// Parse a CASE expression, with `CASE` already consumed: an optional
/// operand, one or more `WHEN ... THEN ...` arms, an optional `ELSE`,
/// terminated by `END`.
pub fn parse_case_expression(&mut self) -> Result<ASTNode, ParserError> {
    let mut operand = None;
    // "simple" CASE has an operand before the first WHEN; "searched" CASE does not.
    if !self.parse_keyword("WHEN") {
        operand = Some(Box::new(self.parse_expr()?));
        self.expect_keyword("WHEN")?;
    }
    let mut conditions = vec![];
    let mut results = vec![];
    loop {
        conditions.push(self.parse_expr()?);
        self.expect_keyword("THEN")?;
        results.push(self.parse_expr()?);
        if !self.parse_keyword("WHEN") {
            break;
        }
    }
    let else_result = if self.parse_keyword("ELSE") {
        Some(Box::new(self.parse_expr()?))
    } else {
        None
    };
    self.expect_keyword("END")?;
    Ok(ASTNode::SQLCase {
        operand,
        conditions,
        results,
        else_result,
    })
}
/// Parse a SQL CAST function e.g. `CAST(expr AS FLOAT)`
pub fn parse_cast_expression(&mut self) -> Result<ASTNode, ParserError> {
    self.expect_token(&Token::LParen)?;
    let inner = self.parse_expr()?;
    self.expect_keyword("AS")?;
    let target_type = self.parse_data_type()?;
    self.expect_token(&Token::RParen)?;
    Ok(ASTNode::SQLCast {
        expr: Box::new(inner),
        data_type: target_type,
    })
}
/// Parse a SQL EXISTS expression e.g. `WHERE EXISTS(SELECT ...)`.
pub fn parse_exists_expression(&mut self) -> Result<ASTNode, ParserError> {
    self.expect_token(&Token::LParen)?;
    let subquery = self.parse_query()?;
    self.expect_token(&Token::RParen)?;
    Ok(ASTNode::SQLExists(Box::new(subquery)))
}
/// Parse `EXTRACT(<field> FROM <expr>)` with `EXTRACT` already consumed.
pub fn parse_extract_expression(&mut self) -> Result<ASTNode, ParserError> {
    self.expect_token(&Token::LParen)?;
    let tok = self.next_token();
    // The field must be one of the fixed date/time keywords.
    let field = if let Some(Token::SQLWord(ref k)) = tok {
        match k.keyword.as_ref() {
            "YEAR" => SQLDateTimeField::Year,
            "MONTH" => SQLDateTimeField::Month,
            "DAY" => SQLDateTimeField::Day,
            "HOUR" => SQLDateTimeField::Hour,
            "MINUTE" => SQLDateTimeField::Minute,
            "SECOND" => SQLDateTimeField::Second,
            _ => self.expected("Date/time field inside of EXTRACT function", tok)?,
        }
    } else {
        self.expected("Date/time field inside of EXTRACT function", tok)?
    };
    self.expect_keyword("FROM")?;
    let expr = self.parse_expr()?;
    self.expect_token(&Token::RParen)?;
    Ok(ASTNode::SQLExtract {
        field,
        expr: Box::new(expr),
    })
}
/// Parse an operator following an expression
///
/// `expr` is the already-parsed left operand; `precedence` is the binding
/// power of the operator about to be consumed (from `get_next_precedence`).
pub fn parse_infix(&mut self, expr: ASTNode, precedence: u8) -> Result<ASTNode, ParserError> {
    debug!("parsing infix");
    let tok = self.next_token().unwrap(); // safe as EOF's precedence is the lowest
    // First try the simple binary operators.
    let regular_binary_operator = match tok {
        Token::Eq => Some(SQLOperator::Eq),
        Token::Neq => Some(SQLOperator::NotEq),
        Token::Gt => Some(SQLOperator::Gt),
        Token::GtEq => Some(SQLOperator::GtEq),
        Token::Lt => Some(SQLOperator::Lt),
        Token::LtEq => Some(SQLOperator::LtEq),
        Token::Plus => Some(SQLOperator::Plus),
        Token::Minus => Some(SQLOperator::Minus),
        Token::Mult => Some(SQLOperator::Multiply),
        Token::Mod => Some(SQLOperator::Modulus),
        Token::Div => Some(SQLOperator::Divide),
        Token::SQLWord(ref k) => match k.keyword.as_ref() {
            "AND" => Some(SQLOperator::And),
            "OR" => Some(SQLOperator::Or),
            "LIKE" => Some(SQLOperator::Like),
            "NOT" => {
                // `NOT LIKE` is the only binary use of NOT handled here;
                // `NOT IN` / `NOT BETWEEN` fall through to the block below.
                if self.parse_keyword("LIKE") {
                    Some(SQLOperator::NotLike)
                } else {
                    None
                }
            }
            _ => None,
        },
        _ => None,
    };
    if let Some(op) = regular_binary_operator {
        Ok(ASTNode::SQLBinaryExpr {
            left: Box::new(expr),
            op,
            right: Box::new(self.parse_subexpr(precedence)?),
        })
    } else if let Token::SQLWord(ref k) = tok {
        // Keyword operators with non-binary shapes: IS [NOT] NULL,
        // [NOT] IN, [NOT] BETWEEN.
        match k.keyword.as_ref() {
            "IS" => {
                if self.parse_keyword("NULL") {
                    Ok(ASTNode::SQLIsNull(Box::new(expr)))
                } else if self.parse_keywords(vec!["NOT", "NULL"]) {
                    Ok(ASTNode::SQLIsNotNull(Box::new(expr)))
                } else {
                    self.expected("NULL or NOT NULL after IS", self.peek_token())
                }
            }
            "NOT" | "IN" | "BETWEEN" => {
                // Re-read from the keyword so the optional NOT is handled uniformly.
                self.prev_token();
                let negated = self.parse_keyword("NOT");
                if self.parse_keyword("IN") {
                    self.parse_in(expr, negated)
                } else if self.parse_keyword("BETWEEN") {
                    self.parse_between(expr, negated)
                } else {
                    self.expected("IN or BETWEEN after NOT", self.peek_token())
                }
            }
            // Can only happen if `get_precedence` got out of sync with this function
            _ => panic!("No infix parser for token {:?}", tok),
        }
    } else if Token::DoubleColon == tok {
        self.parse_pg_cast(expr)
    } else {
        // Can only happen if `get_precedence` got out of sync with this function
        panic!("No infix parser for token {:?}", tok)
    }
}
/// Parses the parens following the `[ NOT ] IN` operator
///
/// The parenthesized body is either a subquery (starting with SELECT/WITH)
/// or a plain expression list.
pub fn parse_in(&mut self, expr: ASTNode, negated: bool) -> Result<ASTNode, ParserError> {
    self.expect_token(&Token::LParen)?;
    let in_op = if self.parse_keyword("SELECT") || self.parse_keyword("WITH") {
        // Push the keyword back; `parse_query` parses the whole subquery.
        self.prev_token();
        ASTNode::SQLInSubquery {
            expr: Box::new(expr),
            subquery: Box::new(self.parse_query()?),
            negated,
        }
    } else {
        ASTNode::SQLInList {
            expr: Box::new(expr),
            list: self.parse_expr_list()?,
            negated,
        }
    };
    self.expect_token(&Token::RParen)?;
    Ok(in_op)
}
/// Parses `BETWEEN <low> AND <high>`, assuming the `BETWEEN` keyword was already consumed
pub fn parse_between(&mut self, expr: ASTNode, negated: bool) -> Result<ASTNode, ParserError> {
    // Bounds are parsed at BETWEEN's own precedence so that lower-precedence
    // operators (`AND`, `IS`, ...) terminate each bound expression.
    let lower = self.parse_subexpr(Self::BETWEEN_PREC)?;
    self.expect_keyword("AND")?;
    let upper = self.parse_subexpr(Self::BETWEEN_PREC)?;
    Ok(ASTNode::SQLBetween {
        expr: Box::new(expr),
        negated,
        low: Box::new(lower),
        high: Box::new(upper),
    })
}
/// Parse a postgresql casting style which is in the form of `expr::datatype`
pub fn parse_pg_cast(&mut self, expr: ASTNode) -> Result<ASTNode, ParserError> {
    // The `::` has already been consumed; only the target type remains.
    let target_type = self.parse_data_type()?;
    Ok(ASTNode::SQLCast {
        expr: Box::new(expr),
        data_type: target_type,
    })
}
// Precedence of unary NOT: lower than the comparison operators (20), so
// `NOT a = b` parses as `NOT (a = b)`.
const UNARY_NOT_PREC: u8 = 15;
// Precedence shared by BETWEEN, IN, LIKE and the comparison operators.
const BETWEEN_PREC: u8 = 20;
// Precedence of unary and binary `+` / `-`.
const PLUS_MINUS_PREC: u8 = 30;
/// Get the precedence of the next token
///
/// Returns 0 for EOF and for any token that is not an infix operator;
/// `parse_subexpr` uses that to stop climbing.
pub fn get_next_precedence(&self) -> Result<u8, ParserError> {
    if let Some(token) = self.peek_token() {
        debug!("get_precedence() {:?}", token);
        match &token {
            Token::SQLWord(k) if k.keyword == "OR" => Ok(5),
            Token::SQLWord(k) if k.keyword == "AND" => Ok(10),
            Token::SQLWord(k) if k.keyword == "NOT" => match &self.peek_nth_token(1) {
                // The precedence of NOT varies depending on keyword that
                // follows it. If it is followed by IN, BETWEEN, or LIKE,
                // it takes on the precedence of those tokens. Otherwise it
                // takes on UNARY_NOT_PREC.
                Some(Token::SQLWord(k)) if k.keyword == "IN" => Ok(Self::BETWEEN_PREC),
                Some(Token::SQLWord(k)) if k.keyword == "BETWEEN" => Ok(Self::BETWEEN_PREC),
                Some(Token::SQLWord(k)) if k.keyword == "LIKE" => Ok(Self::BETWEEN_PREC),
                _ => Ok(Self::UNARY_NOT_PREC),
            },
            Token::SQLWord(k) if k.keyword == "IS" => Ok(17),
            Token::SQLWord(k) if k.keyword == "IN" => Ok(Self::BETWEEN_PREC),
            Token::SQLWord(k) if k.keyword == "BETWEEN" => Ok(Self::BETWEEN_PREC),
            Token::SQLWord(k) if k.keyword == "LIKE" => Ok(Self::BETWEEN_PREC),
            Token::Eq | Token::Lt | Token::LtEq | Token::Neq | Token::Gt | Token::GtEq => {
                Ok(20)
            }
            Token::Plus | Token::Minus => Ok(Self::PLUS_MINUS_PREC),
            Token::Mult | Token::Div | Token::Mod => Ok(40),
            Token::DoubleColon => Ok(50),
            _ => Ok(0),
        }
    } else {
        Ok(0)
    }
}
/// Return the first non-whitespace token that has not yet been processed
/// (or None if reached end-of-file)
pub fn peek_token(&self) -> Option<Token> {
    let first_unprocessed = 0;
    self.peek_nth_token(first_unprocessed)
}
/// Return nth non-whitespace token that has not yet been processed
pub fn peek_nth_token(&self, n: usize) -> Option<Token> {
    // Walk forward from the cursor, dropping whitespace tokens, without
    // advancing `self.index`.
    self.tokens
        .iter()
        .skip(self.index)
        .filter(|t| match t {
            Token::Whitespace(_) => false,
            _ => true,
        })
        .nth(n)
        .cloned()
}
/// Return the first non-whitespace token that has not yet been processed
/// (or None if reached end-of-file) and mark it as processed. OK to call
/// repeatedly after reaching EOF.
pub fn next_token(&mut self) -> Option<Token> {
    // Skip over any whitespace at the cursor, then take one token.
    while let Some(Token::Whitespace(_)) = self.tokens.get(self.index) {
        self.index += 1;
    }
    self.index += 1;
    self.tokens.get(self.index - 1).cloned()
}
/// Return the first unprocessed token, possibly whitespace.
pub fn next_token_no_skip(&mut self) -> Option<&Token> {
    let current = self.index;
    self.index += 1;
    self.tokens.get(current)
}
/// Push back the last one non-whitespace token. Must be called after
/// `next_token()`, otherwise might panic. OK to call after
/// `next_token()` indicates an EOF.
pub fn prev_token(&mut self) {
    loop {
        assert!(self.index > 0);
        self.index -= 1;
        // Keep stepping back over whitespace tokens.
        match self.tokens.get(self.index) {
            Some(Token::Whitespace(_)) => continue,
            _ => return,
        }
    }
}
/// Report unexpected token
fn expected<T>(&self, expected: &str, found: Option<Token>) -> Result<T, ParserError> {
parser_err!(format!(
"Expected {}, found: {}",
expected,
found.map_or("EOF".to_string(), |t| t.to_string())
))
}
/// Look for an expected keyword and consume it if it exists
#[must_use]
pub fn parse_keyword(&mut self, expected: &'static str) -> bool {
    // Ideally, we'd accept a enum variant, not a string, but since
    // it's not trivial to maintain the enum without duplicating all
    // the keywords three times, we'll settle for a run-time check that
    // the string actually represents a known keyword...
    assert!(keywords::ALL_KEYWORDS.contains(&expected));
    match self.peek_token() {
        Some(Token::SQLWord(ref k)) if expected.eq_ignore_ascii_case(&k.keyword) => {
            self.next_token();
            true
        }
        _ => false,
    }
}
/// Look for an expected sequence of keywords and consume them if they exist
#[must_use]
pub fn parse_keywords(&mut self, keywords: Vec<&'static str>) -> bool {
    let checkpoint = self.index;
    // `all` short-circuits on the first miss, like the original loop.
    let matched_all = keywords.iter().all(|kw| self.parse_keyword(*kw));
    if !matched_all {
        // Rewind so partially-consumed keywords are not lost.
        self.index = checkpoint;
    }
    matched_all
}
/// Look for one of the given keywords and return the one that matches.
#[must_use]
pub fn parse_one_of_keywords(&mut self, keywords: &[&'static str]) -> Option<&'static str> {
    // Run-time sanity check: every candidate must be a known keyword.
    for keyword in keywords {
        assert!(keywords::ALL_KEYWORDS.contains(keyword));
    }
    match self.peek_token() {
        Some(Token::SQLWord(ref k)) => keywords
            .iter()
            .find(|keyword| keyword.eq_ignore_ascii_case(&k.keyword))
            .map(|keyword| {
                // Consume the matched keyword.
                self.next_token();
                *keyword
            }),
        _ => None,
    }
}
/// Bail out if the current token is not one of the expected keywords, or consume it if it is
#[must_use]
pub fn expect_one_of_keywords(
    &mut self,
    keywords: &[&'static str],
) -> Result<&'static str, ParserError> {
    if let Some(keyword) = self.parse_one_of_keywords(keywords) {
        Ok(keyword)
    } else {
        // Error lists all the acceptable alternatives.
        self.expected(
            &format!("one of {}", keywords.join(" or ")),
            self.peek_token(),
        )
    }
}
/// Bail out if the current token is not an expected keyword, or consume it if it is
pub fn expect_keyword(&mut self, expected: &'static str) -> Result<(), ParserError> {
    if !self.parse_keyword(expected) {
        return self.expected(expected, self.peek_token());
    }
    Ok(())
}
/// Consume the next token if it matches the expected token, otherwise return false
#[must_use]
pub fn consume_token(&mut self, expected: &Token) -> bool {
    let found = match &self.peek_token() {
        Some(t) => *t == *expected,
        None => false,
    };
    if found {
        // Advance past the matched token.
        self.next_token();
    }
    found
}
/// Bail out if the current token is not an expected keyword, or consume it if it is
pub fn expect_token(&mut self, expected: &Token) -> Result<(), ParserError> {
    if !self.consume_token(expected) {
        return self.expected(&expected.to_string(), self.peek_token());
    }
    Ok(())
}
/// Parse a SQL CREATE statement
pub fn parse_create(&mut self) -> Result<SQLStatement, ParserError> {
    if self.parse_keyword("TABLE") {
        self.parse_create_table()
    } else if self.parse_keyword("MATERIALIZED") || self.parse_keyword("VIEW") {
        // One keyword was consumed either way; push it back so
        // `parse_create_view` can re-read `[MATERIALIZED] VIEW` itself.
        self.prev_token();
        self.parse_create_view()
    } else if self.parse_keyword("EXTERNAL") {
        self.parse_create_external_table()
    } else {
        self.expected("TABLE or VIEW after CREATE", self.peek_token())
    }
}
/// Parse `CREATE EXTERNAL TABLE <name> (...) STORED AS <format> LOCATION <path>`,
/// with `CREATE EXTERNAL` already consumed.
pub fn parse_create_external_table(&mut self) -> Result<SQLStatement, ParserError> {
    self.expect_keyword("TABLE")?;
    let table_name = self.parse_object_name()?;
    let (columns, constraints) = self.parse_columns()?;
    self.expect_keyword("STORED")?;
    self.expect_keyword("AS")?;
    // The storage format identifier must parse as a known FileFormat.
    let file_format = self.parse_identifier()?.parse::<FileFormat>()?;
    self.expect_keyword("LOCATION")?;
    let location = self.parse_literal_string()?;
    Ok(SQLStatement::SQLCreateTable {
        name: table_name,
        columns,
        constraints,
        with_options: vec![],
        external: true,
        file_format: Some(file_format),
        location: Some(location),
    })
}
/// Parse `CREATE [MATERIALIZED] VIEW <name> [(cols)] [WITH (...)] AS <query>`,
/// with `CREATE` already consumed.
pub fn parse_create_view(&mut self) -> Result<SQLStatement, ParserError> {
    let materialized = self.parse_keyword("MATERIALIZED");
    self.expect_keyword("VIEW")?;
    // Many dialects support `OR REPLACE` | `OR ALTER` right after `CREATE`, but we don't (yet).
    // ANSI SQL and Postgres support RECURSIVE here, but we don't support it either.
    let name = self.parse_object_name()?;
    // The column list is optional for views.
    let columns = self.parse_parenthesized_column_list(Optional)?;
    let with_options = if self.parse_keyword("WITH") {
        self.parse_with_options()?
    } else {
        vec![]
    };
    self.expect_keyword("AS")?;
    let query = Box::new(self.parse_query()?);
    // Optional `WITH [ CASCADED | LOCAL ] CHECK OPTION` is widely supported here.
    Ok(SQLStatement::SQLCreateView {
        name,
        columns,
        query,
        materialized,
        with_options,
    })
}
/// Parse `DROP TABLE|VIEW [IF EXISTS] <name> [, ...] [CASCADE|RESTRICT]`,
/// with `DROP` already consumed.
pub fn parse_drop(&mut self) -> Result<SQLStatement, ParserError> {
    let object_type = if self.parse_keyword("TABLE") {
        SQLObjectType::Table
    } else if self.parse_keyword("VIEW") {
        SQLObjectType::View
    } else {
        return parser_err!(format!(
            "Unexpected token after DROP: {:?}",
            self.peek_token()
        ));
    };
    let if_exists = self.parse_keywords(vec!["IF", "EXISTS"]);
    let mut names = vec![self.parse_object_name()?];
    // Additional comma-separated object names.
    loop {
        let token = &self.next_token();
        if let Some(Token::Comma) = token {
            names.push(self.parse_object_name()?)
        } else {
            // Push back any non-comma token (but not EOF) and stop.
            if token.is_some() {
                self.prev_token();
            }
            break;
        }
    }
    let cascade = self.parse_keyword("CASCADE");
    let restrict = self.parse_keyword("RESTRICT");
    // The two drop behaviors are mutually exclusive.
    if cascade && restrict {
        return parser_err!("Cannot specify both CASCADE and RESTRICT in DROP");
    }
    Ok(SQLStatement::SQLDrop {
        object_type,
        if_exists,
        names,
        cascade,
    })
}
/// Parse `CREATE TABLE <name> [(<columns/constraints>)] [WITH (<options>)]`,
/// with `CREATE TABLE` already consumed.
pub fn parse_create_table(&mut self) -> Result<SQLStatement, ParserError> {
    let name = self.parse_object_name()?;
    // parse optional column list (schema)
    let (columns, constraints) = self.parse_columns()?;
    let with_options = if self.parse_keyword("WITH") {
        self.parse_with_options()?
    } else {
        Vec::new()
    };
    Ok(SQLStatement::SQLCreateTable {
        name,
        columns,
        constraints,
        with_options,
        external: false,
        file_format: None,
        location: None,
    })
}
/// Parse an optional parenthesized list of column definitions and table
/// constraints; returns empty lists when the list is absent or `()`.
fn parse_columns(&mut self) -> Result<(Vec<SQLColumnDef>, Vec<TableConstraint>), ParserError> {
    let mut columns = vec![];
    let mut constraints = vec![];
    // No `(` at all, or an immediately-closed `()`: nothing to parse.
    if !self.consume_token(&Token::LParen) || self.consume_token(&Token::RParen) {
        return Ok((columns, constraints));
    }
    loop {
        if let Some(constraint) = self.parse_optional_table_constraint()? {
            constraints.push(constraint);
        } else if let Some(Token::SQLWord(column_name)) = self.peek_token() {
            self.next_token();
            let data_type = self.parse_data_type()?;
            let is_primary = self.parse_keywords(vec!["PRIMARY", "KEY"]);
            let is_unique = self.parse_keyword("UNIQUE");
            let default = if self.parse_keyword("DEFAULT") {
                // `parse_default_expr` stops before NULL / NOT NULL.
                let expr = self.parse_default_expr(0)?;
                Some(expr)
            } else {
                None
            };
            // NOT NULL forbids nulls; a bare NULL (or nothing) allows them.
            let allow_null = if self.parse_keywords(vec!["NOT", "NULL"]) {
                false
            } else {
                let _ = self.parse_keyword("NULL");
                true
            };
            debug!("default: {:?}", default);
            columns.push(SQLColumnDef {
                name: column_name.as_sql_ident(),
                data_type,
                allow_null,
                is_primary,
                is_unique,
                default,
            });
        } else {
            return self.expected("column name or constraint definition", self.peek_token());
        }
        let comma = self.consume_token(&Token::Comma);
        if self.consume_token(&Token::RParen) {
            // allow a trailing comma, even though it's not in standard
            break;
        } else if !comma {
            return self.expected("',' or ')' after column definition", self.peek_token());
        }
    }
    Ok((columns, constraints))
}
/// Parse a table-level constraint (`[CONSTRAINT <name>] PRIMARY KEY | UNIQUE
/// | FOREIGN KEY | CHECK ...`) if one starts at the cursor; returns
/// `Ok(None)` (without consuming anything) when no constraint is present.
pub fn parse_optional_table_constraint(
    &mut self,
) -> Result<Option<TableConstraint>, ParserError> {
    let name = if self.parse_keyword("CONSTRAINT") {
        Some(self.parse_identifier()?)
    } else {
        None
    };
    match self.next_token() {
        Some(Token::SQLWord(ref k)) if k.keyword == "PRIMARY" || k.keyword == "UNIQUE" => {
            let is_primary = k.keyword == "PRIMARY";
            // PRIMARY must be followed by KEY; UNIQUE stands alone.
            if is_primary {
                self.expect_keyword("KEY")?;
            }
            let columns = self.parse_parenthesized_column_list(Mandatory)?;
            Ok(Some(TableConstraint::Unique {
                name,
                columns,
                is_primary,
            }))
        }
        Some(Token::SQLWord(ref k)) if k.keyword == "FOREIGN" => {
            self.expect_keyword("KEY")?;
            let columns = self.parse_parenthesized_column_list(Mandatory)?;
            self.expect_keyword("REFERENCES")?;
            let foreign_table = self.parse_object_name()?;
            let referred_columns = self.parse_parenthesized_column_list(Mandatory)?;
            Ok(Some(TableConstraint::ForeignKey {
                name,
                columns,
                foreign_table,
                referred_columns,
            }))
        }
        Some(Token::SQLWord(ref k)) if k.keyword == "CHECK" => {
            self.expect_token(&Token::LParen)?;
            let expr = Box::new(self.parse_expr()?);
            self.expect_token(&Token::RParen)?;
            Ok(Some(TableConstraint::Check { name, expr }))
        }
        unexpected => {
            if name.is_some() {
                // A bare `CONSTRAINT <name>` must introduce a constraint body.
                self.expected("PRIMARY, UNIQUE, FOREIGN, or CHECK", unexpected)
            } else {
                // Not a constraint at all: undo the lookahead.
                self.prev_token();
                Ok(None)
            }
        }
    }
}
/// Parse a parenthesized, comma-separated list of `name = value` options,
/// as used after `WITH` in CREATE TABLE / CREATE VIEW.
pub fn parse_with_options(&mut self) -> Result<Vec<SQLOption>, ParserError> {
    self.expect_token(&Token::LParen)?;
    let mut options = Vec::new();
    loop {
        let name = self.parse_identifier()?;
        self.expect_token(&Token::Eq)?;
        let value = self.parse_value()?;
        options.push(SQLOption { name, value });
        // A comma continues the list; anything else ends it.
        if !self.consume_token(&Token::Comma) {
            break;
        }
    }
    self.expect_token(&Token::RParen)?;
    Ok(options)
}
/// Parse `ALTER TABLE [ONLY] <name> ADD <constraint>`, with `ALTER`
/// already consumed. Only the ADD-constraint form is supported.
pub fn parse_alter(&mut self) -> Result<SQLStatement, ParserError> {
    self.expect_keyword("TABLE")?;
    // `ONLY` (Postgres) is accepted and ignored.
    let _ = self.parse_keyword("ONLY");
    let table_name = self.parse_object_name()?;
    let operation = if self.parse_keyword("ADD") {
        if let Some(constraint) = self.parse_optional_table_constraint()? {
            AlterTableOperation::AddConstraint(constraint)
        } else {
            return self.expected("a constraint in ALTER TABLE .. ADD", self.peek_token());
        }
    } else {
        return self.expected("ADD after ALTER TABLE", self.peek_token());
    };
    Ok(SQLStatement::SQLAlterTable {
        name: table_name,
        operation,
    })
}
    /// Parse a copy statement
    ///
    /// Only the `COPY <table> [(columns)] FROM STDIN;` form is supported;
    /// the tab-separated row payload follows the statement's semicolon and
    /// is terminated by `\.`.
    pub fn parse_copy(&mut self) -> Result<SQLStatement, ParserError> {
        let table_name = self.parse_object_name()?;
        let columns = self.parse_parenthesized_column_list(Optional)?;
        self.expect_keyword("FROM")?;
        self.expect_keyword("STDIN")?;
        self.expect_token(&Token::SemiColon)?;
        // Everything after the `;` up to `\.` is the payload.
        let values = self.parse_tsv()?;
        Ok(SQLStatement::SQLCopy {
            table_name,
            columns,
            values,
        })
    }
/// Parse a tab separated values in
/// COPY payload
fn parse_tsv(&mut self) -> Result<Vec<Option<String>>, ParserError> {
let values = self.parse_tab_value()?;
Ok(values)
}
fn parse_sql_value(&mut self) -> Result<ASTNode, ParserError> {
Ok(ASTNode::SQLValue(self.parse_value()?))
}
    /// Read the tab-separated COPY payload until the `\.` terminator or EOF.
    ///
    /// Cell text is accumulated token-by-token; a tab or newline ends the
    /// current cell. `\N` denotes SQL NULL and is pushed as `None`.
    /// Whitespace tokens are significant here, hence `next_token_no_skip`.
    ///
    /// NOTE(review): any in-progress cell content is discarded when the
    /// payload ends via `\.` or EOF — presumably rows always end with a
    /// newline; confirm against the tokenizer/callers.
    fn parse_tab_value(&mut self) -> Result<Vec<Option<String>>, ParserError> {
        let mut values = vec![];
        let mut content = String::from("");
        while let Some(t) = self.next_token_no_skip() {
            match t {
                // Tab ends the current cell within a row.
                Token::Whitespace(Whitespace::Tab) => {
                    values.push(Some(content.to_string()));
                    content.clear();
                }
                // Newline ends the last cell of a row.
                Token::Whitespace(Whitespace::Newline) => {
                    values.push(Some(content.to_string()));
                    content.clear();
                }
                Token::Backslash => {
                    // `\.` terminates the whole payload.
                    if self.consume_token(&Token::Period) {
                        return Ok(values);
                    }
                    if let Some(token) = self.next_token() {
                        if let Token::SQLWord(SQLWord { value: v, .. }) = token {
                            if v == "N" {
                                // `\N` is the NULL marker.
                                values.push(None);
                            }
                        }
                    } else {
                        continue;
                    }
                }
                // Any other token is literal cell content.
                _ => {
                    content.push_str(&t.to_string());
                }
            }
        }
        Ok(values)
    }
    /// Parse a literal value (numbers, strings, date/time, booleans)
    fn parse_value(&mut self) -> Result<Value, ParserError> {
        match self.next_token() {
            Some(t) => match t {
                // Keyword literals: TRUE / FALSE / NULL.
                Token::SQLWord(k) => match k.keyword.as_ref() {
                    "TRUE" => Ok(Value::Boolean(true)),
                    "FALSE" => Ok(Value::Boolean(false)),
                    "NULL" => Ok(Value::Null),
                    _ => {
                        return parser_err!(format!("No value parser for keyword {}", k.keyword));
                    }
                },
                // Numbers with a decimal point parse as f64 (Double)...
                Token::Number(ref n) if n.contains('.') => match n.parse::<f64>() {
                    Ok(n) => Ok(Value::Double(n.into())),
                    Err(e) => parser_err!(format!("Could not parse '{}' as f64: {}", n, e)),
                },
                // ...all other numbers as unsigned u64 (Long).
                Token::Number(ref n) => match n.parse::<u64>() {
                    Ok(n) => Ok(Value::Long(n)),
                    Err(e) => parser_err!(format!("Could not parse '{}' as u64: {}", n, e)),
                },
                Token::SingleQuotedString(ref s) => Ok(Value::SingleQuotedString(s.to_string())),
                // N'...' national string literal.
                Token::NationalStringLiteral(ref s) => {
                    Ok(Value::NationalStringLiteral(s.to_string()))
                }
                // X'...' hex string literal.
                Token::HexStringLiteral(ref s) => Ok(Value::HexStringLiteral(s.to_string())),
                _ => parser_err!(format!("Unsupported value: {:?}", t)),
            },
            None => parser_err!("Expecting a value, but found EOF"),
        }
    }
    /// Parse an unsigned literal integer/long
    ///
    /// Signs and overflow are rejected by `u64`'s `FromStr` implementation.
    pub fn parse_literal_uint(&mut self) -> Result<u64, ParserError> {
        match self.next_token() {
            Some(Token::Number(s)) => s.parse::<u64>().map_err(|e| {
                ParserError::ParserError(format!("Could not parse '{}' as u64: {}", s, e))
            }),
            other => self.expected("literal int", other),
        }
    }
    /// Parse a literal double
    ///
    /// NOTE(review): unlike `parse_literal_uint`, errors here go through
    /// `parser_err!` rather than `self.expected` — consider unifying the
    /// error style.
    pub fn parse_literal_double(&mut self) -> Result<f64, ParserError> {
        match self.next_token() {
            Some(Token::Number(s)) => s.parse::<f64>().map_err(|e| {
                ParserError::ParserError(format!("Could not parse '{}' as f64: {}", s, e))
            }),
            other => parser_err!(format!("Expected literal number, found {:?}", other)),
        }
    }
    /// Parse a literal string
    ///
    /// Only single-quoted strings are accepted here.
    pub fn parse_literal_string(&mut self) -> Result<String, ParserError> {
        match self.next_token() {
            Some(Token::SingleQuotedString(ref s)) => Ok(s.clone()),
            other => parser_err!(format!("Expected literal string, found {:?}", other)),
        }
    }
    /// Parse a SQL datatype (in the context of a CREATE TABLE statement for example)
    ///
    /// Unrecognized type names fall through to `SQLType::Custom` so that
    /// dialect-specific or user-defined types still parse.
    pub fn parse_data_type(&mut self) -> Result<SQLType, ParserError> {
        match self.next_token() {
            Some(Token::SQLWord(k)) => match k.keyword.as_ref() {
                "BOOLEAN" => Ok(SQLType::Boolean),
                "FLOAT" => Ok(SQLType::Float(self.parse_optional_precision()?)),
                "REAL" => Ok(SQLType::Real),
                "DOUBLE" => {
                    // `DOUBLE PRECISION` — the noise word is optional.
                    let _ = self.parse_keyword("PRECISION");
                    Ok(SQLType::Double)
                }
                "SMALLINT" => Ok(SQLType::SmallInt),
                "INT" | "INTEGER" => Ok(SQLType::Int),
                "BIGINT" => Ok(SQLType::BigInt),
                "VARCHAR" => Ok(SQLType::Varchar(self.parse_optional_precision()?)),
                "CHAR" | "CHARACTER" => {
                    // `CHARACTER VARYING(n)` is an alias for VARCHAR(n).
                    if self.parse_keyword("VARYING") {
                        Ok(SQLType::Varchar(self.parse_optional_precision()?))
                    } else {
                        Ok(SQLType::Char(self.parse_optional_precision()?))
                    }
                }
                "UUID" => Ok(SQLType::Uuid),
                "DATE" => Ok(SQLType::Date),
                "TIMESTAMP" => {
                    // TBD: we throw away "with/without timezone" information
                    if self.parse_keyword("WITH") || self.parse_keyword("WITHOUT") {
                        self.expect_keyword("TIME")?;
                        self.expect_keyword("ZONE")?;
                    }
                    Ok(SQLType::Timestamp)
                }
                "TIME" => {
                    // TBD: we throw away "with/without timezone" information
                    if self.parse_keyword("WITH") || self.parse_keyword("WITHOUT") {
                        self.expect_keyword("TIME")?;
                        self.expect_keyword("ZONE")?;
                    }
                    Ok(SQLType::Time)
                }
                "REGCLASS" => Ok(SQLType::Regclass),
                "TEXT" => {
                    if self.consume_token(&Token::LBracket) {
                        // Note: this is postgresql-specific
                        self.expect_token(&Token::RBracket)?;
                        Ok(SQLType::Array(Box::new(SQLType::Text)))
                    } else {
                        Ok(SQLType::Text)
                    }
                }
                "BYTEA" => Ok(SQLType::Bytea),
                "NUMERIC" | "DECIMAL" | "DEC" => {
                    let (precision, scale) = self.parse_optional_precision_scale()?;
                    Ok(SQLType::Decimal(precision, scale))
                }
                _ => {
                    // Unknown keyword: rewind and parse a (possibly
                    // qualified) custom type name instead.
                    self.prev_token();
                    let type_name = self.parse_object_name()?;
                    Ok(SQLType::Custom(type_name))
                }
            },
            other => self.expected("a data type name", other),
        }
    }
    /// Parse `AS identifier` (or simply `identifier` if it's not a reserved keyword)
    /// Some examples with aliases: `SELECT 1 foo`, `SELECT COUNT(*) AS cnt`,
    /// `SELECT ... FROM t1 foo, t2 bar`, `SELECT ... FROM (...) AS bar`
    ///
    /// `reserved_kwds` lists keywords that must NOT be treated as a bare
    /// (AS-less) alias in the caller's context.
    pub fn parse_optional_alias(
        &mut self,
        reserved_kwds: &[&str],
    ) -> Result<Option<SQLIdent>, ParserError> {
        let after_as = self.parse_keyword("AS");
        match self.next_token() {
            // Accept any identifier after `AS` (though many dialects have restrictions on
            // keywords that may appear here). If there's no `AS`: don't parse keywords,
            // which may start a construct allowed in this position, to be parsed as aliases.
            // (For example, in `FROM t1 JOIN` the `JOIN` will always be parsed as a keyword,
            // not an alias.)
            Some(Token::SQLWord(ref w))
                if after_as || !reserved_kwds.contains(&w.keyword.as_str()) =>
            {
                Ok(Some(w.as_sql_ident()))
            }
            // MSSQL supports single-quoted strings as aliases for columns
            // We accept them as table aliases too, although MSSQL does not.
            Some(Token::SingleQuotedString(ref s)) => Ok(Some(format!("'{}'", s))),
            not_an_ident => {
                if after_as {
                    // `AS` must be followed by an identifier.
                    return self.expected("an identifier after AS", not_an_ident);
                }
                // No alias: rewind the token for the caller.
                self.prev_token();
                Ok(None) // no alias found
            }
        }
    }
/// Parse `AS identifier` when the AS is describing a table-valued object,
/// like in `... FROM generate_series(1, 10) AS t (col)`. In this case
/// the alias is allowed to optionally name the columns in the table, in
/// addition to the table itself.
pub fn parse_optional_table_alias(
&mut self,
reserved_kwds: &[&str],
) -> Result<Option<TableAlias>, ParserError> {
match self.parse_optional_alias(reserved_kwds)? {
Some(name) => {
let columns = self.parse_parenthesized_column_list(Optional)?;
Ok(Some(TableAlias { name, columns }))
}
None => Ok(None),
}
}
    /// Parse one or more identifiers with the specified separator between them
    ///
    /// Stops (rewinding the non-matching token) at the first token that
    /// breaks the alternating identifier/separator pattern. Errors if the
    /// sequence ends while an identifier is expected (empty input or a
    /// trailing separator).
    pub fn parse_list_of_ids(&mut self, separator: &Token) -> Result<Vec<SQLIdent>, ParserError> {
        let mut idents = vec![];
        // True when the next token must be an identifier, false when it
        // must be the separator.
        let mut expect_identifier = true;
        loop {
            let token = &self.next_token();
            match token {
                Some(Token::SQLWord(s)) if expect_identifier => {
                    expect_identifier = false;
                    idents.push(s.as_sql_ident());
                }
                Some(token) if token == separator && !expect_identifier => {
                    expect_identifier = true;
                    continue;
                }
                _ => {
                    // Not part of the list: push the token back and stop.
                    self.prev_token();
                    break;
                }
            }
        }
        if expect_identifier {
            self.expected("identifier", self.peek_token())
        } else {
            Ok(idents)
        }
    }
/// Parse a possibly qualified, possibly quoted identifier, e.g.
/// `foo` or `myschema."table"`
pub fn parse_object_name(&mut self) -> Result<SQLObjectName, ParserError> {
Ok(SQLObjectName(self.parse_list_of_ids(&Token::Period)?))
}
    /// Parse a simple one-word identifier (possibly quoted, possibly a keyword)
    pub fn parse_identifier(&mut self) -> Result<SQLIdent, ParserError> {
        match self.next_token() {
            Some(Token::SQLWord(w)) => Ok(w.as_sql_ident()),
            unexpected => self.expected("identifier", unexpected),
        }
    }
    /// Parse a parenthesized comma-separated list of unqualified, possibly quoted identifiers
    ///
    /// With `Optional`, a missing opening parenthesis yields an empty list;
    /// with `Mandatory` it is an error.
    pub fn parse_parenthesized_column_list(
        &mut self,
        optional: IsOptional,
    ) -> Result<Vec<SQLIdent>, ParserError> {
        if self.consume_token(&Token::LParen) {
            let cols = self.parse_list_of_ids(&Token::Comma)?;
            self.expect_token(&Token::RParen)?;
            Ok(cols)
        } else if optional == Optional {
            Ok(vec![])
        } else {
            self.expected("a list of columns in parentheses", self.peek_token())
        }
    }
pub fn parse_optional_precision(&mut self) -> Result<Option<u64>, ParserError> {
if self.consume_token(&Token::LParen) {
let n = self.parse_literal_uint()?;
self.expect_token(&Token::RParen)?;
Ok(Some(n))
} else {
Ok(None)
}
}
pub fn parse_optional_precision_scale(
&mut self,
) -> Result<(Option<u64>, Option<u64>), ParserError> {
if self.consume_token(&Token::LParen) {
let n = self.parse_literal_uint()?;
let scale = if self.consume_token(&Token::Comma) {
Some(self.parse_literal_uint()?)
} else {
None
};
self.expect_token(&Token::RParen)?;
Ok((Some(n), scale))
} else {
Ok((None, None))
}
}
    /// Parse a DELETE statement (the `DELETE` keyword was already consumed):
    /// `FROM <table> [WHERE <expr>]`.
    pub fn parse_delete(&mut self) -> Result<SQLStatement, ParserError> {
        self.expect_keyword("FROM")?;
        let table_name = self.parse_object_name()?;
        // Absent WHERE means "delete all rows".
        let selection = if self.parse_keyword("WHERE") {
            Some(self.parse_expr()?)
        } else {
            None
        };
        Ok(SQLStatement::SQLDelete {
            table_name,
            selection,
        })
    }
    /// Parse a query expression, i.e. a `SELECT` statement optionally
    /// preceeded with some `WITH` CTE declarations and optionally followed
    /// by `ORDER BY`. Unlike some other parse_... methods, this one doesn't
    /// expect the initial keyword to be already consumed
    pub fn parse_query(&mut self) -> Result<SQLQuery, ParserError> {
        let ctes = if self.parse_keyword("WITH") {
            // TODO: optional RECURSIVE
            self.parse_cte_list()?
        } else {
            vec![]
        };
        // The set-expression body: SELECT / (subquery) / VALUES, possibly
        // combined with UNION/EXCEPT/INTERSECT.
        let body = self.parse_query_body(0)?;
        let order_by = if self.parse_keywords(vec!["ORDER", "BY"]) {
            self.parse_order_by_expr_list()?
        } else {
            vec![]
        };
        let limit = if self.parse_keyword("LIMIT") {
            self.parse_limit()?
        } else {
            None
        };
        let offset = if self.parse_keyword("OFFSET") {
            Some(self.parse_offset()?)
        } else {
            None
        };
        let fetch = if self.parse_keyword("FETCH") {
            Some(self.parse_fetch()?)
        } else {
            None
        };
        Ok(SQLQuery {
            ctes,
            body,
            limit,
            order_by,
            offset,
            fetch,
        })
    }
    /// Parse one or more (comma-separated) `alias AS (subquery)` CTEs,
    /// assuming the initial `WITH` was already consumed.
    fn parse_cte_list(&mut self) -> Result<Vec<Cte>, ParserError> {
        let mut cte = vec![];
        loop {
            let alias = self.parse_identifier()?;
            // Optional column renaming: `alias (col1, col2) AS (...)`.
            let renamed_columns = self.parse_parenthesized_column_list(Optional)?;
            self.expect_keyword("AS")?;
            self.expect_token(&Token::LParen)?;
            cte.push(Cte {
                alias,
                query: self.parse_query()?,
                renamed_columns,
            });
            self.expect_token(&Token::RParen)?;
            if !self.consume_token(&Token::Comma) {
                break;
            }
        }
        Ok(cte)
    }
    /// Parse a "query body", which is an expression with roughly the
    /// following grammar:
    /// ```text
    ///   query_body ::= restricted_select | '(' subquery ')' | set_operation
    ///   restricted_select ::= 'SELECT' [expr_list] [ from ] [ where ] [ groupby_having ]
    ///   subquery ::= query_body [ order_by_limit ]
    ///   set_operation ::= query_body { 'UNION' | 'EXCEPT' | 'INTERSECT' } [ 'ALL' ] query_body
    /// ```
    ///
    /// `precedence` is the minimum binding power a following set operator
    /// must have to be consumed by this call (0 at the top level).
    fn parse_query_body(&mut self, precedence: u8) -> Result<SQLSetExpr, ParserError> {
        // We parse the expression using a Pratt parser, as in `parse_expr()`.
        // Start by parsing a restricted SELECT or a `(subquery)`:
        let mut expr = if self.parse_keyword("SELECT") {
            SQLSetExpr::Select(Box::new(self.parse_select()?))
        } else if self.consume_token(&Token::LParen) {
            // CTEs are not allowed here, but the parser currently accepts them
            let subquery = self.parse_query()?;
            self.expect_token(&Token::RParen)?;
            SQLSetExpr::Query(Box::new(subquery))
        } else if self.parse_keyword("VALUES") {
            SQLSetExpr::Values(self.parse_values()?)
        } else {
            return self.expected("SELECT or a subquery in the query body", self.peek_token());
        };
        loop {
            // The query can be optionally followed by a set operator:
            let next_token = self.peek_token();
            let op = self.parse_set_operator(&next_token);
            let next_precedence = match op {
                // UNION and EXCEPT have the same binding power and evaluate left-to-right
                Some(SQLSetOperator::Union) | Some(SQLSetOperator::Except) => 10,
                // INTERSECT has higher precedence than UNION/EXCEPT
                Some(SQLSetOperator::Intersect) => 20,
                // Unexpected token or EOF => stop parsing the query body
                None => break,
            };
            if precedence >= next_precedence {
                break;
            }
            self.next_token(); // skip past the set operator
            // Left-associative fold: the expression so far becomes the
            // left operand of the new set operation.
            expr = SQLSetExpr::SetOperation {
                left: Box::new(expr),
                op: op.unwrap(),
                all: self.parse_keyword("ALL"),
                right: Box::new(self.parse_query_body(next_precedence)?),
            };
        }
        Ok(expr)
    }
fn parse_set_operator(&mut self, token: &Option<Token>) -> Option<SQLSetOperator> {
match token {
Some(Token::SQLWord(w)) if w.keyword == "UNION" => Some(SQLSetOperator::Union),
Some(Token::SQLWord(w)) if w.keyword == "EXCEPT" => Some(SQLSetOperator::Except),
Some(Token::SQLWord(w)) if w.keyword == "INTERSECT" => Some(SQLSetOperator::Intersect),
_ => None,
}
}
    /// Parse a restricted `SELECT` statement (no CTEs / `UNION` / `ORDER BY`),
    /// assuming the initial `SELECT` was already consumed
    pub fn parse_select(&mut self) -> Result<SQLSelect, ParserError> {
        let all = self.parse_keyword("ALL");
        let distinct = self.parse_keyword("DISTINCT");
        // `ALL` and `DISTINCT` are mutually exclusive; `ALL` is the default
        // and is simply dropped from the AST.
        if all && distinct {
            return parser_err!("Cannot specify both ALL and DISTINCT in SELECT");
        }
        let projection = self.parse_select_list()?;
        // FROM is optional (e.g. `SELECT 1`).
        let (relation, joins) = if self.parse_keyword("FROM") {
            let relation = Some(self.parse_table_factor()?);
            let joins = self.parse_joins()?;
            (relation, joins)
        } else {
            (None, vec![])
        };
        let selection = if self.parse_keyword("WHERE") {
            Some(self.parse_expr()?)
        } else {
            None
        };
        let group_by = if self.parse_keywords(vec!["GROUP", "BY"]) {
            self.parse_expr_list()?
        } else {
            vec![]
        };
        let having = if self.parse_keyword("HAVING") {
            Some(self.parse_expr()?)
        } else {
            None
        };
        Ok(SQLSelect {
            distinct,
            projection,
            selection,
            relation,
            joins,
            group_by,
            having,
        })
    }
    /// A table name or a parenthesized subquery, followed by optional `[AS] alias`
    ///
    /// Also handles `LATERAL (subquery)`, table-valued function calls
    /// (`name(args)`), and MSSQL `WITH (hints)`.
    pub fn parse_table_factor(&mut self) -> Result<TableFactor, ParserError> {
        let lateral = self.parse_keyword("LATERAL");
        if self.consume_token(&Token::LParen) {
            let subquery = Box::new(self.parse_query()?);
            self.expect_token(&Token::RParen)?;
            let alias = self.parse_optional_table_alias(keywords::RESERVED_FOR_TABLE_ALIAS)?;
            Ok(TableFactor::Derived {
                lateral,
                subquery,
                alias,
            })
        } else if lateral {
            // LATERAL must be followed by a parenthesized subquery.
            self.expected("subquery after LATERAL", self.peek_token())
        } else {
            let name = self.parse_object_name()?;
            // Postgres, MSSQL: table-valued functions:
            let args = if self.consume_token(&Token::LParen) {
                self.parse_optional_args()?
            } else {
                vec![]
            };
            let alias = self.parse_optional_table_alias(keywords::RESERVED_FOR_TABLE_ALIAS)?;
            // MSSQL-specific table hints:
            let mut with_hints = vec![];
            if self.parse_keyword("WITH") {
                if self.consume_token(&Token::LParen) {
                    with_hints = self.parse_expr_list()?;
                    self.expect_token(&Token::RParen)?;
                } else {
                    // rewind, as WITH may belong to the next statement's CTE
                    self.prev_token();
                }
            };
            Ok(TableFactor::Table {
                name,
                alias,
                args,
                with_hints,
            })
        }
    }
    /// Parse the constraint of a JOIN: `ON <expr>`, `USING (cols)`, or —
    /// when `natural` is set — no constraint tokens at all.
    fn parse_join_constraint(&mut self, natural: bool) -> Result<JoinConstraint, ParserError> {
        if natural {
            // NATURAL JOINs carry no explicit constraint.
            Ok(JoinConstraint::Natural)
        } else if self.parse_keyword("ON") {
            let constraint = self.parse_expr()?;
            Ok(JoinConstraint::On(constraint))
        } else if self.parse_keyword("USING") {
            let columns = self.parse_parenthesized_column_list(Mandatory)?;
            Ok(JoinConstraint::Using(columns))
        } else {
            self.expected("ON, or USING after JOIN", self.peek_token())
        }
    }
    /// Parse zero or more joins following the first table factor of a FROM
    /// clause: implicit comma joins, `CROSS JOIN`, and `[NATURAL]
    /// [INNER|LEFT|RIGHT|FULL [OUTER]] JOIN ... <constraint>`.
    fn parse_joins(&mut self) -> Result<Vec<Join>, ParserError> {
        let mut joins = vec![];
        loop {
            let join = match &self.peek_token() {
                // `FROM a, b` — comma is an implicit cross join.
                Some(Token::Comma) => {
                    self.next_token();
                    Join {
                        relation: self.parse_table_factor()?,
                        join_operator: JoinOperator::Implicit,
                    }
                }
                // `CROSS JOIN` takes no constraint.
                Some(Token::SQLWord(kw)) if kw.keyword == "CROSS" => {
                    self.next_token();
                    self.expect_keyword("JOIN")?;
                    Join {
                        relation: self.parse_table_factor()?,
                        join_operator: JoinOperator::Cross,
                    }
                }
                _ => {
                    let natural = self.parse_keyword("NATURAL");
                    // Peek the join-type keyword without consuming it yet.
                    let peek_keyword = if let Some(Token::SQLWord(kw)) = self.peek_token() {
                        kw.keyword
                    } else {
                        String::default()
                    };
                    let join_operator_type = match peek_keyword.as_ref() {
                        "INNER" | "JOIN" => {
                            // `INNER` is optional before `JOIN`.
                            let _ = self.parse_keyword("INNER");
                            self.expect_keyword("JOIN")?;
                            JoinOperator::Inner
                        }
                        kw @ "LEFT" | kw @ "RIGHT" | kw @ "FULL" => {
                            let _ = self.next_token();
                            // `OUTER` is optional noise after LEFT/RIGHT/FULL.
                            let _ = self.parse_keyword("OUTER");
                            self.expect_keyword("JOIN")?;
                            match kw {
                                "LEFT" => JoinOperator::LeftOuter,
                                "RIGHT" => JoinOperator::RightOuter,
                                "FULL" => JoinOperator::FullOuter,
                                _ => unreachable!(),
                            }
                        }
                        _ if natural => {
                            return self.expected("a join type after NATURAL", self.peek_token());
                        }
                        // Not a join keyword: the FROM clause is done.
                        _ => break,
                    };
                    let relation = self.parse_table_factor()?;
                    let join_constraint = self.parse_join_constraint(natural)?;
                    Join {
                        relation,
                        join_operator: join_operator_type(join_constraint),
                    }
                }
            };
            joins.push(join);
        }
        Ok(joins)
    }
/// Parse an INSERT statement
pub fn parse_insert(&mut self) -> Result<SQLStatement, ParserError> {
self.expect_keyword("INTO")?;
let table_name = self.parse_object_name()?;
let columns = self.parse_parenthesized_column_list(Optional)?;
let source = Box::new(self.parse_query()?);
Ok(SQLStatement::SQLInsert {
table_name,
columns,
source,
})
}
    /// Parse an UPDATE statement (the `UPDATE` keyword was already
    /// consumed): `<table> SET <id> = <expr> [, ...] [WHERE <expr>]`.
    pub fn parse_update(&mut self) -> Result<SQLStatement, ParserError> {
        let table_name = self.parse_object_name()?;
        self.expect_keyword("SET")?;
        let mut assignments = vec![];
        loop {
            let id = self.parse_identifier()?;
            self.expect_token(&Token::Eq)?;
            let value = self.parse_expr()?;
            assignments.push(SQLAssignment { id, value });
            if !self.consume_token(&Token::Comma) {
                break;
            }
        }
        // Absent WHERE means "update all rows".
        let selection = if self.parse_keyword("WHERE") {
            Some(self.parse_expr()?)
        } else {
            None
        };
        Ok(SQLStatement::SQLUpdate {
            table_name,
            assignments,
            selection,
        })
    }
/// Parse a comma-delimited list of SQL expressions
pub fn parse_expr_list(&mut self) -> Result<Vec<ASTNode>, ParserError> {
let mut expr_list: Vec<ASTNode> = vec![];
loop {
expr_list.push(self.parse_expr()?);
if !self.consume_token(&Token::Comma) {
break;
}
}
Ok(expr_list)
}
pub fn parse_optional_args(&mut self) -> Result<Vec<ASTNode>, ParserError> {
if self.consume_token(&Token::RParen) {
Ok(vec![])
} else {
let args = self.parse_expr_list()?;
self.expect_token(&Token::RParen)?;
Ok(args)
}
}
    /// Parse a comma-delimited list of projections after SELECT
    ///
    /// Each item is `*`, `table.*`, or an expression with an optional alias.
    pub fn parse_select_list(&mut self) -> Result<Vec<SQLSelectItem>, ParserError> {
        let mut projections: Vec<SQLSelectItem> = vec![];
        loop {
            let expr = self.parse_expr()?;
            if let ASTNode::SQLWildcard = expr {
                projections.push(SQLSelectItem::Wildcard);
            } else if let ASTNode::SQLQualifiedWildcard(prefix) = expr {
                projections.push(SQLSelectItem::QualifiedWildcard(SQLObjectName(prefix)));
            } else {
                // `expr` is a regular SQL expression and can be followed by an alias
                if let Some(alias) =
                    self.parse_optional_alias(keywords::RESERVED_FOR_COLUMN_ALIAS)?
                {
                    projections.push(SQLSelectItem::ExpressionWithAlias { expr, alias });
                } else {
                    projections.push(SQLSelectItem::UnnamedExpression(expr));
                }
            }
            if !self.consume_token(&Token::Comma) {
                break;
            }
        }
        Ok(projections)
    }
/// Parse a comma-delimited list of SQL ORDER BY expressions
pub fn parse_order_by_expr_list(&mut self) -> Result<Vec<SQLOrderByExpr>, ParserError> {
let mut expr_list: Vec<SQLOrderByExpr> = vec![];
loop {
let expr = self.parse_expr()?;
let asc = if self.parse_keyword("ASC") {
Some(true)
} else if self.parse_keyword("DESC") {
Some(false)
} else {
None
};
expr_list.push(SQLOrderByExpr { expr, asc });
if !self.consume_token(&Token::Comma) {
break;
}
}
Ok(expr_list)
}
/// Parse a LIMIT clause
pub fn parse_limit(&mut self) -> Result<Option<ASTNode>, ParserError> {
self.parse_statement()?;
if self.parse_keyword("ALL") {
Ok(None)
} else {
self.parse_literal_uint()
.map(|n| Some(ASTNode::SQLValue(Value::Long(n))))
}
}
    /// Parse an OFFSET clause
    ///
    /// Expects `<count> ROW|ROWS` (the `OFFSET` keyword was already consumed).
    pub fn parse_offset(&mut self) -> Result<ASTNode, ParserError> {
        let value = self
            .parse_literal_uint()
            .map(|n| ASTNode::SQLValue(Value::Long(n)))?;
        // The ROW/ROWS noise word is mandatory here.
        self.expect_one_of_keywords(&["ROW", "ROWS"])?;
        Ok(value)
    }
    /// Parse a FETCH clause
    ///
    /// Grammar: `FIRST|NEXT [<quantity> [PERCENT]] ROW|ROWS (ONLY | WITH TIES)`
    /// (the `FETCH` keyword was already consumed).
    pub fn parse_fetch(&mut self) -> Result<Fetch, ParserError> {
        self.expect_one_of_keywords(&["FIRST", "NEXT"])?;
        // A bare `ROW|ROWS` omits the quantity entirely.
        let (quantity, percent) = if self.parse_one_of_keywords(&["ROW", "ROWS"]).is_some() {
            (None, false)
        } else {
            let quantity = self.parse_sql_value()?;
            let percent = self.parse_keyword("PERCENT");
            self.expect_one_of_keywords(&["ROW", "ROWS"])?;
            (Some(quantity), percent)
        };
        // Either `ONLY` or `WITH TIES` must close the clause.
        let with_ties = if self.parse_keyword("ONLY") {
            false
        } else if self.parse_keywords(vec!["WITH", "TIES"]) {
            true
        } else {
            return self.expected("one of ONLY or WITH TIES", self.peek_token());
        };
        Ok(Fetch {
            with_ties,
            percent,
            quantity,
        })
    }
pub fn parse_values(&mut self) -> Result<SQLValues, ParserError> {
let mut values = vec![];
loop {
self.expect_token(&Token::LParen)?;
values.push(self.parse_expr_list()?);
self.expect_token(&Token::RParen)?;
match self.peek_token() {
Some(Token::Comma) => self.next_token(),
_ => break,
};
}
Ok(SQLValues(values))
}
pub fn parse_transaction(&mut self) -> Result<SQLStatement, ParserError> {
let mut stmts = Vec::new();
while self.consume_token(&Token::SemiColon) {
;
}
while let Some(Token::SQLWord(w)) = self.peek_token() {
if w.keyword == "COMMIT" {
break;
}
let stmt = self.parse_statement()?;
stmts.push(Box::new(stmt));
}
while self.consume_token(&Token::SemiColon) {
;
}
Ok(SQLStatement::SQLTransaction(stmts))
}
}
impl SQLWord {
    /// Convert this word into a `SQLIdent` via its `Display` implementation.
    /// NOTE(review): presumably this preserves quote characters for quoted
    /// identifiers — confirm against `SQLWord`'s `Display` impl.
    pub fn as_sql_ident(&self) -> SQLIdent {
        self.to_string()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::test_utils::all_dialects;
    use crate::sqlast::ASTNode::SQLFunction;
    /// Exercises peek/next/prev token navigation, including calling
    /// `prev_token` at and past the end of the token stream.
    #[test]
    fn test_prev_index() {
        let sql = "SELECT version";
        all_dialects().run_parser_method(sql, |parser| {
            assert_eq!(parser.peek_token(), Some(Token::make_keyword("SELECT")));
            assert_eq!(parser.next_token(), Some(Token::make_keyword("SELECT")));
            parser.prev_token();
            assert_eq!(parser.next_token(), Some(Token::make_keyword("SELECT")));
            assert_eq!(parser.next_token(), Some(Token::make_word("version", None)));
            parser.prev_token();
            assert_eq!(parser.peek_token(), Some(Token::make_word("version", None)));
            assert_eq!(parser.next_token(), Some(Token::make_word("version", None)));
            assert_eq!(parser.peek_token(), None);
            parser.prev_token();
            assert_eq!(parser.next_token(), Some(Token::make_word("version", None)));
            assert_eq!(parser.next_token(), None);
            assert_eq!(parser.next_token(), None);
            parser.prev_token();
        });
    }
    /// Parses a BEGIN..COMMIT block into a `SQLTransaction` containing the
    /// single SELECT statement between the delimiters.
    #[test]
    fn test_transaction() {
        let sql = "BEGIN;\nSELECT version();\nCOMMIT;";
        all_dialects().run_parser_method(sql, |parser| {
            assert_eq!(parser.parse_statement(), Ok(SQLStatement::SQLTransaction(vec![
                Box::new(SQLStatement::SQLQuery(Box::new(SQLQuery {
                    ctes: vec![],
                    body: SQLSetExpr::Select(Box::new(
                        SQLSelect {
                            distinct: false,
                            projection: vec![
                                SQLSelectItem::UnnamedExpression(SQLFunction {
                                    name: SQLObjectName(vec!["version".to_owned()]),
                                    args: vec![],
                                    over: None,
                                    distinct: false,
                                })
                            ],
                            relation: None,
                            joins: vec![],
                            selection: None,
                            group_by: vec![],
                            having: None,
                        }
                    )),
                    fetch: None,
                    offset: None,
                    order_by: vec![],
                    limit: None,
                }))),
            ])));
        });
    }
}
|
package shmp.lang.language.lexis
import shmp.lang.containers.SemanticsCoreTemplate
/**
 * Base class for predicates that select [SemanticsCoreTemplate]s.
 */
sealed class SemanticsCoreMatcher {
    abstract fun match(core: SemanticsCoreTemplate): Boolean
}
/** Matcher that accepts every template unconditionally. */
object PassingMatcher: SemanticsCoreMatcher() {
    override fun match(core: SemanticsCoreTemplate) = true
}
/** Matches templates whose speech part equals [speechPart]. */
data class SpeechPartMatcher(private val speechPart: SpeechPart): SemanticsCoreMatcher() {
    override fun match(core: SemanticsCoreTemplate) = core.speechPart == speechPart
}
/**
 * Matches templates that contain a tag cluster consisting of exactly one
 * tag whose name equals [tag]'s name.
 *
 * NOTE(review): clusters holding the tag alongside other tags do NOT
 * match — confirm this single-tag restriction is intended.
 */
data class TagMatcher(private val tag: SemanticsTag): SemanticsCoreMatcher() {
    override fun match(core: SemanticsCoreTemplate) = core.tagClusters
        .any { it.semanticsTags.size == 1 && it.semanticsTags[0].name == tag.name }
}
/** Matches only when every one of the supplied matchers matches. */
class ConcatMatcher(vararg matchers: SemanticsCoreMatcher): SemanticsCoreMatcher() {
    // Materialize the vararg array as an immutable list once.
    private val allMatchers = matchers.toList()

    override fun match(core: SemanticsCoreTemplate) = allMatchers.all { it.match(core) }
}
|
use crate::prelude::{debug, Logger, Value};
use stable_hash::{prelude::*, utils::StableHasherWrapper, SequenceNumberInt};
use std::collections::{BTreeMap, HashMap};
use std::fmt;
use strum::AsStaticRef as _;
use strum_macros::AsStaticStr;
use twox_hash::XxHash64;
use lazy_static::lazy_static;
lazy_static! {
    /// Whether to emit a debug log line for each proof-of-indexing event,
    /// controlled by the `GRAPH_LOG_POI_EVENTS` env var ("true"/"false").
    /// Panics at first access if the variable holds a non-boolean value.
    static ref LOG_EVENTS: bool = std::env::var("GRAPH_LOG_POI_EVENTS")
        // `unwrap_or_else` defers building the default String to the error
        // path, unlike `unwrap_or` which allocated it unconditionally
        // (clippy: or_fun_call).
        .unwrap_or_else(|_| "false".into())
        .parse::<bool>()
        .expect("invalid GRAPH_LOG_POI_EVENTS");
}
/// A hex-encoded digest string summarizing a stream of indexing events
/// (produced by `ProofOfIndexingStream::finish`).
#[derive(Debug)]
pub struct ProofOfIndexingDigest(pub String);
impl StableHash for ProofOfIndexingDigest {
    /// Delegates to the inner `String`'s stable hash.
    fn stable_hash(&self, sequence_number: impl SequenceNumber, state: &mut impl StableHasher) {
        self.0.stable_hash(sequence_number, state)
    }
}
/// An event folded into the proof of indexing. Each variant mirrors a
/// store operation performed while indexing.
#[derive(AsStaticStr)]
pub enum ProofOfIndexingEvent<'a> {
    /// An entity of `entity_type` with the given `id` was removed.
    RemoveEntity {
        entity_type: &'a str,
        id: &'a str,
    },
    /// An entity of `entity_type` with the given `id` was created or
    /// overwritten with `data`.
    SetEntity {
        entity_type: &'a str,
        id: &'a str,
        data: &'a HashMap<String, Value>,
    },
}
/// Different than #[derive(Debug)] in order to be deterministic.
/// In particular, we swap out the HashMap for a BTreeMap when printing
/// the data field of the SetEntity variant so that the keys are sorted.
impl fmt::Debug for ProofOfIndexingEvent<'_> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // The struct name is the variant name, via strum's AsStaticStr.
        let mut builder = f.debug_struct(self.as_static());
        match self {
            Self::RemoveEntity { entity_type, id } => {
                builder.field("entity_type", entity_type);
                builder.field("id", id);
            }
            Self::SetEntity {
                entity_type,
                id,
                data,
            } => {
                builder.field("entity_type", entity_type);
                builder.field("id", id);
                // Collect into a BTreeMap so keys print in sorted order.
                builder.field("data", &data.iter().collect::<BTreeMap<_, _>>());
            }
        }
        builder.finish()
    }
}
impl slog::Value for ProofOfIndexingEvent<'_> {
    /// Logs the event using its (deterministic) `Debug` representation.
    fn serialize(
        &self,
        record: &slog::Record,
        key: slog::Key,
        serializer: &mut dyn slog::Serializer,
    ) -> slog::Result {
        format!("{:?}", self).serialize(record, key, serializer)
    }
}
impl StableHash for ProofOfIndexingEvent<'_> {
    /// Hashes each variant's fields under their own child sequence numbers,
    /// then the variant name (the discriminant) under the parent number.
    fn stable_hash(&self, mut sequence_number: impl SequenceNumber, state: &mut impl StableHasher) {
        use ProofOfIndexingEvent::*;
        match self {
            RemoveEntity { entity_type, id } => {
                entity_type.stable_hash(sequence_number.next_child(), state);
                id.stable_hash(sequence_number.next_child(), state);
            }
            SetEntity {
                entity_type,
                id,
                data,
            } => {
                entity_type.stable_hash(sequence_number.next_child(), state);
                id.stable_hash(sequence_number.next_child(), state);
                data.stable_hash(sequence_number.next_child(), state);
            }
        }
        // Include the discriminant
        self.as_static().stable_hash(sequence_number, state);
    }
}
/// The POI is the StableHash of:
/// (Vec<ProofOfIndexingEvent>, PreviousDigest)
/// This struct contains the necessary state to construct that value in a streaming manner
pub struct ProofOfIndexingStream {
    // Sequence number reserved for the previous digest (second tuple element).
    previous_digest_sequence_number: SequenceNumberInt<u64>,
    // Sequence number for the event vec (first tuple element).
    vec_sequence_number: SequenceNumberInt<u64>,
    // Count of events written so far; hashed in `finish` — presumably
    // mirroring how a Vec's stable hash includes its length (confirm
    // against the stable_hash crate).
    vec_length: usize,
    // Running hash state for the whole tuple.
    digest: StableHasherWrapper<XxHash64>,
}
impl ProofOfIndexingStream {
    /// Set up sequence numbers matching the tuple layout
    /// `(Vec<ProofOfIndexingEvent>, PreviousDigest)`: first child for the
    /// vec, second for the previous digest.
    fn new() -> Self {
        let mut tuple_sequence_number = SequenceNumberInt::<u64>::root();
        let vec_sequence_number = tuple_sequence_number.next_child();
        let previous_digest_sequence_number = tuple_sequence_number.next_child();
        Self {
            previous_digest_sequence_number,
            vec_sequence_number,
            vec_length: 0,
            digest: Default::default(),
        }
    }
    /// Fold one event into the digest as the next element of the vec.
    fn write(&mut self, event: &ProofOfIndexingEvent) {
        event.stable_hash(self.vec_sequence_number.next_child(), &mut self.digest);
        self.vec_length += 1;
    }
    /// Consume the stream, mixing in the vec length and the previous
    /// digest, and render the final hash as a lowercase hex string.
    pub fn finish(self, previous: &Option<ProofOfIndexingDigest>) -> ProofOfIndexingDigest {
        let Self {
            previous_digest_sequence_number,
            vec_sequence_number,
            vec_length,
            mut digest,
        } = self;
        // Finish out the vec digest
        vec_length.stable_hash(vec_sequence_number, &mut digest);
        // Add the previous digest to the end of the tuple
        previous.stable_hash(previous_digest_sequence_number, &mut digest);
        let hash = format!("{:x}", digest.finish());
        ProofOfIndexingDigest(hash)
    }
}
/// Tracks one `ProofOfIndexingStream` per causality region.
#[derive(Default)]
pub struct ProofOfIndexing {
    /// The POI is updated for each data source independently. This is necessary because
    /// some data sources (eg: IPFS files) may be unreliable and therefore cannot mix
    /// state with other data sources. This may also give us some freedom to change
    /// the order of triggers in the future.
    per_causality_region: HashMap<String, ProofOfIndexingStream>,
}
impl fmt::Debug for ProofOfIndexing {
    /// Opaque representation — the internal hasher state is not
    /// meaningfully printable, so this renders as `ProofOfIndexing("...")`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_tuple("ProofOfIndexing").field(&"...").finish()
    }
}
impl ProofOfIndexing {
    /// Adds an event to the digest of the ProofOfIndexingStream local to the DataSource
    pub fn write(
        &mut self,
        logger: &Logger,
        causality_region: &str,
        event: &ProofOfIndexingEvent<'_>,
    ) {
        // Optional event tracing, gated by GRAPH_LOG_POI_EVENTS.
        if *LOG_EVENTS {
            debug!(
                logger,
                "Proof of indexing event";
                "event" => &event,
                "causality_region" => causality_region
            );
        }
        // This may be better with the raw_entry API, once that is stabilized
        // (looking up with `get_mut` first avoids allocating the key String
        // on the common path where the stream already exists).
        if let Some(data_source) = self.per_causality_region.get_mut(causality_region) {
            data_source.write(event);
        } else {
            let mut entry = ProofOfIndexingStream::new();
            entry.write(event);
            self.per_causality_region
                .insert(causality_region.to_owned(), entry);
        }
    }
    /// Swaps the internals out for an empty one
    /// Returns None if there are no changes.
    pub fn take(&mut self) -> Option<HashMap<String, ProofOfIndexingStream>> {
        if self.per_causality_region.is_empty() {
            None
        } else {
            Some(std::mem::replace(
                &mut self.per_causality_region,
                HashMap::new(),
            ))
        }
    }
}
|
#pragma once
#include <cassert>
#include <memory>
#include "../Actor/Actor.h"
#include "../Actor/ActorTypes.h"
#include "../Actor/Player.h"
namespace Factory
{
class ActorFactory
{
public:
std::unique_ptr<Actor::Actor> createActor(Actor::ActorType actorType)
{
switch (actorType)
{
case Actor::ActorType::PLAYER:
return std::make_unique<Actor::Player>(new Actor::Player);
case Actor::ActorType::MONSTER:
case Actor::ActorType::BULLET:
case Actor::ActorType::DOOR:
case Actor::ActorType::PLAYER_ICON:
case Actor::ActorType::PANEL_LIGHT:
default:
std::assert(!"Unknown ActorType.");
}
}
};
}
|
# frozen_string_literal: true
require_relative "install_dash_docs/version"
require_relative "install_dash_docs/bundler_plugin"
require_relative "install_dash_docs/cli"
require_relative "install_dash_docs/dash"
require_relative "install_dash_docs/dash_install_url"
require_relative "install_dash_docs/lockfile"
module Bundler
  module InstallDashDocs
    # Raised when the plugin receives invalid arguments.
    # NOTE(review): this shadows Ruby's built-in ArgumentError inside this
    # namespace — confirm that is intended.
    class ArgumentError < StandardError; end
    # Raised when an expected file cannot be found.
    class FileNotFound < StandardError; end
  end
end
|
import { join } from 'path';
import {
renderPlaygroundPage,
RenderPageOptions as PlaygroundRenderPageOptions,
} from '@apollographql/graphql-playground-html';
import { loadFilesSync } from '@graphql-tools/load-files';
import { mergeTypeDefs, mergeResolvers } from '@graphql-tools/merge';
import { makeExecutableSchema } from '@graphql-tools/schema';
import {
ApolloServerBase,
GraphQLOptions,
formatApolloErrors,
} from 'apollo-server-core';
import { processRequest, GraphQLUpload } from 'graphql-upload';
import { HttpContextContract } from '@ioc:Adonis/Core/HttpContext';
import { ApolloConfig, ApolloBaseContext } from '@ioc:Apollo/Config';
import { ServerRegistration } from '@ioc:Apollo/Server';
import { graphqlAdonis } from './graphqlAdonis';
/**
 * Normalizes the optional user-supplied context factory into a function:
 * delegate to it when provided, otherwise pass the base context through.
 */
function makeContextFunction(
  context?: (args: ApolloBaseContext) => unknown,
): (args: ApolloBaseContext) => unknown {
  if (typeof context !== 'function') {
    return (args: ApolloBaseContext) => args;
  }
  return (args: ApolloBaseContext) => context(args);
}
/**
 * Apollo GraphQL server wired into the AdonisJS HTTP layer: serves the
 * Playground on GET, GraphQL on POST, and multipart uploads via middleware.
 */
export default class ApolloServer extends ApolloServerBase {
  // URL path all GraphQL routes are mounted on (default '/graphql').
  private $path: string;
  // Opt in to graphql-upload handling in apollo-server-core.
  protected supportsUploads(): boolean {
    return true;
  }
  /**
   * Loads schema and resolver files from the configured app directories,
   * builds the executable schema (registering the `Upload` scalar), and
   * forwards everything else in the config to ApolloServerBase.
   */
  public constructor(appRoot: string, config: ApolloConfig) {
    const {
      path = '/graphql',
      resolvers = 'app/Resolvers',
      schemas = 'app/Schemas',
      apolloServer = {},
      executableSchema = {},
    } = config;
    const resolversPath = join(appRoot, resolvers);
    const schemasPath = join(appRoot, schemas);
    // `context` is handled specially; the rest is passed through untouched.
    let { context, ...rest } = apolloServer;
    super({
      schema: makeExecutableSchema({
        ...executableSchema,
        typeDefs: mergeTypeDefs(loadFilesSync(schemasPath)),
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        resolvers: mergeResolvers([
          ...loadFilesSync<any>(resolversPath, { recursive: false }),
          { Upload: GraphQLUpload },
        ]),
      }),
      context: makeContextFunction(context),
      ...rest,
    });
    this.$path = path;
  }
  // Resolves per-request GraphQL options from the Adonis HTTP context.
  private async createGraphQLServerOptions(
    ctx: HttpContextContract,
  ): Promise<GraphQLOptions> {
    return super.graphQLServerOptions({ ctx });
  }
  /**
   * Registers the GET (Playground) and POST (GraphQL) routes; the uploads
   * middleware is attached only when an uploads config is present.
   */
  public applyMiddleware({ Route }: ServerRegistration): void {
    Route.get(this.$path, this.getPlaygroundHandler());
    const postRoute = Route.post(this.$path, this.getGraphqlHandler());
    if (this.uploadsConfig) {
      postRoute.middleware(this.getUploadsMiddleware());
    }
  }
  // Route handler that renders the GraphQL Playground HTML page.
  public getPlaygroundHandler() {
    return async (ctx: HttpContextContract) => {
      const playgroundRenderPageOptions: PlaygroundRenderPageOptions = {
        endpoint: this.$path,
      };
      ctx.response.header('Content-Type', 'text/html');
      return renderPlaygroundPage(playgroundRenderPageOptions);
    };
  }
  // Route handler that executes GraphQL operations against this server.
  public getGraphqlHandler() {
    return async (ctx: HttpContextContract) => {
      const options = await this.createGraphQLServerOptions(ctx);
      return graphqlAdonis(options, ctx);
    };
  }
  /**
   * Middleware that pre-processes multipart/form-data requests through
   * graphql-upload and replaces the request body with the processed
   * operations; non-multipart requests pass straight through.
   */
  public getUploadsMiddleware() {
    return async (ctx: HttpContextContract, next: () => Promise<void>) => {
      if (ctx.request.is(['multipart/form-data'])) {
        try {
          const processed = await processRequest(
            ctx.request.request,
            ctx.response.response,
            this.uploadsConfig,
          );
          ctx.request.setInitialBody(processed);
          return next();
        } catch (error) {
          // Propagate HTTP status for client-facing upload errors.
          if (error.status && error.expose) {
            ctx.response.status(error.status);
          }
          throw formatApolloErrors([error], {
            formatter: this.requestOptions.formatError,
            debug: this.requestOptions.debug,
          });
        }
      } else {
        return next();
      }
    };
  }
}
|
package com.lucidsoftworksllc.sabotcommunity.models
// Row model for a user entry shown in a recycler list.
// All fields are plain strings — presumably deserialized straight from a
// JSON API; `verified`/`online` look like string-encoded flags (TODO confirm).
class UserListRecycler(val id: String,
                       val user_id: String,
                       val profile_pic: String,
                       val nickname: String,
                       val username: String,
                       val verified: String,
                       val online: String,
                       val desc: String)
|
cask 'flixster-desktop' do
  version '2.5.12.333'
  sha256 '2916b4f2fd2c50f355e427369f57b4f51ec32d22add34ba0abbc3467a849892c'
  # cloudfront.net is the official download host per the vendor homepage
  url 'https://dtmmt9rxsy2no.cloudfront.net/desktop/mac/FlixsterDesktop.zip'
  # Update feed; the checkpoint pins the feed contents this cask was last
  # verified against.
  # NOTE(review): `checkpoint:` and the `license` stanza below are legacy
  # Homebrew-Cask DSL — confirm against the Cask DSL version in use.
  appcast 'https://dtmmt9rxsy2no.cloudfront.net/desktop/mac/FlixsterDesktopMacAppcast.xml',
          checkpoint: 'ed42b9935d8dab11e2acb5502ccd1076c9c1e3d1d477a99c9dc569cfbd3a9706'
  name 'Flixster Desktop for Mac'
  homepage 'https://www.flixster.com/about/ultraviolet/'
  license :gratis
  app 'Flixster Desktop.app'
  postflight do
    # Suppress the app's "Move to Applications folder?" prompt on first launch.
    suppress_move_to_applications key: 'moveToApplicationsFolderAlertSuppress'
  end
end
|
`LineAppendableImpl`是旨在简化`LineFormattingAppendable`的替代品
创建基于行的格式化输出,包括通过以下方式跟踪源偏移信息
使用`SegmentedBuilder`累积输出到`SegmentedSequence`最终结果。
:warning:正在进行中
## Format
格式化功能包括在每行生成时删除/添加前缀,折叠
将空格跨度扩展为单个空格,删除每行的前导和/或尾随空格。
该实现将输出累积为行列表。对于每一行,该 appendable 会跟踪行内前缀结束、以及行文本开始的位置。
在附加 EOL 时,将提交当前累积的行,并将其添加到已累积的行列表中。
正在累积的行使用其自己的`SegmentBuilder`进行累积,从而允许以最小的性能损失快速修改或重新生成该行的文本。
## Options
选项定义在输出构造过程中可附加组件的行为:
* `CONVERT_TABS`:在4的列倍数上扩展制表符
* `COLLAPSE_WHITESPACE`:将多个制表符和空格折叠为单个空格
* `SUPPRESS_TRAILING_WHITESPACE`:不输出尾随空格
* `PASS_THROUGH`:无需格式化即可将所有内容传递给可附加项
* `ALLOW_LEADING_WHITESPACE`:允许一行中的前导空格,否则删除
* `ALLOW_LEADING_EOL`:允许偏移量为0的EOL
* `PREFIX_PRE_FORMATTED`:在为行加上前缀时,在预格式化的行之前加上前缀
:information_source:`COLLAPSE_WHITESPACE`会覆盖`CONVERT_TABS`,因为制表符在转换后会变成空格并被折叠。
## Formatting Features
`LineAppendable`跟踪累加行的`length`和`column`以方便
通过呈现代码来格式化决策。
`LineAppendable`提供了允许基于以下条件应用条件格式逻辑的方法
给定节点的子元素生成的累积输出,特别是应用前缀
仅在附加EOL后更改,或在附加EOL后注册回调。
在行构造完成之后,仍然可以修改行的前缀。这样,在核心渲染器完成其工作之后,无需修改已提交的行即可调整前缀。
Markdown Navigator插件在累积文本时使用行前缀修改功能
来自JetBrains IDE的解析树,它不适合跟踪父级前缀。
为此,`LineAppendable`允许将每行的前缀结尾修改为父行
前缀将从文本中删除。
|
import { graphql, useStaticQuery } from 'gatsby';
import React from 'react';
import { Helmet } from 'react-helmet';
// Props for the <Head> document-head component.
interface HeadProps {
  // Page title; rendered through Helmet's titleTemplate.
  title: string;
}
/**
 * Document <head> manager: sets language, charset, title, favicons and
 * viewport via react-helmet.
 */
const Head: React.FC<HeadProps> = ({title}) => {
  return (
    // Typo fix: "Accceptance" -> "Acceptance" in the user-visible title.
    <Helmet titleTemplate={`%s | A11Y Acceptance Criteria Library`}>
      <html lang="en" />
      <meta charSet="utf-8" />
      <title>{title}</title>
      <link rel="icon" type="image/svg+xml" href="/favicon.svg" />
      {/* "alticon" is not a valid rel value; "alternate icon" is the
          standard fallback-favicon relation. */}
      <link rel="alternate icon" href="/favicon.ico" />
      <meta name="viewport" content="width=device-width, initial-scale=1.0" />
    </Helmet>
  )
}
export default Head;
|
// Exercises accessor properties across a prototype chain: objB inherits
// objA's getter/setter, and `this` inside the getter distinguishes them.
function doTest() {
    var objA = {};
    Object.defineProperties(objA, {
        prop: {
            get: function () { return 'foo'; },
            set: function (newVal) { log(newVal); }
        },
        isObjA: {
            get: function () { return this === objA; }
        }
    });
    var objB = Object.create(objA);
    log(objB.prop);
    // Assignment routes through the inherited setter (logs 'bar').
    objB.prop = 'bar';
    log('objA.prop =>', objA.prop);
    log('objA.isObjA =>', objA.isObjA);
    log('objB.prop =>', objB.prop);
    log('objB.isObjA =>', objB.isObjA);
}
doTest();
# Serves the downloads page: collects the monthly TSV exports found under
# public/data/monthly_tsvs, grouped by month subdirectory, newest first.
class DownloadsController < ApplicationController
  attr_reader :tsvs, :files

  def show
    @tsvs = ['interactions.tsv', 'genes.tsv', 'drugs.tsv', 'categories.tsv']
    tsv_dir = File.join('data', 'monthly_tsvs')
    # { "2020-01" => { "genes.tsv" => "data/monthly_tsvs/2020-01/genes.tsv", ... }, ... }
    unsorted_files = Dir.entries(File.join(Rails.root, 'public', tsv_dir)).each_with_object({}) do |subdir, h|
      next if ['.', '..'].include?(subdir)
      h[subdir] = tsvs.each_with_object({}) do |file, file_h|
        file_h[file] = File.join(tsv_dir, subdir, file)
      end
    end
    # Date is Comparable, so sort by the parsed Date directly instead of the
    # old strftime("%s").to_i epoch round-trip.
    @files = unsorted_files.sort_by { |month, _paths| Date.parse(month) }.reverse!
  end
end
|
module Netverify
  # Maps a Netverify callback payload onto the stored Validation record and
  # persists the result.
  class ResponseFetcher
    def initialize(params = {})
      @params = params
      @validation = find_stored_validation
    end

    # Copies all recognized payload fields onto the validation and saves it.
    def fetch!
      prepare_fields
      fetch_personal_info_fields
      fetch_image_fields
      fetch_reject_reason_fields
      fetch_additional_info
      @validation.save
    end

    private

    # Ensure the hash-valued columns exist before writing into them.
    def prepare_fields
      @validation.personal_information ||= {}
      @validation.images ||= {}
    end

    # Looks up the validation by the merchant/Jumio scan-reference pair.
    def find_stored_validation
      Validation.find_by(
        merchant_id_scan_reference: @params['merchantIdScanReference'],
        jumio_id_scan_reference: @params['jumioIdScanReference']
      )
    end

    def fetch_additional_info
      @validation.state = @params['verificationStatus']
    end

    def fetch_personal_info_fields
      fields = %w(idCountry idType idFirstName idLastName idDob idExpiry
                  idNumber idUsState personalNumber)
      copy_present_fields(fields, @validation.personal_information)
    end

    def fetch_image_fields
      copy_present_fields(%w(idScanImage idScanImageBackside), @validation.images)
    end

    # Copies each present param into +target+ under the same key.
    def copy_present_fields(keys, target)
      keys.each { |key| target[key] = @params[key] if @params[key].present? }
    end

    def fetch_reject_reason_fields
      reject_reason = @params['rejectReason']
      @validation.error_types = JSON.parse(reject_reason) if reject_reason.present?
    end
  end
end
|
use super::POW_CHAIN_DB_COLUMN as DB_COLUMN;
use super::{ClientDB, DBError};
use std::sync::Arc;
/// Existence-only store for PoW chain block hashes, backed by the
/// `POW_CHAIN_DB_COLUMN` column of a `ClientDB`.
pub struct PoWChainStore<T>
where
    T: ClientDB,
{
    db: Arc<T>,
}
impl<T: ClientDB> PoWChainStore<T> {
    /// Wraps a shared database handle.
    pub fn new(db: Arc<T>) -> Self {
        Self { db }
    }
    /// Records `hash` as seen. The stored value `[0]` is a placeholder —
    /// only key existence matters.
    pub fn put_block_hash(&self, hash: &[u8]) -> Result<(), DBError> {
        self.db.put(DB_COLUMN, hash, &[0])
    }
    /// Returns whether `hash` was previously recorded.
    pub fn block_hash_exists(&self, hash: &[u8]) -> Result<bool, DBError> {
        self.db.exists(DB_COLUMN, hash)
    }
}
#[cfg(test)]
mod tests {
    extern crate types;
    use super::super::super::MemoryDB;
    use super::*;
    use self::types::Hash256;
    // Writing through the store is visible via the raw DB handle.
    #[test]
    fn test_put_block_hash() {
        let db = Arc::new(MemoryDB::open());
        let store = PoWChainStore::new(db.clone());
        let hash = &Hash256::from([0xAA; 32]).as_bytes().to_vec();
        store.put_block_hash(hash).unwrap();
        assert!(db.exists(DB_COLUMN, hash).unwrap());
    }
    // Writing through the raw DB handle is visible via the store.
    #[test]
    fn test_block_hash_exists() {
        let db = Arc::new(MemoryDB::open());
        let store = PoWChainStore::new(db.clone());
        let hash = &Hash256::from([0xAA; 32]).as_bytes().to_vec();
        db.put(DB_COLUMN, hash, &[0]).unwrap();
        assert!(store.block_hash_exists(hash).unwrap());
    }
    // A hash that was never written must not be reported as existing.
    #[test]
    fn test_block_hash_does_not_exist() {
        let db = Arc::new(MemoryDB::open());
        let store = PoWChainStore::new(db.clone());
        let hash = &Hash256::from([0xAA; 32]).as_bytes().to_vec();
        let other_hash = &Hash256::from([0xBB; 32]).as_bytes().to_vec();
        db.put(DB_COLUMN, hash, &[0]).unwrap();
        assert!(!store.block_hash_exists(other_hash).unwrap());
    }
}
|
package jp.kght6123.floating.window.core.utils
import android.content.Context
import android.graphics.Point
import android.view.WindowManager
/**
* 画面系のユーティリティクラス
*
* @author kght6123
* @copyright 2017/07/29 Hirotaka Koga
* @license http://www.apache.org/licenses/LICENSE-2.0 Apache-2.0
*/
class DisplayUtils {
    companion object {
        /**
         * Returns the size in pixels of the device's default display.
         */
        fun defaultDisplaySize(context: Context): Point {
            val windowManager = context.getSystemService(Context.WINDOW_SERVICE) as WindowManager
            return Point().also { windowManager.defaultDisplay.getSize(it) }
        }
    }
}
|
package com.elementalg.minigame
import com.badlogic.gdx.ApplicationAdapter
import com.badlogic.gdx.graphics.FPSLogger
import com.elementalg.client.managers.DependencyManager
import com.elementalg.client.managers.LocaleManager
import com.elementalg.client.managers.ScreenManager
import com.elementalg.managers.EventManager
import com.elementalg.minigame.screens.MainScreen
import java.util.*
/**
* Main class which implements all the required essential methods.
*
* @author Gabriel Amihalachioaie.
*
* @constructor initializes an instance with the passed parameters.
* @param systemLocale instance of the device's [Locale].
* @param displayXDPI density of pixels per inch on the x axis.
* @param displayYDPI density of pixels per inch on the y axis.
*/
class Game(
    private val systemLocale: Locale, private val displayXDPI: Float,
    private val displayYDPI: Float, private val adsBridge: IAdsBridge,
    private val systemNotification: IOperatingSystemOnScreenNotification
) : ApplicationAdapter() {
    private val eventManager: EventManager = EventManager()
    private lateinit var dependencyManager: DependencyManager
    private lateinit var localeManager: LocaleManager
    private lateinit var screenManager: ScreenManager
    private lateinit var leaderboard: ILeaderboard
    // Instantiated but never referenced in this class — presumably kept for
    // debug FPS logging; TODO confirm before removing.
    private val fpsLogger: FPSLogger = FPSLogger()
    /**
     * Returns the instance of [DependencyManager] used for the active [Game] instance.
     *
     * @throws IllegalStateException if [DependencyManager] has not been initialized yet.
     *
     * @return instance of [DependencyManager].
     */
    fun getDependencyManager(): DependencyManager {
        check(this::dependencyManager.isInitialized) { "'dependencyManager' has not been initialized yet." }
        return dependencyManager
    }
    /**
     * Returns the instance of [LocaleManager] used for the active [Game] instance.
     *
     * @throws IllegalStateException if [LocaleManager] has not been initialized yet.
     *
     * @return instance of [LocaleManager].
     */
    fun getLocaleManager(): LocaleManager {
        check(this::localeManager.isInitialized) { "'localeManager' has not been initialized yet." }
        return localeManager
    }
    /**
     * Returns the instance of [ScreenManager] used for the active [Game] instance.
     *
     * @throws IllegalStateException if [ScreenManager] has not been initialized yet.
     *
     * @return instance of [ScreenManager].
     */
    fun getScreenManager(): ScreenManager {
        check(this::screenManager.isInitialized) { "'screenManager' has not been initialized yet." }
        return screenManager
    }
    /** Bridge to the platform ad implementation supplied at construction. */
    fun getAdsBridge(): IAdsBridge {
        return adsBridge
    }
    /** Platform hook for on-screen OS notifications supplied at construction. */
    fun getOnScreenNotification(): IOperatingSystemOnScreenNotification {
        return systemNotification
    }
    /** Injects the platform leaderboard; must run before [getLeaderboard]. */
    fun initializeLeaderboard(leaderboard: ILeaderboard) {
        this.leaderboard = leaderboard
    }
    /**
     * Returns the injected leaderboard.
     *
     * @throws IllegalStateException if [initializeLeaderboard] was never called.
     */
    fun getLeaderboard(): ILeaderboard {
        if (!this::leaderboard.isInitialized) {
            throw IllegalStateException("'leaderboard' has not been initialized yet.")
        }
        return leaderboard
    }
    // Builds all managers in dependency order, preloads asset bundles and
    // shows the main screen.
    override fun create() {
        gameInstance = this
        eventManager.create()
        dependencyManager = DependencyManager.build()
        dependencyManager.create()
        localeManager = LocaleManager.build(systemLocale)
        localeManager.create()
        screenManager = ScreenManager.build()
        screenManager.create()
        dependencyManager.loadDependencyID("MAIN_SCREEN")
        dependencyManager.loadDependencyID("CONTINUOUS_MODE_SCREEN")
        dependencyManager.loadDependencyID("WORLD")
        dependencyManager.loadDependencyID("RESTART_WIDGET")
        val mainScreen: MainScreen = MainScreen(displayXDPI, displayYDPI)
        mainScreen.create(gameInstance)
        screenManager.setActiveScreen(mainScreen)
    }
    // NOTE(review): screen updates are gated on leaderboard initialization —
    // confirm this is intentional and not a leftover guard.
    override fun render() {
        eventManager.update()
        dependencyManager.update()
        if (this::leaderboard.isInitialized) {
            screenManager.update()
        }
    }
    override fun resize(width: Int, height: Int) {
        screenManager.resize(width, height)
    }
    override fun pause() {
        screenManager.pause()
    }
    override fun resume() {
        screenManager.resume()
    }
    // Disposes managers in creation order.
    override fun dispose() {
        eventManager.dispose()
        dependencyManager.dispose()
        localeManager.dispose()
        screenManager.dispose()
    }
    companion object {
        const val GAME_PREFERENCES: String = "DTF_ELEMENTAL_G"
        private lateinit var gameInstance: Game
        /**
         * Returns the active [Game] singleton set during [create].
         *
         * @throws IllegalStateException if [create] has not run yet.
         */
        @Throws(IllegalStateException::class)
        fun instance(): Game {
            check(this::gameInstance.isInitialized) { "'gameInstance' has not been initialized yet." }
            return gameInstance
        }
    }
}
|
require 'singleton'
require 'logger'
require 'date'
require 'fileutils'
module SrLog
  # Singleton logger that writes to per-key, per-month log files
  # (e.g. "2020.01_payments.log") and rotates automatically when the
  # month changes.
  class Log
    include Singleton

    # Appends +msg+ to the log file for +log_key+.
    #
    # opts:
    #   :dir           - directory for the log file (created if missing);
    #                    falls back to Rails' log dir, then ./log.
    #   :single_spaced - when truthy, omit the leading blank line per entry.
    #   :current_user  - when present, prepend "Logged by user: ..." to msg.
    def log log_key, msg, opts = {}
      @logfiles ||= {}
      log_month = Date.today.strftime '%Y.%m'
      # (Re)open the logger on first use for this key or when the month rolls over.
      unless @logfiles.key?(log_key) && @logfiles[log_key][:log_month] == log_month
        filename = "#{log_month}_#{log_key.to_s}.log"
        log_path = if opts.key?(:dir)
          FileUtils.mkdir_p(opts[:dir]) unless File.directory?(opts[:dir])
          File.join opts[:dir], filename
        elsif defined?(Rails)
          Rails.root.join 'log', filename
        else
          folder_path = File.expand_path File.join('.', 'log')
          # mkdir_p (was mkdir) so a missing parent directory never raises ENOENT,
          # consistent with the :dir branch above.
          FileUtils.mkdir_p(folder_path) unless File.directory?(folder_path)
          File.join folder_path, filename
        end
        @logfiles[log_key] = {log: Logger.new(log_path), log_month: log_month}
        @logfiles[log_key][:log].formatter = proc do |_severity, _timestamp, _progname, message|
          opts[:single_spaced] ? "#{message}\n" : "\n#{message}\n"
        end
      end
      msg = "Logged by user: #{opts[:current_user]}\n#{msg}" if opts.key?(:current_user)
      @logfiles[log_key][:log].info msg
    end
  end
end
|
# Copyright 2015 Andrew Kerr
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import re
import six
import time
from six.moves.urllib import parse
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import exceptions
from manila_tempest_tests.common import constants
from manila_tempest_tests.services.share.json import shares_client
from manila_tempest_tests import share_exceptions
from manila_tempest_tests import utils
CONF = config.CONF
LATEST_MICROVERSION = CONF.share.max_api_microversion
EXPERIMENTAL = {'X-OpenStack-Manila-API-Experimental': 'True'}
class SharesV2Client(shares_client.SharesClient):
"""Tempest REST client for Manila.
It handles shares and access to it in OpenStack.
"""
api_version = 'v2'
    def __init__(self, auth_provider, **kwargs):
        """Initialize the v2 client and pin the manila microversion header name."""
        super(SharesV2Client, self).__init__(auth_provider, **kwargs)
        self.API_MICROVERSIONS_HEADER = 'x-openstack-manila-api-version'
    def inject_microversion_header(self, headers, version,
                                   extra_headers=False):
        """Inject the required manila microversion header.

        :param headers: caller-supplied headers, or None.
        :param version: microversion string to send.
        :param extra_headers: when True, merge ``headers`` on top of the
            defaults; when False and ``headers`` is given, the caller's
            headers replace the defaults entirely.
        """
        new_headers = self.get_headers()
        new_headers[self.API_MICROVERSIONS_HEADER] = version
        if extra_headers and headers:
            new_headers.update(headers)
        elif headers:
            # NOTE(review): this path drops the microversion header too —
            # presumably intentional so callers can fully override headers;
            # confirm callers that pass headers include it themselves.
            new_headers = headers
        return new_headers
def verify_request_id(self, response):
response_headers = [r.lower() for r in response.keys()]
assert_msg = ("Response is missing request ID. Response "
"headers are: %s") % response
assert 'x-compute-request-id' in response_headers, assert_msg
    # Overwrite all http verb calls to inject the micro version header
    def post(self, url, body, headers=None, extra_headers=False,
             version=LATEST_MICROVERSION):
        """POST with the microversion header; asserts a request ID came back."""
        headers = self.inject_microversion_header(headers, version,
                                                  extra_headers=extra_headers)
        resp, body = super(SharesV2Client, self).post(url, body,
                                                      headers=headers)
        self.verify_request_id(resp)
        return resp, body
    def get(self, url, headers=None, extra_headers=False,
            version=LATEST_MICROVERSION):
        """GET with the microversion header; asserts a request ID came back."""
        headers = self.inject_microversion_header(headers, version,
                                                  extra_headers=extra_headers)
        resp, body = super(SharesV2Client, self).get(url, headers=headers)
        self.verify_request_id(resp)
        return resp, body
    def delete(self, url, headers=None, body=None, extra_headers=False,
               version=LATEST_MICROVERSION):
        """DELETE with the microversion header; asserts a request ID came back."""
        headers = self.inject_microversion_header(headers, version,
                                                  extra_headers=extra_headers)
        resp, body = super(SharesV2Client, self).delete(url, headers=headers,
                                                        body=body)
        self.verify_request_id(resp)
        return resp, body
    def patch(self, url, body, headers=None, extra_headers=False,
              version=LATEST_MICROVERSION):
        """PATCH with the microversion header.

        NOTE(review): unlike the other verbs, this does not call
        verify_request_id — confirm whether that omission is deliberate.
        """
        headers = self.inject_microversion_header(headers, version,
                                                  extra_headers=extra_headers)
        return super(SharesV2Client, self).patch(url, body, headers=headers)
    def put(self, url, body, headers=None, extra_headers=False,
            version=LATEST_MICROVERSION):
        """PUT with the microversion header; asserts a request ID came back."""
        headers = self.inject_microversion_header(headers, version,
                                                  extra_headers=extra_headers)
        resp, body = super(SharesV2Client, self).put(url, body,
                                                     headers=headers)
        self.verify_request_id(resp)
        return resp, body
    def head(self, url, headers=None, extra_headers=False,
             version=LATEST_MICROVERSION):
        """HEAD with the microversion header; asserts a request ID came back."""
        headers = self.inject_microversion_header(headers, version,
                                                  extra_headers=extra_headers)
        resp, body = super(SharesV2Client, self).head(url, headers=headers)
        self.verify_request_id(resp)
        return resp, body
    def copy(self, url, headers=None, extra_headers=False,
             version=LATEST_MICROVERSION):
        """COPY with the microversion header; asserts a request ID came back."""
        headers = self.inject_microversion_header(headers, version,
                                                  extra_headers=extra_headers)
        resp, body = super(SharesV2Client, self).copy(url, headers=headers)
        self.verify_request_id(resp)
        return resp, body
    def reset_state(self, s_id, status="error", s_type="shares",
                    headers=None, version=LATEST_MICROVERSION,
                    action_name=None):
        """Resets the state of a share, snapshot, cg, or a cgsnapshot.

        status: available, error, creating, deleting, error_deleting
        s_type: shares, share_instances, snapshots, consistency-groups,
            cgsnapshots.
        """
        # Pre-2.7 microversions used the "os-" prefixed action name.
        if action_name is None:
            if utils.is_microversion_gt(version, "2.6"):
                action_name = 'reset_status'
            else:
                action_name = 'os-reset_status'
        body = {action_name: {"status": status}}
        body = json.dumps(body)
        resp, body = self.post("%s/%s/action" % (s_type, s_id), body,
                               headers=headers, extra_headers=True,
                               version=version)
        self.expected_success(202, resp.status)
        return body
def force_delete(self, s_id, s_type="shares", headers=None,
version=LATEST_MICROVERSION, action_name=None):
"""Force delete share or snapshot.
s_type: shares, snapshots
"""
if action_name is None:
if utils.is_microversion_gt(version, "2.6"):
action_name = 'force_delete'
else:
action_name = 'os-force_delete'
body = {action_name: None}
body = json.dumps(body)
resp, body = self.post("%s/%s/action" % (s_type, s_id), body,
headers=headers, extra_headers=True,
version=version)
self.expected_success(202, resp.status)
return body
    @staticmethod
    def _get_base_url(endpoint):
        """Strip the version segment (and everything after) from an endpoint URL.

        E.g. http://host/share/v2.1/tenant -> http://host/share/
        """
        url = parse.urlparse(endpoint)
        # Get any valid path components before the version string
        # regex matches version str & everything after (examples: v1, v2, v1.2)
        base_path = re.split(r'(^|/)+v\d+(\.\d+)?', url.path)[0]
        base_url = url._replace(path=base_path)
        return parse.urlunparse(base_url) + '/'
    def send_microversion_request(self, version=None, script_name=None):
        """Prepare and send the HTTP GET Request to the base URL.

        Extracts the base URL from the shares_client endpoint and makes a GET
        request with the microversions request header.
        :param version: The string to send for the value of the microversion
            header, or None to omit the header.
        :param script_name: The first part of the URL (v1 or v2), or None to
            omit it.
        :returns: (response, parsed JSON body) tuple.
        """
        headers = self.get_headers()
        url, headers, body = self.auth_provider.auth_request(
            'GET', 'shares', headers, None, self.filters)
        url = self._get_base_url(url)
        if script_name:
            url += script_name + '/'
        if version:
            headers[self.API_MICROVERSIONS_HEADER] = version
        # Handle logging because raw_request doesn't log anything
        start = time.time()
        self._log_request_start('GET', url)
        resp, resp_body = self.raw_request(url, 'GET', headers=headers)
        end = time.time()
        self._log_request(
            'GET', url, resp, secs=(end - start), resp_body=resp_body)
        self.response_checker('GET', resp, resp_body)
        resp_body = json.loads(resp_body)
        return resp, resp_body
def is_resource_deleted(self, *args, **kwargs):
"""Verifies whether provided resource deleted or not.
:param kwargs: dict with expected keys 'share_id', 'snapshot_id',
:param kwargs: 'sn_id', 'ss_id', 'vt_id' and 'server_id'
:raises share_exceptions.InvalidResource
"""
if "share_instance_id" in kwargs:
return self._is_resource_deleted(
self.get_share_instance, kwargs.get("share_instance_id"))
elif "share_group_id" in kwargs:
return self._is_resource_deleted(
self.get_share_group, kwargs.get("share_group_id"))
elif "share_group_snapshot_id" in kwargs:
return self._is_resource_deleted(
self.get_share_group_snapshot,
kwargs.get("share_group_snapshot_id"))
elif "share_group_type_id" in kwargs:
return self._is_resource_deleted(
self.get_share_group_type, kwargs.get("share_group_type_id"))
elif "replica_id" in kwargs:
return self._is_resource_deleted(
self.get_share_replica, kwargs.get("replica_id"))
elif "message_id" in kwargs:
return self._is_resource_deleted(
self.get_message, kwargs.get("message_id"))
else:
return super(SharesV2Client, self).is_resource_deleted(
*args, **kwargs)
###############
    def create_share(self, share_protocol=None, size=None,
                     name=None, snapshot_id=None, description=None,
                     metadata=None, share_network_id=None,
                     share_type_id=None, is_public=False,
                     share_group_id=None, availability_zone=None,
                     version=LATEST_MICROVERSION, experimental=False):
        """Create a share; unset fields default to random names / client config.

        :raises share_exceptions.ShareProtocolNotSpecified: when no protocol
            is given and the client has no configured default.
        """
        headers = EXPERIMENTAL if experimental else None
        metadata = metadata or {}
        if name is None:
            name = data_utils.rand_name("tempest-created-share")
        if description is None:
            description = data_utils.rand_name("tempest-created-share-desc")
        if size is None:
            size = self.share_size
        if share_protocol is None:
            share_protocol = self.share_protocol
        if share_protocol is None:
            raise share_exceptions.ShareProtocolNotSpecified()
        post_body = {
            "share": {
                "share_proto": share_protocol,
                "description": description,
                "snapshot_id": snapshot_id,
                "name": name,
                "size": size,
                "metadata": metadata,
                "is_public": is_public,
            }
        }
        # Optional attributes are only sent when explicitly provided.
        if availability_zone:
            post_body["share"]["availability_zone"] = availability_zone
        if share_network_id:
            post_body["share"]["share_network_id"] = share_network_id
        if share_type_id:
            post_body["share"]["share_type"] = share_type_id
        if share_group_id:
            post_body["share"]["share_group_id"] = share_group_id
        body = json.dumps(post_body)
        resp, body = self.post("shares", body, headers=headers,
                               extra_headers=experimental, version=version)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)
    def list_shares(self, detailed=False, params=None,
                    version=LATEST_MICROVERSION, experimental=False):
        """Get list of shares w/o filters."""
        headers = EXPERIMENTAL if experimental else None
        uri = 'shares/detail' if detailed else 'shares'
        uri += '?%s' % parse.urlencode(params) if params else ''
        resp, body = self.get(uri, headers=headers, extra_headers=experimental,
                              version=version)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)
    def list_shares_with_detail(self, params=None,
                                version=LATEST_MICROVERSION,
                                experimental=False):
        """Get detailed list of shares w/o filters."""
        return self.list_shares(detailed=True, params=params,
                                version=version, experimental=experimental)
    def get_share(self, share_id, version=LATEST_MICROVERSION,
                  experimental=False):
        """Get a single share by ID."""
        headers = EXPERIMENTAL if experimental else None
        resp, body = self.get("shares/%s" % share_id, headers=headers,
                              extra_headers=experimental, version=version)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)
    def get_share_export_location(
            self, share_id, export_location_uuid, version=LATEST_MICROVERSION):
        """Get one export location of a share by its UUID."""
        resp, body = self.get(
            "shares/%(share_id)s/export_locations/%(el_uuid)s" % {
                "share_id": share_id, "el_uuid": export_location_uuid},
            version=version)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)
    def list_share_export_locations(
            self, share_id, version=LATEST_MICROVERSION):
        """List all export locations of a share."""
        resp, body = self.get(
            "shares/%(share_id)s/export_locations" % {"share_id": share_id},
            version=version)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)
def delete_share(self, share_id, params=None,
version=LATEST_MICROVERSION):
uri = "shares/%s" % share_id
uri += '?%s' % (parse.urlencode(params) if params else '')
resp, body = self.delete(uri, version=version)
self.expected_success(202, resp.status)
return body
###############
    def get_instances_of_share(self, share_id, version=LATEST_MICROVERSION):
        """List the instances backing a given share."""
        resp, body = self.get("shares/%s/instances" % share_id,
                              version=version)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)
    def list_share_instances(self, version=LATEST_MICROVERSION,
                             params=None):
        """List all share instances, optionally filtered by query params."""
        uri = 'share_instances'
        uri += '?%s' % parse.urlencode(params) if params else ''
        resp, body = self.get(uri, version=version)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)
    def get_share_instance(self, instance_id, version=LATEST_MICROVERSION):
        """Get a single share instance by ID."""
        resp, body = self.get("share_instances/%s" % instance_id,
                              version=version)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)
    def get_share_instance_export_location(
            self, instance_id, export_location_uuid,
            version=LATEST_MICROVERSION):
        """Get one export location of a share instance by its UUID."""
        resp, body = self.get(
            "share_instances/%(instance_id)s/export_locations/%(el_uuid)s" % {
                "instance_id": instance_id, "el_uuid": export_location_uuid},
            version=version)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)
    def list_share_instance_export_locations(
            self, instance_id, version=LATEST_MICROVERSION):
        """List all export locations of a share instance."""
        resp, body = self.get(
            "share_instances/%s/export_locations" % instance_id,
            version=version)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)
def wait_for_share_instance_status(self, instance_id, status,
version=LATEST_MICROVERSION):
"""Waits for a share to reach a given status."""
body = self.get_share_instance(instance_id, version=version)
instance_status = body['status']
start = int(time.time())
while instance_status != status:
time.sleep(self.build_interval)
body = self.get_share(instance_id)
instance_status = body['status']
if instance_status == status:
return
elif 'error' in instance_status.lower():
raise share_exceptions.ShareInstanceBuildErrorException(
id=instance_id)
if int(time.time()) - start >= self.build_timeout:
message = ('Share instance %s failed to reach %s status within'
' the required time (%s s).' %
(instance_id, status, self.build_timeout))
raise exceptions.TimeoutException(message)
    def wait_for_share_status(self, share_id, status, status_attr='status',
                              version=LATEST_MICROVERSION):
        """Waits for a share to reach a given status.

        :param status_attr: which attribute of the share body to poll
            (e.g. 'status' or a replication/migration status field).
        """
        body = self.get_share(share_id, version=version)
        share_status = body[status_attr]
        start = int(time.time())
        while share_status != status:
            time.sleep(self.build_interval)
            body = self.get_share(share_id, version=version)
            share_status = body[status_attr]
            if share_status == status:
                return
            elif 'error' in share_status.lower():
                raise share_exceptions.ShareBuildErrorException(
                    share_id=share_id)
            if int(time.time()) - start >= self.build_timeout:
                message = ("Share's %(status_attr)s failed to transition to "
                           "%(status)s within the required time %(seconds)s." %
                           {"status_attr": status_attr, "status": status,
                            "seconds": self.build_timeout})
                raise exceptions.TimeoutException(message)
###############
def extend_share(self, share_id, new_size, version=LATEST_MICROVERSION,
action_name=None):
if action_name is None:
if utils.is_microversion_gt(version, "2.6"):
action_name = 'extend'
else:
action_name = 'os-extend'
post_body = {
action_name: {
"new_size": new_size,
}
}
body = json.dumps(post_body)
resp, body = self.post(
"shares/%s/action" % share_id, body, version=version)
self.expected_success(202, resp.status)
return body
    def shrink_share(self, share_id, new_size, version=LATEST_MICROVERSION,
                     action_name=None):
        """Shrink the share to ``new_size``."""
        # Pre-2.7 microversions used the "os-" prefixed action name.
        if action_name is None:
            if utils.is_microversion_gt(version, "2.6"):
                action_name = 'shrink'
            else:
                action_name = 'os-shrink'
        post_body = {
            action_name: {
                "new_size": new_size,
            }
        }
        body = json.dumps(post_body)
        resp, body = self.post(
            "shares/%s/action" % share_id, body, version=version)
        self.expected_success(202, resp.status)
        return body
###############
    def manage_share(self, service_host, protocol, export_path,
                     share_type_id, name=None, description=None,
                     is_public=False, version=LATEST_MICROVERSION,
                     url=None, share_server_id=None):
        """Bring an existing backend share under manila management."""
        post_body = {
            "share": {
                "export_path": export_path,
                "service_host": service_host,
                "protocol": protocol,
                "share_type": share_type_id,
                "name": name,
                "description": description,
                "is_public": is_public,
            }
        }
        if share_server_id is not None:
            post_body['share']['share_server_id'] = share_server_id
        # Pre-2.7 microversions used the legacy "os-share-manage" endpoint.
        if url is None:
            if utils.is_microversion_gt(version, "2.6"):
                url = 'shares/manage'
            else:
                url = 'os-share-manage'
        body = json.dumps(post_body)
        resp, body = self.post(url, body, version=version)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)
    def unmanage_share(self, share_id, version=LATEST_MICROVERSION, url=None,
                       action_name=None, body=None):
        """Remove a share from manila management without deleting backend data."""
        # Pre-2.7 microversions used the legacy "os-share-unmanage" endpoint
        # with an "unmanage" action and no request body.
        if url is None:
            if utils.is_microversion_gt(version, "2.6"):
                url = 'shares'
            else:
                url = 'os-share-unmanage'
        if action_name is None:
            if utils.is_microversion_gt(version, "2.6"):
                action_name = 'action'
            else:
                action_name = 'unmanage'
        if body is None and utils.is_microversion_gt(version, "2.6"):
            body = json.dumps({'unmanage': {}})
        resp, body = self.post(
            "%(url)s/%(share_id)s/%(action_name)s" % {
                'url': url, 'share_id': share_id, 'action_name': action_name},
            body,
            version=version)
        self.expected_success(202, resp.status)
        return body
###############
    def create_snapshot(self, share_id, name=None, description=None,
                        force=False, version=LATEST_MICROVERSION):
        """Create a snapshot of a share; unset name/description are randomized."""
        if name is None:
            name = data_utils.rand_name("tempest-created-share-snap")
        if description is None:
            description = data_utils.rand_name(
                "tempest-created-share-snap-desc")
        post_body = {
            "snapshot": {
                "name": name,
                "force": force,
                "description": description,
                "share_id": share_id,
            }
        }
        body = json.dumps(post_body)
        resp, body = self.post("snapshots", body, version=version)
        self.expected_success(202, resp.status)
        return self._parse_resp(body)
    def get_snapshot(self, snapshot_id, version=LATEST_MICROVERSION):
        """Get a single snapshot by ID."""
        resp, body = self.get("snapshots/%s" % snapshot_id, version=version)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)
    def list_snapshots(self, detailed=False, params=None,
                       version=LATEST_MICROVERSION):
        """Get list of share snapshots w/o filters."""
        uri = 'snapshots/detail' if detailed else 'snapshots'
        uri += '?%s' % parse.urlencode(params) if params else ''
        resp, body = self.get(uri, version=version)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)
    def list_snapshots_for_share(self, share_id, detailed=False,
                                 version=LATEST_MICROVERSION):
        """Get list of snapshots for given share."""
        uri = ('snapshots/detail?share_id=%s' % share_id
               if detailed else 'snapshots?share_id=%s' % share_id)
        resp, body = self.get(uri, version=version)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)
    def list_snapshots_with_detail(self, params=None,
                                   version=LATEST_MICROVERSION):
        """Get detailed list of share snapshots w/o filters."""
        return self.list_snapshots(detailed=True, params=params,
                                   version=version)
    def delete_snapshot(self, snap_id, version=LATEST_MICROVERSION):
        """Delete a snapshot by ID."""
        resp, body = self.delete("snapshots/%s" % snap_id, version=version)
        self.expected_success(202, resp.status)
        return body
    def wait_for_snapshot_status(self, snapshot_id, status,
                                 version=LATEST_MICROVERSION):
        """Waits for a snapshot to reach a given status.

        Polls every ``self.build_interval`` seconds.

        :raises share_exceptions.SnapshotBuildErrorException: if the
            snapshot enters an error state while waiting.
        :raises exceptions.TimeoutException: if the desired status is not
            reached within ``self.build_timeout`` seconds.
        """
        body = self.get_snapshot(snapshot_id, version=version)
        snapshot_name = body['name']
        snapshot_status = body['status']
        start = int(time.time())
        while snapshot_status != status:
            time.sleep(self.build_interval)
            body = self.get_snapshot(snapshot_id, version=version)
            snapshot_status = body['status']
            # Success is checked before the error check, so waiting *for*
            # an error status is possible.
            if snapshot_status == status:
                return
            if 'error' in snapshot_status:
                raise (share_exceptions.
                       SnapshotBuildErrorException(snapshot_id=snapshot_id))

            if int(time.time()) - start >= self.build_timeout:
                message = ('Share Snapshot %s failed to reach %s status '
                           'within the required time (%s s).' %
                           (snapshot_name, status, self.build_timeout))
                raise exceptions.TimeoutException(message)
def manage_snapshot(self, share_id, provider_location,
name=None, description=None,
version=LATEST_MICROVERSION,
driver_options=None):
if name is None:
name = data_utils.rand_name("tempest-manage-snapshot")
if description is None:
description = data_utils.rand_name("tempest-manage-snapshot-desc")
post_body = {
"snapshot": {
"share_id": share_id,
"provider_location": provider_location,
"name": name,
"description": description,
"driver_options": driver_options if driver_options else {},
}
}
url = 'snapshots/manage'
body = json.dumps(post_body)
resp, body = self.post(url, body, version=version)
self.expected_success(202, resp.status)
return self._parse_resp(body)
def unmanage_snapshot(self, snapshot_id, version=LATEST_MICROVERSION,
body=None):
url = 'snapshots'
action_name = 'action'
if body is None:
body = json.dumps({'unmanage': {}})
resp, body = self.post(
"%(url)s/%(snapshot_id)s/%(action_name)s" % {
'url': url, 'snapshot_id': snapshot_id,
'action_name': action_name},
body,
version=version)
self.expected_success(202, resp.status)
return body
def snapshot_reset_state(self, snapshot_id,
status=constants.STATUS_AVAILABLE,
version=LATEST_MICROVERSION):
self.reset_state(snapshot_id, status=status, s_type='snapshots',
version=version)
###############
def revert_to_snapshot(self, share_id, snapshot_id,
version=LATEST_MICROVERSION):
url = 'shares/%s/action' % share_id
body = json.dumps({'revert': {'snapshot_id': snapshot_id}})
resp, body = self.post(url, body, version=version)
self.expected_success(202, resp.status)
return self._parse_resp(body)
###############
def create_share_type_extra_specs(self, share_type_id, extra_specs,
version=LATEST_MICROVERSION):
url = "types/%s/extra_specs" % share_type_id
post_body = json.dumps({'extra_specs': extra_specs})
resp, body = self.post(url, post_body, version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
def get_share_type_extra_spec(self, share_type_id, extra_spec_name,
version=LATEST_MICROVERSION):
uri = "types/%s/extra_specs/%s" % (share_type_id, extra_spec_name)
resp, body = self.get(uri, version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
def get_share_type_extra_specs(self, share_type_id, params=None,
version=LATEST_MICROVERSION):
uri = "types/%s/extra_specs" % share_type_id
if params is not None:
uri += '?%s' % parse.urlencode(params)
resp, body = self.get(uri, version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
def update_share_type_extra_spec(self, share_type_id, spec_name,
spec_value, version=LATEST_MICROVERSION):
uri = "types/%s/extra_specs/%s" % (share_type_id, spec_name)
extra_spec = {spec_name: spec_value}
post_body = json.dumps(extra_spec)
resp, body = self.put(uri, post_body, version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
def update_share_type_extra_specs(self, share_type_id, extra_specs,
version=LATEST_MICROVERSION):
uri = "types/%s/extra_specs" % share_type_id
extra_specs = {"extra_specs": extra_specs}
post_body = json.dumps(extra_specs)
resp, body = self.post(uri, post_body, version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
def delete_share_type_extra_spec(self, share_type_id, extra_spec_name,
version=LATEST_MICROVERSION):
uri = "types/%s/extra_specs/%s" % (share_type_id, extra_spec_name)
resp, body = self.delete(uri, version=version)
self.expected_success(202, resp.status)
return body
###############
def get_snapshot_instance(self, instance_id, version=LATEST_MICROVERSION):
resp, body = self.get("snapshot-instances/%s" % instance_id,
version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
def list_snapshot_instances(self, detail=False, snapshot_id=None,
version=LATEST_MICROVERSION):
"""Get list of share snapshot instances."""
uri = "snapshot-instances%s" % ('/detail' if detail else '')
if snapshot_id is not None:
uri += '?snapshot_id=%s' % snapshot_id
resp, body = self.get(uri, version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
def reset_snapshot_instance_status(self, instance_id,
status=constants.STATUS_AVAILABLE,
version=LATEST_MICROVERSION):
"""Reset the status."""
uri = 'snapshot-instances/%s/action' % instance_id
post_body = {
'reset_status': {
'status': status
}
}
body = json.dumps(post_body)
resp, body = self.post(uri, body, extra_headers=True, version=version)
self.expected_success(202, resp.status)
return self._parse_resp(body)
    def wait_for_snapshot_instance_status(self, instance_id, expected_status):
        """Waits for a snapshot instance status to reach a given status.

        Polls every ``self.build_interval`` seconds.

        :raises share_exceptions.SnapshotInstanceBuildErrorException: if
            the instance enters an error state while waiting.
        :raises exceptions.TimeoutException: if ``expected_status`` is not
            reached within ``self.build_timeout`` seconds.
        """
        body = self.get_snapshot_instance(instance_id)
        instance_status = body['status']
        start = int(time.time())

        while instance_status != expected_status:
            time.sleep(self.build_interval)
            body = self.get_snapshot_instance(instance_id)
            instance_status = body['status']
            # Success is checked before the error check, so waiting *for*
            # an error status is possible.
            if instance_status == expected_status:
                return
            if 'error' in instance_status:
                raise share_exceptions.SnapshotInstanceBuildErrorException(
                    id=instance_id)

            if int(time.time()) - start >= self.build_timeout:
                message = ('The status of snapshot instance %(id)s failed to '
                           'reach %(expected_status)s status within the '
                           'required time (%(time)ss). Current '
                           'status: %(current_status)s.' %
                           {
                               'expected_status': expected_status,
                               'time': self.build_timeout,
                               'id': instance_id,
                               'current_status': instance_status,
                           })
                raise exceptions.TimeoutException(message)
def get_snapshot_instance_export_location(
self, instance_id, export_location_uuid,
version=LATEST_MICROVERSION):
resp, body = self.get(
"snapshot-instances/%(instance_id)s/export-locations/%("
"el_uuid)s" % {
"instance_id": instance_id,
"el_uuid": export_location_uuid},
version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
def list_snapshot_instance_export_locations(
self, instance_id, version=LATEST_MICROVERSION):
resp, body = self.get(
"snapshot-instances/%s/export-locations" % instance_id,
version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
###############
def _get_access_action_name(self, version, action):
if utils.is_microversion_gt(version, "2.6"):
return action.split('os-')[-1]
return action
def create_access_rule(self, share_id, access_type="ip",
access_to="0.0.0.0", access_level=None,
version=LATEST_MICROVERSION, metadata=None,
action_name=None):
post_body = {
self._get_access_action_name(version, 'os-allow_access'): {
"access_type": access_type,
"access_to": access_to,
"access_level": access_level,
}
}
if metadata is not None:
post_body['allow_access']['metadata'] = metadata
body = json.dumps(post_body)
resp, body = self.post(
"shares/%s/action" % share_id, body, version=version,
extra_headers=True)
self.expected_success(200, resp.status)
return self._parse_resp(body)
    def list_access_rules(self, share_id, version=LATEST_MICROVERSION,
                          metadata=None, action_name=None):
        """List access rules of a share.

        For microversions < 2.45 the legacy 'access_list' share action is
        used (metadata/action_name are ignored on that path); newer
        microversions delegate to the dedicated share-access-rules API.
        """
        if utils.is_microversion_lt(version, "2.45"):
            body = {
                self._get_access_action_name(version, 'os-access_list'): None
            }
            resp, body = self.post(
                "shares/%s/action" % share_id, json.dumps(body),
                version=version)
            self.expected_success(200, resp.status)
        else:
            return self.list_access_rules_with_new_API(
                share_id, metadata=metadata, version=version,
                action_name=action_name)
        # Only reached on the legacy (< 2.45) path.
        return self._parse_resp(body)
def list_access_rules_with_new_API(self, share_id, metadata=None,
version=LATEST_MICROVERSION,
action_name=None):
metadata = metadata or {}
query_string = ''
params = sorted(
[(k, v) for (k, v) in list(metadata.items()) if v])
if params:
query_string = "&%s" % parse.urlencode(params)
url = 'share-access-rules?share_id=%s' % share_id + query_string
resp, body = self.get(url, version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
def delete_access_rule(self, share_id, rule_id,
version=LATEST_MICROVERSION, action_name=None):
post_body = {
self._get_access_action_name(version, 'os-deny_access'): {
"access_id": rule_id,
}
}
body = json.dumps(post_body)
resp, body = self.post(
"shares/%s/action" % share_id, body, version=version)
self.expected_success(202, resp.status)
return body
def get_access(self, access_id, version=LATEST_MICROVERSION):
resp, body = self.get("share-access-rules/%s" % access_id,
version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
def update_access_metadata(self, access_id, metadata,
version=LATEST_MICROVERSION):
url = 'share-access-rules/%s/metadata' % access_id
body = {"metadata": metadata}
resp, body = self.put(url, json.dumps(body), version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
def delete_access_metadata(self, access_id, key,
version=LATEST_MICROVERSION):
url = "share-access-rules/%s/metadata/%s" % (access_id, key)
resp, body = self.delete(url, version=version)
self.expected_success(200, resp.status)
return body
###############
    def list_availability_zones(self, url='availability-zones',
                                version=LATEST_MICROVERSION):
        """Get list of availability zones."""
        # NOTE(review): the default is already 'availability-zones', so this
        # version-based fallback only runs when a caller explicitly passes
        # url=None — confirm that is the intended usage.
        if url is None:
            if utils.is_microversion_gt(version, "2.6"):
                url = 'availability-zones'
            else:
                url = 'os-availability-zone'
        resp, body = self.get(url, version=version)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)
###############
def list_services(self, params=None, url=None,
version=LATEST_MICROVERSION):
"""List services."""
if url is None:
if utils.is_microversion_gt(version, "2.6"):
url = 'services'
else:
url = 'os-services'
if params:
url += '?%s' % parse.urlencode(params)
resp, body = self.get(url, version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
###############
def list_share_types(self, params=None, default=False,
version=LATEST_MICROVERSION):
uri = 'types'
if default:
uri += '/default'
if params is not None:
uri += '?%s' % parse.urlencode(params)
resp, body = self.get(uri, version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
def create_share_type(self, name, is_public=True,
version=LATEST_MICROVERSION, **kwargs):
if utils.is_microversion_gt(version, "2.6"):
is_public_keyname = 'share_type_access:is_public'
else:
is_public_keyname = 'os-share-type-access:is_public'
post_body = {
'name': name,
'extra_specs': kwargs.get('extra_specs'),
is_public_keyname: is_public,
}
if kwargs.get('description'):
post_body['description'] = kwargs.get('description')
post_body = json.dumps({'share_type': post_body})
resp, body = self.post('types', post_body, version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
def delete_share_type(self, share_type_id, version=LATEST_MICROVERSION):
resp, body = self.delete("types/%s" % share_type_id, version=version)
self.expected_success(202, resp.status)
return body
def get_share_type(self, share_type_id, version=LATEST_MICROVERSION):
resp, body = self.get("types/%s" % share_type_id, version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
def list_access_to_share_type(self, share_type_id,
version=LATEST_MICROVERSION,
action_name=None):
if action_name is None:
if utils.is_microversion_gt(version, "2.6"):
action_name = 'share_type_access'
else:
action_name = 'os-share-type-access'
url = 'types/%(st_id)s/%(action_name)s' % {
'st_id': share_type_id, 'action_name': action_name}
resp, body = self.get(url, version=version)
# [{"share_type_id": "%st_id%", "project_id": "%project_id%"}, ]
self.expected_success(200, resp.status)
return self._parse_resp(body)
###############
@staticmethod
def _get_quotas_url(version):
if utils.is_microversion_gt(version, "2.6"):
return 'quota-sets'
return 'os-quota-sets'
@staticmethod
def _get_quotas_url_arguments_as_str(user_id=None, share_type=None):
args_str = ''
if not (user_id is None or share_type is None):
args_str = "?user_id=%s&share_type=%s" % (user_id, share_type)
elif user_id is not None:
args_str = "?user_id=%s" % user_id
elif share_type is not None:
args_str = "?share_type=%s" % share_type
return args_str
def default_quotas(self, tenant_id, url=None, version=LATEST_MICROVERSION):
if url is None:
url = self._get_quotas_url(version)
url += '/%s' % tenant_id
resp, body = self.get("%s/defaults" % url, version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
def show_quotas(self, tenant_id, user_id=None, share_type=None, url=None,
version=LATEST_MICROVERSION):
if url is None:
url = self._get_quotas_url(version)
url += '/%s' % tenant_id
url += self._get_quotas_url_arguments_as_str(user_id, share_type)
resp, body = self.get(url, version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
def reset_quotas(self, tenant_id, user_id=None, share_type=None, url=None,
version=LATEST_MICROVERSION):
if url is None:
url = self._get_quotas_url(version)
url += '/%s' % tenant_id
url += self._get_quotas_url_arguments_as_str(user_id, share_type)
resp, body = self.delete(url, version=version)
self.expected_success(202, resp.status)
return body
def detail_quotas(self, tenant_id, user_id=None, share_type=None, url=None,
version=LATEST_MICROVERSION):
if url is None:
url = self._get_quotas_url(version)
url += '/%s/detail' % tenant_id
url += self._get_quotas_url_arguments_as_str(user_id, share_type)
resp, body = self.get(url, version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
def update_quotas(self, tenant_id, user_id=None, shares=None,
snapshots=None, gigabytes=None, snapshot_gigabytes=None,
share_networks=None,
share_groups=None, share_group_snapshots=None,
force=True, share_type=None,
url=None, version=LATEST_MICROVERSION):
if url is None:
url = self._get_quotas_url(version)
url += '/%s' % tenant_id
url += self._get_quotas_url_arguments_as_str(user_id, share_type)
put_body = {"tenant_id": tenant_id}
if force:
put_body["force"] = "true"
if shares is not None:
put_body["shares"] = shares
if snapshots is not None:
put_body["snapshots"] = snapshots
if gigabytes is not None:
put_body["gigabytes"] = gigabytes
if snapshot_gigabytes is not None:
put_body["snapshot_gigabytes"] = snapshot_gigabytes
if share_networks is not None:
put_body["share_networks"] = share_networks
if share_groups is not None:
put_body["share_groups"] = share_groups
if share_group_snapshots is not None:
put_body["share_group_snapshots"] = share_group_snapshots
put_body = json.dumps({"quota_set": put_body})
resp, body = self.put(url, put_body, version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
###############
def create_share_group(self, name=None, description=None,
share_group_type_id=None, share_type_ids=(),
share_network_id=None,
source_share_group_snapshot_id=None,
availability_zone=None,
version=LATEST_MICROVERSION):
"""Create a new share group."""
uri = 'share-groups'
post_body = {}
if name:
post_body['name'] = name
if description:
post_body['description'] = description
if share_group_type_id:
post_body['share_group_type_id'] = share_group_type_id
if share_type_ids:
post_body['share_types'] = share_type_ids
if source_share_group_snapshot_id:
post_body['source_share_group_snapshot_id'] = (
source_share_group_snapshot_id)
if share_network_id:
post_body['share_network_id'] = share_network_id
if availability_zone:
post_body['availability_zone'] = availability_zone
body = json.dumps({'share_group': post_body})
resp, body = self.post(uri, body, headers=EXPERIMENTAL,
extra_headers=True, version=version)
self.expected_success(202, resp.status)
return self._parse_resp(body)
def delete_share_group(self, share_group_id, version=LATEST_MICROVERSION):
"""Delete a share group."""
uri = 'share-groups/%s' % share_group_id
resp, body = self.delete(uri, headers=EXPERIMENTAL,
extra_headers=True, version=version)
self.expected_success(202, resp.status)
return self._parse_resp(body)
def list_share_groups(self, detailed=False, params=None,
version=LATEST_MICROVERSION):
"""Get list of share groups w/o filters."""
uri = 'share-groups%s' % ('/detail' if detailed else '')
uri += '?%s' % (parse.urlencode(params) if params else '')
resp, body = self.get(uri, headers=EXPERIMENTAL, extra_headers=True,
version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
def get_share_group(self, share_group_id, version=LATEST_MICROVERSION):
"""Get share group info."""
uri = 'share-groups/%s' % share_group_id
resp, body = self.get(uri, headers=EXPERIMENTAL, extra_headers=True,
version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
def update_share_group(self, share_group_id, name=None, description=None,
version=LATEST_MICROVERSION, **kwargs):
"""Update an existing share group."""
uri = 'share-groups/%s' % share_group_id
post_body = {}
if name:
post_body['name'] = name
if description:
post_body['description'] = description
if kwargs:
post_body.update(kwargs)
body = json.dumps({'share_group': post_body})
resp, body = self.put(uri, body, headers=EXPERIMENTAL,
extra_headers=True, version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
def share_group_reset_state(self, share_group_id, status='error',
version=LATEST_MICROVERSION):
self.reset_state(share_group_id, status=status, s_type='groups',
headers=EXPERIMENTAL, version=version)
def share_group_force_delete(self, share_group_id,
version=LATEST_MICROVERSION):
self.force_delete(share_group_id, s_type='share-groups',
headers=EXPERIMENTAL, version=version)
    def wait_for_share_group_status(self, share_group_id, status):
        """Waits for a share group to reach a given status.

        Polls every ``self.build_interval`` seconds.

        :raises share_exceptions.ShareGroupBuildErrorException: if the
            group enters an error state (unless 'error' is the awaited
            status itself).
        :raises exceptions.TimeoutException: if ``status`` is not reached
            within ``self.build_timeout`` seconds.
        """
        body = self.get_share_group(share_group_id)
        sg_name = body['name']
        sg_status = body['status']
        start = int(time.time())

        while sg_status != status:
            time.sleep(self.build_interval)
            body = self.get_share_group(share_group_id)
            sg_status = body['status']
            if 'error' in sg_status and status != 'error':
                raise share_exceptions.ShareGroupBuildErrorException(
                    share_group_id=share_group_id)

            if int(time.time()) - start >= self.build_timeout:
                # Fall back to the id when the group has no name.
                sg_name = sg_name or share_group_id
                message = ('Share Group %s failed to reach %s status '
                           'within the required time (%s s). '
                           'Current status: %s' %
                           (sg_name, status, self.build_timeout, sg_status))
                raise exceptions.TimeoutException(message)
###############
def create_share_group_type(self, name=None, share_types=(),
is_public=None, group_specs=None,
version=LATEST_MICROVERSION):
"""Create a new share group type."""
uri = 'share-group-types'
post_body = {}
if isinstance(share_types, (tuple, list)):
share_types = list(share_types)
else:
share_types = [share_types]
if name is not None:
post_body['name'] = name
if share_types:
post_body['share_types'] = share_types
if is_public is not None:
post_body['is_public'] = is_public
if group_specs:
post_body['group_specs'] = group_specs
body = json.dumps({'share_group_type': post_body})
resp, body = self.post(uri, body, headers=EXPERIMENTAL,
extra_headers=True, version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
def list_share_group_types(self, detailed=False, params=None,
version=LATEST_MICROVERSION):
"""Get list of share group types."""
uri = 'share-group-types%s' % ('/detail' if detailed else '')
uri += '?%s' % (parse.urlencode(params) if params else '')
resp, body = self.get(uri, headers=EXPERIMENTAL, extra_headers=True,
version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
def get_share_group_type(self, share_group_type_id,
version=LATEST_MICROVERSION):
"""Get share group type info."""
uri = 'share-group-types/%s' % share_group_type_id
resp, body = self.get(uri, headers=EXPERIMENTAL, extra_headers=True,
version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
def get_default_share_group_type(self, version=LATEST_MICROVERSION):
"""Get default share group type info."""
uri = 'share-group-types/default'
resp, body = self.get(uri, headers=EXPERIMENTAL, extra_headers=True,
version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
def delete_share_group_type(self, share_group_type_id,
version=LATEST_MICROVERSION):
"""Delete an existing share group type."""
uri = 'share-group-types/%s' % share_group_type_id
resp, body = self.delete(uri, headers=EXPERIMENTAL,
extra_headers=True, version=version)
self.expected_success(204, resp.status)
return self._parse_resp(body)
def add_access_to_share_group_type(self, share_group_type_id, project_id,
version=LATEST_MICROVERSION):
uri = 'share-group-types/%s/action' % share_group_type_id
post_body = {'project': project_id}
post_body = json.dumps({'addProjectAccess': post_body})
resp, body = self.post(uri, post_body, headers=EXPERIMENTAL,
extra_headers=True, version=version)
self.expected_success(202, resp.status)
return self._parse_resp(body)
def remove_access_from_share_group_type(self, share_group_type_id,
project_id,
version=LATEST_MICROVERSION):
uri = 'share-group-types/%s/action' % share_group_type_id
post_body = {'project': project_id}
post_body = json.dumps({'removeProjectAccess': post_body})
resp, body = self.post(uri, post_body, headers=EXPERIMENTAL,
extra_headers=True, version=version)
self.expected_success(202, resp.status)
return self._parse_resp(body)
def list_access_to_share_group_type(self, share_group_type_id,
version=LATEST_MICROVERSION):
uri = 'share-group-types/%s/access' % share_group_type_id
resp, body = self.get(uri, headers=EXPERIMENTAL, extra_headers=True,
version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
###############
def create_share_group_type_specs(self, share_group_type_id,
group_specs_dict,
version=LATEST_MICROVERSION):
url = "share-group-types/%s/group-specs" % share_group_type_id
post_body = json.dumps({'group_specs': group_specs_dict})
resp, body = self.post(url, post_body, headers=EXPERIMENTAL,
extra_headers=True, version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
    def get_share_group_type_spec(self, share_group_type_id, group_spec_key,
                                  version=LATEST_MICROVERSION):
        """Fetch a single group spec of a share group type.

        NOTE(review): the URI prefix here is 'group-types', while the
        sibling group-spec methods use 'share-group-types' — verify this
        endpoint against the API; it looks inconsistent.
        """
        uri = "group-types/%s/group_specs/%s" % (
            share_group_type_id, group_spec_key)
        resp, body = self.get(uri, headers=EXPERIMENTAL, extra_headers=True,
                              version=version)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)
def list_share_group_type_specs(self, share_group_type_id, params=None,
version=LATEST_MICROVERSION):
uri = "share-group-types/%s/group_specs" % share_group_type_id
if params is not None:
uri += '?%s' % parse.urlencode(params)
resp, body = self.get(uri, headers=EXPERIMENTAL, extra_headers=True,
version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
def update_share_group_type_spec(self, share_group_type_id, group_spec_key,
group_spec_value,
version=LATEST_MICROVERSION):
uri = "share-group-types/%s/group-specs/%s" % (
share_group_type_id, group_spec_key)
group_spec = {group_spec_key: group_spec_value}
post_body = json.dumps(group_spec)
resp, body = self.put(uri, post_body, headers=EXPERIMENTAL,
extra_headers=True, version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
def update_share_group_type_specs(self, share_group_type_id,
group_specs_dict,
version=LATEST_MICROVERSION):
return self.create_share_group_type_specs(
share_group_type_id, group_specs_dict, version=version)
def delete_share_group_type_spec(self, share_type_id, group_spec_key,
version=LATEST_MICROVERSION):
uri = "share-group-types/%s/group-specs/%s" % (
share_type_id, group_spec_key)
resp, body = self.delete(uri, headers=EXPERIMENTAL, extra_headers=True,
version=version)
self.expected_success(204, resp.status)
return body
###############
def create_share_group_snapshot(self, share_group_id, name=None,
description=None,
version=LATEST_MICROVERSION):
"""Create a new share group snapshot of an existing share group."""
uri = 'share-group-snapshots'
post_body = {'share_group_id': share_group_id}
if name:
post_body['name'] = name
if description:
post_body['description'] = description
body = json.dumps({'share_group_snapshot': post_body})
resp, body = self.post(uri, body, headers=EXPERIMENTAL,
extra_headers=True, version=version)
self.expected_success(202, resp.status)
return self._parse_resp(body)
def delete_share_group_snapshot(self, share_group_snapshot_id,
version=LATEST_MICROVERSION):
"""Delete an existing share group snapshot."""
uri = 'share-group-snapshots/%s' % share_group_snapshot_id
resp, body = self.delete(uri, headers=EXPERIMENTAL,
extra_headers=True, version=version)
self.expected_success(202, resp.status)
return body
def list_share_group_snapshots(self, detailed=False, params=None,
version=LATEST_MICROVERSION):
"""Get list of share group snapshots w/o filters."""
uri = 'share-group-snapshots%s' % ('/detail' if detailed else '')
uri += '?%s' % (parse.urlencode(params) if params else '')
resp, body = self.get(uri, headers=EXPERIMENTAL, extra_headers=True,
version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
def get_share_group_snapshot(self, share_group_snapshot_id,
version=LATEST_MICROVERSION):
"""Get share group snapshot info."""
uri = 'share-group-snapshots/%s' % share_group_snapshot_id
resp, body = self.get(uri, headers=EXPERIMENTAL, extra_headers=True,
version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
def update_share_group_snapshot(self, share_group_snapshot_id, name=None,
description=None,
version=LATEST_MICROVERSION):
"""Update an existing share group snapshot."""
uri = 'share-group-snapshots/%s' % share_group_snapshot_id
post_body = {}
if name:
post_body['name'] = name
if description:
post_body['description'] = description
body = json.dumps({'share_group_snapshot': post_body})
resp, body = self.put(uri, body, headers=EXPERIMENTAL,
extra_headers=True, version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
def share_group_snapshot_reset_state(self, share_group_snapshot_id,
status='error',
version=LATEST_MICROVERSION):
self.reset_state(
share_group_snapshot_id, status=status,
s_type='group-snapshots', headers=EXPERIMENTAL, version=version)
def share_group_snapshot_force_delete(self, share_group_snapshot_id,
version=LATEST_MICROVERSION):
self.force_delete(
share_group_snapshot_id, s_type='share-group-snapshots',
headers=EXPERIMENTAL, version=version)
    def wait_for_share_group_snapshot_status(self, share_group_snapshot_id,
                                             status):
        """Waits for a share group snapshot to reach a given status.

        Polls every ``self.build_interval`` seconds.

        :raises share_exceptions.ShareGroupSnapshotBuildErrorException: if
            the snapshot enters an error state (unless 'error' is the
            awaited status itself).
        :raises exceptions.TimeoutException: if ``status`` is not reached
            within ``self.build_timeout`` seconds.
        """
        body = self.get_share_group_snapshot(share_group_snapshot_id)
        sg_snapshot_name = body['name']
        sg_snapshot_status = body['status']
        start = int(time.time())

        while sg_snapshot_status != status:
            time.sleep(self.build_interval)
            body = self.get_share_group_snapshot(share_group_snapshot_id)
            sg_snapshot_status = body['status']
            if 'error' in sg_snapshot_status and status != 'error':
                raise share_exceptions.ShareGroupSnapshotBuildErrorException(
                    share_group_snapshot_id=share_group_snapshot_id)

            if int(time.time()) - start >= self.build_timeout:
                message = ('Share Group Snapshot %s failed to reach %s status '
                           'within the required time (%s s).' %
                           (sg_snapshot_name, status, self.build_timeout))
                raise exceptions.TimeoutException(message)
###############
def manage_share_server(self, host, share_network_id, identifier,
driver_options=None, version=LATEST_MICROVERSION):
body = {
'share_server': {
'host': host,
'share_network_id': share_network_id,
'identifier': identifier,
'driver_options': driver_options if driver_options else {},
}
}
body = json.dumps(body)
resp, body = self.post('share-servers/manage', body,
extra_headers=True, version=version)
self.expected_success(202, resp.status)
return self._parse_resp(body)
def unmanage_share_server(self, share_server_id,
version=LATEST_MICROVERSION):
body = json.dumps({'unmanage': None})
resp, body = self.post('share-servers/%s/action' % share_server_id,
body, extra_headers=True, version=version)
self.expected_success(202, resp.status)
return self._parse_resp(body)
    def wait_for_share_server_status(self, server_id, status,
                                     status_attr='status'):
        """Waits for a share server attribute to reach a given status.

        :param server_id: id of the share server to poll.
        :param status: value of ``status_attr`` to wait for.
        :param status_attr: which attribute of the server body to watch.
        :raises share_exceptions.ShareServerBuildErrorException: if the
            watched attribute enters an error state while waiting.
        :raises exceptions.TimeoutException: if ``status`` is not reached
            within ``self.build_timeout`` seconds.
        """
        body = self.show_share_server(server_id)
        server_status = body[status_attr]
        start = int(time.time())

        while server_status != status:
            time.sleep(self.build_interval)
            body = self.show_share_server(server_id)
            server_status = body[status_attr]
            # Success is checked before the error check, so waiting *for*
            # an error status is possible.
            if server_status == status:
                return
            elif constants.STATUS_ERROR in server_status.lower():
                raise share_exceptions.ShareServerBuildErrorException(
                    server_id=server_id)

            if int(time.time()) - start >= self.build_timeout:
                message = ("Share server's %(status_attr)s failed to "
                           "transition to %(status)s within the required "
                           "time %(seconds)s." %
                           {"status_attr": status_attr, "status": status,
                            "seconds": self.build_timeout})
                raise exceptions.TimeoutException(message)
def share_server_reset_state(self, share_server_id,
status=constants.SERVER_STATE_ACTIVE,
version=LATEST_MICROVERSION):
self.reset_state(share_server_id, status=status,
s_type='share-servers', version=version)
###############
def migrate_share(self, share_id, host,
force_host_assisted_migration=False,
new_share_network_id=None, writable=False,
preserve_metadata=False, preserve_snapshots=False,
nondisruptive=False, new_share_type_id=None,
version=LATEST_MICROVERSION):
body = {
'migration_start': {
'host': host,
'force_host_assisted_migration': force_host_assisted_migration,
'new_share_network_id': new_share_network_id,
'new_share_type_id': new_share_type_id,
'writable': writable,
'preserve_metadata': preserve_metadata,
'preserve_snapshots': preserve_snapshots,
'nondisruptive': nondisruptive,
}
}
body = json.dumps(body)
return self.post('shares/%s/action' % share_id, body,
headers=EXPERIMENTAL, extra_headers=True,
version=version)
def migration_complete(self, share_id, version=LATEST_MICROVERSION,
action_name='migration_complete'):
post_body = {
action_name: None,
}
body = json.dumps(post_body)
return self.post('shares/%s/action' % share_id, body,
headers=EXPERIMENTAL, extra_headers=True,
version=version)
def migration_cancel(self, share_id, version=LATEST_MICROVERSION,
action_name='migration_cancel'):
post_body = {
action_name: None,
}
body = json.dumps(post_body)
return self.post('shares/%s/action' % share_id, body,
headers=EXPERIMENTAL, extra_headers=True,
version=version)
    def migration_get_progress(self, share_id, version=LATEST_MICROVERSION,
                               action_name='migration_get_progress'):
        """Query migration progress for a share (experimental API).

        Returns the decoded JSON response body (element 1 of the
        (resp, body) tuple returned by post()).
        """
        post_body = {
            action_name: None,
        }
        body = json.dumps(post_body)
        result = self.post('shares/%s/action' % share_id, body,
                           headers=EXPERIMENTAL, extra_headers=True,
                           version=version)
        return json.loads(result[1])
    def reset_task_state(
            self, share_id, task_state, version=LATEST_MICROVERSION,
            action_name='reset_task_state'):
        """Reset the migration task_state of a share (experimental API)."""
        post_body = {
            action_name: {
                'task_state': task_state,
            }
        }
        body = json.dumps(post_body)
        return self.post('shares/%s/action' % share_id, body,
                         headers=EXPERIMENTAL, extra_headers=True,
                         version=version)
def wait_for_migration_status(self, share_id, dest_host, status_to_wait,
version=LATEST_MICROVERSION):
"""Waits for a share to migrate to a certain host."""
statuses = ((status_to_wait,)
if not isinstance(status_to_wait, (tuple, list, set))
else status_to_wait)
share = self.get_share(share_id, version=version)
migration_timeout = CONF.share.migration_timeout
start = int(time.time())
while share['task_state'] not in statuses:
time.sleep(self.build_interval)
share = self.get_share(share_id, version=version)
if share['task_state'] in statuses:
break
elif share['task_state'] == 'migration_error':
raise share_exceptions.ShareMigrationException(
share_id=share['id'], src=share['host'], dest=dest_host)
elif int(time.time()) - start >= migration_timeout:
message = ('Share %(share_id)s failed to reach a status in'
'%(status)s when migrating from host %(src)s to '
'host %(dest)s within the required time '
'%(timeout)s.' % {
'src': share['host'],
'dest': dest_host,
'share_id': share['id'],
'timeout': self.build_timeout,
'status': six.text_type(statuses),
})
raise exceptions.TimeoutException(message)
return share
################
    def create_share_replica(self, share_id, availability_zone=None,
                             version=LATEST_MICROVERSION):
        """Add a share replica of an existing share.

        Expects HTTP 202 and returns the parsed replica body.
        """
        uri = "share-replicas"
        post_body = {
            'share_id': share_id,
            'availability_zone': availability_zone,
        }
        body = json.dumps({'share_replica': post_body})
        resp, body = self.post(uri, body,
                               headers=EXPERIMENTAL,
                               extra_headers=True,
                               version=version)
        self.expected_success(202, resp.status)
        return self._parse_resp(body)
    def get_share_replica(self, replica_id, version=LATEST_MICROVERSION):
        """Get the details of a single share replica (expects HTTP 200)."""
        resp, body = self.get("share-replicas/%s" % replica_id,
                              headers=EXPERIMENTAL,
                              extra_headers=True,
                              version=version)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)
    def list_share_replicas(self, share_id=None, version=LATEST_MICROVERSION):
        """Get detailed list of replicas, optionally filtered by share_id."""
        uri = "share-replicas/detail"
        uri += ("?share_id=%s" % share_id) if share_id is not None else ''
        resp, body = self.get(uri, headers=EXPERIMENTAL,
                              extra_headers=True, version=version)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)
    def list_share_replicas_summary(self, share_id=None,
                                    version=LATEST_MICROVERSION):
        """Get summary list of replicas, optionally filtered by share_id."""
        uri = "share-replicas"
        uri += ("?share_id=%s" % share_id) if share_id is not None else ''
        resp, body = self.get(uri, headers=EXPERIMENTAL,
                              extra_headers=True, version=version)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)
    def delete_share_replica(self, replica_id, version=LATEST_MICROVERSION):
        """Delete a share replica (expects HTTP 202, returns raw body)."""
        uri = "share-replicas/%s" % replica_id
        resp, body = self.delete(uri,
                                 headers=EXPERIMENTAL,
                                 extra_headers=True,
                                 version=version)
        self.expected_success(202, resp.status)
        return body
    def promote_share_replica(self, replica_id, expected_status=202,
                              version=LATEST_MICROVERSION):
        """Promote a share replica to active state.

        :param expected_status: HTTP status expected from the API
        """
        uri = "share-replicas/%s/action" % replica_id
        post_body = {
            'promote': None,
        }
        body = json.dumps(post_body)
        resp, body = self.post(uri, body,
                               headers=EXPERIMENTAL,
                               extra_headers=True,
                               version=version)
        self.expected_success(expected_status, resp.status)
        return self._parse_resp(body)
    def list_share_replica_export_locations(self, replica_id,
                                            expected_status=200,
                                            version=LATEST_MICROVERSION):
        """List export locations of a share replica (experimental API)."""
        uri = "share-replicas/%s/export-locations" % replica_id
        resp, body = self.get(uri, headers=EXPERIMENTAL,
                              extra_headers=True, version=version)
        self.expected_success(expected_status, resp.status)
        return self._parse_resp(body)
    def get_share_replica_export_location(self, replica_id,
                                          export_location_id,
                                          expected_status=200,
                                          version=LATEST_MICROVERSION):
        """Show a single export location of a share replica."""
        uri = "share-replicas/%s/export-locations/%s" % (replica_id,
                                                         export_location_id)
        resp, body = self.get(uri, headers=EXPERIMENTAL,
                              extra_headers=True, version=version)
        self.expected_success(expected_status, resp.status)
        return self._parse_resp(body)
    def wait_for_share_replica_status(self, replica_id, expected_status,
                                      status_attr='status'):
        """Waits for a replica's status_attr to reach a given status.

        Polls every self.build_interval seconds; raises
        ShareInstanceBuildErrorException if the attribute contains 'error'
        (unless 'error' is the awaited status), or TimeoutException after
        self.build_timeout seconds.
        """
        body = self.get_share_replica(replica_id)
        replica_status = body[status_attr]
        start = int(time.time())
        while replica_status != expected_status:
            time.sleep(self.build_interval)
            body = self.get_share_replica(replica_id)
            replica_status = body[status_attr]
            if replica_status == expected_status:
                return
            if ('error' in replica_status
                    and expected_status != constants.STATUS_ERROR):
                raise share_exceptions.ShareInstanceBuildErrorException(
                    id=replica_id)
            if int(time.time()) - start >= self.build_timeout:
                message = ('The %(status_attr)s of Replica %(id)s failed to '
                           'reach %(expected_status)s status within the '
                           'required time (%(time)ss). Current '
                           '%(status_attr)s: %(current_status)s.' %
                           {
                               'status_attr': status_attr,
                               'expected_status': expected_status,
                               'time': self.build_timeout,
                               'id': replica_id,
                               'current_status': replica_status,
                           })
                raise exceptions.TimeoutException(message)
    def reset_share_replica_status(self, replica_id,
                                   status=constants.STATUS_AVAILABLE,
                                   version=LATEST_MICROVERSION):
        """Reset the status of a share replica (admin action)."""
        uri = 'share-replicas/%s/action' % replica_id
        post_body = {
            'reset_status': {
                'status': status
            }
        }
        body = json.dumps(post_body)
        resp, body = self.post(uri, body,
                               headers=EXPERIMENTAL,
                               extra_headers=True,
                               version=version)
        self.expected_success(202, resp.status)
        return self._parse_resp(body)
    def reset_share_replica_state(self, replica_id,
                                  state=constants.REPLICATION_STATE_ACTIVE,
                                  version=LATEST_MICROVERSION):
        """Reset the replication state of a replica (admin action)."""
        uri = 'share-replicas/%s/action' % replica_id
        post_body = {
            'reset_replica_state': {
                'replica_state': state
            }
        }
        body = json.dumps(post_body)
        resp, body = self.post(uri, body,
                               headers=EXPERIMENTAL,
                               extra_headers=True,
                               version=version)
        self.expected_success(202, resp.status)
        return self._parse_resp(body)
    def resync_share_replica(self, replica_id, expected_result=202,
                             version=LATEST_MICROVERSION):
        """Force an immediate resync of the replica.

        :param expected_result: HTTP status expected from the API
        """
        uri = 'share-replicas/%s/action' % replica_id
        post_body = {
            'resync': None
        }
        body = json.dumps(post_body)
        resp, body = self.post(uri, body,
                               headers=EXPERIMENTAL,
                               extra_headers=True,
                               version=version)
        self.expected_success(expected_result, resp.status)
        return self._parse_resp(body)
    def force_delete_share_replica(self, replica_id,
                                   version=LATEST_MICROVERSION):
        """Force delete a replica (admin action, expects HTTP 202)."""
        uri = 'share-replicas/%s/action' % replica_id
        post_body = {
            'force_delete': None
        }
        body = json.dumps(post_body)
        resp, body = self.post(uri, body,
                               headers=EXPERIMENTAL,
                               extra_headers=True,
                               version=version)
        self.expected_success(202, resp.status)
        return self._parse_resp(body)
    def list_share_networks(self, detailed=False, params=None,
                            version=LATEST_MICROVERSION):
        """Get list of share networks w/o filters.

        :param detailed: use the /detail endpoint when True
        :param params: optional dict of query parameters
        """
        uri = 'share-networks/detail' if detailed else 'share-networks'
        uri += '?%s' % parse.urlencode(params) if params else ''
        resp, body = self.get(uri, version=version)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)
    def list_share_networks_with_detail(self, params=None,
                                        version=LATEST_MICROVERSION):
        """Get detailed list of share networks w/o filters.

        Convenience wrapper around list_share_networks(detailed=True).
        """
        return self.list_share_networks(
            detailed=True, params=params, version=version)
    def get_share_network(self, share_network_id, version=LATEST_MICROVERSION):
        """Show a single share network (expects HTTP 200)."""
        resp, body = self.get("share-networks/%s" % share_network_id,
                              version=version)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)
################
    def create_snapshot_access_rule(self, snapshot_id, access_type="ip",
                                    access_to="0.0.0.0/0"):
        """Allow access to a snapshot via the allow_access action."""
        body = {
            "allow_access": {
                "access_type": access_type,
                "access_to": access_to
            }
        }
        resp, body = self.post("snapshots/%s/action" % snapshot_id,
                               json.dumps(body), version=LATEST_MICROVERSION)
        self.expected_success(202, resp.status)
        return self._parse_resp(body)
def get_snapshot_access_rule(self, snapshot_id, rule_id):
resp, body = self.get("snapshots/%s/access-list" % snapshot_id,
version=LATEST_MICROVERSION)
body = self._parse_resp(body)
found_rules = [r for r in body if r['id'] == rule_id]
return found_rules[0] if len(found_rules) > 0 else None
    def wait_for_snapshot_access_rule_status(self, snapshot_id, rule_id,
                                             expected_state='active'):
        """Poll until the snapshot access rule reaches expected_state.

        Raises AccessRuleBuildErrorException if the rule state contains
        'error', or TimeoutException after self.build_timeout seconds.
        """
        rule = self.get_snapshot_access_rule(snapshot_id, rule_id)
        state = rule['state']
        start = int(time.time())
        while state != expected_state:
            time.sleep(self.build_interval)
            rule = self.get_snapshot_access_rule(snapshot_id, rule_id)
            state = rule['state']
            if state == expected_state:
                return
            if 'error' in state:
                raise share_exceptions.AccessRuleBuildErrorException(
                    snapshot_id)
            if int(time.time()) - start >= self.build_timeout:
                message = ('The status of snapshot access rule %(id)s failed '
                           'to reach %(expected_state)s state within the '
                           'required time (%(time)ss). Current '
                           'state: %(current_state)s.' %
                           {
                               'expected_state': expected_state,
                               'time': self.build_timeout,
                               'id': rule_id,
                               'current_state': state,
                           })
                raise exceptions.TimeoutException(message)
    def delete_snapshot_access_rule(self, snapshot_id, rule_id):
        """Deny (delete) a snapshot access rule via the deny_access action."""
        body = {
            "deny_access": {
                "access_id": rule_id,
            }
        }
        resp, body = self.post("snapshots/%s/action" % snapshot_id,
                               json.dumps(body), version=LATEST_MICROVERSION)
        self.expected_success(202, resp.status)
        return self._parse_resp(body)
    def wait_for_snapshot_access_rule_deletion(self, snapshot_id, rule_id):
        """Poll until the snapshot access rule disappears.

        Raises TimeoutException after self.build_timeout seconds.
        """
        rule = self.get_snapshot_access_rule(snapshot_id, rule_id)
        start = int(time.time())
        while rule is not None:
            time.sleep(self.build_interval)
            rule = self.get_snapshot_access_rule(snapshot_id, rule_id)
            if rule is None:
                return
            if int(time.time()) - start >= self.build_timeout:
                message = ('The snapshot access rule %(id)s failed to delete '
                           'within the required time (%(time)ss).' %
                           {
                               'time': self.build_timeout,
                               'id': rule_id,
                           })
                raise exceptions.TimeoutException(message)
    def get_snapshot_export_location(self, snapshot_id, export_location_uuid,
                                     version=LATEST_MICROVERSION):
        """Show a single export location of a snapshot."""
        resp, body = self.get(
            "snapshots/%(snapshot_id)s/export-locations/%(el_uuid)s" % {
                "snapshot_id": snapshot_id, "el_uuid": export_location_uuid},
            version=version)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)
    def list_snapshot_export_locations(
            self, snapshot_id, version=LATEST_MICROVERSION):
        """List export locations of a snapshot (expects HTTP 200)."""
        resp, body = self.get(
            "snapshots/%s/export-locations" % snapshot_id, version=version)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)
###############
    def get_message(self, message_id, version=LATEST_MICROVERSION):
        """Show details for a single message."""
        url = 'messages/%s' % message_id
        resp, body = self.get(url, version=version)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)
    def list_messages(self, params=None, version=LATEST_MICROVERSION):
        """List all messages, optionally filtered by query params."""
        url = 'messages'
        url += '?%s' % parse.urlencode(params) if params else ''
        resp, body = self.get(url, version=version)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)
    def delete_message(self, message_id, version=LATEST_MICROVERSION):
        """Delete a single message (expects HTTP 204)."""
        url = 'messages/%s' % message_id
        resp, body = self.delete(url, version=version)
        self.expected_success(204, resp.status)
        return self._parse_resp(body)
    def wait_for_message(self, resource_id):
        """Waits until a message for a resource with given id exists.

        Returns the matching message dict, or raises TimeoutException
        after self.build_timeout seconds.
        """
        start = int(time.time())
        # 'message' doubles as the loop flag and the timeout error text.
        message = None
        while not message:
            time.sleep(self.build_interval)
            for msg in self.list_messages():
                if msg['resource_id'] == resource_id:
                    return msg
            if int(time.time()) - start >= self.build_timeout:
                message = ('No message for resource with id %s was created in'
                           ' the required time (%s s).' %
                           (resource_id, self.build_timeout))
                raise exceptions.TimeoutException(message)
###############
    def create_security_service(self, ss_type="ldap",
                                version=LATEST_MICROVERSION, **kwargs):
        """Creates Security Service.

        :param ss_type: ldap, kerberos, active_directory
        :param version: microversion string
        :param kwargs: name, description, dns_ip, server, ou, domain, user,
        :param kwargs: password
        """
        post_body = {"type": ss_type}
        post_body.update(kwargs)
        body = json.dumps({"security_service": post_body})
        resp, body = self.post("security-services", body, version=version)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)
    def update_security_service(self, ss_id, version=LATEST_MICROVERSION,
                                **kwargs):
        """Updates Security Service.

        :param ss_id: id of security-service entity
        :param version: microversion string
        :param kwargs: dns_ip, server, ou, domain, user, password, name,
        :param kwargs: description
        :param kwargs: for 'active' status can be changed
        :param kwargs: only 'name' and 'description' fields
        """
        body = json.dumps({"security_service": kwargs})
        resp, body = self.put("security-services/%s" % ss_id, body,
                              version=version)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)
    def get_security_service(self, ss_id, version=LATEST_MICROVERSION):
        """Show a single security service (expects HTTP 200)."""
        resp, body = self.get("security-services/%s" % ss_id, version=version)
        self.expected_success(200, resp.status)
        return self._parse_resp(body)
def list_security_services(self, detailed=False, params=None,
version=LATEST_MICROVERSION):
uri = "security-services"
if detailed:
uri += '/detail'
if params:
uri += "?%s" % parse.urlencode(params)
resp, body = self.get(uri, version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
|
#!/bin/bash -x
set -e
set -o pipefail

# Note this script assumes that the current working directory
# is the root of the repository
if [ ! -f ./.travis.yml ]; then
  echo "This script must be run from the root of the repository"
  exit 1
fi

# Required configuration variables; fail early with a clear message.
: ${LLVM_INCLUDE:?"LLVM_INCLUDE must be specified"}
: ${LLVM_LIB:?"LLVM_LIB must be specified"}
: ${LLVM_BIN:?"LLVM_BIN must be specified"}
: ${LLVM_VERSION:?"LLVM_VERSION must be specified"}
: ${BUILD_SYSTEM:?"BUILD_SYSTEM must be specified"}
: ${NUM_JOBS:?"NUM_JOBS must be specified"}
: ${CXX:?"CXX must be specified"}
: ${BUILD_TYPE:?"BUILD_TYPE must be specified"}
: ${RUN_TESTS:?"RUN_TESTS must be specified"}

# By default don't do incremental build
INCREMENTAL_BUILD="${INCREMENTAL_BUILD:-0}"

# NOTE: variable expansions are quoted throughout so that an empty or
# whitespace-containing value produces a sane comparison failure instead
# of a shell syntax error.
if [ "${BUILD_SYSTEM}" = 'CMAKE' ]; then
  : ${HALIDE_SHARED_LIBRARY:?"HALIDE_SHARED_LIBRARY must be set"}
  # CMake expects the LLVM version without the dot (e.g. "37" for "3.7").
  LLVM_VERSION_NO_DOT="$( echo "${LLVM_VERSION}" | sed 's/\.//')"
  if [ "${INCREMENTAL_BUILD}" = '0' ]; then
    rm -rf build/
  fi
  mkdir -p build/ && cd build/
  cmake -DLLVM_INCLUDE="${LLVM_INCLUDE}" \
        -DLLVM_LIB="${LLVM_LIB}" \
        -DLLVM_BIN="${LLVM_BIN}" \
        -DLLVM_VERSION="${LLVM_VERSION_NO_DOT}" \
        -DHALIDE_SHARED_LIBRARY="${HALIDE_SHARED_LIBRARY}" \
        -DWITH_APPS=ON \
        -DWITH_TESTS="${RUN_TESTS}" \
        -DWITH_TUTORIALS=OFF \
        -DWITH_DOCS=ON \
        -DCMAKE_BUILD_TYPE="${BUILD_TYPE}" \
        -G "Unix Makefiles" \
        ../

  # Build
  make -j "${NUM_JOBS}"

  # Build docs
  make doc

  if [ "${RUN_TESTS}" = '1' ]; then
    # Run correctness tests, excluding known-slow/flaky categories.
    TESTCASES=$(find bin/ -iname 'correctness_*' | \
      grep -v _vector_math | \
      grep -v _vector_cast | \
      grep -v _lerp | \
      grep -v _simd_op_check | \
      grep -v _specialize_branched_loops | \
      grep -v _print | \
      grep -v _math | \
      grep -v _div_mod | \
      grep -v _fuzz_simplify | \
      grep -v _round | \
      sort)
    for TESTCASE in ${TESTCASES}; do
      echo "Running ${TESTCASE}"
      "${TESTCASE}"
    done
  else
    echo "Not running tests"
  fi
elif [ "${BUILD_SYSTEM}" = 'MAKE' ]; then
  if [ "${BUILD_TYPE}" = 'Debug' ]; then
    OPT_FLAG='-O0'
  else
    OPT_FLAG='-O3'
  fi
  # Wrapper that invokes make with the LLVM/compiler configuration applied.
  function make_build() {
    make LLVM_BINDIR="${LLVM_BIN}" \
         LLVM_LIBDIR="${LLVM_LIB}" \
         LLVM_CONFIG="${LLVM_BIN}/llvm-config" \
         CLANG="${LLVM_BIN}/clang" \
         CXX="${CXX}" \
         OPTIMIZE="${OPT_FLAG}" \
         "$@"
  }
  if [ "${INCREMENTAL_BUILD}" = '0' ]; then
    make clean
  fi
  # Build
  # Note this runs the internal tests too
  # FIXME: The CMake build system doesn't do this
  make_build -j "${NUM_JOBS}"

  # Build the docs
  make_build doc

  if [ "${RUN_TESTS}" = '1' ]; then
    # Build an run the correctness tests
    make_build -j "${NUM_JOBS}" test_correctness
  else
    echo "Not running tests"
  fi
else
  echo "Unexpected BUILD_SYSTEM: \"${BUILD_SYSTEM}\""
  exit 1
fi
|
import symbolTable from './symbol-table';
import noNestedFunction from './no-nested-function';
import validVariableType from './valid-variable-type';
import variableCasing from './variable-casing';
// Aggregate export of the individual rule/utility modules imported above.
export default {
  symbolTable,
  noNestedFunction,
  validVariableType,
  variableCasing,
};
|
package main
import (
"bytes"
"fmt"
"io"
"log"
"time"
"github.com/zenhotels/astranet"
)
// main demonstrates two astranet instances talking over an in-process
// io.Pipe pair: one binds a "hello" service, the other dials it and
// prints the reply.
func main() {
	var astraNet1 = astranet.New().Server()
	var astraNet2 = astranet.New().Client()

	var client, server IOLoop
	server.Reader, client.Writer = io.Pipe()
	client.Reader, server.Writer = io.Pipe()

	// Use IOLoops as a pipe that conforms io.ReadWriter on the both ends
	go astraNet1.Attach(client)
	go astraNet2.Attach(server)

	// Bind a server for the "hello" service
	l, err := astraNet1.Bind("", "hello")
	if err != nil {
		log.Fatalln(err)
	}
	go func() {
		conn, err := l.Accept()
		for err == nil {
			// For the each connected client we produce a message and close the connection
			fmt.Fprintf(conn, "Hello! Time: %s", time.Now().Format(time.Kitchen))
			conn.Close()
			conn, err = l.Accept()
		}
		if err != nil {
			log.Println("[accept ERR]", err)
		}
	}()

	// Now connect to the service and read the reply
	conn, err := astraNet2.Dial("", "hello")
	if err != nil {
		log.Fatalln("[dial ERR]", err)
	}
	body := new(bytes.Buffer)
	// io.Copy's error was previously discarded; surface it so a broken
	// stream is reported instead of silently printing a truncated reply.
	if _, err := io.Copy(body, conn); err != nil {
		log.Println("[copy ERR]", err)
	}
	log.Printf("Server says: %s", body.String())
}
// IOLoop is the same as the astranet.IOLoop, but copied here for clarity.
// It bundles a Reader and a Writer into a single io.ReadWriter value.
type IOLoop struct {
	io.Reader
	io.Writer
}
|
#--
# Copyright 2017 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#++
require_relative '../utils/duration'
require_relative 'sr_common_handler'
module Handlers
  # Sender events handler for sender client.
  # Sends @count messages built from the configured message options,
  # optionally pacing sends by a Duration, and closes the connection
  # once all messages have been accepted by the peer.
  class SenderHandler < Handlers::SRCommonHandler
    # Count of messages to be send
    attr_accessor :count
    # Message properties
    attr_accessor :msg_properties
    # Message content
    attr_accessor :msg_content
    # Message content type
    attr_accessor :msg_content_type
    # Message durability
    attr_accessor :msg_durable
    # Message TTL (ms)
    attr_accessor :msg_ttl
    # Message correlation ID
    attr_accessor :msg_correlation_id
    # Reply to address
    attr_accessor :msg_reply_to
    # Message group ID
    attr_accessor :msg_group_id
    # Message destination
    attr_accessor :msg_to
    # Message priority
    attr_accessor :msg_priority
    # Message ID
    attr_accessor :msg_id
    # Message user ID
    attr_accessor :msg_user_id
    # Message subject
    attr_accessor :msg_subject
    # Anonymous
    attr_accessor :anonymous

    # Initialization of sender events handler
    # ==== Sender events handler arguments
    # broker:: URI of broker
    # log_msgs:: format of message(s) log
    # count:: number of messages to send
    # msg_content:: message content
    # msg_durable:: message durability
    # msg_ttl:: message TTL (ms)
    # msg_correlation_id:: message correlation ID
    # msg_reply_to:: address to send reply to
    # msg_group_id:: message group ID
    # msg_to:: message destination
    # sasl_mechs:: allowed SASL mechanisms
    def initialize(
      broker,
      log_msgs,
      msg_content_hashed,
      count,
      msg_properties,
      msg_content,
      msg_content_type,
      msg_durable,
      msg_ttl,
      msg_correlation_id,
      msg_reply_to,
      msg_group_id,
      msg_to,
      msg_priority,
      msg_id,
      msg_user_id,
      msg_subject,
      anonymous,
      sasl_mechs,
      idle_timeout,
      max_frame_size,
      sasl_enabled,
      log_lib,
      auto_settle_off,
      exit_timer,
      duration,
      duration_mode
    )
      super(
        broker,
        log_msgs,
        msg_content_hashed,
        sasl_mechs,
        idle_timeout,
        max_frame_size,
        sasl_enabled,
        log_lib,
        auto_settle_off,
        exit_timer
      )
      # Save count of messages to be send
      @count = count
      # Save message properties
      @msg_properties = msg_properties
      # Save message content
      @msg_content = msg_content
      # Save message content type
      @msg_content_type = msg_content_type
      # Save message durability
      @msg_durable = msg_durable
      # Save message TTL (ms)
      @msg_ttl = msg_ttl
      # Save message correlation ID
      @msg_correlation_id = msg_correlation_id
      # Save reply to address
      @msg_reply_to = msg_reply_to
      # Save message group ID
      @msg_group_id = msg_group_id
      # Save message destination
      @msg_to = msg_to
      # Save message priority
      @msg_priority = msg_priority
      # Save message ID
      @msg_id = msg_id
      # Save user ID
      @msg_user_id = msg_user_id
      # Save message subject
      @msg_subject = msg_subject
      # Save anonymous
      @anonymous = anonymous
      # Number of sent messages
      @sent = 0
      # Number of accepted messages
      @accepted = 0
      # Duration (controls the inter-send pacing in on_sendable/delay)
      @duration = Duration.new(duration, count, duration_mode)
      # True if a send has been scheduled
      @scheduled = false
    end

    # Called when the event loop starts,
    # connects sender client to SRCommonHandler#broker
    # and creates sender
    def on_container_start(container)
      # Connecting to broker and creating sender
      container.connect(
        # Set broker URI
        @broker,
        # Enable SASL authentication
        sasl_enabled: @sasl_enabled,
        # Enable insecure SASL mechanisms
        sasl_allow_insecure_mechs: true,
        # Set allowed SASL mechanisms
        sasl_allowed_mechs: @sasl_mechs,
        # Set idle timeout
        idle_timeout: @idle_timeout,
        # Set max frame size
        max_frame_size: @max_frame_size,
      ).open_sender({
        # Set target address (bare `anonymous` resolves to the attr reader;
        # nil target means an anonymous sender link)
        :target => anonymous ? nil : @broker.amqp_address,
        # Set auto settle
        :auto_settle => @auto_settle_off ? false : true,
      })
    end

    # Combined pacing delay for the next send: the before-send delay plus,
    # for every send after the first, the after-send delay. Returns nil
    # when neither applies (inject(:+) on an empty array).
    def delay
      before = @duration.delay("before-send")
      after = @duration.delay("after-send") if @sent > 0 # No after-delay on first send
      [before, after].compact.inject(:+)
    end

    # Called when the sender link has credit
    # and messages can therefore be transferred,
    # sending SenderHandler#count messages
    def on_sendable(sender)
      if @duration.zero? # Send immediately
        send(sender) while (sender.credit > 0) && (@sent < @count)
      elsif (sender.credit > 0) && (@sent < @count) && !@scheduled # Schedule to send after delay
        @scheduled = true
        c = sender.connection.container
        c.schedule(delay) do
          send(sender)
          @scheduled = false # Need to re-schedule for another send
        end
      end
    end

    # Builds one message from the configured options and sends it on the
    # given sender link, incrementing the sent counter and logging it.
    def send(sender)
      exit_timer.reset if exit_timer
      # Create new message
      msg = Qpid::Proton::Message.new
      # Set message destination
      msg.address = @msg_to
      # If message destination is not set
      unless msg.address
        # Set message destination if anonymous mode
        msg.address = @broker.amqp_address if @anonymous
      end
      # Set message properties
      if @msg_properties
        @msg_properties.each { |k, v| msg[k] = v }
      end
      # If message content is set
      if @msg_content
        # If message content is string and contains formatting part
        if @msg_content.is_a? String and @msg_content =~ /%[0-9]*d/
          # Format message content with number of sent messages
          msg.body = sprintf(@msg_content, @sent)
        else
          # Set message content as it is
          msg.body = @msg_content
        end
      end # if
      # Set message content type if specified
      msg.content_type = @msg_content_type if @msg_content_type
      # Set message durability
      msg.durable = @msg_durable
      # Set message TTL (ms)
      msg.ttl = @msg_ttl
      # If message correlation ID is set
      if @msg_correlation_id
        msg.correlation_id = @msg_correlation_id
      end # if
      # Set reply to address
      msg.reply_to = @msg_reply_to
      # If message group ID is set
      if @msg_group_id
        msg.group_id = @msg_group_id
      end
      msg.priority = @msg_priority if @msg_priority
      msg.id = @msg_id if @msg_id
      msg.user_id = @msg_user_id if @msg_user_id
      msg.subject = @msg_subject if @msg_subject
      # Send message
      sender.send(msg)
      # Increase number of sent messages
      @sent = @sent + 1
      print_message(msg)
    end

    # Called when the remote peer accepts an outgoing message,
    # accepting SenderHandler#count messages
    def on_tracker_accept(tracker)
      # Increase number of accepted messages
      @accepted = @accepted + 1
      # If all messages to be send are sent and accepted
      if @accepted == @count
        # Close sender
        tracker.sender.close
        # Close connection
        tracker.sender.connection.close
      end # if
    end
  end # class SenderHandler
end # module Handlers
# eof
|
import React, { PureComponent } from 'react';
import { Popover, Button } from 'antd';
import MenuItem from './MenuItem';
// Context-menu wrapper: shows either `exContent` or a MenuItem list built
// from the `content` prop inside an antd Popover triggered by right-click.
class AdvancedContextMenu extends PureComponent {
  constructor(props) {
    super(props);
    this.state = {
      visible: false,
    };
  }

  // Passed down to each MenuItem so an item click can dismiss the menu.
  closeMenu = () => {
    this.setState({
      visible: false,
    });
  };

  onMenuVisibleChange = (value) => {
    this.setState({
      visible: value,
    });
  };

  // Builds MenuItem elements from `content`; every other prop is forwarded
  // to each item. NOTE(review): `label` is used as the React key, so labels
  // are presumably unique — verify against callers.
  constructMenu = () => {
    const { content = [], ...other } = this.props;
    return content.map((current) => {
      const props = { ...current, ...other };
      return <MenuItem
        key={current.label}
        closeMenu={this.closeMenu}
        {...props}/>;
    });
  };

  render() {
    const { visible } = this.state;
    const { children, exContent } = this.props;
    return (
      <Popover
        content={exContent || this.constructMenu()}
        trigger="contextMenu"
        placement="bottomLeft"
        visible={visible}
        onVisibleChange={this.onMenuVisibleChange}
      >
        {children}
      </Popover>
    );
  };
}
export default AdvancedContextMenu;
|
# C++ Playground
A CMake based C++ project with various libraries.
The top `CMakeLists.txt` presents two stages: "thirdparty" and "main".
When `-DTHIRDPARTY=TRUE` is passed to cmake configure command, the thirdparty stage is configured,
the "binary directory" is `thirdparty/build`. This build employs
[ExternalProject feature of CMake](https://cmake.org/cmake/help/v3.15/module/ExternalProject.html)
to download and build libraries.
This should be done before the "main" stage and rarely needs to repeat.
Without the "THIRDPARTY" variable set, the main stage is configured, the "binary directory" is
"./build". This is the major playground for user. User's CMake targets can just link to those
thirdparty libraries as CMake packages.
A "run-cmake" helper script (`run-cmake.sh` is for Unix, and `run-cmake.bat` is for Windows) runs
the "configure" and "build" for "thirdparty" stage directly, but for "main" stage, it only runs
"configure".
Note that for the "thirdparty" stage, the generator is set to a platform-specific one (with
preference to IDE flavors if available), rather than Ninja, since "compile_commands.json" is not
necessary for this stage. Using a platform-specific generator may ease the toolchain configuration.
# Building
## Dependencies
### CMake
CMake 3.15 or later versions should be OK, though more recent versions like 3.20+ are not tested.
### Ninja
In order to enable CMake to export "compile_commands.json" file, the "Ninja" generator is used for
the user project.
You need to install the [`ninja`](https://ninja-build.org/) build tool, I recommend going to
[official release page](https://github.com/ninja-build/ninja/releases) to download the binary and
just put the binary at your `PATH`.
## Using the "run-cmake" script
The following example applies on Linux. You should use `run-cmake.bat` for Windows.
```shell
# To configure and build the "thirdparty" stage
./run-cmake.sh thirdparty

# To configure the "main" stage (must be done AFTER "thirdparty")
```
# Developing
If you prefer to use Visual Studio (on Windows) or Xcode (on macOS), you should change the generator
at the "main" stage to the corresponding IDE's flavor (e.g. "Visual Studio 14 2015 Win64" for
Windows, or "Xcode" for macOS).
## Visual Studio Code
(TODO)
|
package com.funckyhacker.capofiler.view.main
import android.Manifest
import android.annotation.SuppressLint
import android.content.ActivityNotFoundException
import android.content.Intent
import android.databinding.DataBindingUtil
import android.os.Bundle
import android.os.Environment
import android.support.annotation.StringRes
import android.support.design.widget.Snackbar
import android.support.v4.content.ContextCompat
import android.support.v4.content.FileProvider
import android.support.v4.view.GravityCompat
import android.support.v4.widget.DrawerLayout
import android.support.v7.app.AppCompatActivity
import android.support.v7.widget.DividerItemDecoration
import android.support.v7.widget.GridLayoutManager
import android.support.v7.widget.LinearLayoutManager
import android.view.Menu
import android.view.MenuItem
import android.view.View
import android.widget.Toast
import com.afollestad.materialdialogs.MaterialDialog
import com.funckyhacker.capofiler.R
import com.funckyhacker.capofiler.databinding.ActivityMainBinding
import com.funckyhacker.capofiler.event.ClickItemEvent
import com.funckyhacker.capofiler.util.FileUtils
import com.funckyhacker.capofiler.view.adapter.MainLinearAdapter
import com.funckyhacker.capofiler.view.search.SearchActivity
import dagger.android.AndroidInjection
import org.greenrobot.eventbus.EventBus
import org.greenrobot.eventbus.Subscribe
import permissions.dispatcher.NeedsPermission
import permissions.dispatcher.OnNeverAskAgain
import permissions.dispatcher.OnPermissionDenied
import permissions.dispatcher.RuntimePermissions
import timber.log.Timber
import java.io.File
import java.util.*
import javax.inject.Inject
@RuntimePermissions
class MainActivity : AppCompatActivity(), MainView {
  companion object {
    // Layout modes toggled via the switch_layout menu item.
    const val LAYOUT_LIST = 0
    const val LAYOUT_GRID = 1
    // Request code for SearchActivity results.
    private const val REQUEST = 1
  }
  @Inject
  lateinit var viewModel: MainViewModel
  private lateinit var binding: ActivityMainBinding
  // Captured in onCreateOptionsMenu; used to swap the layout-toggle icon.
  private lateinit var menu: Menu

  private val linearLayoutManager: LinearLayoutManager by lazy {
    LinearLayoutManager(this)
  }

  private val dividerItemDecoration: DividerItemDecoration by lazy {
    DividerItemDecoration(this, linearLayoutManager.orientation)
  }
  override fun onCreate(savedInstanceState: Bundle?) {
    super.onCreate(savedInstanceState)
    AndroidInjection.inject(this)
    binding = DataBindingUtil.setContentView(this, R.layout.activity_main)
    binding.viewModel = viewModel
    setSupportActionBar(binding.toolBar)
    // !! is safe only because setSupportActionBar was just called above.
    val actionbar = supportActionBar
    actionbar!!.setDisplayHomeAsUpEnabled(true)
    actionbar.setHomeButtonEnabled(true)
    actionbar.setHomeAsUpIndicator(R.drawable.ic_menu_white_24dp)
    viewModel.init(this)
    setLinearLayoutManager()
    initDrawer()
  }
  override fun onResume() {
    super.onResume()
    // Generated PermissionsDispatcher entry point for enableAccessStorage.
    enableAccessStorageWithPermissionCheck()
    viewModel.setData(viewModel.rootFiles)
  }
  override fun onStart() {
    super.onStart()
    // Registered here, unregistered in onStop (see onClickItemEvent).
    EventBus.getDefault().register(this)
  }
  override fun onBackPressed() {
    // At the root of the file hierarchy, defer to normal back handling;
    // otherwise pop one directory level.
    if (viewModel.pageSize == 0) {
      super.onBackPressed()
      return
    }
    viewModel.popItem()
  }
  public override fun onStop() {
    super.onStop()
    EventBus.getDefault().unregister(this)
  }
  @SuppressLint("NeedOnRequestPermissionsResult")
  override fun onRequestPermissionsResult(requestCode: Int, permissions: Array<String>, grantResults: IntArray) {
    super.onRequestPermissionsResult(requestCode, permissions, grantResults)
    // NOTE: delegate the permission handling to generated function
    onRequestPermissionsResult(requestCode, grantResults)
  }
override fun onCreateOptionsMenu(menu: Menu): Boolean {
this.menu = menu
val inflater = menuInflater
inflater.inflate(R.menu.menu_main, menu)
return true
}
override fun onOptionsItemSelected(item: MenuItem): Boolean {
when (item.itemId) {
android.R.id.home -> {
binding.drawerLayout.openDrawer(GravityCompat.START)
return true
}
R.id.switch_layout -> {
val switchMenu = menu.getItem(0)
if (viewModel.layoutType == LAYOUT_LIST) {
switchMenu.icon = ContextCompat.getDrawable(this, R.drawable.ic_view_list_black_24dp)
setGridLayoutManager()
return true
}
switchMenu.icon = ContextCompat.getDrawable(this, R.drawable.ic_view_module_black_24dp)
setLinearLayoutManager()
return true
}
R.id.menu_sort_by_name -> {
viewModel.setData(FileUtils.getSortedListByName(viewModel.rootFiles))
return true
}
R.id.menu_sort_by_date -> {
viewModel.setData(FileUtils.getSortedListByDate(viewModel.rootFiles))
return true
}
R.id.menu_search -> {
startActivityForResult(SearchActivity.createIntent(this), REQUEST)
return true
}
else -> return true
}
}
override fun onActivityResult(requestCode: Int, resultCode: Int, data: Intent?) {
super.onActivityResult(requestCode, resultCode, data)
if (requestCode != REQUEST || resultCode != RESULT_OK || data == null || data.extras == null) {
Timber.w("Check onActivityResult")
return
}
val file = data.extras.getSerializable(SearchActivity.EXTRA_FILE) as File
viewModel.setFilesToList(file.absolutePath)
viewModel.setData(Arrays.asList(*file.listFiles()))
}
@Subscribe
fun onClickItemEvent(event: ClickItemEvent) {
if (event.file.isDirectory) {
viewModel.setFilesToList(event.file)
return
}
val apkURI = FileProvider.getUriForFile(this, applicationContext.packageName + ".provider", event.file)
viewModel.sendIntent(contentResolver, event.file, apkURI)
}
private fun initDrawer() {
binding.drawerLayout.addDrawerListener(object : DrawerLayout.DrawerListener {
override fun onDrawerSlide(drawerView: View, slideOffset: Float) {
//Timber.d("onDrawerSlide");
}
override fun onDrawerOpened(drawerView: View) {
Timber.d("onDrawerOpened")
}
override fun onDrawerClosed(drawerView: View) {
Timber.d("onDrawerClosed")
}
override fun onDrawerStateChanged(newState: Int) {
Timber.d("onDrawerStateChanged")
}
})
binding.navView.setNavigationItemSelectedListener { item ->
// set item as selected to persist highlight
item.isChecked = true
// close drawer when item is tapped
binding.drawerLayout.closeDrawers()
when (item.itemId) {
R.id.nav_download -> viewModel.setFilesToList(Environment.DIRECTORY_DOWNLOADS)
R.id.nav_picture -> viewModel.setFilesToList(Environment.DIRECTORY_PICTURES)
R.id.nav_audio -> viewModel.setFilesToList(Environment.DIRECTORY_MUSIC)
R.id.nav_video -> viewModel.setFilesToList(Environment.DIRECTORY_MOVIES)
}
true
}
}
@NeedsPermission(Manifest.permission.WRITE_EXTERNAL_STORAGE, Manifest.permission.READ_EXTERNAL_STORAGE)
fun enableAccessStorage() {
}
@OnPermissionDenied(Manifest.permission.WRITE_EXTERNAL_STORAGE, Manifest.permission.READ_EXTERNAL_STORAGE)
fun onStorageDenied() {
Toast.makeText(this, R.string.permission_storage_denied, Toast.LENGTH_SHORT).show()
}
@OnNeverAskAgain(Manifest.permission.WRITE_EXTERNAL_STORAGE, Manifest.permission.READ_EXTERNAL_STORAGE)
fun onStorageNeverAskAgain() {
Toast.makeText(this, R.string.permission_storage_never_ask_again, Toast.LENGTH_SHORT).show()
}
override fun setAdapter(adapter: MainLinearAdapter) {
binding.listView.adapter = adapter
}
override fun startActivity(intent: Intent) {
try {
super.startActivity(intent)
} catch (e: ActivityNotFoundException) {
showSnackBar("Couldn't show the preview for this file.")
}
}
override fun showSnackBar(message: String) {
val snackBar = Snackbar.make(binding.root, message, Snackbar.LENGTH_SHORT)
snackBar.show()
}
override fun showErrorDialog(@StringRes messageId: Int) {
MaterialDialog.Builder(this)
.title(messageId)
.positiveText(android.R.string.ok)
.negativeText(android.R.string.cancel)
.build()
}
private fun setLinearLayoutManager() {
viewModel.layoutType = LAYOUT_LIST
binding.listView.layoutManager = linearLayoutManager
binding.listView.adapter = viewModel.linearAdapter
binding.listView.addItemDecoration(dividerItemDecoration)
}
private fun setGridLayoutManager() {
viewModel.layoutType = LAYOUT_GRID
binding.listView.layoutManager = GridLayoutManager(this, 3)
binding.listView.adapter = viewModel.gridAdapter
binding.listView.removeItemDecoration(dividerItemDecoration)
}
}
|
/*
* Zed Attack Proxy (ZAP) and its related class files.
*
* ZAP is an HTTP/HTTPS proxy for assessing web application security.
*
* Copyright 2012 The ZAP Development Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.zaproxy.zap.utils;
import java.awt.event.ActionEvent;
import java.beans.PropertyChangeEvent;
import java.beans.PropertyChangeListener;
import javax.swing.AbstractAction;
import javax.swing.KeyStroke;
import javax.swing.text.Document;
import javax.swing.text.JTextComponent;
import javax.swing.undo.CannotRedoException;
import javax.swing.undo.CannotUndoException;
import javax.swing.undo.UndoManager;
import org.parosproxy.paros.Constant;
/**
* {@code ZapTextComponentUndoManager} manages a list of {@code UndoableEdit}s, providing a way to
* undo or redo the appropriate edits with undo and redo actions accessible through {@code
* KeyStroke}s created with {@code Constant.ACCELERATOR_UNDO} and {@code Constant.ACCELERATOR_REDO},
* respectively.
*
* <p>The default is to maintain a window of 100 undoable edits. When the limit is reached older
* undoable edits start to be discarded when new ones are saved. The limit can be changed with the
* method {@code setLimit(int)}.
*
* <p>There are three policies that affect if, and when, the undoable edits are saved:
*
* <ul>
* <li>{@code UndoManagerPolicy.DEFAULT}
* <li>{@code UndoManagerPolicy.ALWAYS_ENABLED}
* <li>{@code UndoManagerPolicy.ALWAYS_DISABLED}
* </ul>
*
* The policy can be changed with the method {@code setUndoManagerPolicy}.
*
* <p>The {@code ZapTextComponentUndoManager} listens to changes to the {@code Document} of the
* {@code JTextComponent} used with the {@code ZapTextComponentUndoManager}, so there is no need to
* do anything if the document is changed.
*
* @since 1.4.1
* @see #setLimit(int)
* @see #setUndoManagerPolicy(UndoManagerPolicy)
* @see UndoManagerPolicy
* @see UndoManager
*/
public class ZapTextComponentUndoManager extends UndoManager implements PropertyChangeListener {
    /**
     * There are three policies that affect if, and when, the undoable edits are saved:
     *
     * <ul>
     *   <li>{@code UndoManagerPolicy.DEFAULT}
     *   <li>{@code UndoManagerPolicy.ALWAYS_ENABLED}
     *   <li>{@code UndoManagerPolicy.ALWAYS_DISABLED}
     * </ul>
     *
     * @see #DEFAULT
     * @see #ALWAYS_ENABLED
     * @see #ALWAYS_DISABLED
     */
    public enum UndoManagerPolicy {
        /**
         * The undoable edits are saved exactly when the {@code JTextComponent} is enabled and
         * editable.
         *
         * <p>The {@code ZapTextComponentUndoManager} listens to the changes in the properties
         * "enabled" and "editable", so that can change accordingly to save or not the undoable
         * edits.
         */
        DEFAULT,
        /**
         * The undoable edits are always saved, even if the {@code JTextComponent} is not editable
         * and/or enabled.
         */
        ALWAYS_ENABLED,
        /** The undoable edits are not saved. */
        ALWAYS_DISABLED
    }
    private static final long serialVersionUID = -5728632360771625298L;
    /** The text component whose edits this manager records; never {@code null}. */
    private final JTextComponent textComponent;
    /** Action installed under the undo key stroke while recording is enabled. */
    private final UndoAction undoAction;
    /** Action installed under the redo key stroke while recording is enabled. */
    private final RedoAction redoAction;
    /** Whether the document/property listeners and key bindings are installed. */
    private boolean enabled;
    /** Active policy; {@code null} only while the constructor is running. */
    private UndoManagerPolicy policy;
    /**
     * Creates a new {@code ZapTextComponentUndoManager} with a {@code DEFAULT} policy.
     *
     * @param textComponent the {@code JTextComponent} that will have undoable edits.
     * @throws NullPointerException if textComponent is {@code null}.
     * @see UndoManagerPolicy#DEFAULT
     */
    public ZapTextComponentUndoManager(JTextComponent textComponent) {
        super();
        if (textComponent == null) {
            throw new NullPointerException("The textComponent must not be null.");
        }
        this.textComponent = textComponent;
        this.undoAction = new UndoAction(this);
        this.redoAction = new RedoAction(this);
        this.enabled = false;
        // Start from null so setUndoManagerPolicy sees a policy change and
        // installs the listeners/bindings for the DEFAULT policy.
        this.policy = null;
        setUndoManagerPolicy(UndoManagerPolicy.DEFAULT);
    }
    /**
     * Sets the new policy.
     *
     * @param policy the new policy
     * @throws NullPointerException if policy is {@code null}
     * @see UndoManagerPolicy
     */
    public final void setUndoManagerPolicy(UndoManagerPolicy policy) throws NullPointerException {
        if (policy == null) {
            throw new NullPointerException("The policy must not be null.");
        }
        if (this.policy == policy) {
            return;
        }
        final UndoManagerPolicy oldPolicy = this.policy;
        this.policy = policy;
        // Only the DEFAULT policy needs to track the "editable"/"enabled"
        // properties of the text component.
        if (oldPolicy == UndoManagerPolicy.DEFAULT) {
            this.textComponent.removePropertyChangeListener("editable", this);
            this.textComponent.removePropertyChangeListener("enabled", this);
        }
        if (this.policy == UndoManagerPolicy.DEFAULT) {
            this.textComponent.addPropertyChangeListener("editable", this);
            this.textComponent.addPropertyChangeListener("enabled", this);
        }
        handleUndoManagerPolicy();
    }
    @Override
    public void propertyChange(PropertyChangeEvent evt) {
        final String propertyName = evt.getPropertyName();
        if ("document".equals(propertyName)) {
            if (this.enabled) {
                // The component's Document was replaced: move the undoable-edit
                // listener from the old document to the new one.
                // NOTE(review): edits recorded against the old document remain in
                // this manager's history — confirm whether they should be
                // discarded here (discardAllEdits).
                ((Document) evt.getOldValue()).removeUndoableEditListener(this);
                ((Document) evt.getNewValue()).addUndoableEditListener(this);
                if (policy == UndoManagerPolicy.DEFAULT) {
                    handleUndoManagerDefaultPolicy();
                }
            }
        } else if (policy == UndoManagerPolicy.DEFAULT
                && ("editable".equals(propertyName) || "enabled".equals(propertyName))) {
            handleUndoManagerDefaultPolicy();
        }
    }
    // Installs or removes the document listener, the undo/redo actions and their
    // key bindings, keeping all of them consistent with the "enabled" flag.
    private void setEnabled(boolean enabled) {
        if (enabled != this.enabled) {
            this.enabled = enabled;
            if (enabled) {
                this.textComponent.addPropertyChangeListener("document", this);
                this.textComponent.getDocument().addUndoableEditListener(this);
                this.textComponent.getActionMap().put(UndoAction.ACTION_NAME, undoAction);
                this.textComponent.getActionMap().put(RedoAction.ACTION_NAME, redoAction);
                this.textComponent.getInputMap().put(UndoAction.KEY_STROKE, UndoAction.ACTION_NAME);
                this.textComponent.getInputMap().put(RedoAction.KEY_STROKE, RedoAction.ACTION_NAME);
            } else {
                this.textComponent.removePropertyChangeListener("document", this);
                this.textComponent.getDocument().removeUndoableEditListener(this);
                this.textComponent.getActionMap().remove(UndoAction.ACTION_NAME);
                this.textComponent.getActionMap().remove(RedoAction.ACTION_NAME);
                this.textComponent.getInputMap().remove(UndoAction.KEY_STROKE);
                this.textComponent.getInputMap().remove(RedoAction.KEY_STROKE);
            }
        }
    }
    // Applies the current policy by enabling or disabling edit recording.
    private void handleUndoManagerPolicy() {
        switch (policy) {
            case ALWAYS_DISABLED:
                this.setEnabled(false);
                break;
            case ALWAYS_ENABLED:
                this.setEnabled(true);
                break;
            case DEFAULT:
            default:
                handleUndoManagerDefaultPolicy();
        }
    }
    // DEFAULT policy: record edits exactly when the component is editable and enabled.
    private void handleUndoManagerDefaultPolicy() {
        this.setEnabled(this.textComponent.isEditable() && this.textComponent.isEnabled());
    }
    /** Action that undoes the last edit, bound to {@code Constant.ACCELERATOR_UNDO}. */
    private static final class UndoAction extends AbstractAction {
        private static final long serialVersionUID = 6681683056944213164L;
        public static final String ACTION_NAME = "Undo";
        public static final KeyStroke KEY_STROKE =
                KeyStroke.getKeyStroke(Constant.ACCELERATOR_UNDO);
        private UndoManager undoManager;
        public UndoAction(UndoManager undoManager) {
            super(ACTION_NAME);
            this.undoManager = undoManager;
        }
        @Override
        public void actionPerformed(ActionEvent evt) {
            try {
                if (undoManager.canUndo()) {
                    undoManager.undo();
                }
            } catch (CannotUndoException e) {
                // Ignored: canUndo() was checked just above; nothing sensible to
                // do if the edit can no longer be undone.
            }
        }
    }
    /** Action that redoes the last undone edit, bound to {@code Constant.ACCELERATOR_REDO}. */
    private static final class RedoAction extends AbstractAction {
        private static final long serialVersionUID = -7098526742716575130L;
        public static final String ACTION_NAME = "Redo";
        public static final KeyStroke KEY_STROKE =
                KeyStroke.getKeyStroke(Constant.ACCELERATOR_REDO);
        private UndoManager undoManager;
        public RedoAction(UndoManager undoManager) {
            super(ACTION_NAME);
            this.undoManager = undoManager;
        }
        @Override
        public void actionPerformed(ActionEvent evt) {
            try {
                if (undoManager.canRedo()) {
                    undoManager.redo();
                }
            } catch (CannotRedoException e) {
                // Ignored: canRedo() was checked just above; nothing sensible to
                // do if the edit can no longer be redone.
            }
        }
    }
}
|
jOOλ - Java 8中的缺失部分
jOOλ 弥补了 JDK 类库中专家组无暇顾及的部分:它为顺序 Stream 添加了元组支持、函数式编程支持以及许多附加功能。
JDK 8的主要工作(默认方法,lambdas和Stream API)主要围绕保持向后兼容性和实现并行功能API。
jOOR - Java中的流利反思
jOOR是一个非常简单的流畅API,它以更直观的方式访问您的Java类结构。
JDK的反射API很难并且使用起来很冗长。其他语言具有更简单的构造,以在运行时访问类型元信息。让我们更好地进行Java反射。
jOOU - 面向对象的无符号整数(Object Oriented Unsigned)
jOOU 为 Java 的四种整数类型 byte、short、int 和 long 提供无符号版本。
jOOX - 应用于W3C DOM的jQuery的强大功能
与JDBC一样,DOM是一个功能强大但非常详细的低级API来操作XML。
HTML DOM可以用JavaScript中的流行jQuery产品进行操作。为什么我们不用Java中的jQuery?jOOX是jQuery的XML部分,适用于Java。
jOOX 是 Java Object Oriented XML 的缩写(尽管我觉得这名字有点猥琐),是 org.w3c.dom 包的简单封装。用于 XML 文档的创建以及 DOM 的操作,其思路很像 jQuery。
[jOOL Github](https://github.com/jOOQ/jOOL)
[jOOR Github](https://github.com/jOOQ/jOOR)
[jOOU Github](https://github.com/jOOQ/jOOU)
[jOOX Github](https://github.com/jOOQ/jOOX)
[jOOX介绍](https://www.oschina.net/p/joox)
|
## DemoASM - Demos and Examples
### ASKPSWD.ASM
Extremely simple password prompt demo. It should not be used as-is,
since it uses the command line to provide the username and password and
does not provide a hashing function. These are only a few of the reasons
it is not secure and should not be used in a production environment.
However, it does demonstrate several things that would be required
for a secure password prompt. Things like maximum incorrect password
tries, invalid password delays, inactivity time out, fixed time password
comparison, sensitive data wiping before exit and more.
|
use reqwest;
use client::voting_choose;
use lib::VoteOption;
use lib::BOARD_PORT;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Fetch the available vote options from the local board service.
    let url = format!("http://localhost:{}/options", BOARD_PORT);
    let options = reqwest::get(url).await?.json::<Vec<VoteOption>>().await?;
    // Present the interactive choice prompt to the voter.
    voting_choose(&options)?;
    Ok(())
}
|
#!/usr/bin/env perl
# Demian Riccardi 06/02/2014
# use kmeans clustering from Math::Vector::Real::kdTree to process the results from FTMap into a set of
# binding sites. Print out as XYZ of mercury atoms.
#
# ftmap.bu.edu
#
# the ftmap pdbs have the small molecule binders in between /HEADER crosscluster/ and a /REMARK/
# if you process the ftmap pdbs and lose these separations, you'll have to work up another solution
# to pulling all the FTMAP small molecule binders into a set of Math::Vector::Real.
#
# arguments:
#   1. Str to find pdbs to use
#   2. Num cutoff for separation between sites.
#
# The initial number of clusters is set at 20. This is decremented until all clusters
# are >= cutoff separation.
#
use Modern::Perl;
use HackaMol;
use FileHandle;
use Math::Vector::Real;
use Math::Vector::Real::kdTree;
use Math::Vector::Real::Neighbors;
use YAML::XS;

die "pass Str cutoff\n" unless @ARGV;

my $regex = shift;
my $rcut  = shift || 7.5;    # minimum allowed distance between cluster centers

my @files = glob("$regex*.pdb");
say foreach @files;

# Collect the coordinates of every small-molecule binder atom across all pdbs.
my @ftmap;
foreach my $fh ( map { FileHandle->new("< $_") } @files ) {
    while (<$fh>) {
        # Binder records live between the crosscluster HEADER and the next REMARK.
        if ( /HEADER crosscluster/ .. /REMARK/ ) {
            if (/ATOM\s+\d+/) {
                my ( $symbol, $xyz ) = unpack "x13A1x16A24", $_;
                push @ftmap, V( split( ' ', $xyz ) );
            }
        }
    }
}

my $tree = Math::Vector::Real::kdTree->new(@ftmap);

# Start with 20 clusters and decrement until all cluster centers are at least
# $rcut apart (nearest-neighbor distance check).
my @means;
my @dist;
my $ki = 20;
while ($ki) {
    @means = $tree->k_means_start($ki);
    @means = $tree->k_means_loop(@means);
    my @ineigh = Math::Vector::Real::Neighbors->neighbors(@means);
    @dist = map { $means[$_]->dist( $means[ $ineigh[$_] ] ) } 0 .. $#ineigh;
    if ( grep { $_ < $rcut } @dist ) {
        $ki--;
        next;
    }
    else {
        last;
    }
}

# Print the cluster centers as mercury (Z = 80) atoms in XYZ format.
HackaMol::Molecule->new(
    atoms => [ map { HackaMol::Atom->new( Z => 80, coords => [$_] ) } @means ],
)->print_xyz;
|
(in-package "ACL2")
; hifat-entry-count.lisp Mihir Mehta
; hifat-entry-count is related to the problem of transforming a potentially loopy
; FAT32 disk image into a tree in a bounded amount of time. Some lemmas for
; reasoning about it are placed here.
(include-book "hifat-equiv")
;; We're not counting this very directory, because the root does not have a
;; directory entry for itself.
;;
;; Before disabling, this rule used to cause 436909 frames and 8297 tries in
;; the main book; now those numbers are 4997 and 63 respectively.
;; Counts the directory entries in the file-system tree FS, recurring into
;; directory contents.  An entry whose (fixed) name already appears later in
;; the alist contributes 0, matching the hifat-no-dups-p discipline.
(defund
  hifat-entry-count (fs)
  (declare (xargs :guard (and (m1-file-alist-p fs)
                              (hifat-no-dups-p fs))))
  (if
   (atom fs)
   0
   (+
    (hifat-entry-count (cdr fs))
    (if
     (consp
      (assoc-equal (mbe :logic (fat32-filename-fix (caar fs))
                        :exec (caar fs))
                   (mbe :logic (hifat-file-alist-fix (cdr fs))
                        :exec (cdr fs))))
     0
     (if
      (m1-directory-file-p (mbe :logic (m1-file-fix (cdar fs))
                                :exec (cdar fs)))
      (+ 1
         (hifat-entry-count (m1-file->contents (cdar fs))))
      1)))))
;; hifat-entry-count is invariant under hifat-file-alist-fix, so rewrite
;; rules may drop the fix inside entry-count terms.
(defthm hifat-entry-count-of-hifat-file-alist-fix
  (equal (hifat-entry-count (hifat-file-alist-fix fs))
         (hifat-entry-count fs))
  :hints (("Goal" :in-theory (enable hifat-entry-count)) ))
;; remove1-assoc-equal preserves m1-file-alist-p.
(defthm
  m1-file-alist-p-of-remove1-assoc-equal
  (implies (m1-file-alist-p m1-file-alist)
           (m1-file-alist-p (remove1-assoc-equal key m1-file-alist))))
;; Decomposition of the entry count when a key X is present: one for the
;; entry itself, plus the contents' count when it is a directory, plus the
;; count of the alist with X removed.  Disabled by default (defthmd).
(defthmd
  hifat-entry-count-when-hifat-no-dups-p
  (implies
   (and (m1-file-alist-p m1-file-alist)
        (hifat-no-dups-p m1-file-alist)
        (consp (assoc-equal x m1-file-alist)))
   (equal
    (hifat-entry-count m1-file-alist)
    (+
     (hifat-entry-count (remove1-assoc x m1-file-alist))
     1
     (if
      (m1-directory-file-p (cdr (assoc-equal x m1-file-alist)))
      (hifat-entry-count
       (m1-file->contents (cdr (assoc-equal x m1-file-alist))))
      0))))
  :hints
  (("goal"
    :in-theory (enable hifat-entry-count hifat-no-dups-p))))
;; Linear rule: the entry count strictly decreases on the cdr of a
;; well-formed, duplicate-free alist.
(defthm
  hifat-entry-count-of-cdr-1
  (implies (and (consp fs)
                (m1-file-alist-p fs)
                (hifat-no-dups-p fs))
           (< (hifat-entry-count (cdr fs))
              (hifat-entry-count fs)))
  :rule-classes :linear
  :hints
  (("goal"
    :in-theory (enable hifat-no-dups-p hifat-entry-count))))
;; Linear rule for the directory case: the first entry's contents plus the
;; rest of the alist still fall strictly short of the whole count.
(defthm
  hifat-entry-count-of-cdr-2
  (implies
   (and (consp fs)
        (hifat-no-dups-p fs)
        (m1-file-alist-p fs)
        (not (m1-regular-file-p (cdr (car fs)))))
   (< (+ (hifat-entry-count (m1-file->contents (cdr (car fs))))
         (hifat-entry-count (cdr fs)))
      (hifat-entry-count fs)))
  :rule-classes :linear
  :hints (("goal" :in-theory (enable hifat-no-dups-p)
           :expand (hifat-entry-count fs))))
;; Proves that hifat-subsetp implies a <= relation between entry counts.
;; A local induction scheme walks both alists in lockstep (removing the
;; matched key from the second alist) and is shown equivalent to
;; hifat-subsetp before the main linear rule is exported.
(encapsulate
  ()
  (local
   (defun
     induction-scheme
     (m1-file-alist1 m1-file-alist2)
     (declare
      (xargs
       :guard (and (m1-file-alist-p m1-file-alist1)
                   (m1-file-alist-p m1-file-alist2))
       :hints (("goal" :in-theory (enable m1-file->contents
                                          m1-directory-file-p)))))
     (b* (((when (atom m1-file-alist1)) t)
          ((when (or (atom (car m1-file-alist1))
                     (not (stringp (car (car m1-file-alist1))))))
           (and (member-equal (car m1-file-alist1)
                              m1-file-alist2)
                (induction-scheme (cdr m1-file-alist1)
                                  (remove1-assoc-equal
                                   (caar m1-file-alist1)
                                   m1-file-alist2))))
          (name (caar m1-file-alist1))
          (file1 (cdar m1-file-alist1))
          ((unless (consp (assoc-equal name m1-file-alist2)))
           nil)
          (file2 (cdr (assoc-equal name m1-file-alist2))))
       (if (not (m1-directory-file-p file1))
           (and (not (m1-directory-file-p file2))
                (induction-scheme (cdr m1-file-alist1)
                                  (remove1-assoc-equal
                                   name
                                   m1-file-alist2))
                (equal (m1-file->contents file1)
                       (m1-file->contents file2)))
         (and (m1-directory-file-p file2)
              (induction-scheme (cdr m1-file-alist1)
                                (remove1-assoc-equal
                                 name
                                 m1-file-alist2))
              (induction-scheme (m1-file->contents file1)
                                (m1-file->contents file2)))))))
  (local
   (defthm
     induction-scheme-correctness
     (implies (and (hifat-no-dups-p m1-file-alist1)
                   (m1-file-alist-p m1-file-alist1))
              (iff (induction-scheme m1-file-alist1 m1-file-alist2)
                   (hifat-subsetp m1-file-alist1 m1-file-alist2)))
     :hints (("goal" :induct (induction-scheme m1-file-alist1 m1-file-alist2)
              :in-theory (enable hifat-no-dups-p)))))
  (defthm
    hifat-entry-count-when-hifat-subsetp
    (implies (and (hifat-no-dups-p m1-file-alist1)
                  (m1-file-alist-p m1-file-alist1)
                  (hifat-no-dups-p m1-file-alist2)
                  (m1-file-alist-p m1-file-alist2)
                  (hifat-subsetp m1-file-alist1 m1-file-alist2))
             (<= (hifat-entry-count m1-file-alist1)
                 (hifat-entry-count m1-file-alist2)))
    :rule-classes :linear
    :hints
    (("goal" :induct (induction-scheme m1-file-alist1 m1-file-alist2)
      :in-theory (enable hifat-no-dups-p hifat-entry-count))
     ("subgoal *1/7"
      :use (:instance (:rewrite hifat-entry-count-when-hifat-no-dups-p)
                      (m1-file-alist m1-file-alist2)
                      (x (car (car m1-file-alist1)))))
     ("subgoal *1/4"
      :use (:instance (:rewrite hifat-entry-count-when-hifat-no-dups-p)
                      (m1-file-alist m1-file-alist2)
                      (x (car (car m1-file-alist1))))))))
;; This rule is kinda problematic because it has caused an infinite rewrite at
;; least once in hifat-to-lofat-inversion-big-induction, which was only
;; resolved by disabling it. It would be nice to make this a plain congruence
;; rule - but that would require the m1-file-alist and hifat-no-dups-p
;; hypotheses to be removed, which in turn would require the definition of
;; hifat-entry-count to be changed.
;; Congruence: hifat-equiv preserves hifat-entry-count.  Proved from two
;; applications of the subsetp lemma, one in each direction.
(defthm
  hifat-entry-count-when-hifat-equiv
  (implies (hifat-equiv m1-file-alist1 m1-file-alist2)
           (equal (hifat-entry-count m1-file-alist1)
                  (hifat-entry-count m1-file-alist2)))
  :rule-classes :congruence
  :hints
  (("goal"
    :in-theory (e/d (hifat-equiv)
                    (hifat-entry-count-when-hifat-subsetp))
    :use
    ((:instance hifat-entry-count-when-hifat-subsetp
                (m1-file-alist1 (hifat-file-alist-fix m1-file-alist2))
                (m1-file-alist2 (hifat-file-alist-fix m1-file-alist1)))
     (:instance hifat-entry-count-when-hifat-subsetp
                (m1-file-alist1 (hifat-file-alist-fix m1-file-alist1))
                (m1-file-alist2 (hifat-file-alist-fix m1-file-alist2)))))))
|
package admin
import (
adminPb "github.com/textileio/powergate/v2/api/gen/powergate/admin/v1"
"github.com/textileio/powergate/v2/ffs/manager"
"github.com/textileio/powergate/v2/ffs/scheduler"
"github.com/textileio/powergate/v2/wallet"
)
// Service implements the Admin API.
type Service struct {
	adminPb.UnimplementedAdminServiceServer
	// m is the FFS manager (ffs/manager).
	m *manager.Manager
	// s is the FFS job scheduler (ffs/scheduler).
	s *scheduler.Scheduler
	// wm provides wallet operations.
	wm wallet.Module
}
// New creates a new AdminService. The provided manager, scheduler and
// wallet module are stored as-is on the returned Service.
func New(m *manager.Manager, s *scheduler.Scheduler, wm wallet.Module) *Service {
	return &Service{
		m:  m,
		s:  s,
		wm: wm,
	}
}
|
@file:OptIn(UnstableKatanApi::class)
package me.devnatan.katan.webserver
import io.ktor.http.*
import me.devnatan.katan.api.account.Account
import me.devnatan.katan.api.annotations.UnstableKatanApi
import me.devnatan.katan.api.plugin.Plugin
import me.devnatan.katan.api.role.Role
import me.devnatan.katan.api.security.permission.*
import me.devnatan.katan.api.server.Server
import me.devnatan.katan.api.server.ServerHolder
import me.devnatan.katan.api.server.get
/**
 * Serializes the permissions of [entity] as a list of maps, one per
 * registered permission key that applies to the entity's type (Account,
 * Role or ServerHolder; any other type sees every key). Keys the entity
 * does not hold are emitted with [PermissionFlag.NOT_ALLOWED] as value.
 */
private fun mapPermissions(entity: PermissionsHolder, permissionManager: PermissionManager): List<Map<String, Any?>> {
    return permissionManager.getRegisteredPermissionKeys().filter { key ->
        when (entity) {
            is Account -> key.isTypeOf(PermissionKeyType.ACCOUNT)
            is Role -> key.isTypeOf(PermissionKeyType.ROLE)
            is ServerHolder -> key.isTypeOf(PermissionKeyType.SERVER_HOLDER)
            else -> true
        }
    }.sortedBy { key -> key.code }.map { key ->
        entity.getPermission(key)?.let { permission ->
            mapOf(
                "key" to key,
                "value" to permission.value.code,
                "given_at" to permission.givenAt.toHttpDateString(),
                "last_modified" to permission.lastModified.toHttpDateString(),
                // Only inherited permissions carry a source; others serialize null.
                "inherited_from" to if (permission is InheritedPermission) permission.inheritedFrom else null
            )
        } ?: mapOf("key" to key, "value" to PermissionFlag.NOT_ALLOWED.code)
    }
}
/**
 * Serializes this [Server] into a plain map suitable for HTTP responses.
 * (The redundant per-function @OptIn was removed: the file is already
 * annotated with @file:OptIn(UnstableKatanApi::class).)
 */
fun Server.serialize(): Map<String, Any?> = mapOf(
    "id" to id,
    "name" to name,
    "state" to state,
    "game" to mapOf(
        "name" to game.type.name,
        "version" to game.version
    ),
    "host" to host,
    "port" to port,
    "compositions" to compositions.map { it.factory[it.key] },
    "container" to mapOf(
        "id" to container.id,
        "is_inspected" to container.isInspected(),
        "inspection" to container.inspection
    )
)
/**
 * Serializes this [Account] into a plain map suitable for HTTP responses,
 * including its role (when present) and its permissions as produced by
 * [mapPermissions] with the given [permissionManager].
 */
fun Account.serialize(
    permissionManager: PermissionManager
): Map<String, Any?> = mapOf(
    "id" to id,
    "username" to username,
    "registered_at" to registeredAt.toHttpDateString(),
    // last_login is null for accounts that never logged in.
    "last_login" to lastLogin?.toHttpDateString(),
    "role" to role?.let { role ->
        mapOf(
            "id" to role.id,
            "name" to role.name,
            "created_at" to role.createdAt.toHttpDateString(),
            "permissions" to mapPermissions(role, permissionManager)
        )
    },
    "permissions" to mapPermissions(this, permissionManager)
)
/** Serializes this [Plugin] into a plain map suitable for HTTP responses. */
fun Plugin.serialize(): Map<String, Any?> {
    val meta = descriptor
    return mapOf(
        "name" to meta.name,
        "version" to meta.version,
        "author" to meta.author,
        "state" to state.order
    )
}
|
# coding: UTF-8
require 'spec_helper'
# /etc/zookeeper should be a symlink into the Confluent installation.
describe file('/etc/zookeeper') do
  it { should be_directory }
  it { should be_linked_to '/opt/confluent/confluent-2.0.1/etc/kafka' }
end
# The bundled zookeeper control script must be executable by the
# confluent user.
describe file('/opt/confluent/confluent-2.0.1/bin/zookeeper-server-stop') do
  it { should be_file }
  it { should be_owned_by 'confluent' }
  it { should be_grouped_into 'confluent' }
  it { should be_mode 755 }
end
# The init script that manages the zookeeper service.
describe file('/etc/init.d/zookeeper') do
  it { should be_file }
  it { should be_owned_by 'confluent' }
  it { should be_grouped_into 'confluent' }
  it { should be_mode 755 }
end
describe service('zookeeper') do
  it { should be_running }
end
# Kafka Connect API
describe port(8083) do
  it { should be_listening }
end
|
# Spherical harmonic I/O, storage, and conversions
# Spherical harmonic I/O
# SHRead
# SHRead2
# SHReadJPL
# Spherical harmonic storage
export SHCilmToCindex!
"""
    cindex = SHCilmToCindex!(cindex::AbstractArray{Cdouble,2},
                             cilm::AbstractArray{Cdouble,3};
                             degmax::Optional{Cint}=nothing,
                             exitstatus::Optional{Ref{<:Integer}}=nothing)
    cindex::AbstractArray{Cdouble,2}
Convert a three-dimensional array of spherical harmonic coefficients
to a two-dimensional indexed array.
See also: [`SHCilmToCindex`](@ref), [`SHCindexToCilm!`](@ref),
[`SHCilmToVector!`](@ref)
"""
function SHCilmToCindex!(cindex::AbstractArray{Cdouble,2},
                         cilm::AbstractArray{Cdouble,3};
                         degmax::Optional{Cint}=nothing,
                         exitstatus::Optional{Ref{<:Integer}}=nothing)
    @assert size(cilm, 1) == 2
    lmaxin = size(cilm, 2) - 1
    @assert lmaxin ≥ 0
    @assert size(cilm, 3) == size(cilm, 2)
    # Default degmax to the maximum degree stored in cilm.
    degmax′ = optional(degmax, lmaxin)
    @assert size(cindex, 1) == 2
    # cindex needs one column per (l, m) pair up to degree degmax′.
    @assert size(cindex, 2) ≥ (degmax′ + 1) * (degmax′ + 2) ÷ 2
    # Delegate to the SHTOOLS library routine; leading array dimensions are
    # passed explicitly.
    exitstatus′ = Ref{Cint}()
    ccall((:SHCilmToCindex, libSHTOOLS), Cvoid,
          (Ptr{Cdouble}, Cint, Ptr{Cdouble}, Cint, Ref{Cint}, Ref{Cint}), cilm,
          size(cilm, 2), cindex, size(cindex, 2), degmax′, exitstatus′)
    # Either surface the library's error code to the caller or raise on error.
    if exitstatus === nothing
        exitstatus′[] ≠ 0 &&
            error("SHCilmToCindex!: Error code $(exitstatus′[])")
    else
        exitstatus[] = exitstatus′[]
    end
    return cindex
end
export SHCilmToCindex
"""
    cindex = SHCilmToCindex(cilm::AbstractArray{Cdouble,3};
                            degmax::Optional{Cint}=nothing,
                            exitstatus::Optional{Ref{<:Integer}}=nothing)
    cindex::Array{Cdouble,2}
Convert a three-dimensional array of spherical harmonic coefficients
to a two-dimensional indexed array.
See also: [`SHCilmToCindex!`](@ref), [`SHCindexToCilm`](@ref),
[`SHCilmToVector`](@ref)
"""
function SHCilmToCindex(cilm::AbstractArray{Cdouble,3};
                        degmax::Optional{Cint}=nothing,
                        exitstatus::Optional{Ref{<:Integer}}=nothing)
    @assert size(cilm, 1) == 2
    lmaxin = size(cilm, 2) - 1
    @assert lmaxin ≥ 0
    @assert size(cilm, 3) == size(cilm, 2)
    degmax′ = optional(degmax, lmaxin)
    # Allocate one column per (l, m) pair and delegate to the in-place variant.
    cindex = Array{Cdouble}(undef, 2, (degmax′ + 1) * (degmax′ + 2) ÷ 2)
    SHCilmToCindex!(cindex, cilm; degmax=degmax, exitstatus=exitstatus)
    return cindex
end
export SHCindexToCilm!
"""
    cilm = SHCindexToCilm!(cilm::AbstractArray{Cdouble,3},
                           cindex::AbstractArray{Cdouble,2};
                           degmax::Optional{Cint}=nothing,
                           exitstatus::Optional{Ref{<:Integer}}=nothing)
    cilm::AbstractArray{Cdouble,3},
Convert a two-dimensional indexed array of spherical harmonic
coefficients to a three-dimensional array.
See also: [`SHCindexToCilm`](@ref), [`SHCilmToCindex!`](@ref),
[`SHVectorToCilm!`](@ref)
"""
function SHCindexToCilm!(cilm::AbstractArray{Cdouble,3},
                         cindex::AbstractArray{Cdouble,2};
                         degmax::Optional{Cint}=nothing,
                         exitstatus::Optional{Ref{<:Integer}}=nothing)
    @assert size(cindex, 1) == 2
    # (lmaxin + 1) * (lmaxin + 2) ÷ 2 = size(cindex, 2)
    lmaxin = round(Int, (sqrt(8 * size(cindex, 2) + 1) - 3) / 2)
    @assert size(cindex, 2) == (lmaxin + 1) * (lmaxin + 2) ÷ 2
    degmax′ = optional(degmax, lmaxin)
    # NOTE(review): this bound looks weak — the matching column count for
    # degmax′ would be (degmax′ + 1) * (degmax′ + 2) ÷ 2; confirm against
    # the SHTOOLS documentation.
    @assert size(cindex, 2) ≥ degmax′
    @assert size(cilm, 1) == 2
    @assert size(cilm, 2) ≥ degmax′ + 1
    @assert size(cilm, 3) == size(cilm, 2)
    exitstatus′ = Ref{Cint}()
    ccall((:SHCindexToCilm, libSHTOOLS), Cvoid,
          (Ptr{Cdouble}, Cint, Ptr{Cdouble}, Cint, Ref{Cint}, Ref{Cint}),
          cindex, size(cindex, 2), cilm, size(cilm, 2), degmax′, exitstatus′)
    # Either surface the library's error code to the caller or raise on error.
    if exitstatus === nothing
        exitstatus′[] ≠ 0 &&
            error("SHCindexToCilm!: Error code $(exitstatus′[])")
    else
        exitstatus[] = exitstatus′[]
    end
    return cilm
end
export SHCindexToCilm
"""
    cilm = SHCindexToCilm(cindex::AbstractArray{Cdouble,2};
                          degmax::Optional{Cint}=nothing,
                          exitstatus::Optional{Ref{<:Integer}}=nothing)
    cilm::Array{Cdouble,3}
Convert a two-dimensional indexed array of spherical harmonic
coefficients to a three-dimensional array.
See also: [`SHCindexToCilm!`](@ref), [`SHCilmToCindex`](@ref),
[`SHVectorToCilm`](@ref)
"""
function SHCindexToCilm(cindex::AbstractArray{Cdouble,2};
                        degmax::Optional{Cint}=nothing,
                        exitstatus::Optional{Ref{<:Integer}}=nothing)
    # Fixed: `degmax` and `exitstatus` were declared as positional optional
    # arguments (comma instead of semicolon), contradicting the docstring and
    # every sibling function, which take them as keyword arguments.
    @assert size(cindex, 1) == 2
    # (lmaxin + 1) * (lmaxin + 2) ÷ 2 = size(cindex, 2)
    lmaxin = round(Int, (sqrt(8 * size(cindex, 2) + 1) - 3) / 2)
    @assert size(cindex, 2) == (lmaxin + 1) * (lmaxin + 2) ÷ 2
    degmax′ = optional(degmax, lmaxin)
    @assert size(cindex, 2) ≥ degmax′
    # Allocate the output and delegate to the in-place variant, which also
    # performs the error-code handling.
    cilm = Array{Cdouble}(undef, 2, degmax′ + 1, degmax′ + 1)
    SHCindexToCilm!(cilm, cindex; degmax=degmax, exitstatus=exitstatus)
    return cilm
end
export SHCilmToVector!
"""
    SHCilmToVector!(vector::AbstractVector{Cdouble},
                    cilm::AbstractArray{Cdouble,3},
                    lmax::Integer;
                    exitstatus::Union{Nothing,Ref{<:Integer}}=nothing)
Convert a three-dimensional array of real spherical harmonic
coefficients to a 1-dimensional indexed vector.
See also: [`SHCilmToVector`](@ref)
"""
function SHCilmToVector!(vector::AbstractVector{Cdouble},
                         cilm::AbstractArray{Cdouble,3}, lmax::Integer;
                         exitstatus::Optional{Ref{<:Integer}}=nothing)
    @assert lmax ≥ 0
    # The packed vector holds (lmax + 1)^2 coefficients.
    @assert length(vector) ≥ (lmax + 1)^2
    @assert size(cilm, 1) == 2
    @assert size(cilm, 2) ≥ lmax + 1
    @assert size(cilm, 3) == size(cilm, 2)
    exitstatus′ = Ref{Cint}()
    ccall((:SHCilmToVector, libSHTOOLS), Cvoid,
          (Ptr{Cdouble}, Cint, Ptr{Cdouble}, Cint, Ref{Cint}), cilm,
          size(cilm, 2), vector, lmax, exitstatus′)
    # Either surface the library's error code to the caller or raise on error.
    if exitstatus === nothing
        exitstatus′[] ≠ 0 &&
            error("SHCilmToVector!: Error code $(exitstatus′[])")
    else
        exitstatus[] = exitstatus′[]
    end
    return vector
end
export SHCilmToVector
"""
    vector = SHCilmToVector(cilm::AbstractArray{Cdouble,3},
                            lmax::Integer;
                            exitstatus::Union{Nothing,Ref{<:Integer}}=nothing)
    vector::Vector{Cdouble}
Convert a three-dimensional array of real spherical harmonic
coefficients to a 1-dimensional indexed array.
See also: [`SHCilmToVector!`](@ref)
"""
function SHCilmToVector(cilm::AbstractArray{Cdouble,3}, lmax::Integer;
                        exitstatus::Optional{Ref{<:Integer}}=nothing)
    @assert lmax ≥ 0
    # Allocate the packed output and delegate to the in-place variant.
    vector = Array{Cdouble}(undef, (lmax + 1)^2)
    SHCilmToVector!(vector, cilm, lmax; exitstatus=exitstatus)
    return vector
end
export SHVectorToCilm!
"""
    SHVectorToCilm!(cilm::AbstractArray{Cdouble,3},
                    vector::AbstractVector{Cdouble},
                    lmax::Integer;
                    exitstatus::Union{Nothing,Ref{<:Integer}}=nothing)
Convert a 1-dimensional indexed vector of real spherical harmonic
coefficients to a 3-dimensional array.
See also: [`SHVectorToCilm`](@ref)
"""
function SHVectorToCilm!(cilm::AbstractArray{Cdouble,3},
                         vector::AbstractVector{Cdouble}, lmax::Integer;
                         exitstatus::Optional{Ref{<:Integer}}=nothing)
    @assert lmax ≥ 0
    @assert size(cilm, 1) == 2
    @assert size(cilm, 2) ≥ lmax + 1
    @assert size(cilm, 3) == size(cilm, 2)
    # The packed vector holds (lmax + 1)^2 coefficients.
    @assert length(vector) ≥ (lmax + 1)^2
    exitstatus′ = Ref{Cint}()
    ccall((:SHVectorToCilm, libSHTOOLS), Cvoid,
          (Ptr{Cdouble}, Ptr{Cdouble}, Cint, Cint, Ref{Cint}), vector, cilm,
          size(cilm, 2), lmax, exitstatus′)
    # Either surface the library's error code to the caller or raise on error.
    if exitstatus === nothing
        exitstatus′[] ≠ 0 &&
            error("SHVectorToCilm!: Error code $(exitstatus′[])")
    else
        exitstatus[] = exitstatus′[]
    end
    return cilm
end
export SHVectorToCilm
"""
    cilm = SHVectorToCilm(vector::AbstractVector{Cdouble},
                          lmax::Integer;
                          exitstatus::Union{Nothing,Ref{<:Integer}}=nothing)
    cilm::Array{Cdouble,3}
Convert a 1-dimensional indexed vector of real spherical harmonic
coefficients to a 3-dimensional array.
See also: [`SHVectorToCilm!`](@ref)
"""
function SHVectorToCilm(vector::AbstractVector{Cdouble}, lmax::Integer;
                        exitstatus::Optional{Ref{<:Integer}}=nothing)
    # Validate inputs before allocating the output array.
    @assert lmax ≥ 0
    @assert length(vector) ≥ (lmax + 1)^2
    # Allocate the 2 × (lmax+1) × (lmax+1) coefficient array and fill it in
    # place. (A dead, unused `Ref{Cint}` status variable was removed here;
    # error handling is fully delegated to SHVectorToCilm!.)
    cilm = Array{Cdouble}(undef, 2, lmax + 1, lmax + 1)
    SHVectorToCilm!(cilm, vector, lmax; exitstatus=exitstatus)
    return cilm
end
export YlmIndexVector
"""
index = YlmIndexVector(i, l, m)
Determine the index of a 1D ordered vector of spherical harmonic
coefficients corresponding to `i`, `l`, and `m`.
See also: [`SHCilmToVector!`](@ref), [`SHCilmToVector`](@ref),
[`SHVectorToCilm!`](@ref), [`SHVectorToCilm`](@ref)
"""
function YlmIndexVector(i::Integer, l::Integer, m::Integer)
    # i selects the cosine (1) or sine (2) block, l the degree, m the order.
    @assert 1 ≤ i ≤ 2
    @assert 0 ≤ l
    @assert 0 ≤ m ≤ l
    # Degrees < l occupy the first l^2 slots; the sine block of degree l is
    # offset by a further l entries.
    offset = i == 1 ? 0 : l
    return l^2 + offset + m + 1
end
|
# frozen_string_literal: true

module Kanrisuru
  module Core
    module IP
      # Entry point for "ip route": builds the command for the requested
      # action, runs it over the shell, and wraps parsing in a Result.
      def ip_route(action, opts)
        case action
        when 'show', 'list'
          version = ip_version.to_i
          command = ip_route_show(opts, version)
        when 'flush'
          command = ip_route_flush(opts)
        when 'add', 'change', 'append', 'del', 'delete', 'replace'
          command = ip_route_modify(action, opts)
        when 'get'
          command = ip_route_get(opts)
        end

        execute_shell(command)

        Kanrisuru::Result.new(command) do |cmd|
          # NOTE(review): `version` is assigned only in the 'show'/'list'
          # branch and is nil for every other action — confirm the parser
          # tolerates a nil version.
          Parser::Route.parse(cmd, action, version)
        end
      end

      # Builds "ip route show"; requests JSON output when the installed
      # iproute2 version supports it.
      def ip_route_show(opts, version)
        command = Kanrisuru::Command.new('ip')
        command.append_flag('-json') if version >= IPROUTE2_JSON_VERSION
        command.append_arg('-family', opts[:family])
        command << 'route show'

        ip_route_common_opts(command, opts)
        command
      end

      # Builds the route-modifying commands (add/change/append/del/delete/
      # replace). Argument order follows the iproute2 grammar.
      def ip_route_modify(action, opts)
        command = Kanrisuru::Command.new('ip route')
        command << action
        command.append_arg('to', opts[:to])
        command.append_arg('tos', opts[:tos])
        command.append_arg('dsfield', opts[:dsfield])
        command.append_arg('metric', opts[:metric])
        command.append_arg('preference', opts[:preference])
        command.append_arg('table', opts[:table])
        command.append_arg('vrf', opts[:vrf])
        command.append_arg('dev', opts[:dev])
        command.append_arg('via', opts[:via])
        command.append_arg('src', opts[:src])
        command.append_arg('realm', opts[:realm])

        # "mtu" may optionally be emitted as "mtu lock <value>".
        if Kanrisuru::Util.present?(opts[:mtu])
          if Kanrisuru::Util.present?(opts[:mtu_lock])
            command.append_arg('mtu lock', opts[:mtu])
          else
            command.append_arg('mtu', opts[:mtu])
          end
        end

        command.append_arg('window', opts[:window])
        command.append_arg('rtt', opts[:rtt])
        command.append_arg('rttvar', opts[:rttvar])
        command.append_arg('rto_min', opts[:rto_min])
        command.append_arg('ssthresh', opts[:ssthresh])
        command.append_arg('cwnd', opts[:cwnd])
        command.append_arg('initcwnd', opts[:initcwnd])
        command.append_arg('initrwnd', opts[:initrwnd])
        command.append_arg('features', opts[:features])
        command.append_arg('quickack', opts[:quickack])
        command.append_arg('fastopen_no_cookie', opts[:fastopen_no_cookie])

        # Congestion-control setting, optionally locked.
        if Kanrisuru::Util.present?(opts[:congctl])
          if Kanrisuru::Util.present?(opts[:congctl_lock])
            command.append_arg('congctl lock', opts[:congctl])
          else
            command.append_arg('congctl', opts[:congctl])
          end
        end

        command.append_arg('advmss', opts[:advmss])
        command.append_arg('reordering', opts[:reordering])

        # Optional nexthop specification (via / dev / weight).
        if Kanrisuru::Util.present?(opts[:next_hop])
          next_hop = opts[:next_hop]
          command << 'next_hop'
          command.append_arg('via', next_hop[:via])
          command.append_arg('dev', next_hop[:dev])
          command.append_arg('weight', next_hop[:weight])
        end

        command.append_arg('scope', opts[:scope])
        command.append_arg('protocol', opts[:protocol])
        command.append_flag('onlink', opts[:onlink])
        command.append_arg('pref', opts[:pref])
        command.append_arg('nhid', opts[:nhid])
        command
      end

      # Builds "ip route flush" with the shared selector options.
      def ip_route_flush(opts)
        command = Kanrisuru::Command.new('ip')
        command.append_arg('-family', opts[:family])
        command << 'route flush'

        ip_route_common_opts(command, opts)
        command
      end

      # Builds "ip route get" for a single-route lookup.
      def ip_route_get(opts)
        command = Kanrisuru::Command.new('ip route get')
        command.append_flag('fibmatch', opts[:fibmatch])
        command.append_arg('to', opts[:to])
        command.append_arg('from', opts[:from])
        command.append_arg('tos', opts[:tos])
        command.append_arg('dsfield', opts[:dsfield])
        command.append_arg('iif', opts[:iif])
        command.append_arg('oif', opts[:oif])
        command.append_arg('mark', opts[:mark])
        command.append_arg('vrf', opts[:vrf])
        command.append_arg('ipproto', opts[:ipproto])
        command.append_arg('sport', opts[:sport])
        command.append_arg('dport', opts[:dport])
        command.append_flag('connected', opts[:connected])
        command
      end

      # Route-selector options shared by "route show" and "route flush".
      def ip_route_common_opts(command, opts)
        command.append_arg('to', opts[:to])
        command.append_arg('dev', opts[:dev])
        command.append_arg('protocol', opts[:protocol])
        command.append_arg('type', opts[:type])
        command.append_arg('table', opts[:table])
        command.append_arg('tos', opts[:tos])
        command.append_arg('dsfield', opts[:dsfield])
        command.append_arg('via', opts[:via])
        command.append_arg('vrf', opts[:vrf])
        command.append_arg('src', opts[:src])
        command.append_arg('realm', opts[:realm])
        command.append_arg('realms', opts[:realms])
        command.append_arg('scope', opts[:scope])
        command.append_flag('cloned', opts[:cloned])
        command.append_flag('cached', opts[:cached])
      end
    end
  end
end
|
package com.redhat.vertx.pipeline;
/**
 * String constants naming the messages exchanged over the event bus by the
 * pipeline's publishers and subscribers.
 */
public interface EventBusMessage {
    String DOCUMENT_STARTED = "documentStarted";
    String DOCUMENT_COMPLETED = "documentCompleted";
    String DOCUMENT_CHANGED = "documentChanged";
    String CHANGE_REQUEST = "changeRequest";
    String SECTION_STARTED = "sectionStarted";
    String SECTION_COMPLETED = "sectionCompleted";
    String SECTION_ERRORED = "sectionErrored";
}
|
import { CommandParser } from "../lib";

// Minimal usage example: parse a prefixed command string into a success
// flag, the command name, and its argument list.
let parser = new CommandParser({ prefix: "~" });
let str = "~hello world";
let { success, command, args } = parser.parse(str);
console.log(success); // true
console.log(command); // "hello"
console.log(args); // ["world"]
|
package com.mahendran_sakkarai.camera.utils;
import android.content.Context;
import android.content.pm.PackageManager;
import android.hardware.Camera;
/**
* Created by Mahendran Sakkarai on 1/5/2017.
*/
public class CameraUtil {
    /**
     * Checks whether the device reports any camera hardware.
     *
     * @param context context used to query the {@link PackageManager}
     * @return {@code true} if the camera system feature is present
     */
    public static boolean checkCameraHardware(Context context) {
        // hasSystemFeature already yields the boolean we need; no branching required.
        return context.getPackageManager().hasSystemFeature(PackageManager.FEATURE_CAMERA);
    }

    /**
     * Attempts to open the default camera.
     *
     * @return a {@link Camera} instance, or {@code null} when opening fails
     *         (for example the camera is unavailable or already in use)
     */
    public static Camera getCameraInstance() {
        try {
            return Camera.open();
        } catch (Exception e) {
            // Opening can fail at runtime; log it and let callers handle null.
            e.printStackTrace();
            return null;
        }
    }
}
|
// Copyright (c) .NET Foundation. All rights reserved.
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
using System.Collections.Generic;
using System.IO;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.Configuration.Json;
namespace Microsoft.Extensions.Logging.Test
{
internal class TestConfiguration : JsonConfigurationProvider
{
    // Supplies the JSON payload on every Load() call, letting tests change
    // the configuration between reloads.
    private readonly Func<string> _json;

    public TestConfiguration(JsonConfigurationSource source, Func<string> json)
        : base(source)
    {
        _json = json;
    }

    /// <summary>
    /// Loads configuration by writing the current JSON string into an
    /// in-memory stream and handing it to the JSON provider.
    /// </summary>
    public override void Load()
    {
        using (var stream = new MemoryStream())
        {
            // Intentionally not disposing the writer: disposing it would
            // close the underlying stream before the provider reads it.
            var writer = new StreamWriter(stream);
            writer.Write(_json());
            writer.Flush();
            stream.Seek(0, SeekOrigin.Begin);
            Load(stream);
        }
    }

    /// <summary>
    /// Builds a ConfigurationRoot backed solely by this test provider.
    /// </summary>
    public static ConfigurationRoot Create(Func<string> getJson)
    {
        var provider = new TestConfiguration(new JsonConfigurationSource { Optional = true }, getJson);
        return new ConfigurationRoot(new List<IConfigurationProvider> { provider });
    }
}
}
|
/// Top-level response wrapper: a status string plus the tenant payload.
class TenantLogin {
  TenantLogin({
    required this.status,
    required this.data,
  });

  late final String status;
  late final Data data;

  /// Builds the wrapper from a decoded JSON map.
  TenantLogin.fromJson(Map<String, dynamic> json) {
    status = json['status'];
    data = Data.fromJson(json['data']);
  }

  /// Serializes back to a JSON-compatible map.
  Map<String, dynamic> toJson() => <String, dynamic>{
        'status': status,
        'data': data.toJson(),
      };
}
/// Tenant record: identifier, address, FCM token and phone number.
class Data {
  Data({
    required this.id,
    required this.address,
    required this.fcm,
    required this.phone,
  });

  late final String id;
  late final Address1 address;
  late final String fcm;
  late final int phone;

  /// Builds the record from a decoded JSON map.
  Data.fromJson(Map<String, dynamic> json) {
    id = json['id'];
    address = Address1.fromJson(json['address']);
    fcm = json['fcm'];
    phone = json['phone'];
  }

  /// Serializes back to a JSON-compatible map.
  Map<String, dynamic> toJson() => <String, dynamic>{
        'id': id,
        'address': address.toJson(),
        'fcm': fcm,
        'phone': phone,
      };
}
/// Postal address fields as delivered by the backend.
class Address1 {
  Address1({
    required this.co,
    required this.country,
    required this.dist,
    required this.house,
    required this.lm,
    required this.loc,
    required this.pc,
    required this.state,
    required this.vtc,
    required this.street,
  });

  late final String co;
  late final String country;
  late final String dist;
  late final String house;
  // Previously declared with an implicit type; made explicitly `dynamic`.
  // NOTE(review): payload type for 'lm' is unconfirmed upstream.
  late final dynamic lm;
  late final String loc;
  late final String pc;
  late final String state;
  late final String vtc;
  // NOTE(review): payload type for 'street' is unconfirmed upstream.
  late final dynamic street;

  /// Builds the address from a decoded JSON map.
  Address1.fromJson(Map<String, dynamic> json) {
    co = json['co'];
    country = json['country'];
    dist = json['dist'];
    house = json['house'];
    lm = json['lm'];
    loc = json['loc'];
    pc = json['pc'];
    state = json['state'];
    vtc = json['vtc'];
    street = json['street'];
  }

  /// Serializes back to a JSON-compatible map.
  Map<String, dynamic> toJson() {
    final _data = <String, dynamic>{};
    _data['co'] = co;
    _data['country'] = country;
    _data['dist'] = dist;
    _data['house'] = house;
    _data['lm'] = lm;
    _data['loc'] = loc;
    _data['pc'] = pc;
    _data['state'] = state;
    _data['vtc'] = vtc;
    _data['street'] = street;
    return _data;
  }
}
|
// Copyright (c) Facebook, Inc. and its affiliates.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use anyhow::{anyhow, bail, Context, Error, Result};
use bytes::Bytes;
/// This file defines a minimalistic compressor and decompressor interface
/// optimized for below's usage. They are wrappers around general compression
/// libraries. Currently only zstd is supported.
// TODO: Use latest zstd as implementation
// TODO: Consider using experimental feature to load dict by reference
/// Translates a zstd error code into an `anyhow::Error` carrying zstd's
/// textual error name.
fn code_to_err(code: zstd_safe::ErrorCode) -> Error {
    let name = zstd_safe::get_error_name(code);
    anyhow!(name)
}
/// Compression wrapper around a zstd compression context.
pub struct Compressor {
    cctx: zstd_safe::CCtx<'static>,
    // True while a dictionary loaded via `load_dict` is still attached.
    dict_loaded: bool,
}
impl Compressor {
    /// Creates a compressor with no dictionary loaded.
    pub fn new() -> Self {
        Self {
            cctx: zstd_safe::CCtx::create(),
            dict_loaded: false,
        }
    }

    /// Resets the dict loaded.
    fn reset_dict(&mut self) -> Result<()> {
        if self.dict_loaded {
            // Loading an empty dictionary detaches whatever dictionary is
            // currently bound to the compression context.
            self.cctx
                .load_dictionary(&[])
                .map_err(code_to_err)
                .context("Failed to load empty dictionary")?;
            self.dict_loaded = false;
        }
        Ok(())
    }

    /// Loads the given dict.
    pub fn load_dict(&mut self, dict: &[u8]) -> Result<()> {
        self.cctx
            .load_dictionary(dict)
            .map_err(code_to_err)
            .context("Failed to load dictionary")?;
        self.dict_loaded = true;
        Ok(())
    }

    /// Compresses the given frame using the previously loaded dict, if any.
    pub fn compress_with_loaded_dict(&mut self, frame: &[u8]) -> Result<Bytes> {
        // compress_bound gives the worst-case compressed size, so the
        // buffer never needs to reallocate during compression.
        let mut buf = Vec::with_capacity(zstd_safe::compress_bound(frame.len()));
        self.cctx
            .compress2(&mut buf, frame)
            .map_err(code_to_err)
            .context("zstd compress2 failed")?;
        Ok(buf.into())
    }

    /// Compresses the given frame after resetting dict.
    pub fn compress_with_dict_reset(&mut self, frame: &[u8]) -> Result<Bytes> {
        self.reset_dict().context("Failed to reload dict")?;
        self.compress_with_loaded_dict(frame)
            .context("Failed to compress without dict")
    }
}
/// Decompression wrapper around a zstd decompression context, keeping the
/// currently loaded dictionary bytes together with a caller-supplied key
/// identifying that dictionary.
pub struct Decompressor<K> {
    dctx: zstd_safe::DCtx<'static>,
    // Dictionary bytes; empty when no dictionary is loaded.
    dict: Bytes,
    // Key of the loaded dict; None when no dictionary is loaded.
    dict_key: Option<K>,
}
impl<K> Decompressor<K> {
    /// Creates a decompressor with no dictionary loaded.
    pub fn new() -> Self {
        Self {
            dctx: zstd_safe::DCtx::create(),
            dict: Bytes::new(),
            dict_key: None,
        }
    }

    /// Gets the dict which is also the decompressed key frame.
    pub fn get_dict(&self) -> &Bytes {
        &self.dict
    }

    /// Gets the key associated with the loaded dict.
    pub fn get_dict_key(&self) -> Option<&K> {
        self.dict_key.as_ref()
    }

    /// Resets the dict loaded to dctx.
    fn reset_dict(&mut self) -> Result<()> {
        if !self.dict.is_empty() {
            // Loading an empty dictionary detaches the current one, then the
            // cached bytes and key are cleared to match.
            self.dctx
                .load_dictionary(&[])
                .map_err(code_to_err)
                .context("Failed to load empty dictionary")?;
            self.dict = Bytes::new();
            self.dict_key = None;
        }
        Ok(())
    }

    /// Loads the given dict and associates it with the given key, whose meaning
    /// is user-defined. Only frames with a matching key should be decompressed
    /// with this dict.
    pub fn load_dict(&mut self, dict: Bytes, key: K) -> Result<()> {
        self.dctx
            .load_dictionary(&dict)
            .map_err(code_to_err)
            .context("Failed to load zstd dictionary by reference")?;
        self.dict = dict;
        self.dict_key = Some(key);
        Ok(())
    }

    /// Decompresses the given frame using the previously loaded dict, if any.
    pub fn decompress_with_loaded_dict(&mut self, frame: &[u8]) -> Result<Bytes> {
        // Size the output buffer from the frame header so decompression
        // never reallocates.
        let capacity = match zstd_safe::get_frame_content_size(frame) {
            zstd_safe::CONTENTSIZE_ERROR => bail!("Error getting frame content size"),
            // Decompressed size should only be unknown when using streaming
            // mode, which we should never use
            zstd_safe::CONTENTSIZE_UNKNOWN => bail!("Unknown decompressed size"),
            capacity => capacity as usize,
        };
        let mut buf = Vec::with_capacity(capacity);
        self.dctx
            .decompress(&mut buf, frame)
            .map_err(code_to_err)
            .context("zstd decompress failed")?;
        Ok(buf.into())
    }

    /// Decompresses the given frame after resetting dict.
    pub fn decompress_with_dict_reset(&mut self, frame: &[u8]) -> Result<Bytes> {
        self.reset_dict().context("Failed to reload dict")?;
        self.decompress_with_loaded_dict(frame)
            .context("Failed to decompress without dict")
    }
}
}
#[cfg(test)]
mod test {
    use super::*;

    /// Deterministically generates at least `n` bytes (rounded up to a
    /// multiple of 8) by repeatedly hashing a fixed seed.
    fn gen_data(n: usize) -> Vec<u8> {
        use std::hash::Hasher;
        let mut data = Vec::with_capacity(n);
        let mut hasher = std::collections::hash_map::DefaultHasher::new();
        hasher.write_u64(0xfaceb00c);
        while data.len() < n {
            let val = hasher.finish();
            data.extend(val.to_be_bytes());
            hasher.write_u64(val);
        }
        data
    }

    /// Round-trips data through Compressor/Decompressor with and without a
    /// dictionary, and checks the dictionary actually helps.
    #[test]
    fn compressor_decompressor() {
        let mut c = Compressor::new();
        let mut d = Decompressor::new();
        let data: Bytes = gen_data(128).into();
        let comp_default = c
            .compress_with_loaded_dict(&data)
            .expect("Failed to compress");
        c.load_dict(&data).expect("Fail to load dict");
        let comp_with_dict = c
            .compress_with_loaded_dict(&data)
            .expect("Failed to compress");
        let comp_dict_reset = c
            .compress_with_dict_reset(&data)
            .expect("Failed to compress");
        // Using self as dict should get much smaller result than without dict
        assert!(comp_with_dict.len() < comp_default.len());
        // Compress with dict reset should be the same as with default dict
        assert_eq!(comp_dict_reset, comp_default);
        let decomp_default = d
            .decompress_with_loaded_dict(&comp_default)
            .expect("Failed to decompress");
        d.load_dict(data.clone(), ()).expect("Failed to load dict");
        let decomp_with_dict = d
            .decompress_with_loaded_dict(&comp_with_dict)
            .expect("Failed to decompress");
        let decomp_dict_reset = d
            .decompress_with_dict_reset(&comp_dict_reset)
            .expect("Failed to decompress");
        // All should be decompressed back to original data
        assert_eq!(decomp_default, data);
        assert_eq!(decomp_with_dict, data);
        assert_eq!(decomp_dict_reset, data);
    }

    /// Cross-checks these wrappers against the `zstd` crate's standalone
    /// compress/decompress functions in both directions.
    #[test]
    fn compatibility() {
        let data: Bytes = gen_data(128).into();
        // Compressor => zstd decompress
        {
            let comp = Compressor::new()
                .compress_with_dict_reset(&data)
                .expect("Failed to compress");
            let decomp = zstd::stream::decode_all(&*comp).expect("Failed to decompress");
            assert_eq!(decomp, data);
        }
        // zstd compress => Decompressor
        {
            let comp = zstd::block::compress(&data, 0).expect("Failed to compress");
            let decomp = Decompressor::<()>::new()
                .decompress_with_dict_reset(&comp)
                .expect("Failed to decompress");
            assert_eq!(decomp, data);
        }
    }
}
|
package com.sama.slotsuggestion.domain
import org.assertj.core.api.Assertions.assertThat
import org.junit.jupiter.api.Test
import java.time.ZonedDateTime
internal class BlockTest {
    @Test
    fun testZeroDuration() {
        val now = ZonedDateTime.now()
        // A block whose start and end coincide reports zero duration...
        assertThat(Block(now, now, false, false, 0).zeroDuration())
            .isTrue()
        // ...while any positive span does not.
        assertThat(Block(now, now.plusMinutes(1), false, false, 0).zeroDuration())
            .isFalse()
    }
}
|
package step
// ModVendor runs "go mod vendor" to copy the module's dependencies into the
// vendor/ directory, wrapping any failure with a descriptive message.
func ModVendor() error {
	return runCommand(
		"failed to vendor project dependencies: %v",
		createCommand("go", "mod", "vendor"),
	)()
}
|
const GENERATE_CODE = 'GENERATE_CODE'
const START_PROJECT = 'START_PROJECT'
const KILL_PROJECT = 'KILL_PROJECT'
const CHECK_SERVERS_STARTED = 'CHECK_SERVERS_STARTED'
module.exports = {
GENERATE_CODE,
START_PROJECT,
KILL_PROJECT,
CHECK_SERVERS_STARTED,
}
|
import { PassportStrategy } from '@nestjs/passport'
import { Injectable } from '@nestjs/common'
import { ConfigService } from '@nestjs/config'
import { Strategy, ExtractJwt } from 'passport-jwt'
import { User } from '../../entities/example/user.entity'
import { ValidateToken } from '../utils/validateToken.service'
import { Roles } from '../enums/roles.enum'
/**
 * Shape of the JWT payload issued by `getToken` and returned by
 * `JwtStrategy.validate`.
 */
interface JwtPayload {
  id: number,
  name: string,
  email: string,
  roles: Roles[],
  // NOTE(review): JWT "iat" is conventionally a numeric timestamp (seconds
  // since epoch) — confirm `Date` is really what is carried here.
  iat?: Date
}
/**
 * Builds the JWT payload for a user: identity fields plus the user's role
 * names mapped into the Roles enum.
 */
export function getToken(user: User) {
  const roles = user.userRoles.map(userRole => userRole.role.name as Roles)

  const payload: JwtPayload = {
    id: user.id,
    name: user.name,
    email: user.email,
    roles,
  }
  return payload
}
@Injectable()
export class JwtStrategy extends PassportStrategy(Strategy, 'jwt') {
  constructor(
    private readonly validateToken: ValidateToken,
    private readonly configService: ConfigService
  ){
    // Extract the bearer token from the Authorization header and verify it
    // with the JWT_KEY from configuration.
    super({
      jwtFromRequest: ExtractJwt.fromAuthHeaderAsBearerToken(),
      secretOrKey: configService.get('JWT_KEY')
    })
  }

  /**
   * Runs after signature verification: applies the project-specific token
   * checks and returns the payload that passport attaches to the request.
   */
  async validate(payload: JwtPayload): Promise<JwtPayload> {
    await this.validateToken.validateToken(payload)
    return payload
  }
}
|
using System;
using System.Threading.Tasks;
using TensorFlowMLNETInceptionv3ModelScoring.Model;
namespace TensorFlowMLNETInceptionv3ModelScoring
{
public class Program
{
    /// <summary>
    /// Trains the Inception v3 transfer-learning model, then evaluates it.
    /// Failures are reported to stderr instead of being silently swallowed.
    /// </summary>
    static async Task Main(string[] args)
    {
        try
        {
            var modelBuilder = new ModelTrainer(
                ModelHelpers.GetAssetsPath("data", "tags.tsv"),
                ModelHelpers.GetAssetsPath("images"),
                ModelHelpers.GetAssetsPath("model", "tensorflow_inception_graph.pb"),
                ModelHelpers.GetAssetsPath("model", "imageClassifier.zip"));
            await modelBuilder.BuildAndTrain();

            var modelEvaluator = new ModelEvaluator(
                ModelHelpers.GetAssetsPath("data", "tags.tsv"),
                ModelHelpers.GetAssetsPath("images"),
                ModelHelpers.GetAssetsPath("model", "imageClassifier.zip"));
            await modelEvaluator.Evaluate();
        }
        catch (Exception ex)
        {
            // Previously an empty catch hid all failures; surface them.
            Console.Error.WriteLine(ex);
        }
    }
}
}
|
using System;
using System.IO;
namespace MonoBrickFirmware.Native
{
internal class AOTHelper
{
    /// <summary>
    /// Returns true when an AOT-compiled shared object exists for the assembly.
    /// </summary>
    public static bool IsFileCompiled (string fileName)
    {
        return File.Exists(fileName + ".so");
    }

    /// <summary>
    /// AOT-compiles the given assembly with mono, removing any stale .so first.
    /// </summary>
    public static bool Compile(string fileName){
        if (IsFileCompiled(fileName))
            // Delete using the full path. The previous code deleted
            // `new FileInfo(fileName).Name + ".so"` (file name only), which
            // targeted the current directory instead of the assembly's
            // directory and left the stale .so in place.
            File.Delete(fileName + ".so");
        ProcessHelper.RunAndWaitForProcessWithOutput("/usr/local/bin/mono", "--aot=full " + fileName);
        return IsFileCompiled(fileName);
    }
}
}
|
/**
* @fileOverview The Drone model.
*/
module.exports = function(data) {
this.id = data.id;
this.col = data.col;
this.row = data.row;
this.maxLoad = data.maxLoad;
this.inventory = data.inventory;
};
|
using System;
using System.Collections.Generic;
using System.Data.Entity;
using System.Linq;
using DF.EntityFramework;
using DF.Test.SqlCe.DataModels;
namespace DF.Test.SqlCe.CustomiedRepository
{
public class PostRepository : Repository<Post>, IPostRepository
{
    /// <summary>Creates the repository on top of the given EF context.</summary>
    public PostRepository(DbContext context) :
        base(context) { }

    /// <summary>
    /// Returns all posts whose BlogId matches, materialized into a list.
    /// </summary>
    public IEnumerable<Post> GetPostsByBlogId(Guid blogId)
    {
        return this.Query
            .Where(q => q.BlogId == blogId)
            .ToList();
    }
}
|
# Showtime | All your favourite tv series in one place
A demo of a TV-show list. You can search through it and rate your favourite shows.
[DEMO](http://gianveracoder.xyz/demos/showtime/)
|
(function($) {
  // Assign explicitly to window: the previous bare assignment created an
  // accidental implicit global, which throws under strict mode.
  window.siteHeaderSubMenu = function() {
    // Show/hide nested sub-menus while hovering top-level header items.
    $(".site-header .web-menu .menu-item-has-children").hover(function () {
      $(this).find(".sub-menu").show();
    }, function () {
      $(this).find(".sub-menu").hide();
    });
  };
})(jQuery);
|
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
import React, { FunctionComponent } from 'react';
/**
 * Map-layer wizard icon for point-to-point connections: several origin
 * points joined by straight connectors to a central destination point.
 *
 * Each connector <path> previously appeared four times with byte-identical
 * attributes; the duplicates rendered exactly on top of one another, so a
 * single copy of each is kept.
 */
export const Point2PointLayerIcon: FunctionComponent = () => (
  <svg
    xmlns="http://www.w3.org/2000/svg"
    width="49"
    height="25"
    fill="none"
    viewBox="0 0 49 25"
    className="mapLayersWizardIcon"
  >
    <circle cx="38.311" cy="12.889" r="1.636" className="mapLayersWizardIcon__highlight" />
    <circle
      cx="10.85"
      cy="15.12"
      r="1.636"
      className="mapLayersWizardIcon__highlight"
      transform="rotate(-27.34 10.85 15.12)"
    />
    <path
      className="mapLayersWizardIcon__highlight"
      d="M10.746 14.918l12.499-3.892.162.521-12.499 3.892z"
    />
    <circle
      cx="6.805"
      cy="4.603"
      r="1.636"
      className="mapLayersWizardIcon__highlight"
      transform="rotate(-23.178 6.805 4.603)"
    />
    <path
      className="mapLayersWizardIcon__highlight"
      d="M6.27 4.194l17.235 5.888-.176.516L6.094 4.71z"
    />
    <path
      className="mapLayersWizardIcon__highlight"
      d="M22.8 9.673l16.113 2.854-.095.538-16.113-2.855z"
    />
    <circle
      cx="19.542"
      cy="22.262"
      r="1.636"
      className="mapLayersWizardIcon__highlight"
      transform="rotate(8.84 19.542 22.262)"
    />
    <path
      className="mapLayersWizardIcon__highlight"
      d="M19.28 22.43l4.937-12.124.505.206-4.937 12.124z"
    />
    <circle
      cx="42.691"
      cy="3.795"
      r="1.636"
      className="mapLayersWizardIcon__highlight"
      transform="rotate(-6.89 42.691 3.795)"
    />
    <path
      className="mapLayersWizardIcon__highlight"
      d="M42.715 3.993l-18.251 7.243-.202-.507 18.252-7.243z"
    />
    <circle cx="24.578" cy="11.109" r="2.727" className="mapLayersWizardIcon__highlight" />
  </svg>
);
|
/*
* Copyright 2017-2020 47 Degrees, LLC. <http://www.47deg.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package higherkindness.mu.rpc.benchmarks
package shared
object models {

  /** Tuple bundling every query result used by the benchmark layer. */
  type PersonAggregation =
    (Person, Person, Person, Person, PersonLinkList, PersonLinkList, PersonList)

  /** Persons plus an explicit running count. */
  case class PersonList(persons: List[Person], count: Int) {
    // Returns a new list with the person appended and the count incremented.
    def add(person: Person): PersonList = PersonList(persons = persons :+ person, count + 1)
  }

  case class PersonLinkList(links: List[PersonLink], count: Int)

  case class PersonId(id: String)

  case class Person(
      id: String,
      name: PersonName,
      gender: String,
      location: Location,
      email: String,
      picture: Option[Picture])

  case class PersonName(title: String, first: String, last: String)

  case class Location(street: String, city: String, state: String, postCode: Int)

  case class Picture(large: String, medium: String, thumbnail: String)

  /** Undirected association between two persons. */
  case class PersonLink(p1: Person, p2: Person)

  /** Exception used by the benchmark "database"; chains the cause when given. */
  case class DatabaseException(message: String, maybeCause: Option[Throwable] = None)
      extends RuntimeException(message) {
    maybeCause foreach initCause
  }
}
|
/*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package net.darkkilauea.intotheheavens;
/**
*
* @author joshua
*/
/**
 * Base class for game modes, tracking lifecycle state and an optional
 * listener that receives text output.
 */
public class GameMode
{
    /** Lifecycle states a mode can be in. */
    public enum State
    {
        Unknown,
        Initialized,
        Running,
        Paused,
        Stopped
    }

    protected GameModeManager _manager = null;
    protected State _state = State.Unknown;
    protected IGameModeListener _listener = null;

    /**
     * Binds this mode to its manager and marks it initialized.
     *
     * @param manager the owning manager
     * @return always true
     */
    public boolean initialize(GameModeManager manager)
    {
        _manager = manager;
        _state = State.Initialized;

        return true;
    }

    /** Marks the mode as running. */
    public void resume()
    {
        _state = State.Running;
    }

    /** Marks the mode as paused. */
    public void pause()
    {
        _state = State.Paused;
    }

    /** Detaches the manager and marks the mode stopped. */
    public void shutdown()
    {
        _manager = null;
        _state = State.Stopped;
    }

    public State getState()
    {
        return _state;
    }

    public IGameModeListener getListener()
    {
        return _listener;
    }

    public void setListener(IGameModeListener listener)
    {
        _listener = listener;
    }

    /** Hook for subclasses to consume player text input; no-op by default. */
    public void injectTextInput(String input)
    {

    }

    /** Forwards a message to the listener, if one is registered. */
    protected void printToAllListeners(String message)
    {
        // Guard: previously this threw a NullPointerException when no
        // listener had been set.
        if (_listener != null)
        {
            _listener.onTextOutput(message);
        }
    }

    /** Asks the listener to clear its output, if one is registered. */
    protected void clearAllListeners()
    {
        if (_listener != null)
        {
            _listener.onClearOutput();
        }
    }
}
|
<!DOCTYPE html>
<html lang="en">
<head>
    @include('layout.partials.head')
</head>
<body>
    @include('layout.partials.nav')

    {{-- Lists each answered question with all of its submitted answers.
         $answeredQuestions maps a question id to a collection of answers. --}}
    <div class="py-5 bg-light">
        @foreach($answeredQuestions as $questionId => $answers)
            <div class="container">
                <div class="form-group">
                    {{-- Every answer in the group belongs to the same question,
                         so the title is taken from the first one. --}}
                    <h2>{{$answers->first()->question->title}}</h2>
                    @foreach($answers as $answer)
                        <h6>{{$answer->answer->value}}</h6>
                    @endforeach
                </div>
            </div>
        @endforeach
    </div>

    @yield('content')
    @include('layout.partials.footer')
    @include('layout.partials.footer-scripts')
</body>
</html>
|
package main
import (
"fmt"
"github.com/stretchr/testify/assert"
"testing"
)
// MagicPacket is simple enough that it doesn't require any test
// TestParseArguments exercises ParseArguments' validation of the target MAC
// address, the listen address, and the broadcast address, covering empty,
// malformed, and valid combinations.
func TestParseArguments(tester *testing.T) {
	tester.Run("empty-target", func(t *testing.T) {
		_, _, _, err := ParseArguments("", "0:9009", "192.168.0.255:9")
		assert.EqualError(t, err, "the MAC address of the target is required")
	})
	tester.Run("empty-addr", func(t *testing.T) {
		_, _, _, err := ParseArguments("00:00:00:00:00:00", "", "0:9")
		assert.NoError(t, err, "empty address means listening on loopback with next available port")
	})
	tester.Run("empty-broadcast", func(t *testing.T) {
		_, _, _, err := ParseArguments("00:00:00:00:00:00", "0:9009", "")
		assert.EqualError(t, err, "dial udp: missing address")
	})
	tester.Run("empty-all", func(t *testing.T) {
		_, _, _, err := ParseArguments("", "", "")
		assert.Error(t, err, "only check for an error, which one is irrelevant")
	})
	// Malformed MAC addresses: wrong byte counts and a non-hex digit.
	tester.Run("bad-target-1", func(t *testing.T) {
		_, _, _, err := ParseArguments("00:00:00:00:00", "", "0:9")
		assert.EqualError(t, err, "address 00:00:00:00:00: invalid MAC address", "only 5 bytes")
	})
	tester.Run("bad-target-2", func(t *testing.T) {
		_, _, _, err := ParseArguments("00:00:00:00:00:00:00", "", "0:9")
		assert.EqualError(t, err, "address 00:00:00:00:00:00:00: invalid MAC address", "7 bytes")
	})
	tester.Run("bad-target-3", func(t *testing.T) {
		_, _, _, err := ParseArguments("Z0:00:00:00:00:00", "", "0:9")
		assert.EqualError(t, err, "address Z0:00:00:00:00:00: invalid MAC address", "7 bytes")
	})
	// Malformed listen addresses: missing port, invalid host, invalid port.
	tester.Run("bad-addr-1", func(t *testing.T) {
		_, _, _, err := ParseArguments("00:A0:C9:14:C8:29", "0.0", "0:9")
		assert.EqualError(t, err, "listen tcp: address 0.0: missing port in address", "missing port")
	})
	tester.Run("bad-addr-2", func(t *testing.T) {
		_, _, _, err := ParseArguments("00:A0:C9:14:C8:29", "0.0.0.256:0", "0:9")
		assert.EqualError(t, err, "listen tcp: lookup 0.0.0.256: no such host", "256 is not allowed")
	})
	tester.Run("bad-addr-3", func(t *testing.T) {
		_, _, _, err := ParseArguments("00:A0:C9:14:C8:29", "0:-1", "0:9")
		assert.EqualError(t, err, "listen tcp: address -1: invalid port", "port must be >= 0")
	})
	// Malformed broadcast addresses: same three failure shapes over UDP.
	tester.Run("bad-broadcast-1", func(t *testing.T) {
		_, _, _, err := ParseArguments("00:A0:C9:14:C8:29", "0:0", "0.0")
		assert.EqualError(t, err, "dial udp: address 0.0: missing port in address", "missing port")
	})
	tester.Run("bad-broadcast-2", func(t *testing.T) {
		_, _, _, err := ParseArguments("00:A0:C9:14:C8:29", "0:0", "0.0.0.256:0")
		assert.EqualError(t, err, "dial udp: lookup 0.0.0.256: no such host", "256 is not allowed")
	})
	tester.Run("bad-broadcast-3", func(t *testing.T) {
		_, _, _, err := ParseArguments("00:A0:C9:14:C8:29", "0:0", "0:-1")
		assert.EqualError(t, err, "dial udp: address -1: invalid port", "port must be >= 0")
	})
	// NOTE: it is not guaranteed that any address other that loopback or localhost work on a specific device
	// NOTE: cannot guarantee that any port will be available on any device -> must use 0
	// NOTE: IPv6 is support by default on most OS
	addrValid := []string{"", ":0", "[::]:0", "0:0", "localhost:0"}
	broadcastValid := []string{":8", "[::]:9", "0:9", "localhost:9"}
	// Every combination of valid listen and broadcast addresses must parse.
	for i := range addrValid {
		for j := range broadcastValid {
			tester.Run(fmt.Sprintf("good-%d-%d", i, j), func(t *testing.T) {
				_, _, _, err := ParseArguments("00:A0:C9:14:C8:29", addrValid[i], broadcastValid[j])
				assert.NoError(t, err)
			})
		}
	}
}
|
package com.corndog.dataprotocol
import dataprotocol.DataProtocol
import org.junit.Test
import org.junit.Assert.*
/**
* Example local unit test, which will execute on the development machine (host).
*
* See [testing documentation](http://d.android.com/tools/testing).
*/
class DataProtocolTest {
    // Fixed-layout protocol: one each of byte, char, short, int, float, double.
    val protocol = DataProtocol.Builder()
        .bytes(1).chars(1)
        .shorts(1).ints(1)
        .floats(1).doubles(1).build()

    // Same layout plus a trailing component whose count is declared lazily.
    val protocolLastLazy = DataProtocol.Builder()
        .bytes(1).chars(1)
        .shorts(1).ints(1)
        .floats(1).doubles(1).bytes(DataProtocol.DECLARE_LAZY_COUNT).build()

    @Test
    fun protocol_sizeCorrect() {
        // 1 byte + 2 char + 2 short + 4 int + 4 float + 8 double.
        val sizeExpected = 1 + 2 + 2 + 4 + 4 + 8
        assertEquals(sizeExpected, protocol.totalSize)
        assertEquals(sizeExpected, protocolLastLazy.totalSize)
    }

    @Test
    fun protocol_headToNextCorrect() {
        // Advancing the head must land on the component previously reported
        // as "next".
        protocol.headTo(0)
        val iters = 100
        repeat(iters) {
            val componentExpected = protocol.getNextComponent()
            protocol.headToNextComponent()
            assertEquals(componentExpected, protocol.getCurrentComponent())
        }
    }

    @Test
    fun protocol_toString() {
        // Smoke test: walking all six components and printing them must not throw.
        protocol.headTo(0)
        repeat(6) {
            val comp = protocol.getCurrentComponent()
            println(comp)
            println()
            protocol.headToNextComponent()
        }
    }

    @Test
    fun protocol_changeNumImpossible() {
        // A non-lazy component must reject a count change. Replaces the old
        // try { ...; assertTrue(false) } catch { printStackTrace() } antipattern.
        try {
            protocol.changeComponentNumber(0, 10)
            fail("Expected IllegalStateException for a non-lazy component")
        } catch (e: IllegalStateException) {
            // expected
        }
    }

    @Test
    fun lazyProtocol_changeNumCorrect() {
        // Resizing the lazy trailing component must grow the total size.
        val numToChange = 10
        val sizeExpected = 1 + 2 + 2 + 4 + 4 + 8 + numToChange
        protocolLastLazy.changeComponentNumber(6, numToChange)
        assertEquals(sizeExpected, protocolLastLazy.totalSize)
    }
}
|
class Solution {
    /**
     * True when [A] can be rearranged into [B], i.e. both arrays hold the
     * same multiset of values. Sorting both and comparing element-wise
     * decides this in O(n log n).
     */
    fun canBeEqual(A: IntArray, B: IntArray): Boolean {
        val left = A.sortedArray()
        val right = B.sortedArray()
        return left.contentEquals(right)
    }
}
|
object Test {
  def main(args: Array[String]): Unit = {
    // Sample (label, colour) pairs exercising both a parameterless and a
    // parameterised Color case; printing relies on their toString output.
    val samples = Seq("blah" -> Color.Unknown, "red" -> Color.Red(10))
    println(samples)
  }
}
|
program life
  ! A simple implementation of John Conway's "Game of life"
  ! The system is im x jm cells and runs for nsteps iterations
  ! Boundary conditions may be periodic or 'closed'.
  implicit none

  ! Interior grid extent and number of generations to run.
  integer, parameter :: im = 16
  integer, parameter :: jm = 16
  ! Upper halo indices (the lower halo row/column is index 0).
  integer, parameter :: imp1 = im+1
  integer, parameter :: jmp1 = jm+1
  integer, parameter :: nsteps = 12
  ! .true. gives wrap-around (toroidal) boundaries; .false. a dead border.
  logical, parameter :: periodic = .true.

  integer :: nc, np, n
  integer :: tmp, live
  integer :: ncount        ! live-cell count at the current time level
  integer :: i, j
  ! Cell states at two time levels, including a one-cell halo all round.
  logical, dimension(0:imp1, 0:jmp1, 2) :: state

  ! Initialise system
  nc = 1 ! nc is a pointer to current time level
  np = 2 ! np is a pointer to forward time level
  state(:,:,:) = .false.

  ! initial state, for example live cells marked 'X' ...
  !
  !        i
  !
  ! j-2    X
  ! j-1  X   X
  ! j   XX   XX
  ! (a small symmetric seed centred on the grid)
  i = im/2
  j = jm/2
  state(i-2, j, nc) = .true.
  state(i-1, j-1:j, nc) = .true.
  state(i, j-2, nc) = .true.
  state(i+1, j-1:j, nc) = .true.
  state(i+2, j, nc) = .true.

  ! Iterate
  do n = 1, nsteps

     ! halo regions: filled from the current level before each update
     if (periodic) then
        ! Copy opposite edges; the j-direction copies span 0:imp1 and so
        ! also fill the four corner halo cells.
        state(0, 1:jm, nc) = state(im, 1:jm, nc)
        state(imp1, 1:jm, nc) = state(1, 1:jm, nc)
        state(0:imp1, 0, nc) = state(0:imp1, jm, nc)
        state(0:imp1, jmp1, nc) = state(0:imp1, 1, nc)
     else
        ! Closed boundaries: everything outside the grid is dead.
        state(0, 1:jm, nc) = .false.
        state(imp1, 1:jm, nc) = .false.
        state(0:imp1, 0, nc) = .false.
        state(0:imp1, jmp1, nc) = .false.
     end if

     ! update, and keep a count of the number of live cells
     ncount = 0
     do j = 1, jm
        do i = 1, im
           if (state(i,j,nc)) ncount = ncount + 1
           state(i,j,np) = state(i,j,nc)
           ! count the neighbours (including self); a live cell with 2 or 3
           ! true neighbours therefore sees live == 3 or 4 here
           live = count(state(i-1:i+1,j-1:j+1,nc))
           if (state(i,j,nc)) then
              ! currently alive
              if (live == 3 .or. live == 4) then
                 ! remains alive (2 or 3 live neighbours)
              else
                 ! dies (under- or over-population)
                 state(i,j,np) = .false.
              end if
           else
              ! currently dead
              if (live == 3) then
                 ! new cell (exactly 3 live neighbours)
                 state(i,j,np) = .true.
              end if
           end if
           ! next cell
        end do
     end do

     ! output: prints the CURRENT level, i.e. the state as it was BEFORE
     ! this step's update ('live' is reused here as a 0/1 print flag).
     ! NOTE(review): the final computed generation is never printed -
     ! presumably intentional, but worth confirming.
     do j = 1, jm
        do i = 1, im
           live = 0
           if (state(i,j,nc)) live = 1
           write (*, fmt = '(1x,i1)', advance = 'no') live
        end do
        write (*,*)
     end do
     write (*, fmt = '(a,i3,i3)') "Step, count: ", n, ncount
     write (*,*)

     ! swap time level pointers
     tmp = nc
     nc = np
     np = tmp

  end do

end program life
|
---
home: true
heroImage: /awesome.png
actionText: 404
actionLink: /notexists/
features:
- title: Foo
details: Foo details
- title: Foo
details: Foo details
- title: Foo
details: Foo details
footer: MIT Licensed
---
Table of contents
[[toc]]
# Usage
````bash
# dev server listen on localhost:8080
npm run docs:dev
# use eslint
npm run lint
````
## Components
TODO: full example in dedicated pages to explain how the code works
- [table](table.md)
- [sql](sql.md)
### Foo
<foo-1/>
### Table
<table-1/>
### SQL
<sql-1/>
|
# 月日选择
本例展示:月日UI控件绑定默认数据。
[试一试](http://tinper.org/webide/#/demos/kero/monthdate)
# API
## \# u-meta 属性
* type:`u-monthdate`
u-meta基础api请参考[这里](http://tinper.org/dist/kero/docs/moduleapi.html)
相关内容:
[基础月日控件](http://tinper.org/dist/neoui/plugin/monthdate.html)
|
require 'tempfile'
require 'parallel'
require 'parallel_calabash/version'
require 'parallel_calabash/adb_helper'
require 'parallel_calabash/runner'
require 'parallel_calabash/feature_grouper'
require 'parallel_calabash/result_formatter'
require 'rbconfig'
module ParallelCalabash
  # Match index (truthy) on Windows-family hosts, nil elsewhere.
  WINDOWS = (RbConfig::CONFIG['host_os'] =~ /cygwin|mswin|mingw|bccwin|wince|emx/)

  # Top-level driver: discovers connected devices and runs feature groups
  # against them in parallel, one thread per device group.
  class ParallelCalabashApp
    # options - Hash of parsed CLI options. Presence of :apk_path selects
    # the Android (adb) code path; otherwise the iOS path is used.
    def initialize(options)
      @options = options
      # Device-discovery helper for the selected platform.
      @helper = if options.has_key?(:apk_path)
                  ParallelCalabash::AdbHelper.new(options[:filter])
                else
                  ParallelCalabash::IosHelper.new(
                      options[:filter],
                      {
                          DEVICE_TARGET: options[:device_target],
                          DEVICE_ENDPOINT: options[:device_endpoint],
                      },
                      options[:ios_config]
                  )
                end
      # Runner that actually executes the tests on the devices found above.
      @runner = if options.has_key?(:apk_path)
                  ParallelCalabash::AndroidRunner.new(@helper, options[:mute_output])
                else
                  ParallelCalabash::IosRunner.new(@helper, options[:mute_output], options[:skip_ios_ping_check])
                end
    end

    # Returns the number of connected devices (one worker per device).
    # Raises when no device is attached.
    def number_of_processes_to_start
      number_of_processes = @helper.number_of_connected_devices
      raise "\n**** NO DEVICE FOUND ****\n" if number_of_processes==0
      puts '*******************************'
      puts " #{number_of_processes} DEVICES FOUND:"
      puts @helper.connected_devices_with_model_info
      puts '*******************************'
      number_of_processes
    end

    # Groups features, runs each group on its own thread, reports combined
    # results, and exits the whole process with status 1 on any failure.
    def run_tests_in_parallel
      @runner.prepare_for_parallel_execution
      number_of_processes = number_of_processes_to_start
      test_results = nil
      report_time_taken do
        groups = FeatureGrouper.feature_groups(@options, number_of_processes)
        threads = groups.size
        puts "Running with #{threads} threads: #{groups}"
        complete = []
        # :finish fires as each group ends; log the completion order.
        test_results = Parallel.map_with_index(
            groups,
            :in_threads => threads,
            :finish => lambda { |_, i, _| complete.push(i); print complete, "\n" }) do |group, index|
          @runner.run_tests(group, index, @options)
        end
        puts 'All threads complete'
        ResultFormatter.report_results(test_results)
      end
      # Same preparation call doubles as post-run cleanup.
      @runner.prepare_for_parallel_execution
      puts 'Parallel run complete'
      Kernel.exit(1) if any_test_failed?(test_results)
    end

    # True when any per-group result carries a non-zero exit status.
    def any_test_failed?(test_results)
      test_results.any? { |result| result[:exit_status] != 0 }
    end

    # Times the supplied block and prints the wall-clock duration.
    def report_time_taken
      start = Time.now
      yield
      time_in_sec = Time.now - start
      mm, ss = time_in_sec.divmod(60)
      puts "\nTook #{mm} Minutes, #{ss.round(2)} Seconds"
    end
  end
end
|
module Msf::DBManager::Loot
  #
  # Loot collection
  #

  #
  # Iterates over every loot entry in the workspace, handing each one to
  # the supplied block.
  #
  def each_loot(wspace=workspace, &block)
    ::ActiveRecord::Base.connection_pool.with_connection {
      wspace.loots.each { |loot_entry| block.call(loot_entry) }
    }
  end

  #
  # Find or create a loot matching this type/data
  #
  def find_or_create_loot(opts)
    report_loot(opts)
  end

  #
  # Returns every loot entry stored in the given workspace.
  #
  def loots(wspace=workspace)
    ::ActiveRecord::Base.connection_pool.with_connection { wspace.loots }
  end

  #
  # Records a single loot entry. Bookkeeping keys are consumed (deleted)
  # from +opts+; a :path and a :type (or :ltype) are mandatory.
  #
  def report_loot(opts)
    return unless active
    ::ActiveRecord::Base.connection_pool.with_connection {
      wspace = opts.delete(:workspace) || workspace
      path = opts.delete(:path)
      raise RuntimeError, "A loot :path is required" unless path

      host = nil
      addr = nil

      # Report the host so it's there for the Proc to use below
      if opts[:host]
        if opts[:host].kind_of? ::Mdm::Host
          host = opts[:host]
        else
          host = report_host({:workspace => wspace, :host => opts[:host]})
          addr = normalize_host(opts[:host])
        end
      end

      ret = {}

      # Short-circuit keeps :ltype in opts when :type was supplied.
      ltype = opts.delete(:type) || opts.delete(:ltype)
      raise RuntimeError, "A loot :type or :ltype is required" unless ltype
      ctype = opts.delete(:ctype) || opts.delete(:content_type) || 'text/plain'
      name = opts.delete(:name)
      info = opts.delete(:info)
      data = opts[:data]

      loot = wspace.loots.new
      loot.host_id = host[:id] if host
      if opts[:service] and opts[:service].kind_of? ::Mdm::Service
        loot.service_id = opts[:service][:id]
      end
      loot.path = path
      loot.ltype = ltype
      loot.content_type = ctype
      loot.data = data
      loot.name = name if name
      loot.info = info if info
      loot.workspace = wspace
      msf_import_timestamps(opts,loot)
      loot.save!

      # Last expression of the block: the method ultimately returns the
      # saved loot object (value of the assignment below).
      ret[:loot] = loot
    }
  end
end
|
pub use crate::{
drivers::{arm::*, look_at::*, positional::*, smooth::*, yaw_pitch::*},
rig::CameraRig,
};
|
package com.senierr.mortal.domain.setting.vm
import android.app.Application
import android.util.Log
import androidx.lifecycle.AndroidViewModel
import androidx.lifecycle.viewModelScope
import com.senierr.base.support.arch.StatefulData
import com.senierr.base.support.arch.ext.emitFailure
import com.senierr.base.support.arch.ext.emitSuccess
import com.senierr.repository.Repository
import com.senierr.repository.entity.DataSource
import com.senierr.repository.entity.bmob.Feedback
import com.senierr.repository.entity.bmob.VersionInfo
import com.senierr.repository.service.api.ICommonService
import com.senierr.repository.service.api.ISettingService
import kotlinx.coroutines.flow.*
import kotlinx.coroutines.launch
import java.io.File
/**
* 设置
*
* @author zhouchunjie
* @date 2019/7/9
*/
/**
 * Settings screen ViewModel: exposes cache management, version checking,
 * APK download progress and feedback submission as shared flows.
 *
 * @author zhouchunjie
 * @date 2019/7/9
 */
class SettingViewModel(application: Application) : AndroidViewModel(application) {

    private val _cacheSize = MutableSharedFlow<StatefulData<Long>>()
    val cacheSize: SharedFlow<StatefulData<Long>> = _cacheSize

    private val _newVersionInfo = MutableSharedFlow<StatefulData<VersionInfo>>()
    val newVersionInfo: SharedFlow<StatefulData<VersionInfo>> = _newVersionInfo

    private val _noNewVersionInfo = MutableSharedFlow<StatefulData<Unit>>()
    val noNewVersionInfo: SharedFlow<StatefulData<Unit>> = _noNewVersionInfo

    private val _apkDownloadProgress = MutableSharedFlow<StatefulData<DataSource.Progress>>()
    val apkDownloadProgress: SharedFlow<StatefulData<DataSource.Progress>> = _apkDownloadProgress

    private val _apkDownloadCompleted = MutableSharedFlow<StatefulData<File>>()
    val apkDownloadCompleted: SharedFlow<StatefulData<File>> = _apkDownloadCompleted

    private val _feedbackResult = MutableSharedFlow<StatefulData<Feedback>>()
    val feedbackResult: SharedFlow<StatefulData<Feedback>> = _feedbackResult

    private val settingService = Repository.getService<ISettingService>()
    private val commonService = Repository.getService<ICommonService>()

    /**
     * Queries the local cache size and publishes it on [cacheSize].
     */
    fun getCacheSize() {
        viewModelScope.launch {
            try {
                val size = settingService.getLocalCacheSize()
                _cacheSize.emitSuccess(size)
            } catch (error: Exception) {
                _cacheSize.emitFailure(error)
            }
        }
    }

    /**
     * Clears the local cache, then publishes the new size on [cacheSize].
     */
    fun clearCache() {
        viewModelScope.launch {
            try {
                settingService.clearLocalCache()
                val size = settingService.getLocalCacheSize()
                _cacheSize.emitSuccess(size)
            } catch (error: Exception) {
                _cacheSize.emitFailure(error)
            }
        }
    }

    /**
     * Checks for a newer app version: emits on [newVersionInfo] when one
     * exists, on [noNewVersionInfo] otherwise. Errors go to [newVersionInfo].
     */
    fun checkNewVersion() {
        viewModelScope.launch {
            try {
                val versionInfo = settingService.checkNewVersion()
                if (versionInfo != null) {
                    _newVersionInfo.emitSuccess(versionInfo)
                } else {
                    _noNewVersionInfo.emitSuccess(Unit)
                }
            } catch (error: Exception) {
                _newVersionInfo.emitFailure(error)
            }
        }
    }

    /**
     * Marks [versionInfo] as ignored for future update prompts.
     */
    fun ignoreThisVersion(versionInfo: VersionInfo) {
        viewModelScope.launch {
            try {
                settingService.ignoreUpdateVersion(versionInfo.versionName)
            } catch (error: Exception) {
                // Best-effort: failure to persist the ignore flag is not surfaced.
            }
        }
    }

    /**
     * Downloads the APK described by [versionInfo], publishing progress on
     * [apkDownloadProgress] and the finished file on [apkDownloadCompleted].
     */
    fun downloadApk(versionInfo: VersionInfo) {
        viewModelScope.launch {
            commonService.downloadFile(versionInfo.url, versionInfo.fileName, versionInfo.md5)
                .onEach { source ->
                    if (source is DataSource.Progress) {
                        _apkDownloadProgress.emitSuccess(source)
                    } else if (source is DataSource.Success) {
                        _apkDownloadCompleted.emitSuccess(source.value)
                    }
                }
                .catch { _apkDownloadCompleted.emitFailure(it) }
                .collect()
        }
    }

    /**
     * Submits user feedback and publishes the result on [feedbackResult].
     */
    fun feedback(content: String, userId: String) {
        viewModelScope.launch {
            try {
                val result = settingService.feedback(content, userId)
                _feedbackResult.emitSuccess(result)
            } catch (error: Exception) {
                _feedbackResult.emitFailure(error)
            }
        }
    }
}
|
---
id: "_connector_write_"
title: "connector/write"
sidebar_label: "connector/write"
---
## Index
### Functions
* [write](_connector_write_.md#write)
## Functions
### write
▸ **write**(`operations`: [Operation](../interfaces/_diff_operation_.operation.md)[], `config`: Config, `schemaName`: string, `tablePrefix`: string, `columnPrefix`: string, `plugins`: [MigratePlugin](../interfaces/_plugin_migrateplugin_.migrateplugin.md)[]): *Promise‹void›*
*Defined in [connector/write.ts:36](https://github.com/aerogear/graphback/blob/bc616b51/packages/graphql-migrations/src/connector/write.ts#L36)*
**Parameters:**
Name | Type | Default | Description |
------ | ------ | ------ | ------ |
`operations` | [Operation](../interfaces/_diff_operation_.operation.md)[] | - | - |
`config` | Config | - | Knex configuration |
`schemaName` | string | "public" | Table schema prefix: `<schemaName>.<tableName>` |
`tablePrefix` | string | "" | Table name prefix: `<prefix><tableName>` |
`columnPrefix` | string | "" | Column name prefix: `<prefix><columnName>` |
`plugins` | [MigratePlugin](../interfaces/_plugin_migrateplugin_.migrateplugin.md)[] | [] | - |
**Returns:** *Promise‹void›*
|
---
title: 清华软院零字班上网配置
author: 孙子平
date: 2021-01-17T15:49:14Z
category: 配置
tags: [配置]
---
这篇文章是关于清华软院零字班学生上网所需要的配置方案的。
<!-- more -->
::: encrypt encrypted key=lzb owners=sunziping2016
FZEevv0jRNC93NbfX3rJa1tv9kohqbP4Ps4F8+mtNAKWJ6u72lj9QNpz6CVP61kGLMPC48c9espuqiI
wHPLFCpypphBPayIRTAkhoMj+wq3Ln+191Lo8zwHGALPhf62FMQhGRcb1rHkjLWzQH0tKLF0XR4bI+T
34cQRBdfZ2bJy0N20bHeEyv2l4tiG64S1bq6oqhPuEeA6YOhJL5+CxLMxz/ODvKA8fhjzBPJkzqZoeU
MMbwcQCZpVm7bhLkr6C3SNvoRCihiykHvT93jEpBPb1KXsF9vbGO3+5tPL+vIWuP6MdrmULZpx/0BF6
sXXIDHVLCcSCd/1axXBywhDco3BboSCHyUuSwyRvEuvBEKoCDFgXyryl3o8RLpGuo9Tec25XKaFdeC+
fwqnnw2xmnjejI4/G7uLhsAXjn3YMWtZ+IjtXATdStV3x/R2GR2r/LWsbG9seZe7U+NKCfGpITcIIzp
EkHLSmHvfu4nS1tLQlL/ajQU+GQA95e8Jz/XxzSyUfBPSuLktaewUPWGy1Nhx5/qLw9fF49X9f0qlaF
CXl8sB0f0bQ5r96imdxqWOu1962Li+rL4b6oh/PrxrVs6s9cqY3XK6HUFm5avRnGWk0P/GkEsmwMx+A
gPl7ba1CnbcdDrV7N7pQyZI0omF9s2hQVdiB2xUNHIsanm8MbBgciDDMxuTTPrQMcA117y94yo5rg2O
BGkHbDIsmAzAP7rtPxuHwwhd203KI4hSt8+zd6S1cggADhIixMKBKrxw83ga9gHLIkV+EDkwUOTu5Gp
cJZOyGx1XvaFZObgXk892d6XK7yd3Xm1D2qA7DKKT4KQmEYbTzbi9bXCk1FpWMYtd4Zldtnis8k2FzH
L0+D5+pgoXcouvBpW1xbXeg7L2ydm3widIYa5ZFCdG3Mgk9wFHQaOMHgExd5Zrj6zBt1E/H7SmIgrAM
YmGCucXtXBgJcicNjOiVhcPcsQhaqC18rUXfzeXQ3DbDJAerY35xDa7X51U3C2CTkpEMSQZICpOffOY
C4QHVllxIgEB2RyLlsAiKdDXfkzU0fa9OPMA6YcIcLBiRwBJEcVe2dvwk+dbSz+WegjyS1mRgLppoRD
/5ICwgfPdopbVq1Towj0B3zDBZUVk1vX4U/8xFIHQqx2PHJAPZS6GZh25r7kLnQH+PRBk/pElCrICYi
f/d+B0BDs4+N1BDaupOXT8PpRvNxn5q5jH1xu7JKcJOuu64q9SvNb/4aQLmDJ9nFZXW5ydZ0Qnv9o9G
BzxjjjVPywrTsdNxK3Ug4uIMRsOoDylQGie3cNr5jrH5nV++pF8Z05KROpK47QUXWp8z6h9qWAqtY9s
DQHCXMd03sdqKc1gXMJSG5UwafXYD+ED3ql2W7hN8AAioFZhSVw9Kk9W3RLvzZvdGa/tnD/uPHT3ps5
2j5pRhLFJzRDPBOPIKate2PMj0XhkCF4AfyH/RaCiTcASsBKfiov1hAF7edMJ3HZAFG8o6VpwEO9wGc
kqofZ4fmrSb3R+Ez5f8gG+WwpGMyIoE59JXA6pmogZ8dofZFqccKf6CWsYf/reU2vkqzi5vsu6ic4w1
f1ImaSSk9l+YfZbdJGSXKBI2A7mRlGEd22LnABFK0HthLDRMLQv9eNy8AzW1kD0sRJN54L8k/3OyLYQ
jyeehV02wpPNvW/uXxilRWbnlgrFXmRydsK6bPLm4BFRMMumtE0Qb0sagKdp0EMRIL5WNNPHMVdtuCH
LMISPMsJlmu+2gP/vvS2PKJ0miXNcijVrBnlYDITiXQDmP8WmxJwkKCBUR8jPGDjIw7Wn7f+V8AJEUn
B4kxrOSLJ+q2yqpdbh85aNnzH2L1EVgNiB0hCapKK3ei5daKiQhBiGeAi8CmcwUU793SiZj7cCKqFql
WsN8q3WcvlftUF33VmdzOaqM1NZvRwCd2yFbqkmgi7SzvJrLrAxroEch9t/6Z+UpWgewxOWavFR4+Xn
R6BIg0dLOIgVFxNPTJTLtLCTprDC3LX6+mMch1ZWUURIaTlE5z+bW91eHwy0MeAX0FM0/ehgGQ8DmSo
fQs/TsWPaYTSpO1G2sz2XThde/uxEChJwqpMgQCRkaThdLgDYuLpBDAm8mE91su/Nx+99/IhDxqpd0g
AE9UqDL4OUhKaAZnrbSJSbsYuavNaCZ0a8UfP/HxzCSsLuuxekFdLcY+h3Oev3bkIxJFoX9aEZwAc0x
CVC3/fQLI5ycce3FNuuoLVnoEnK7UmORofhkwSmGX+kCS5T5qpHtv/r335mbBQa8nlRY/pXMTWmTWiG
levM4Tu9mr0gLLfNbeoTg15Sa0p2QngTPV8sHNeRMEWfxA8C3lSgyd+yvl7Ey4Gf00Y3BqgfcvbasX8
vNGOgUpBrdJrUHwy2xw6LlTFODPMlF6UjpmGKG/kuhORhn8OQjUOVLFPhqxt2b3o+0GiM/uiZFma6lP
/3dZvc4zNikB8DX7iAas+h51XU75sAB2E2F0Zm0nxcNPKkpuRAzDXocecrdFbhwf8qIwF6gslT7Wxlw
/GZZxGHjUO7d8kM0xF6RsT2EUnZAYaovwSw991e8CSkzeOFulYWTv7TBQLMc9HbrIh1dmkLj5yLv4o/
jM5Tc37UP0yCEBV4DN8nwgUcOoV+0PZViIFfu9PXFNmfAOzH2/vAS+MIoL1XgUrrhfFuC+L3XYx/gxG
v4NnKdknp+1xu+8CQc3j5rc+IAFczh9EqCNhq/BPs0r1VedTDCfCfefHSicbFvidP1Bx8sRTVbZWHgk
WjDy0lhc/snUrjEmil5WzcU839jNMNa61+ZJUlMluUZaWIRsnGMo++/JCJyXnt68/eujjQE28LCQP4h
aRvvY40PR7bSXGRJgM8h5D0Mf9GY2ccH+8nGXqKqJEiDhVhpJrZY2R4la6Kq8WXfHQzUxvmuvXC3YqI
aNh74UbuYOo+z+EQhKtvNHfXxGtTpOEMsJk1I3I9zcS0jGJJs1w9HRsJr6EsWo3wQzCSWNWwTyEeNL+
3C6YXT3/UmwBc2Kl/7kMKiSsDbnyHnzNrbHR0kZ7Bi628CMiPw5gC2VX5VhN26f/VWEsfTy3f4Z6J+D
/Cn+GBSPgGbNBqmblg3ai2R8mO3/sR2EeaZi/MmvEZZABpZIl8I2X+Fs4shqgh12CbZG49hzvPQz2oG
lwMhB8SL2+scacKy3z0SQW6Tay/kY5DXwC1R+cs3gJonxw1VZA1i4LJLjB0rlw65ttJN8Y/Yg+BVSZ/
GuSCO9+SkQHvywVsWM/QLxvclWkwDZ3JnmhRHByzcCxZp7b4gHiH+YsEKu1W1g4BNgnkI26Bf02X0rC
m0pi0co+iuiWjChWakFv8kSplr0rZ4nktG5rFCI3uQSedLQfeU5xR+QusCgozB9z0xNw6D0dA74zKex
bRONa54SuDG+voJv4HYsdrj/Pg9XvcRmgB0UUcTiqHdOm7XZyT2OXjagM9n4v7TTxWN32RwEab+w1H2
xjfEZDR6H+UxWRjYtBpL5eHUQCNh9YdHp61viWgN/33kc/M9wVyNlslaim9da+Yw92fNpntzuX7nHc1
0g4/FO+BKX8Ej/dIRKV+OY90x1vyhpy2/yJkFbrXYJilbrPn1RZ2HSvfai2v5qkEER0jr65wdoSOJ8b
a53haoH5LoaifZe7Mb+pUuAlTsiRTmwmbgnK3Ocdcu+n4M5ZPJ2GRDtoRZ0D6nFnLf3S9QyDPT78WAQ
jK+sB3Nl5Lyvg4x+XkEKCYJmcIfi3WxHR3pXKQk3dFMLb4eiU7Ivpk0gA7Yfa7wGHLAjHyXbtGEEqwF
jDP1KDL0SvCWLXc3W8sNuGLOd+stx3Z9kGsJ4lLQnV/7GaHoGyOXIy2ZnRz9GNEbvq0jKBTzhGUdtdd
eCJ2cyr/MurMc7mbtBOPqCTLashKHjfK5gvJ4z4NhUpRvxq0+9Qajqq6TDZNp+gqCec/VOXbHwdWucd
1/7zQngmKwRsvPRYSJHMq71EHdMTlYVOYLy+XqwmNNVW+fn6kO71rvoWl49z1PDzPVokX5Ul7uDEapw
UX9ydxPhTqMqL236F5k1//SHTPMG8sxu46++MfBpDZanBh+GH6l3DbCFy6r4+9z8wlYQzhmIxXLkTgH
gbQr4Ak0xp4uM+0l+7FTWcLItw6vKHJ4Zkct/JBTMdteILQsp/JVSB7Bo8c2TNp4aGIXavLBknJyZ0a
hulAW0IZnNN7jSimqjrDCmIWRkeFy/kdMI8vuxM17nui1NZi2QcNbdAaVIxrHz1tPJ+tx9VAmgzt2JQ
nbOif+CSMa3GeAmz7B7SEpflSxzCgLFbjaMzQmRFGY1SOHTIGksSk5psjcSKXgQqcSuu+96OBRZX2x9
WWunm45ydvt+Bza+Txg7PFeWS2h0fTRbyB9mZoACPd3+TNH0s4a6ZlWzCWBTOaLc938yqqH+K7yeUaL
gMk1K506AaDMrziDZYK3JU5pyxya4ml4gpqkGY+FL51/BKYdEZJaY68MOhj3gNejKxyKGoms7O0xlkE
l4KO1987mpdcyv8p0LACq9aeKmQOolvAN0Tlwr2HeUQTi4i4gcUAv0ZeOR5Qn66q0ZKWfOUu/9Yp5Li
WdHHwsWa8YJYU7yWfy2JYj/+ORfWo4oOj9/lGPOTcDua+7fB5V5R4/lxoUUX7icrIl3YrineEmBc7/1
1P7fiGFAOniQ+vRlCgsp19r3R7pxy2FN3mz3dPnceUhackoH20RHDlB79h6KLh4wStfsDq6lhvNhJb3
4Qc/nCaSnlSoswSP7N7aDfv4bTlnRWAhQ8v0hBDKR82NfgriMpLZqmhUUckIRUjT+O7haOqi68leite
Is1G71Nbk3FZMXu4AeUzgZXDynRfCGj9S9hftf5SA6sl5h+4IoeSDbKC97q8R4P6d7UQjAoFJ7SbmYG
TSLj21hvSupV9ZAfHfFgsZZzxNJXMnXmvfcBlarXFcXuaRyxp3xLul0NGMYMs4t/kLeopOLZrLKI5ig
nne7mh7YRAZ4b24piEsDrmlDYuOplsSsdrFp3eQ4vkwdOrZnqqbYRGiHV/2SM9Wqi0yksqJ02yJxNeg
9AYjdIUIqXmYjKWNtBsx/ee61m0c3Pa/pgYnpXK7Vt0LpSpJjbQO9RQVpwtaz1T3qw9jN1lsk61pu3Y
rq+/syn+FMujTC+L3XH7bQae1vPNSoYbzIgErF4zbPhK7BvCycxzTHL1oFPte91RbK4T/4F0PguQIKy
XWPiDE4sPAGAJsOUMhDVoG0+sgZg0P4JaM/9YHfZ7WkKaayq0NA8pBsS6+PYRjtY7mTBZw34RjqQn83
HHIWSZNReRvdwpdoSuXb/iG/ovV2tN0s6AfKDK7RGHyP4s6aVQoBeAI4R/EoHd55RQJ7sd6tbEzNJMc
5mP84JVoiXzJbkaI7cS2XUg5Ll6AdDZhJMFI+p7ewamDp++AhXKhq7PYPY7D86HXNwJrYl51hFH1mrT
vns6NWOoedwyZGiSFCHnp7TOyGb+GV/1Yd7ltFZLcFEHmfMAeJxE/wGpvEqQLgPnRXEu3G5US8stW4Y
MzGjmT4cTtwuzk6KyQO0elgpNmnba5BWvj477e6v/F0T5mvRaPKsKqEFzaNsAJhyZRv2AnhVbPEpOkg
YNjbznMlJWi/UkFz0WIEIMcD5E9CdF6ri98qJ713ZhwDG4T8HmKn5C+wOItR/nOFF9SEWMj01eTUSX8
VvBtxbW3UmWq2pL4vR8NF0rLy/UPEbQIXWR2NWGKLc3JUt/x+nJWTKSeQ7km+g4VvZBdiISpCTdqZRR
rW8TIwJYUc+FQi2sGpDeuiODliuMNRr0xQLkNzSYy0rOXQjSMX/0XiPdosaZHk1n30d7pJrqS8ZOo+n
13ppeihwqDiiIhUUw2MC+ooM1W5R2ipuRisZTMuwfWXW96kM2JvvpXrRtQP+jkWnYTm+91SsdHfSOPG
ZdxT1lE9pRyP7QZo0ZEaZpblP69+1rX3eav60gsGpdfAkg8jkRNlATYs3fJx/KyVaOdFy6aqoag3nS3
a/huVNjgEGW4BvQNzKAUadahbfmC2c8gawQLnlFGi/iGHEfddah1oru/6htBNxfEnOKA0o4ul32KHKW
/RIL5eCQ0PsLjhlJv7WD1X3deSR0lBIzFFdnSF2jFS1PPjZAppRkyAgpMwAdyKpzx9vXeRbdMBiBgnx
3ISERZzL6R/SbL9dM82J5v+yz8xe7Jucf6cuUQaG0FtDXdAak0E9meJA/SQtOM1AkAHg+vnhOcX8Ij3
rdKxgLTnrlOF1Drw4yslNOlzOPIWetrGw5tCXXBnsOHQVpAYES1EiZwU7ZyGdgtQB0decSYUuMDYoV/
v8MbwH7N61vecTR5vm+T/TSFVQkDvi9V0hMSS9+PYuiKCsY1EsBKu6okO+OQUviuOthgPMugb7Xy6Iu
ybPXxxPfFWcFcqCZU0nV4SEe7+MddrfZQQ8Q5HDGJxJyrE8eQOfRhl6u0VolgkX38AH/nD7eHyMWMtB
e4ywKQKvVNdUvrMg9BmCVGDdv4mx21XgOhp1xAZdrfUiLuYVp9g44yGoyKoM+2E4TL1JblsFcZiEg3U
kKU1S1vYCu/CXENaaAzLIPwq4l6BAozpDL2xNqKiLqK31ME3AfrGB2jrYxr6MUM5m2PBEFDybmZS5sP
tEatjeZg/RO7eS5b8nKpVOlFMx7pM4TvfWbVA0hF+uGTWk40UIQNulxXuTbV6ZpPNNppdwqBhOYRQKO
WynEqTNeoDIDjfVpda8HYGNsRyBIh/0MvE4TaAXRXyGP3barLJUSIcTjp/NGa5xQNoFmuoSaN3DBoSV
Wc9oqKUtb9Xk4IIe1a11FaOODEWF1hYG86+qZ7Rd6/kS3ftYRxSTzFcs4QUP0fV1eUUoNn2rTB56AyC
0GpXVKT1tsdZ/JOhXqljFsOwlIu7qTLcdOIxdLXTVszAmFzZnLhc9dzFJ3seFz1dGs88KDSqgfQZ3Xu
IXxWLnDhPeLM3fob5fUub/eiNwNps3t+dLIyURYk/Bl4LHJ4KAL5d1k9Pm3iThgw+75Y0chVqUr4LI9
4SVjpnhF3z9QtVhJtMG8fAiVPqNhDrxgIe4e+xochALNSY9aTWsxCJlz6Mu3cBRH470UV/xzga/kC7B
n9I6ytBRw8ayZSuHqRa4vhNgIYkB9Bbvx6fFy/mkZvkqCG9+N2O/CYcoXK7N8cOBnoh32uuoZRtvBXZ
LC3lBuHfYbLp0ONyN+N+JDwpWd6kulxR36kXu8DsK/ev5zOBVM/GCt4mwhPgaxplHfVlm4f2Q4ZEVXD
WURI7L5Ul/+9T24FTfsmzA5Cie6BBBpTESPBXjJYLw7DsiisLl1OwdO7N6ZWcV6aqho1gHO8rM1Xn0j
4ckxYPSHM3ybyIxs9+MgPK80wbSj8g2SIEpvaGGdigBAd4+rYI9r7qRaMGInGeIvHAM5GpYcODWN52S
0nnxjneieC/J3vngVP4ilewgdNG6gGVYuiXYePEv1PCN5uvbUYGlkcuQdp9GjzGrDHEVATl1uWTlYlT
FCinTZjXe2nhuQx44uJm+PJZ5CyAe2uSIn4aAsH8iDLXePQZz/gAJ2vY0lxVDQS3A21mUAyKjnHAIOW
Ee8h7AvZCnv2nvFNxj3O/H7JgtRX1V2A2fn8RtJUa0J9Akva1cLkg6LZIH8Yjl7QIAhN5U4iH7bhpOb
ADKa1oYFGIUPYmK1xacokA7UtllggQJ9GUwcV7iDP/AXpyJkd8hsvcheCAw2B4tFuBPT5LDvZhnay8j
gyV64qTlZoEEm2JKnoMdF1NQ5Z8Q7ZEVpRyaX8rc4TLwEhqhe5/Z5yIG/P+ixtzQ6dW1jxDUJd7vO47
q9SOXO7E9PP3ZKLxO+HV2JkPsM/VaQfphhkP2Cj1GovxApzQvN2kC5SkmwyoY/bzA+IrGMqOsiXpFkX
4KV0EqxuwK5pWfefDVpS+fI88oBCwBH8ROgRYu0awjNaiPmjQv7CGO/v0oS1bUZk76R/hWZtEW/J90c
rslLCpIPqlk8QeIxlyVQPW1HAO3umg125Rm2iq4ZFGP+sCnFcF4F+6b6In/tplMxV6dwW4ZbrS1e3X3
ekflT/uf3fGTIQcI8/NCizGJn0khr+y9k3WyjIu5GiEfSEgWHZ4NDntaMnNiVO4+ls0pXFZPpDwfOPL
5zsKchuLqYrG93H2E+prdBLb+2D01A+tMpaBZkCuJSFGdWriGeKC6RSAu6uxqyK6xpXDAiTcGhzT+9K
amnFmnogfzx1vnV4KCFm9NYcENlRVtFAtq6KJtQmEKblmb9GqCj+Dl7G1zmUIFAiVsbmUDZJuT6Ytdt
uQj2qvKY8Jx0rnrI8xg/y8j2UqybqE3TIW6HDw5StLPzx2eRmYmOaKoqT+TZzTet1Iyo9cut227K3C6
FkXEv/XY0TCRoGhUgyaoq6ylB4lbmKAn5Yo/ScScg3CkDJm9sH+yKJXqh2kppewuaVSXFZB+NNBx776
rJRCpz0riZTBSYybq/8fDerJDmRrWX4Cy3dV7hW5Sa8PapyTOsIcPl8rfzAck8dzq3qAlGRWmvFbL1t
VKvWU+CfrqNjay65QU9yb5i7UGB8bSP++N8wo9ijqSpBpIttOh0UuIPo2leUeJnwXvu6ForMSyh3uKN
EUG0rbJm1JZN+t5YYi93SUnuN1g5Uj0DudFUgk8HKjDsCLf1qC//ZHQN3INr2YRWkDDxMh8BFagFOHZ
uufyQKNevBEx57hbdJFvRGDbn2mg4mUizYxfDjvtVF40KXL8eDXXbw1X/ZpL0M/V0lw2CtlGRUA6hPS
mRd2VG3l/mG1cY2wTsni5w6mC9EkbRc/v/fnzfGS1IBpVabwP7RYAtfT10gUkArybNWhMcL9IjbL48Y
4X4hFC64g53ewzB3/IdYseUUtM+MWALejf1rI3MsBkMlCVsd1MlpjQVNQGjeWYgo3529N6hdJ8Bc31V
sYwQ8TvRIBVSTl1H5y6xUiRvLz5EbK8Xah6abRStjnlE6m48IKGbITsJLhlZvr7zft7cYts0gsy0wq4
Hbvd8gAWXMzM6NSBAbqrMgdSHI/uUlTdanR60iqpRUakS+zXCfTavsaPdReriSTF34BSof0cpOqdgag
AxNRbKp8VhILtdjmmxVx0gZTc6Y8DgmdMpbyB7v6Mz9ykHeaeIlQ/eq/IJnbvfv4d/1skLiTRy8+/8C
83eCqeDwrK4gk8jqhZzhfUK4WEJi5hQVnkjXdq66T0Peqw9S2r4BKJ9O6gO7mSL9+yhXrLMLATMxOu8
lqb5U9+kTtY0vLeh1EoYGdQK8e3iIRm1wSL1n58YHTwunHam5eZAM/4V2owHmX6wTRNdQRkzTVDAIRI
gC/tizSIj+yaI8/b77OoNdX42wbsSV6qdRD9fFhHpUEIpi4tx+8y3Kba6jk9/u8q+M+22MsHgrrXlP+
oHE7GEBPbDFvmhuiMVaA+GZ7GRm2Gdi5oMvgIz/GdD1uHPLSd5ZyxHaczouLgiJLx6XCq1FdMSXm67N
NaHPJiSG3VpGOC3QLEEFN4DMFqmtP7CBvGNoQBLRY7vHeDSN4eQrFuilGOf3TevubzvS1f4RUVDkdL0
X5HTWaNUNkliNjnjrdx8NLwyHwmn8A2G+T7L2H77iMIUrwHx99oO2URuaP6rJeeqsfcByKml3BRKZLF
J2qk3B3Bn7Ac988x+svgOUj60KYdTiG603bTWrkZTCPIqfzUS1tBuysgGfplppAdt16RRRZENhc2SX+
iYCAk3oePLZeZxOGEbDZW+jiwN+f4Bs6BRpqdCTTwE0U79aaGyNOmgSiDRTrHjRG85iRg8UmP3QiEfC
k1qzzFGruFi6VIRQ2MSM1YOnAKmMmWOeS7jndLXHyVrEgrxZ5StEaeDLQ9ekOD1/LvltHDQVT1R661Q
4EDXVa+SRzWwbCQ9lKHgV0PlXWWgeHXHyi1XbTpFR4yE/bW43Vddh8Uk/4Jqt8otzI4QFjTubEBmDZP
f32bX5ODiN88//SKo+GpHJOvqVteSZBnphQ2LaRDQTDgLeWqioR4MnKfkr9ZkSS531tCqzwvJCv5n6N
HooiWvJauqWe2cU+keznUfLW9dViPTck9OqFjsVFx7tWPOBz506G8WzHZTDjkWpj1DWgrfIVBXVTE/R
Dfm5V2tuNX/HUk36aMLTtEzianSl08rUyGMrJ/p6EIOitFmBr1jzhmU7Zw/Pmj23of+nxDtVf9HZgjI
xr/CwWodbGAXFmA7cYGL5JEJPY1s/KyURvDUVSq3mL/gFVrjlV5AjVwvU4OpK/9XDToAIdr73qV4dEW
6RdN/A44Zs2CwuisgJvwuBQsMipEJ71wDyO0v59vpqhIvy4G9oXXMQPqvoVt1iORpzt6KUrCq3OadZj
fGejCFv54Hyn7r/RzmXYafT7qMFGu3B4qZ18He9bSrMq0ihruDCuw7aj6+4N2UdmVEMHVYGX98E2cW7
1qX5KySM5u9mw2q3qOy3sSFay/6+XtM+Q3CL+aZwW++/6pRjmCq6yUFGoPwNwPBhV8Yi04zHH3lGuRH
JcvaK/DW18a6SZGFBZ0bIhVBj15uD3w9crJcNoDE0SHTyGvLDvrSIGMbVI3rgOzSgWC/YhHn/mOymeR
H1xQ0WgZs3qlgX+65zxjFFYO1ulz7yAZLYUuo8TGAMukwR+PsYTgnJAIim8h6Lr/b2qC9nENrby0AxQ
xEA3AWspNFl67YlDT8TEBTu1YRVTR32HEm2PAlXzJ5TzchIfbOhnCIOmmqEftc7QlGnokpxHzNzgz/L
I2lCHCQ7ztZXP8Aszpach4PnF9U29sm7rXIfZA1HUf4JH7oxbUejsGV3yDbmdhcp7wF3dajx8nA4URD
p2Xf61ZwQtdfR78DwrkkgDvHaFEK/vDQP2SKSMB9sis9Ka5OLZcDOwvw41Yfj1x/5NGw7g1M0Ml4tUz
8IRm34O+cN9wUk2kI8066ZnqsfbPqRIQOUtBdzUlm9bNm++DG/Q/y+0r2ls/IrDVw/sA98HYM18rdVH
yMv2VdKByzveaD6+JFnrj4D8chozCC6Ze2q8SbQmG7kRWLVeZ98sIKt9Et4hLp9X1phO4XL22Ngmdpd
OkKI5pv0m9qKr48mOBsMlHRX5u0vdWtu4GdDtL2O6wDoIEYq+lHC8oY0B3gTxDBsBrDfUzEYFUQYeZq
jgffqZsjBN8WkLqTJr4AQg2I9Fax0SCCjvQtYaSv6E1P2+So0di5/RvQYQSsCpebHIVWDUQiEdYr4rD
waPkqbFiIS5I5PhPU0zz37DNAWk/dA5GH/9juwL1b995X2owXjHX8VADTlecD+sMV23uPbqH7ASyj5d
5IxkNYb+hSBZQ4btVfnttVUChQ8mMFg2yVr0Agux3egQPPMJaqvP8SSnRRAkscQzt03HtWMubHxuEim
NJNI2rfPLwGOH220GPiF5FPvLzFVa0cFiFuozKwY1QAksOvbOt1B1L0uuyaO6pmPAULo23HZ9rJXXjM
bf35YUS9KbSk4/FaD4puGGUm1nOlodMj3dOTWa3Ts3jQ83gd6edTxIhgLad+OAu4n2UqpCfhfoujegn
be3KozgRce4L/MUn6tpI96piuejoMfXZZJlJjzLw8TvStBEGRfe3DxQWy6tO7CSfsQ5ge6TTfFHll11
hqqkfeY9X75Y1R8AY1iq6kIURqhKK/N9Vw8XEyyVISev0fwjRvnL/3akK1pJOFpisFrT5ubvtvm6Qz2
6/VoJnEF1NxOPS3tZ9MLIl5Y9reOdnzvm7seES8o/wOBStF7DBkqnHytexg90bNv4Zv6AvKrw0H4jmY
X7LFvls6cXz/FQsPSa0tvJO8T0ZKNDfOxFprmT+YCZFw2kjyfjTZWsWXo/ZGRQoDoZKrD93RS8VwwkV
PjG25Qjns1UQXMIPg7pkv7Cb6+z35/FsMLevEk11nIlb3bh4kNiT3c0kRNpvNleqhg2oolm5Gv5AHwe
Q76/WPp8JFoox+CHxO9VTZh53NTRcWEixvyALkCEx6et97EFlfAq5xFzcS7eEwx4gcPEd3I+TuFZmRj
OQ/iDCdV0ICDC3aerV05T06p3pGEQrd34CrqfgUtbGV+oJ568lxn4Fb6eq4Lk5+bDigbYMaIW/9osy3
TCCP9+gYpfR5c8z+h7dciGHQzMcDobyFxK+e3dfWU8RUpsOtjYPT6+WvaZ80DEsAxOiLz1HY8W/5Fj8
xmL4Ijj5cUHWPGuhwPKb0AS741jw2HZT43F64DzJ8ahznSnOVdfdXosBPyBkcMl6YCoJu3kyAhCn+rG
tjwvW/AifK3NhUl5D+3PZ568D2BBx8Zc5vaf4+sFxdRUEfULl5RkjAD0NpFbGAjrtrFsorXRC8SFf1q
SU8stJCY83iJwDozx6YWwZb8OFzQ/ClfZLkquwDvWV/11+yNsSMJ71lka2qHGGkuDEd7WsaZWAjts0J
S5wli0f6zPNnbTr4kVQ3SaLoTPY8twlX2rPENS+mm9/64YqAEZyZOyWDnbDKnc1Z2Dqv9e/edsIdDI+
C6s4E9Glx4ZD85lGTAZN0GoBFox3G1tV+W6qBKa1yv7kHAizoy8BIfcedQDjRD1dXuZTxi3Na0WsXZ2
9hrsxkLWbFWtMvbZp1ZnGjhreiy/CBBPgUepMOzEIrXerBIvq9wNeh1hwU95b+06AHd9EDnIn4KzA+4
LNczD6ZZ2Gsf+1uSDEvU5I3fpFWPSs3k69tILOxgGSX1lyi2FPm2FHEA5R4TkNv/VBUu1keNDEwqxmz
PsTj775LL9cZhdLEnq3DiGN7UIeNjdLtS7LHtqYdi2nstjQ7sHvn0KIsWkmoG2D4zzvPkoRhCsbH9eu
/5lzknnSWXmZjRLBtR3M6hXrf9rJL/XFV615pUFo9aTF7WsG0mC8Ak4756XRSJa9RcfrEWkdSym5y8X
X7IGLJAQFU4rNWBm/dyxsGwJ8pN3/6kYesThN43CxeeqbRAlVU7iyJwLwfEQJMaZgAB65DDhAoZuutw
gdqTUnCa0QGskgZddGZ6IO9OU9NYAHOCDyuD9TzznMRDAkSjpAqUabNG80nOlSLLddWuOdDsFtcGUdE
KI53R3KXwSDjLs7W5hQWSuR7xPjMkkGwEVO8GDVC6riTa4c5LIDxBLYPm1GUMn0FkpjyBbbrpMVIbBB
s58RVlwYKzViSaEp938LblMMHBEaxOs1UUfIyqYYRi3WlhbUMiDIcjGhKhTIRx/HQOw9kNa5t9mppQk
GuIxZcFwelynkTQmsOPPpgVKb4I/+G+DvEG33v8OBpDgAWe8AyxQMNStgnOaEETl57UVGwAsJRiHMqo
fF3CMjjGd+U/qr+zo8s9DRxI/sWN/vg2zA7FkixOU3amLNFGO6wVSlpM/J63+QQ7sv7IzIqFGRxGJu1
zl9UxTcM3GmSUYkh+6Lt3kpyaiDBFg2kgWjFHa68Y53P5Ng/prwavJtb/VimCyzbw5julB6oqFdIRY5
cvIWiR95+wGA9q/05F5ZaBCnYL//0oAKuqcaJTiqt9jZw1xJ2JSVkVMj+e1Lq1e4/K0x2gUmrLrNm53
VeTnZ4NX2PuSsggFYNZIj51Q+PnYntKE3Aq+oB6F0FwfFDWNBjxcoKa3PcF251Z3FDvF68Xe6+8Sbab
tQ0i7FNUr18UsnB1bnzUob6eTcwY1DaxCE/b84A6b21lQUoXt13D3c4Ab/1VwRdnv6rAlfJ++P3OCLp
XnyYm60fkTgDosLBBPAeHKSwen3id/UWIDslZST2vDvWCsoKKJcoW2NveJOEsUbl6AN4sjUQWcbGc+f
RMBTA4CcEQkvbFjsNv2FBWWmk49GrUXTM4KxnhTv/KlIjSCGfcgauy28f8zhM9rOsX7FZn22FyAMHIw
OEj0BkTV5f71Nlia0Kr8/jwGW9EVaBIHAuL2RoZ1JwqlL4FwO/bvXFQT9vtmvNso9a/ENqRHu7zeuVJ
olLCcOXIxo4tUfXDmQpXXIxryI7oz+44Hh83K16QuTwGYRqSzFnY0IbexDVh1pfRvP6FAGzn1OwcYJG
DMrLHUWw76Sa3z5uUBkFC76B/XPcn2URiObN493A60kpSQFDry1nFmI8T3yPLtWfxd+JZ1mlbNE0qHp
XtdVXzqvD43OeoRRz6+XSKyY4HJJmjmUqFqlP5VyUwKriVsgaK1JEqr/gEgMVDzqvPqF8nk34Mwp0Fz
87keHY9sGLGaK6IpwlOd4SmnwEwicOIdZDiU4p1CHXT3a6q3QKr3zCWyDiKTv/C3PUYBwQFiab2A1l3
PHBd+suofe73xLXJ0xN8jkDhDVRamtOKeC5/fkedsx+wjdyPTJJ8Lu1KxidtXzmfh7UMXzCxalmD/v8
SNoUmqo39wocw66q9n8OoVIKIXTEIWheMTflJNIObXvIx6+w0nHFht03nj+JaYwT/+De1HylHV1+Js3
HvChnidX9Dcmo0rUH7lywN/sm/9lj3sUiukzdIazmlUBrjLvAo4c7TcWGXXNZArw6J8aERqVaFRc44K
HSeRqzfs+nmNrpzm5ujLW04y4GtJTeatTOpwjLPcfp74HPVuVWoEUepRHi+LouVQk0LbvIomB0Bx6Gq
cv+CDRGiIu//Ak4aU2KtMsH4LwARxIfb5mKAXTe8FTszaPHOu6Pv4RWdgQOvyMuNd7HY4m+oymZMPwh
CSk8FpKkyQjBlBbR9MOuTegP26rU9lHkoFuRUCYvzXnSJ9cMbOBCDHMNfNMAsCnxu3SbiDjSmXsyBy1
TOAui0Maauh5VERY2Ajz8szgMm5vd3SndZwQiOi0C9gSkReh7lTYt8YB56jlI/dnVrMhBNHml2ltY8M
odV+MwCN4S+6+9ffhowMDb0YAXREOso2QbQa3o885XSur2vimpeva8bR9FzvjzUTDLM1DNxhBMyVPJ2
O1wm60Q1g7xWEk9+kDME9ka7UvzWzTVDVWeJpfSk6PG4U5YAWbYaQBm4SEocHOrnABrVBYhrywcBCla
qA5qlD9TfefDIuE6hdaz8L3p0aZb2y0rBjYcz2PIc2rzmoQUgU/xKEjlECltdtdnskfrvHdYRcYyzhv
k4XCBB/fzu+O1rRomJ417xa/mZUlJj5OKSYFW6Taqi2s/+vRfDE7sXQbyLUIAn5bViOYx+Kguc2u+qx
0K9as+pAW0siCwETn0W7c9DZA1xWjBbI4DIGf0XLH2n9gW6xbnpcmfBh6yPSHYQffPQ7fun+wRk7YdJ
n++OANVQ2QK10lsb7a2vL+09Y1gqkIbcRkbDe89LAUUI8QUCV+TvCg9yzNWtoYIgLYo+YM3cos5xWAf
TL3OsIQiKXKq2orv2BjV1Ppaz9D9zz3KZUUdhkKcWCK/19Db7HpyGnngE1O5+bsvY6UwrPUFCii6Gv4
nrWCV1ERgOu97nbtA9OQLskJL7hCM9jdc3Kak/dxeKdBmebujFUvRe+uH13LfCdx29e5tffZd0P/K2z
uMkUaWHNlqXYoBfzk3WqXWvNOV+6FVjkwvGx5RN0Rn5km9Ddo9kAdg8GU4JZqv+YugFHzPRKCOLtjxw
4TcwEje9H0VZpqO4nGNngdy8rrilG3AlIJz3Z8PWUPODedqEGzpNEgwWrvN+xFA5jO+gDH25Ra2q/+j
kjJohJme2zmRYbnrraZKGJkOgev/iiIP5DhyPP18GeqfvJN3XeP6+Aff6dWVt/fNLQAufHRbfuZ1EtW
Pmd8hCJMDbexmQKII+sES4PcO8tI5sNF0Fj509yz97dqmHJ2vjrGOJDP4PisPcT/pgYllOfGSh223Q+
2MfQsQ0HtqLBovxhdOLjZ0XlwIlX4yToAGRKM/ifqogWKLEajittCKeRfGaseFbgnqJgoy8bs+trGe+
7C3rHlm1HPK+rywrrMRcqPeR2gR9NrLaN6I3WlV4j6mahZRWthJR7qBGYUN9Zdl/IqT0ic5k/AKQxsg
IH2hdZGsfDLaKATFggvobdV2aBgozeM9SYklyUXMgdsoa731C/DUn/b1IjKAdW7+peLa2ybElOmFJk+
ss80bLSX5qBLvEoL76F9kKTfu9aaY7hH1jSfPFfKpTfz3i/6S3onYBxxuEhkHNsrVVa6naNQjHRGl8p
i/aCxzjCvk5kuPevI+iNWipxeKzza5E63A60+4G3BEke5D4QfHSkEzBaXqTcrPT1LqCN5FG5r1QRUJX
GgJI2+P7DkpriE+g0UcdPcSYlIl5BjERCVzz4xHdIj+pyBngGn8ctGWeXuYXuh19GKW4nKzIZdym8g4
o2OF1Du9Aar1fKjImGXRRRiUJIgOW6q6s+SWGW2gB+oVdmKs0eOAvqSwMAbc0CjFrwOcV4kRPtYMKcM
bJIGc1gkkz6vY1nOyDwPXgpDVjMthYhX6sutpZ6Z0wbA4sRNq/3NHsEo5B2hEVrIhrFew/hekVieS9+
b60sWXDu9lGJWeHqTXhigfaHC97fhubO1qeq6maSooATzWMrRUJI4tHRGD5yVmkoXYiXmPBwjz+Kqjq
QXJcFNygP653K2PUokdwYAYYkiuPDzKrxwHCLECECVVJ4abRO4kLo4pgtB3I28vgd/2yl3GCNvmFIKe
thOl8ZZ9P/Uor3WJ2rSSJ7cI+1UEdmWyE2IjLTDM5qkz52no//H1W+BJcCg3vhYMSJa1jvIfI03KtKh
1OpCuUHtfPrsH/CvNWkErt9eB6IX0I2qloAMSZapme90EllqGlYHpBUuZMlibPDrd7LTXqAA52ULKsB
b+KvbVV2O8TCQjG6nBAOC5gd9geJD3ci+oKeUrJzN7e6NXJAU0BGmH0s/NEYSMqtGgEcWDCl6+YRveL
DbDbNpZjN3eaXyNetpI1tYI8FpNJGHN0Fo74Wjoy03AW1BPQhE1nXPEygpsEZ5vY3BJdu1Tagm/w/Ka
GQRGI8taJfKZWTue6YlpslmOENHtod2JI+egJT7bNugPou7Ev9Adf2JGh9Mt1hQoc2pFJqY3GTGoP/s
y8A//HnfTVr0muQgtxIIoYJelriAjt0W33r9Fiq1LnBcYJLeKmqNbfscHHJoQcaCaY3NYrArcnZxS5V
jL+SB0CmzGSA7cjN4SjHiAMf/uDg5zrD2LzwhdgB5a2v2mGW1jQQ0IQeawX4+n7F5WkytlGTAkEtts/
bgBr6RDelRpM4cRYnGmOnE1aIK86azDFz0hBkUkv7lsu6q+ktFI7bAIZNboWfbHA30BLwV2EEr305l3
oPurx3ahYIlsSKF4U4F86ONmKy3QgIWJOPPYy4Tj81JfQjXn71i+pygc6zVSkE8cjyXu73FBjMFLmBn
HjxhuS90Wfimrvnczl4imY99sC6vSNTT/wRiO9uIq0JzVLgpmM4S4lr3UI7AOFX8ne/u9Uo8msJxUWt
WBFejYthAwy17gAJyFN/WyI8t/ujyhExEIBxxo2A9tJIYEUusybacbgzhJCFpezYk98OjBtNHxywout
hG1Y2ULTBdFlHtXIgHHKZNBk4QobGuYq4UqzkiLy62CsUGNOUXhU8O/tk03SVcSnmkoH4SufreEf1wp
b476m+KN9uTG0ne87OybMmSPmA6h6VZcRmh9Q7ji2YexAVPQi11WzASVLoum5/y0T/YQY12bKAb8uIY
2XlQgJJ9zdLn/R+YT6cFgmlDlu/ufwEq4sU8JFhIwigDF+v4sd1UGfiXdcxXcomW5nF6i/GEVpBpI6n
BxMOP1tfKcNd3peho4nmoaXkgg2MatAOWP4URuwgBoZ/lnHg/n8Pwgaf5yYshlhPfLuJVZL0xX3oULS
b5pQlQecD/lpDXBxwjd3gjBrCAWCnS2F+i+p7x1KeIbSPx89Hx+oLyUPx26bdKAcHcY1+WUDS0HtU2W
SIforPz6RhQ7wW4neGwVlGiCDVcxB3TS8Z9mqYvBKwxbRsC9HdhUYO30QvnZy86Eg6uWtb8rxfyJgyZ
ivVosA/gEH8ZFb3Ig+TCwY3HLJqJYKqn4PugZ9CF6R3SZ1zbsctmYIcHONrL2m21XFwQXaRJSVFzDbc
f0wBXLBh23bWvwMSzROw3eiBJ4JKIodzASxrijTUAdz1HVX0tC+M+g5UDep8idrus6welOChU4opViH
cNkFeyhb3ahltEPl7fZU8cyqxXMHkiI6M0uhjlQaSmZrxm9YUVAmEtoFVi0+CZ0OpwlptHi6HVbNqrQ
ab7/awSTZL6REYPaV6Q8JbTKJSsNmzHirNzdUluzB2kC7jt9VRpSop7djdc4dvxIsMEC2jRe4NVeCc4
3Ic1gyOPTMyX1jQRGNqbASjMqskQjMWMrLFtu4maJV57n9UDpSsrxec2b51mZa/5nxGTd7eDe17pTfV
gnE1ZZRwnfK5+InBl7zF218paHqoeJ8W2X8vrlUBXVMttwUxpgvuzDQp5NzcE3fgUF77Q5ry+Yt1MNx
TVFHXup9qixvOPy6usuA6MsUgO5T5S9HH1FHr6wf7xv+pcrF3+KEvjPCqQY85VxwMenFS/yXRZe//yq
rUHqsmePLotwlTYhDNS8pprpyuddabZTRtC9tCFPwb+mtMRfokpcsHHQcaq0L4dIFjMHDVfHxaG2yPY
fzhl8qyR+V4Dadfmbdl99//HZlzj7IF+Dp69VZSJt/iGLwbRyKgGy2fc2AwpyB7Yv4vgiuAhaQHc+mq
upLK4MDSgna+bQ+ahvd1ivflBEH6uchLzbTdHmJcBLtdxxeE7aXFJUkWjUFjJPzlNsbx3gMpMieNiUz
qciIBDXPV8NUrY2JgQWNwSa0DNJiBYoGUct7xd0E432+MsNdnqTzq3fNFkolQn7aFKnHzvUa6j0HCaE
d00zsLJLBi7WfwYsbDENRcGM3J6/5FhJOIjP5uokF746TKwsDXJbh9AkGUrM74jm+Q+rfswejmUTbQb
8e5s7nIFIOVMlsXnfE3al9r8cPz7Dc0qYGXaLtxKgiiA/ZeVp0FICR55H5D1rVNolp63tuRBT2UcvX2
6+lsjGZ8gXzeblHiy+UEGCcuX+L5HZz0rE8B30T0EQAlRlO2IB1K9Y+myHza6xjhe276YiUPfOW1w7E
m8GvVsHpXvVMFf6PvYlQc68Az8gTIh3d6BqZPhewhc054s/dT2K934QwWFDC9UEsyesAeWqECEX4+Zm
E48gH4Ue/EQ4zOntjkJoW7+E/NKt1J1DN6ykGHoy50UD6noyJ5dYkMPCSkKLOnBwjvqxGkYUYVuWnSF
knpQEvAwc//90yp7O24mwYf/yhxnfoIf/Rz/KcRyPYe6R+2eDL01IYDhQGCOalRyclVXTy+ACvocDnE
OR3c5LXVx0DcXMxEZTToDtD/a0DJUt31Lv7wJa6CAKjnNBpeoVyj3FoOIwWGAAOdVVOW5WHdQrhPfCz
nNo3v4UHNERlXvCTuTPvj4CY24NugIdouN9Kno5zTizFYdJ0IOnIjE218XU5lfstzXTVZ/q6Q4k35oA
Qp8eiCt25HgbZtZnhJ6BE1UkDE6ij3LrvhXQq9gs97vEMLBhN6jAS12qcltogyX7VRFy4csXa+OFx/K
Zwpm3+JWa+cKkIU7SZaPL44YNrrLVixj01EWkf2pYkDtrTfXRjBSN6i/PVId0/vYskRZ0v8XS3W/OcA
f+sjVa01lAnyCSP0PkCq+AIZh6dUQOWcg3uubZ9DEY5WmAZgM+XW2pXLrIeVZXKzZjLA1KbAghga9i+
prYgJ+lWy5yhEhe5+HT9+hohH8ErQ0ZhIxkLM7cp+z31occLkS/cqN34X1FT0b0zQP2JBp82yXk7P+H
oKUCUZatCNY/uzqO64r5p7ZcoOhVxFwyA1NESEyIw5aLRbo0P6Va8rX+wJ80Wx4wSah5qMSkV6Tlt7f
dUboKh37NFDgtdbeNNYekcVtbbMezHNfZjxX+QiJ3SsjXDkKnHm1082qBxbmweA1TUEIEe9ovlDv8Mx
Xg3qekEOd7qUUUye2gpYq9PorAHoNl30Tm7kU6mNVsiMCSgsw6YO/uwi8qrKPjLHU5Io6W9HQfTWWah
B6bMmP3tc1XFq5ZI3gGaVTsTPGT0pQLcA1fc1riup4SSmBecqNhAcUcSXqcmG1cF8O7aneLvYxxQh4q
HinKVcKULz2iYiSQ3Ge+Mw3eTNDj2PIV2GgZRrGwg5VZsCWo8yxg3LetGAfQyjn2fCXLyQf6/jIkkl5
8P9eafP8Z8QYidCKgdkLuCZ7NiRoKxxM2hSmoRoUphwQcNyiJ/nB08GISE1V89SpMqsUVvjq7vTxL/9
bm3rAmXt/9okHbQ4/cg5YLlsXA6VrT+trrOw6xis8x0dMbpMqB50eS3kEuAbksuN0wCgnrziJ1t1qSu
Cxt0mEgicrQwPS/+ICeV4yi+iEZGO8p8gFJngea71CuCoPGCU6rYGC47WVFGpIZlYP/xXo91AoLJQU5
wdrIDA1pXYtHWcidgTy8plSXGzDdFNPH95HTBupO6es5Lbtm3mfrVcnHgqffWsDQs8dc7vMaWL/BbMn
caAF1i9BYPlK4yxJJ+UkhdyIxSEiMGKIhT79qyfg10Z7yJXGBpgsHV2m3l5SiB4nPR22lOFZKrCThNg
WdgkHiwrctgja1wDPpKFAPYbjR+q7NR+MhteiW1FnUBjArSUGPUuGzFr9JJSgPBKoiZRAxLIAZ6HdZE
NI/fpBx931/8v6JHiWHo8SSUBuePNyWHw+RfV29AgXIZLUDoehEMryhb2c/Rd8dSaqIADLkHQlI+t6c
+VTZW1neiyJ6lXRysFg/FIYz1j57enaK3HcfpkPKz9nFwT7eX+0rkS8jP38FmC/Zd+kz/w5cHXTXH57
V2J06wsHPzZr0mKQXrc1bnKIWZOoXh2bAFQbRvcJv27rO1TFgWnwLvSIpqrlvVBuFIme1vMHyd2AWJN
55JS1c7y9HlHv4J6CgbYwRYIaHK1trAlbKh+P90fBF31ySoCGY7v1r01UHLbmPlviDl7vWVySBNuQk7
kqu+nUdZkoa4dd5EwFjjvlbN1DFLeW7GszapPHSUBQSJHUZkSaZv6UHyOq6l2/vz7WdykyI/mMzS8j8
AMYIpWfWS7oXuCR5wAEcTxwnt7jEcX5XVG7YZPmZR+RGShtbR+qHeeyBMkjzd2N5UKp3rNhXTqsYqiZ
qruOsMD42AUX95fa11WGRYiKOQoMaa3DxftFoxlwiezq6hXcquDu6V3h/xnz0P4yC+IezEOIYZWqhkc
kmUNJZVMbKIrB5UJp3qy9KWEbWHSXdL3Zn8Z4BpnMkPyvxx1cSFARpd/Svfpfk6AMZ74R9zDvM8JH7O
6OG4UU2ycv2J4PL6/LMMj3XSiRftHjVOTj1i9d+LM49wIUSWKxEnJiZ20OkzjR+ipbf+hmZV9pzuclv
yO70vf5W/w0jvI9lVeUrdRq087WWbUVAZri/Q9BMqGvPmj9LTPekXJIcvYFXzq5zs8PzhsBygbn0wzT
4eMPMzqd+q6NeZTdJXDxW0Pct9yXoVtNaobNKplWIJV+DJ+aMgRtiPnpYPBQgjsBx1MJ/k/ZcQQlTlq
IhYo0hW+F4a/q4o70wl2ra/Gkuy0Mn/r09AuEOGnX9Ad461XdXi3/uIcY21xJ90UYH6GoQEjkZdtOlW
zFEXnLFjo9oVoKGPEJKQB6mhub5Z6HKB6MJMFqT063OZ1TNlFWoyH+afeY8lYdgUExNc4EBCOQx20cw
M163IQaCu4ZLoYhBWJ0hg/gyS38wnyyt1xVUpwk/CSwbeh8yHjhOBfMBhjrf9wCcVEFDWYWCTv3ktn0
9GGZiLk/6TAkrMgYHbgCUFjIu8b7bBS8RVj2iH4KufIzrNNCKcC9UvpoDNeryKMwq1P0H/VUI+OE8R3
1T+XDgK3Hl/FE/DzKd9UXmeNj5J5GeCiF7VJ3ersH0Ypdm1+fgMnCtEZjN6e+rj6nPuBsK0EkD9P3O0
LHk0Od0OaGxikTQwiJZGjjdJHpmekd10/8pkGJldjAMdR/Dt5N+TtDRT6GwdI2L/4QCBPE62dZw3idg
0K74fdwGUiLCfZ66XObjGS2KuQAzwrs1BKWzusxFVGZHl6YSrE17YufLx6vRN71GEfozBVxBjoFUW/1
B9UqXfRcEFbjmEaZRZSoEsO/yO1TBhi/PBefy4eunmTUg4USIlwyeEqFkhFSjoXQHkd3ExUt87KE0ze
OPwgcu+fgWxzc5DGodWx0QGaNZQOGIoVULuv82rnMdda+DtzLZqKXSbOoBdtpV3X0Qhxb0sOboZ9sGN
+gJck5dNZEAxT0a0dj1ZVo6Esg+b9250oVIco4e32Z02fyJbm8qpEHBpc1ItO//SY4Tp5Ht+4fpizzF
x4eacqW0/Z572OT0gq9F5+FtsVSbgTF28DRDqOnNggc2+TUJSCuLJ1vgBoaDDEKXw+9RrQLEsehaeBR
fYnk4DSbIEKH9vC6xfzsxMpEhgoX7gvsicMhRKbzdHz1LZJnHpMpU258l5eJ6gRf0pU0vEtm2NhT973
jAe8omvwhMdsVWmq5u00Z3qGhPUPVMff/dzF6InEBiSPde7rF2vS6xpMnan0Myui2p4oZRiL/6y0HLq
0aXccWbHnoFI/OgfpFeZfueg4toRwKTs5vdOH7uyTPNNWri4hM6AyUfI2iEfok6Ll1GmJdlluWMOThS
jJqhjnRr/GwThb1IrNYu8dubljznjGO9GRWEmcq0Fq056RYXnIFncPL06a3jQnSyqNYVutHtV5zzCtt
v8r3zWchf/n5zVQMXtJImVt0uEEu10yiyZK6JJMZyW0ofNmj5HJ8dmait/CcL0rlzyTF461xm4K86j0
F6pCP+CzAx3woa3pCz1/jGHUebR4kffR1qRTISa6UwWxihkWfpGExP/gJI3uGDV4MR5OoHH/Hzl2v9n
i3KXdOUf5ikRD3JiikAUlevCjI0jtbS6b6UWiXltZvP5nFtOSc5drvfr+x05QLHLLkxUoULkZQBNGK4
zfmICxplnPf1UFvGudQFQHgcsmyrv39J/Yuel7XEIcq+7RMBLVTKEDmP0IK2ZHWpfgGsL/Mj3rswXCg
o5iO8idttLk0CbTXp2Mn8j3AnxtmRQEt5Veww82IUZIoER8HLl/LJNjeexyWnMssQPim995hTFMUGfO
/3mWMR27zeIGXcHW9coB+hAdUXQMJFCnBl83IriXwyuVsihZOnNaqLv8yrLvXzeGXJh1+ipCshzz/4D
oaKRvR2NS+OGGuLoq00DUM/PZxxB6Yjzye/oidP6ov4JGlVBac75Lix/sKVbgrqgSqL3dv5IHKDJDvD
VdCojxAu9MEtUD7M/UclFw0jilDvXqelRwlMo5JXSZoXyn5GW0e+SxTkeHpnKqw3o56XwgU8hGvQdfO
owsN3mh4THzB0UrE33Zj40DttzaDljdN7w8HwSWOm9VMC5r793cz85eYcZpk/Qs8vLwlfQzU/Db7Cl2
9hrSHY1ERg3V6giwvgGUPeKHk7hucxJFhjsbs4spCjHC5Mj/2/LhiU5o69jgZxw5zYBbzBtwecXq2U1
HRhCr98eR1CvR8hX+uDAtIQHZDrkx3mwrPXtfxw/EmxtfMIH6q+xbSMhDdNUuH7VhljtMCHaBx8pLOO
cZfKzj2seqHUaoWyZYpsKQ1BzIppAEJSlyPkKLNJzhSS7YF5DAreNBKJSfO70z6J4/PCMGc/vySzG9g
uW8Sx+1tAyVVtngxCRBw9Sta+okAxQk1fy+K95JaoXPZ6K6SzV0fefeHmBLJ3DtMU42C0b2yOIYpZ3V
MfzuALGB/UoG0979RnpYF9dJ6c4TcNiVAgrQg4ezjzxEmG0r289lUtwKY4xUeWYj7IF/qh3pl3eezvS
ajdDznbw4LpjS1GD4fTAz8DDfK6ksmY2kEgYmS0NxM8g7BNiB5wKu2JmEBV2HVwyu41H3pIEPOizwTf
UXjd5E5A3ax4Fsex/wVpfsSgoAovkDXyLw+nZSr0K2nIaw/LlmvvXF4Aculf2ogeHPipLqoEFn8s0eI
CCLgP85V9tVf4WFJ9dnOMpmQyZwYbd72HBxWECyh5W+zPNjcfNgxq/BVeo+HYFEQ+4wtMr46W64OGqn
K/hMaz75cMgxNcXqf7wxZDtrdpdok/qlyAjrF+oCmpNgzWLa6M54FiERdOMUemYImwnenxH0NJIC9tD
JUYh54xhtzGRC7Q9oUrCaAACY6KyPDeJNCb+wW+zzMffIpjJT4fORaDyXKfUQBpLCTA40DMjjQIuMx1
KMLOo4ww8Cz+dn2/FTCagc+lfJBbxCmzkrRWm1qjBZ7qpp+aT21S9hJe1IMvCvOWj59/Q2F+n7jAmfg
LMLyLmkFwJGwctqBtD9o7glyWUcX9VBqHqEdVrrhw55JZKyqvIf1c3LErzbicy17F6qUYZZRkaJR0EK
9Ihe3JTprRRWDOqKh68PqwbcVJAShp4FuIx4cG+TmqoTOcxrIkAdIA3Sat3Cu+WSNQ5FdyxlTZ2Y8DQ
Vxeba+XvOEkJqTuQFnMBnnFxuo57IZ2XRmB76cLQDLcax63FL8z8GNH9GrTDPryLeq+cxXYE7l8p0ez
u3ziQHch+x7lqqXCoWQdYyFhih9WR3OIbBQOBFJSta9/Y/uYzxP45oDLqDn1Z5fW0vQ9O6kF53ZJg1C
EQzvwYuY/PFEh490fcG3yvNIRGhRZyHFINGMIAt8Lo8aw3Ntc4nM6OBhUDe7Mcpr3lWxN408fjMtgyL
hVoCvVQCGTtCESknz4JoHTuEcMMhC4HOZQO4kW+ZQKXhDdEv6ZlQr3gxsmdoKGditdSYXHIZniyolBY
cSV5WnVNU0h/Aj+VZUhBou0pEX9QcFdyx4c8i5d+GOOl2s5+4vAd+Z96+OGd/R0gerhIUE4qS+t8E3s
SHbmcf1o62G+6YLvY1+3xOgsv5oG7IOJ/LiZNwBW1knswYWUsgiISCgSml86c47cibjvlkYp3O6NnPg
e416f9szyIyqB63k/VB1YB8k0WK/QFcxzSGXNM6NevTyR3tZHsqZkQA9bSLzdj4LxndZwOjKOkD6OnL
XiebtqFEGjEAx0f3XOGC1qI9eP0d0ClbiiPgYQqO9ipR+sSdWKx89Ex2i5FtISbJMG5ZxC/f0RfyQ8d
uwYIuOPq9TgvosH7PWtX6KRocPcOGo+it4QQAEYnGqAAxMX0y5GO/a6weLYo76kXSw9ehQiugyhWYYA
XLJNb8xMhy6GSxAHgKMhBlTrcpEPHqswhBtXEd3n9pRICL102VVrPTUNP/9o/waJgS/ZBd5q4/2GQnL
sQaMxdizxMDY5NX5ADq3ZSrqADbaruBte00FlbvUneNI19smQ0P31mgEOW+jsfUiqRt+by6/xT6cB5+
tu3JykrDGZYvnMTMV/4STIT9+jKKQBvvpQh7V6QehLmDsbMxW0CvC4gvFxSEim7LwYo5JRe3KL3ozo3
t0wqlGV5EW0fiKHW9e3N9qZpCZrkRg6JLD1Bu8myr/DuYaVmxiZvNF89WR+a3fuLnNQqaifAC/gC8om
vT752k9bOHYz6Dr/JICQmMmmzI7tpDK2avFtjJn+br0HsxUp5yPg6AALHPov//kOhosCnbmxxiDg03W
4djQJvdmy1b7TNcjwqpFZb7sbKn7cdJxGlELgjMjtX2YKa4i55Q1rzMauqrU3ofpE+7EvKUhbsKzS2F
zf1ZXAWJ4P7V6mkLw1gHhmTHR3hhzpV0HXaDnpokmtCVDJQa+99KFShLIOeci7xj9t3Q1t2BQVamHN1
FSelJlhFcxS5XNoFZiUfnbDrmKBSluULS8o4izpi3s5neRRh9YGpfCrvyadE9ciMZuZY91r4MvoUsun
hU/0TJqClPBCAcj2B5gkebMG4FllaMUeDVTU0o4t3RB11JbEYe/uDmAE5mLpD683OSy2UUE2ylXNH9Q
UaTggaH9hBM2aC32oL8vnpWjduQ532Ge/aacr1i3VS11YOcNDuv+XN/yaSqXcun0qE/3AX++K6Ogoap
e3EfmDW1MKx3tzbjgvVf6w9T8C0Wiud0fl+P6lqoomMfPUAqW1IpA1LmsJxRITYkx7w2AF3EXTJ1F6A
egfYZAwyduNdhfJX280Gyabb8Mf1v2D6R/0v6AzB0sMWrSYBA/H8dafVS0nTE5t0BZj3lC15qVn7sWP
NoMD22dO6AfpqADk2UKuOrD0cPKa/tnpOd0XH0KGeFzjTCUI08pQ0AkljUlzxtTQa+uC9gDh4M8a1ul
Vrn7clquKQMyc5zXGRsIU0falfMUnhKcWFZ1okXbZRe5cjv5kL9mzJxRWcO6APLgtw1z1I/x/3exS8c
kty7H5/cOT7eQfqUyQbJwyeHwvmOv2lmBC7Jz2fTnRlwXQ34yfwGywJu3BQX+JX5BbXAVuCSEZoF5B3
D4l5Fu5zQmF1KK+KlfzQ/qyjhSqSM4DRAH8CTSwjPiQCnYfoi+dt2MWuCLTahAAi69PX8y3yEdUncXY
pcIMgBZ7URyNLse7+WL2ZU4jTuCKp+hk7AxaT3bKwzqttBjTGwt0nVVl6DV+6isozycrJn++4AIPNbm
KtcNss48hecDlOkXkcygNv0IzIO38YPUEb4BrI9nM9KGF7pXy45iTB2w1g7FjgHDxJ7Ukw1NhlZ3yiQ
4bILbGmroHWVNTyQt6JsUPVTFgWV7ploSHLXugaPWKehFwZoLIvmwUo0zaNm0zbwGqQAuywk8nMaNRm
7loicQO8nw9hrPtNsXw2OFmkPLkJwOSwd3cZTQcKpHjEuOJxSgkEi8e+SrwdI4DGZUxZ/xjQy58GT6d
EAq59uH7nITvQi9QssiBJpDfgUeGEsiAug3SPvA1iqDDiEElv5MfNn2o00pYp3brhlT89orPGVz4TiN
NVtUkpUYVivQKDUTTA3PSuBYsSn4ctCd352wFPps95oE9k80eC1Io0Tl6mVCFNP2j59AsUYS3eGYSxa
TyLYWCRpgBOD5wl7yTThdE4QVWjIRTzlqAXSfEPlIY73lQmQ0AhBFmdTX2d7KJbNJdBBR2LgK0smiBe
a6Jb7NGyl/XNp6vjf7x8uw+8Co8JYTIeUaZDZel0vvXQ7dLVUsb7nIaSESAzyklmS/i7V8SafJy6RDH
bEsjmKN+K3BWSKr3Esc/sr5k9wf3Ud2Yd5g4+aQJSC6ZLkxmBci3/9VZpsOZrr7ZYXNPYhYTo+jvJaL
CWuMhY3Duzg1iKjKml7llonqu+ZPKp4tIe7NtUpvMHxU2EO1Hb1dcmkiTvq9ZvmLXgxWpdzBGtOxjqk
OuGfQSikg22ZjdVYP2mtiP1w2nwa0jdprCnPNIQ4FYV8WZRvAfH4bzWmmLHokSuz6CE7CHUQOQS/uZf
3S5Pgc/r+4fDh+b0ArcBoForPnPWbZPjsj2ZkKXGrXHzxNUTiQ/Htzyu8oX93Ni86aG7XHt4p0Y7UfK
u78cHfRZoRwWThIYRrQWw3LMKNCNyQZl4GpCOadrhyqpLqQ89P3a+2cXZQy9ZzCS50NWvZxkKrmrvdy
Ih2V3FDZbpN1HZEAfJe3Pi6wg7sfJQg2zPOOW2BbQcd/6D1T3eLaH8PcEAA2ozRb0Ljw2lDBSlyA5y+
9U50CBVSccMIx1Q1fZwpUnOKgBv7bJajfiRSp/QaMm0lIYPFbpXgJX7T8TJ9Fz9QSJrDwRWcNNrSVVJ
TxeIeKKuTyt5VAUom0pUYxchQ1lHZ2sAPxnS+q5xhtfWVbvukm0qqWHz6SzChQ2MbO591qBxjMok2Vs
xPP285oBp8GCFGK2p0VENEcd9mldPxJpGLgZCj0bPVuNX9EC0WfYsYYNwVQH6a4NGABFHPUBuWV6M02
noS6pr+GzjD2bgj/atDXPp05Fmdz1IIAmMv3hfHHE7qGOsDYVTpHCdU71RSHCuEP0y3592TG2OerCu1
jkn5Pl8IzArL10dZQdumuRuWv7uLZnOsGx6IPZfBvGql1EPYdwWaPNxF7nVVPYtHC/f91CLD55FuTvz
mjUcGgQL/SxjDmCpZ4Otehl+PhDzvAU63fsrdWjtMZUGrPCplFZifr7aTYRCnlSMHfUGT0oq5vYiHjH
axixVMeb2PCJblABptP/XmIZ7oGj8rx4Dqj/Q2FJdIobUDr2TRTRTPWy4S2tmHwMbQ1JjRURaUBadjR
/yfKF53Um8lDLucqq2gnYyP7gQpzT7qYXkI4Wdyn4mf5typR+ovLG0BbLEuAEYIUGQXhmqR/xfaxQhL
Eh4VDATGeGvEbBwAN8Ptk0QkScEirh+RDJwqqiJwoAfzHr8WvSlvB0k3FHmjRVfuYUtn2q/TY2trwhi
X6SdBfiSRsWqhP6Vw2NbuVQkkRx4gX939DBpRiRiCs2QffJ2K8+trYXAxfINBkv2xun++KtuDJSVhLM
0R0TCraRh5MSY1UzVmcocfULOw9ciIvvvjeprIYosh2eLPzOyO99Pv4aoVFhWCdmXTKB+/PwX39fNXN
nDLixM/AKk4ywInNOV43CKp/EjHCu0YD/dV6Q51DN6El7PQPow5/xBcH0STtDGJrrKlQXyJRtD3avC0
uwEWeh932oIzvvA1gvVcS1SbZEdf19jHn0RkOuCO+hbPaG0byE/u4KEohUjtslVyOyCEMW4zFxu8kSJ
eyVzA29upzmq4UGY6YQ4Dv35vMXT+73XqXTVQ4xH9fjYlnQwDrVUvA9WfgC9GutA/FLwB1VdaDilTg9
J8aNIrd+HoFGSfT9/QsRycJ8hAeCooFdDa1XpI6JTVFCgLbMDXvZ2JY4mpsOvoE6UoAFTbaCRrNPXWX
ax3i9Lz+e1qUZcl2URM7IpY1NarGwFpFdSwPzx7Ww0DKn28kmcKwyQGSMZp0mGhjnA5X6EiMw2BFqXi
4OXisz0B8svlExeQ7jSwFFdmg3Wo/eEeV+dzZzmMkIyILSSZ/VwreL/dJ1nthbRapNw/GD0fqZqsj8O
rEzI/gh1hWgEx7Spf3dETaVuyEnuzhAoEMQV4S9SyV7lPJHIQWxp87DuLkds699LxJcmHZEOcAid0D0
bgPq6aBFb5LbaJJdhjzt+ATmN6JMOdY8lJOrRMOso9pqrx/IKQqsp7XKLnIwvwUNUST3CqKUhAacp6l
XpBKCg2/Dtkc4lrUGyhrUMLN1wYGJfKFzkcQI0MCJ31pxr9Y3CMTD6sz2YDIQC7jMnlP8LiFS7B0VVs
r7r+3d0izjsAs044cX/Q83Ep7QkaNxHSYTZ7gdSQw4kvHCXhYLr0ni4a9hnwhtgysh8oxzNCluCsQKk
VuWIiTJHBJsnEUPY2GW0V0QV1cLJvGgAMgoCZjJiqaVYdsIWc8CM+5jEuX+CjTOcEVuwlk8*
:::
|
<?php
namespace App\Controllers;
use App\Models\ConsModel;
use App\Models\AccountModel;
use App\Models\DepModel;
use CodeIgniter\Controller;
use Google_Service_Oauth2;
class Home extends BaseController
{
    protected $loginModel;
    // BUG FIX: the constructor assigns $this->depModel / $this->consModel,
    // but the class previously declared $prodiModel / $konsModel (which were
    // never used), so the assigned properties were created dynamically
    // (deprecated since PHP 8.2). Declare the names actually used.
    protected $depModel;
    protected $consModel;
    protected $googleAPI;

    // Numeric access level => human-readable role name.
    public $level = array(
        1 => 'Super Admin',
        2 => 'Kepala Prodi',
        3 => 'Dosen Pembimbing',
        4 => 'Mahasiswa'
    );

    public function __construct()
    {
        require_once APPPATH . 'Libraries/vendor/autoload.php';
        session();
        $this->loginModel = new AccountModel();
        $this->depModel = new DepModel();
        $this->consModel = new ConsModel();
        // SECURITY NOTE(review): the OAuth client ID/secret and redirect URI
        // are hard-coded; they should be moved to environment configuration
        // (.env) rather than committed to source control.
        $this->googleAPI = new \Google_Client();
        $this->googleAPI->setClientId('774409238361-8hv321i1e7d8ptib2md8osh3epvrsbnl.apps.googleusercontent.com');
        $this->googleAPI->setClientSecret('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');
        $this->googleAPI->setRedirectUri('http://localhost/THICK/public/index.php/login');
        $this->googleAPI->addScope('email');
        $this->googleAPI->addScope('profile');
    }

    /**
     * Route the logged-in user to the dashboard matching their session level.
     * Unknown or missing levels (including the commented-out level 2) fall
     * through to the login page.
     */
    public function index()
    {
        switch (session()->get('level')) {
            case 1:
                return redirect()->to('/admin');
            // case 2:
            //     return redirect()->to('/kaprodi');
            case 3:
                return redirect()->to('/dosen');
            case 4:
                return redirect()->to('/mahasiswa');
            default:
                return redirect()->to('/login');
        }
    }

    /**
     * Destroy the session and return to the login page.
     */
    public function keluar()
    {
        session()->destroy();
        return redirect()->to('/login');
    }

    /**
     * Persist a newly registered account, log it in via session data,
     * and redirect to the appropriate dashboard.
     */
    public function register()
    {
        $this->loginModel->insert([
            'id_account' => $this->request->getVar('id_account'),
            'id_concentration' => $this->request->getVar('concentration'),
            'name' => $this->request->getVar('name'),
            'competence' => $this->request->getVar('competence'),
            'email' => $this->request->getVar('email'),
            'contact' => $this->request->getVar('contact'),
            'picture' => $this->request->getVar('picture'),
            'level' => $this->request->getVar('level')
        ]);
        session()->set([
            'id' => $this->request->getVar('id_account'),
            'name' => $this->request->getVar('name'),
            'level' => $this->request->getVar('level')
        ]);
        return redirect()->to('/');
    }

    /**
     * After Google login: show the registration form for unknown emails,
     * or start a session for accounts that already exist.
     */
    public function registration()
    {
        $account = session()->getFlashdata('account');
        // ROBUSTNESS FIX: guard against direct visits to /registration
        // without going through the Google login flow (flashdata absent).
        if (empty($account)) {
            return redirect()->to('/login');
        }
        // cek data apakah sudah ada?
        $cek = $this->loginModel->cekEmail($account['email']);
        if ($cek == 0) {
            // Accounts on the institutional hosted domain are lecturers (3);
            // everyone else registers as a student (4). 'hd' is absent for
            // plain Gmail accounts, hence the null-coalescing guard.
            if (($account['hd'] ?? '') == 'ukdc.ac.id') {
                $level = 3;
            } else {
                $level = 4;
            }
            $data = [
                'title' => 'Daftar',
                'dep' => $this->depModel->getProdi(),
                'cons' => $this->consModel->getKons(),
                'akun' => $account,
                'level' => $level
            ];
            return view('home/register', $data);
        } else {
            $raw = $this->loginModel->getEmail($account['email']);
            session()->set([
                'id' => $raw['id_account'],
                'name' => $raw['name'],
                'level' => $raw['level']
            ]);
            return redirect()->to('/');
        }
    }

    /**
     * Login entry point: with an OAuth "code" query parameter, exchange it
     * for account data and continue to registration; otherwise render the
     * login page with a Google auth URL.
     */
    public function login()
    {
        $code = $this->request->getVar('code');
        if (isset($code)) {
            $account = $this->loginGoogle($code);
            // BUG FIX: loginGoogle() now returns null on OAuth failure; the
            // old code flashed whatever came back (a RedirectResponse on the
            // error path) and sent the user to /registration, which then
            // crashed reading $account['email'].
            if ($account === null) {
                return redirect()->to('/login');
            }
            session()->setFlashdata('account', $account);
            return redirect()->to('/registration');
        } else {
            $data = [
                'title' => 'Login',
                'google' => $this->googleAPI->createAuthUrl()
            ];
            return view('home/login', $data);
        }
    }

    /**
     * Exchange an OAuth authorization code for an access token and fetch
     * the user's basic profile.
     *
     * @param string $code authorization code from Google's redirect
     * @return array|null account info array on success, null on failure
     */
    public function loginGoogle($code)
    {
        $token = $this->googleAPI->fetchAccessTokenWithAuthCode($code);
        if (!isset($token['error'])) {
            $this->googleAPI->setAccessToken($token['access_token']);
            session()->set('AccessToken', $token['access_token']);
            $googleService = new \Google_Service_Oauth2($this->googleAPI);
            $data = $googleService->userinfo->get();
            $akun = array(
                'id' => $data['id'],
                'email' => $data['email'],
                'picture' => $data['picture'],
                'hd' => $data['hd']
            );
            return $akun;
        } else {
            session()->set('pesan', 'Login google bermasalah');
            // BUG FIX: previously returned a RedirectResponse here, which the
            // caller treated as account data. Return null so login() can
            // detect and handle the failure.
            return null;
        }
    }
}
|
import {NextApiRequest, NextApiResponse} from "next";
import {Config} from "../../../../config";
import fetcher from "../../../../utils/fetcher";
import firebaseDB from "../../../../init/firebase";
// JSON payload shape returned by this API route: stockValues is a list of
// [utcTimestampMs, closingPrice] pairs in ascending date order.
export type ChartData = {
    data: {
        stockValues: Array<number[]>
    }
}
// API route handler: returns daily stock values for the requested symbol.
// On any failure (missing symbol, upstream API error) it falls back to
// randomly generated mock data so the chart still renders.
export default async (req: NextApiRequest, res: NextApiResponse<ChartData>) => {
    try {
        const symbol = String(req.query.symbol)
        if (!symbol || symbol === 'undefined') {
            throw Error('Symbol required')
        }
        const stockValues = await queryApiMarketStockValues(symbol)
        return res.status(200).json({data: {stockValues}})
    } catch (e) {
        // FIXME: For demo purpose only
        console.error(e)
        return res.status(200).json({data: {stockValues: getMockStockValues()}})
    }
}
// Builds two years (2020-2021) of fake daily closing prices as
// [utcTimestampMs, value] pairs, using a bounded random walk starting at 200.
// Each month contributes 28 days; the result is sorted oldest-first.
function getMockStockValues() {
    const totalYears = 2
    const finalYear = 2021
    const maxDiff = 10
    let walkValue = 200
    const series: Array<number[]> = []
    for (let yearOffset = 0; yearOffset < totalYears; yearOffset++) {
        const year = finalYear - yearOffset
        for (let month = 11; month >= 0; month--) {
            for (let day = 28; day > 0; day--) {
                // Random step in [-maxDiff/2, +maxDiff/2).
                walkValue += (Math.random() - 0.5) * maxDiff
                series.push([Date.UTC(year, month, day), walkValue])
            }
        }
    }
    // Generation order is newest-first; flip to chronological order.
    return series.reverse()
}
/**
 * Returns daily closing values for `symbol` as [utcTimestampMs, close] pairs.
 * Results are cached in the Firebase `markets` collection (JSON-stringified);
 * on a cache miss the Alpha Vantage daily time series is queried and cached.
 */
export async function queryApiMarketStockValues(symbol: string): Promise<Array<number[]>> {
    // Check if data exists in Firebase
    const marketRef = firebaseDB.collection('markets').doc(symbol);
    const marketDoc = await marketRef.get();
    // Check info from cache
    let stockValues = marketDoc.exists && marketDoc.data() && marketDoc.data()!.stockValues
    if (!stockValues) {
        // Get info from 3rd party
        const baseApiUrl = 'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&symbol'
        const timeSeriesUrl = `${baseApiUrl}=${symbol}&apikey=${Config.stockApi.alphavantage.key}`
        const response = await fetcher(timeSeriesUrl)
        if (!response || !response['Time Series (Daily)']) {
            return []
        }
        const daysWithValues = response['Time Series (Daily)']
        const daysKeys = Object.keys(daysWithValues) as any
        // BUG FIX: this was previously `const stockValues = ...`, which
        // shadowed the outer variable — the data was written to Firebase but
        // the function then returned the outer (falsy) value instead of the
        // freshly fetched series.
        stockValues = daysKeys
            .map((dayString: string) => {
                const dayValues = dayString.split('-')
                const year = Number(dayValues[0])
                const month = Number(dayValues[1]) - 1
                const day = Number(dayValues[2])
                if (isNaN(year) || isNaN(month) || isNaN(day)) {
                    return null
                }
                const utcDate = Date.UTC(year, month, day)
                return [utcDate, Number(daysWithValues[dayString]['4. close'])]
            })
            .filter((e: any) => !!e)
            .reverse()
        // Update Firebase entry
        await marketRef.set({stockValues: JSON.stringify(stockValues)}, {merge: true});
    } else {
        // Cache hit: stored as a JSON string, so decode it.
        stockValues = JSON.parse(stockValues)
    }
    return stockValues
}
|
#!/usr/bin/perl -w
# This file was preprocessed, do not edit!
package Debconf::FrontEnd::Kde;
use strict;
use utf8;
use Debconf::Gettext;
use Debconf::Config;
BEGIN {
eval { require QtCore4 };
die "Unable to load QtCore -- is libqtcore4-perl installed?\n" if $@;
eval { require QtGui4 };
die "Unable to load QtGui -- is libqtgui4-perl installed?\n" if $@;
}
use Debconf::FrontEnd::Kde::Wizard;
use Debconf::Log ':all';
use base qw{Debconf::FrontEnd};
use Debconf::Encoding qw(to_Unicode);
our @ARGV_KDE=();
# Initialize the frontend: set base capabilities, then verify that a Qt
# application can actually be created (i.e. the display is usable) by
# attempting it in a forked child before committing to this frontend.
sub init {
	my $this=shift;
	$this->SUPER::init(@_);
	$this->interactive(1);
	$this->cancelled(0);
	$this->createdelements([]);
	$this->dupelements([]);
	$this->capb('backup');
	$this->need_tty(0);
	if (fork) {
		wait(); # for child
		# Non-zero child exit means Qt::Application construction failed,
		# most likely because $DISPLAY is missing or unusable.
		if ($? != 0) {
			die "DISPLAY problem?\n";
		}
	}
	else {
		# Child process: constructing Qt::Application exercises the
		# display connection; exit status reports the outcome to the parent.
		$this->qtapp(Qt::Application(\@ARGV_KDE));
		exit(0); # success
	}
	$this->window_initted(0);
	$this->kde_initted(0);
}
# Create the Qt application object, once per process; subsequent calls
# are no-ops thanks to the kde_initted flag.
sub init_kde {
	my $this=shift;
	return if $this->kde_initted;
	debug frontend => "QTF: initializing app";
	$this->qtapp(Qt::Application(\@ARGV_KDE));
	$this->kde_initted(1);
}
# Lazily build the wizard window: create the Qt app if needed, size the
# window, set its title to "Debconf on <hostname>", and install the main
# frame layout. Subsequent calls are no-ops until the window is torn down.
sub init_window {
	my $this=shift;
	$this->init_kde();
	return if $this->window_initted;
	$this->{vbox} = Qt::VBoxLayout;
	debug frontend => "QTF: initializing wizard";
	$this->win(Debconf::FrontEnd::Kde::Wizard(undef,undef, $this));
	debug frontend => "QTF: setting size";
	$this->win->resize(620, 430);
	my $hostname = `hostname`;
	chomp $hostname;
	$this->hostname($hostname);
	debug frontend => "QTF: setting title";
	$this->win->setTitle(to_Unicode(sprintf(gettext("Debconf on %s"), $this->hostname)));
	debug frontend => "QTF: initializing main widget";
	$this->{toplayout} = Qt::HBoxLayout();
	$this->win->setMainFrameLayout($this->toplayout);
	# BUG FIX: a second, identical setTitle() call that followed
	# setMainFrameLayout() was redundant and has been removed.
	$this->window_initted(1);
}
# Display all pending question elements in a single wizard page and run
# the Qt event loop until the user moves forward or back.  Returns 1 on
# success, the empty string when the user backed up, and exits the whole
# process with status 1 if the dialog was cancelled.
sub go {
	my $this=shift;
	my @elements=@{$this->elements};
	$this->init_window;
	my $interactive='';
	debug frontend => "QTF: -- START ------------------";
	foreach my $element (@elements) {
		# Elements without a create() method have no GUI widget; they are
		# handled by the noninteractive fallback below.
		next unless $element->can("create");
		$element->create($this->frame);
		$interactive=1;
		debug frontend => "QTF: ADD: " . $element->question->description;
		$this->{vbox}->addWidget($element->top);
	}
	if ($interactive) {
		foreach my $element (@elements) {
			next unless $element->top;
			debug frontend => "QTF: SHOW: " . $element->question->description;
			$element->top->show;
		}
		# Wrap the stacked element widgets in a scroll area inside the
		# wizard's main frame.
		my $scroll = Qt::ScrollArea($this->win);
		my $widget = Qt::Widget($scroll);
		$widget->setLayout($this->{vbox});
		$scroll->setWidget($widget);
		$this->toplayout->addWidget($scroll);
		# The Back button is only offered when the confmodule advertised
		# the backup capability.
		if ($this->capb_backup) {
			$this->win->setBackEnabled(1);
		}
		else {
			$this->win->setBackEnabled(0);
		}
		$this->win->setNextEnabled(1);
		$this->win->show;
		debug frontend => "QTF: -- ENTER EVENTLOOP --------";
		$this->qtapp->exec;
		$this->qtapp->exit;
		debug frontend => "QTF: -- LEFT EVENTLOOP --------";
		# The window is rebuilt from scratch on the next go() call.
		$this->win->destroy();
		$this->window_initted(0);
	} else {
		# No GUI widgets were created; let each element show itself
		# noninteractively.
		foreach my $element (@elements) {
			$element->show;
		}
	}
	debug frontend => "QTF: -- END --------------------";
	if ($this->cancelled) {
		exit 1;
	}
	return '' if $this->goback;
	return 1;
}
# Begin a progress display: embed the progress bar element in the wizard
# window with both navigation buttons disabled, then pump the event loop
# once so the window paints immediately.
sub progress_start {
	my $this=shift;
	$this->init_window;
	$this->SUPER::progress_start(@_);
	my $element=$this->progress_bar;
	$this->{vbox}->addWidget($element->top);
	$element->top->show;
	# Same scroll-area embedding as in go().
	my $scroll = Qt::ScrollArea($this->win);
	my $widget = Qt::Widget($scroll);
	$widget->setLayout($this->{vbox});
	$scroll->setWidget($widget);
	$this->toplayout->addWidget($scroll);
	$this->win->setBackEnabled(0);
	$this->win->setNextEnabled(0);
	$this->win->show;
	$this->qtapp->processEvents;
}
# Update the progress bar value via the base class, then pump the Qt
# event loop so the change is painted without entering exec().
sub progress_set {
	my $this=shift;
	my $ret=$this->SUPER::progress_set(@_);
	$this->qtapp->processEvents;
	return $ret;
}
# Update the progress info text via the base class, then pump the Qt
# event loop so the change is painted without entering exec().
sub progress_info {
	my $this=shift;
	my $ret=$this->SUPER::progress_info(@_);
	$this->qtapp->processEvents;
	return $ret;
}
# Finish the progress display: let the base class clean up, close the
# window (deleting it on close), and honour a pending cancellation by
# exiting the process.
sub progress_stop {
	my $this=shift;
	my $element=$this->progress_bar;
	$this->SUPER::progress_stop(@_);
	$this->qtapp->processEvents;
	# WA_DeleteOnClose makes close() also free the window object.
	$this->win->setAttribute(Qt::WA_DeleteOnClose());
	$this->win->close;
	$this->window_initted(0);
	if ($this->cancelled) {
		exit 1;
	}
}
# Tear down the wizard window, if one still exists, when the frontend
# shuts down. A no-op when the Qt application was never created.
sub shutdown {
	my $this = shift;
	return unless $this->kde_initted;
	$this->win->destroy if $this->win;
}
1
|
// Copyright (c) 2018, Gregor Riepl <onitake@gmail.com>
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other materials
// provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package kvl
import (
"time"
)
const (
	// filterListAllocation is the default capacity of the filter list and
	// the increment by which it grows when full.
	filterListAllocation = 10
)
// AddTimeFilter adds StdTimeKey, containing the current local time as
// a time.Time object.
// If TimeFormat is set, the current time will be formatted according to
// this format.
// If StdTimeKey is already present and a string, it will not be modified.
// If it is present but a time.Time object and TimeFormat is set, it
// will be formatted according to this format.
// If it is present but of a different type, it will be treated like it
// wasn't present.
type AddTimeFilter struct {
	TimeFormat string
}

// Printd fills in or reformats the StdTimeKey entry of kv according to
// the rules documented on AddTimeFilter.
func (filter *AddTimeFilter) Printd(kv map[string]interface{}) {
	// An existing string timestamp is left untouched.
	if _, isString := kv[StdTimeKey].(string); isString {
		return
	}
	// An existing time.Time is only rewritten when a format is configured.
	if t, isTime := kv[StdTimeKey].(time.Time); isTime {
		if filter.TimeFormat != "" {
			kv[StdTimeKey] = t.Format(filter.TimeFormat)
		}
		return
	}
	// Missing (or unrecognized) value: insert the current time, formatted
	// when a format is configured.
	if filter.TimeFormat != "" {
		kv[StdTimeKey] = time.Now().Format(filter.TimeFormat)
	} else {
		kv[StdTimeKey] = time.Now()
	}
}
// MergeFilter merges a constant dictionary with anything that is being logged.
// Existing values are not replaced.
//
// Note: Go does not actually support constant dictionaries or generic
// value types. Do NOT do deep modifications of values.
type MergeFilter struct {
Dict map[string]interface{}
}
func (filter *MergeFilter) Printd(kv map[string]interface{}) {
for k, v := range filter.Dict {
if _, ok := kv[k]; !ok {
kv[k] = v
}
}
}
// MultiFilter applies a list of filters in sequence, then sends the output
// to a Logger (or another filter).
type MultiFilter struct {
	Filters []Filter
	Logger  Filter
}

// Printd runs kv through every filter in order, then forwards the result
// to the Logger when one is set.
func (filter *MultiFilter) Printd(kv map[string]interface{}) {
	for _, f := range filter.Filters {
		f.Printd(kv)
	}
	// BUG FIX: the documented contract says the output is sent to Logger,
	// but it was never invoked. Forward to it now, guarding against a
	// nil Logger for backward compatibility.
	if filter.Logger != nil {
		filter.Logger.Printd(kv)
	}
}
// AddFilter appends a filter to the end of the list.
func (filter *MultiFilter) AddFilter(f Filter) {
	// append grows the backing array automatically; the previous manual
	// reallocation (copying into a slice enlarged by filterListAllocation)
	// was redundant and has been removed.
	filter.Filters = append(filter.Filters, f)
}
// ClearFilters clears the filter list.
// The old backing array is dropped and a fresh one is allocated at the
// default capacity, so previously held filters become collectable.
func (filter *MultiFilter) ClearFilters() {
	// allocate with default size
	filter.Filters = make([]Filter, 0, filterListAllocation)
}
|
/*eslint-env node, es6*/
/*eslint no-unused-vars:1*/
/*eslint no-console:0*/

// Course navigation tab layout: each entry gives a tab id, its position in
// the navigation menu, and whether it is hidden. Visible tabs occupy
// positions 2-6; everything from position 7 onward is hidden, including the
// numbered external tools at the end.
module.exports = [{
    "id": "announcements",
    "position": 2,
    "hidden": false,
}, {
    "id": "syllabus",
    "position": 3,
    "hidden": false,
}, {
    "id": "modules",
    "position": 4,
    "hidden": false,
}, {
    "id": "grades",
    "position": 5,
    "hidden": false,
}, {
    "id": "people",
    "position": 6,
    "hidden": false,
}, {
    "id": "pages",
    "position": 7,
    "hidden": true,
}, {
    "id": "files",
    "position": 8,
    "hidden": true,
}, {
    "id": "outcomes",
    "position": 9,
    "hidden": true,
}, {
    "id": "assignments",
    "position": 10,
    "hidden": true,
}, {
    "id": "quizzes",
    "position": 11,
    "hidden": true,
}, {
    "id": "discussions",
    "position": 12,
    "hidden": true,
}, {
    "id": "conferences",
    "position": 13,
    "hidden": true,
}, {
    "id": "collaborations",
    "position": 14,
    "hidden": true,
}, {
    "id": "context_external_tool_7",
    "position": 15,
    "hidden": true,
}, {
    "id": "context_external_tool_2",
    "position": 16,
    "hidden": true,
}, {
    "id": "context_external_tool_1",
    "position": 17,
    "hidden": true,
}, {
    "id": "context_external_tool_9",
    "position": 18,
    "hidden": true,
}, {
    "id": "context_external_tool_103",
    "position": 19,
    "hidden": true,
}, {
    "id": "context_external_tool_132",
    "position": 20,
    "hidden": true,
}];
|
# irc-message-stream
> A tiny Stream interface for [irc-message](https://github.com/expr/irc-message).
## Installation
`npm install irc-message-stream`
## Usage
```JavaScript
var net = require("net"),
MessageStream = require("irc-message-stream");
var messageStream = new MessageStream;
messageStream.on("line", function(line) {
console.log("Got raw line: " + line);
});
messageStream.on("data", function(message) {
console.log("Got parsed message: " + JSON.stringify(message));
});
var freenode_conn = net.connect(6667, "irc.freenode.net");
freenode_conn.pipe(messageStream);
```
|
# Mixin that pulls the including entity downward on each update tick.
module AffectedByGravity
  # Vertical distance travelled per update while falling.
  GravityFallSpeed = 5

  # Move down by GravityFallSpeed unless a tile directly below (within
  # the fall distance) blocks the drop.
  def apply_gravity
    @y += GravityFallSpeed if !tile_below?(GravityFallSpeed)
  end
end
|
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/browser/ui/webui/options2/chromeos/pointer_handler.h"
#include "base/basictypes.h"
#include "base/utf_string_conversions.h"
#include "base/values.h"
#include "chrome/common/url_constants.h"
#include "content/public/browser/web_ui.h"
#include "grit/generated_resources.h"
#include "ui/base/l10n/l10n_util.h"
namespace chromeos {
namespace options2 {
// Both device flags start false; they are flipped by TouchpadExists() /
// MouseExists() once device presence is reported.
PointerHandler::PointerHandler()
    : has_touchpad_(false),
      has_mouse_(false) {
}

PointerHandler::~PointerHandler() {
}
// Fills |localized_strings| with all strings used by the pointer settings
// overlay. The "naturalScroll" entry is built separately because its
// description embeds the natural-scrolling help URL.
void PointerHandler::GetLocalizedValues(DictionaryValue* localized_strings) {
  DCHECK(localized_strings);

  // Static table of simple (key, resource id) string pairs.
  static OptionsStringResource resources[] = {
    { "pointerOverlayTitleTouchpadOnly",
      IDS_OPTIONS_POINTER_TOUCHPAD_OVERLAY_TITLE },
    { "pointerOverlayTitleMouseOnly",
      IDS_OPTIONS_POINTER_MOUSE_OVERLAY_TITLE },
    { "pointerOverlayTitleTouchpadMouse",
      IDS_OPTIONS_POINTER_TOUCHPAD_MOUSE_OVERLAY_TITLE },
    { "pointerOverlaySectionTitleTouchpad",
      IDS_OPTIONS_POINTER_OVERLAY_SECTION_TITLE_TOUCHPAD },
    { "pointerOverlaySectionTitleMouse",
      IDS_OPTIONS_POINTER_OVERLAY_SECTION_TITLE_MOUSE },
    { "enableTapToClick",
      IDS_OPTIONS_SETTINGS_TAP_TO_CLICK_ENABLED_DESCRIPTION },
    { "primaryMouseRight",
      IDS_OPTIONS_SETTINGS_PRIMARY_MOUSE_RIGHT_DESCRIPTION },
  };

  localized_strings->SetString("naturalScroll",
      l10n_util::GetStringFUTF16(
          IDS_OPTIONS_SETTINGS_NATURAL_SCROLL_DESCRIPTION,
          ASCIIToUTF16(chrome::kNaturalScrollHelpURL)));

  RegisterStrings(localized_strings, resources, arraysize(resources));
}
void PointerHandler::TouchpadExists(bool exists) {
has_touchpad_ = exists;
base::FundamentalValue val(exists);
web_ui()->CallJavascriptFunction("PointerOverlay.showTouchpadControls", val);
UpdateTitle();
}
void PointerHandler::MouseExists(bool exists) {
has_mouse_ = exists;
base::FundamentalValue val(exists);
web_ui()->CallJavascriptFunction("PointerOverlay.showMouseControls", val);
UpdateTitle();
}
void PointerHandler::UpdateTitle() {
std::string label;
if (has_touchpad_) {
label = has_mouse_ ? "pointerOverlayTitleTouchpadMouse" :
"pointerOverlayTitleTouchpadOnly";
} else {
label = has_mouse_ ? "pointerOverlayTitleMouseOnly" : "";
}
base::StringValue val(label);
web_ui()->CallJavascriptFunction("PointerOverlay.setTitle", val);
}
} // namespace options2
} // namespace chromeos
|
/*
*
* Copyright(c) 2018 VerstSiu
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.ijoic.akka.websocket
import com.ijoic.akka.websocket.client.*
import com.ijoic.akka.websocket.options.WrapPusherOptions
import com.ijoic.akka.websocket.pusher.AddSubscribe
import com.ijoic.akka.websocket.pusher.PusherResponse
import com.ijoic.akka.websocket.pusher.RemoveSubscribe
import com.pusher.client.Pusher
import com.pusher.client.channel.Channel
import com.pusher.client.channel.ChannelEventListener
import com.pusher.client.channel.SubscriptionEventListener
import com.pusher.client.connection.ConnectionEventListener
import com.pusher.client.connection.ConnectionState
import com.pusher.client.connection.ConnectionStateChange
/**
* Pusher socket client
*
* @author verstsiu created at 2018-11-29 11:30
*/
class PusherSocketClient : SocketClient, ConnectionEventListener, ChannelEventListener {
  // Active pusher connection; null before connect() and after release().
  private var pusher: Pusher? = null

  // Options used to build the current pusher instance; compared on
  // reconnect to decide whether the existing connection can be reused.
  private var options: WrapPusherOptions? = null

  // Destination for all socket messages emitted by this client.
  private var listener: ((SocketMessage) -> Unit)? = null

  // Channels tracked for this client, keyed by channel name.
  // NOTE(review): nothing in this class currently populates the map;
  // it is only cleared. Kept for interface stability.
  private val channelMap = mutableMapOf<String, Channel>()

  override fun connect(options: ClientOptions, listener: (SocketMessage) -> Unit) {
    if (options !is WrapPusherOptions) {
      throw IllegalArgumentException("invalid options: expected WrapPusherOptions, found $options")
    }
    this.listener = listener

    val oldPusher = this.pusher
    val oldOptions = this.options

    if (oldPusher != null) {
      if (options == oldOptions) {
        // Same configuration: reuse the existing pusher instance.
        oldPusher.connect(this)
        return
      } else {
        // Configuration changed: tear down the old connection first.
        oldPusher.disconnect()
        channelMap.clear()
        this.pusher = null
      }
    }
    val pusher = Pusher(options.appKey, options.options)
    this.pusher = pusher
    // BUG FIX: this.options was never assigned, so the reuse check above
    // (options == oldOptions) could never match an existing connection and
    // every connect() rebuilt the pusher from scratch.
    this.options = options
    pusher.connect(this)
  }

  override fun disconnect() {
    pusher?.disconnect()
  }

  override fun release() {
    // Drop every reference so the instance can be garbage collected;
    // options is also reset so a later connect() starts fresh.
    this.pusher = null
    this.options = null
    this.listener = null
    channelMap.clear()
  }

  override fun send(message: Any) {
    val pusher = this.pusher ?: return

    when(message) {
      is AddSubscribe -> pusher.subscribe(message.channel, this, message.event)
      is RemoveSubscribe -> pusher.getChannel(message.channel)?.unbind(message.event, this)
    }
  }

  override fun onConnectionStateChange(change: ConnectionStateChange?) {
    change ?: return
    val currentState = change.currentState ?: return

    // Only report real transitions; intermediate states are ignored.
    if (change.previousState != currentState) {
      when(currentState) {
        ConnectionState.CONNECTED -> post(ConnectionCompleted())
        ConnectionState.DISCONNECTED -> post(ConnectionClosed())
        ConnectionState.ALL,
        ConnectionState.CONNECTING,
        ConnectionState.DISCONNECTING,
        ConnectionState.RECONNECTING -> {
          // do nothing
        }
      }
    }
  }

  override fun onError(message: String?, code: String?, e: java.lang.Exception?) {
    // NOTE(review): errors without an exception are treated as message-level
    // errors, errors with an exception as connection-level — confirm this
    // matches the intended MessageError/ConnectionError semantics.
    if (e == null) {
      post(MessageError(code, message, e))
    } else {
      post(ConnectionError(code, message, e))
    }
  }

  override fun onEvent(channelName: String?, eventName: String?, data: String?) {
    if (channelName == null || eventName == null || data == null) {
      return
    }
    post(PusherResponse(channelName, eventName, data))
  }

  override fun onSubscriptionSucceeded(channelName: String?) {
    // do nothing
  }

  /**
   * Post socket [message] to the registered listener, if any.
   */
  private fun post(message: SocketMessage) {
    listener?.invoke(message)
  }
}
|
from clld.web.maps import LanguageMap as BaseLanguageMap
from clld.web.maps import ParameterMap, Map, FilterLegend
class LanguageMap(BaseLanguageMap):
    """small map on contribution detail page

    The map is drawn for the contribution's associated variety rather
    than for the contribution object itself.
    """
    def __init__(self, ctx, req, eid='map'):
        # ctx is a contribution; hand its variety to the base map.
        super(LanguageMap, self).__init__(ctx.variety, req, eid=eid)
class FeatureMap(ParameterMap):
    """Map for a single feature (parameter), extended with a value-type
    filter legend driven by the ``EWAVE.getType`` JS helper.

    ``col``/``dt`` identify the datatable column the legend filters on.
    """
    def __init__(self, ctx, req, eid='map', col=None, dt=None):
        self.col, self.dt = col, dt
        # Consistency: use super() like the sibling map classes instead of
        # calling ParameterMap.__init__ directly.
        super(FeatureMap, self).__init__(ctx, req, eid=eid)

    def get_legends(self):
        # Keep all legends from the base class, then append the filter.
        for legend in super(FeatureMap, self).get_legends():
            yield legend
        yield FilterLegend(self, 'EWAVE.getType', col=self.col, dt=self.dt)
class VarietiesMap(Map):
    """Varieties overview map extended with a value-type filter legend."""

    def __init__(self, ctx, req, eid='map', col=None, dt=None):
        # Column and datatable passed through to the filter legend below.
        self.col = col
        self.dt = dt
        Map.__init__(self, ctx, req, eid=eid)

    def get_legends(self):
        # Emit the standard legends first, then append our filter legend.
        for legend in Map.get_legends(self):
            yield legend
        yield FilterLegend(self, 'EWAVE.getType', col=self.col, dt=self.dt)
def includeme(config):
    """Register the app's custom map components with the clld configurator."""
    config.register_map('contribution', LanguageMap)
    config.register_map('parameter', FeatureMap)
    config.register_map('contributions', VarietiesMap)
|
package cn.navigational.dbfx.kit;
import cn.navigational.dbfx.kit.enums.Clients;
import cn.navigational.dbfx.kit.ex.NotSupportException;
import io.vertx.core.Vertx;
import io.vertx.mysqlclient.MySQLConnectOptions;
import io.vertx.mysqlclient.MySQLPool;
import io.vertx.pgclient.PgConnectOptions;
import io.vertx.pgclient.PgPool;
import io.vertx.sqlclient.Pool;
import io.vertx.sqlclient.PoolOptions;
import io.vertx.sqlclient.SqlClient;
import io.vertx.sqlclient.SqlConnectOptions;
/**
 * Factory helpers for creating Vert.x reactive SQL clients and connection
 * options for the supported database types (MySQL and PostgreSQL).
 *
 * @author yangkui
 * @since 1.0
 */
public interface SqlClientFactory {
    /**
     * Default pool options: a single connection with a wait queue bounded at 10.
     */
    PoolOptions DEFAULT_POOL_OPTIONS = new PoolOptions().setMaxSize(1).setMaxWaitQueueSize(10);

    /**
     * Create a pooled sql client.
     *
     * @param vertx          {@link Vertx} instance
     * @param cl             Current support sql client type
     * @param connectOptions Sql connection options
     * @param poolOptions    Connection pool options
     * @return the connection {@link Pool} for the given client type
     * @throws NotSupportException if {@code cl} is neither MYSQL nor POSTGRESQL
     * @author YangKui
     * @since 1.0
     */
    static Pool createClient(Vertx vertx, SqlConnectOptions connectOptions, PoolOptions poolOptions, Clients cl) {
        final Pool client;
        if (cl == Clients.MYSQL) {
            client = MySQLPool.pool(vertx, (MySQLConnectOptions) connectOptions, poolOptions);
        } else if (cl == Clients.POSTGRESQL) {
            client = PgPool.pool(vertx, (PgConnectOptions) connectOptions, poolOptions);
        } else {
            throw new NotSupportException("Target database [" + cl + "] not supported.");
        }
        return client;
    }

    /**
     * Create a sql client using {@link #DEFAULT_POOL_OPTIONS}.
     *
     * @param vertx          vertx instance
     * @param connectOptions Connection options
     * @param cl             {@link Clients}
     * @return the sql client for the given client type
     * @throws NotSupportException if {@code cl} is neither MYSQL nor POSTGRESQL
     */
    static SqlClient createClient(Vertx vertx, SqlConnectOptions connectOptions, Clients cl) {
        // Delegate to the pooled factory above instead of duplicating the
        // per-database dispatch (Pool is a SqlClient).
        return createClient(vertx, connectOptions, DEFAULT_POOL_OPTIONS, cl);
    }

    /**
     * According {@link Clients} to create {@link SqlConnectOptions} object
     *
     * @param cl Target cl
     * @return {@link SqlConnectOptions}
     * @throws NotSupportException if {@code cl} is neither MYSQL nor POSTGRESQL
     */
    static SqlConnectOptions createConnectionOptions(Clients cl) {
        final SqlConnectOptions options;
        if (cl == Clients.MYSQL) {
            options = new MySQLConnectOptions();
        } else if (cl == Clients.POSTGRESQL) {
            options = new PgConnectOptions();
        } else {
            throw new NotSupportException("Target database [" + cl + "] not supported.");
        }
        return options;
    }
}
|
using System.Linq;
using System.Web.Mvc;
using Casper.Mvc.Models;
namespace Casper.Mvc.Controllers
{
//Disclaimer, this is DEMO code and should not be used in production!
// If you do please do not accreditate me
// No honestly don't...
public class AccountController : Controller
{
    // Renders the login form.
    public ActionResult Index()
    {
        return View();
    }

    // Handles login. Only two hard-coded demo accounts exist; any other
    // username produces a validation error on the Username field.
    [HttpPost]
    public ActionResult Index(LoginViewModel model)
    {
        if (!ModelState.IsValid)
        {
            return View(model);
        }

        if (model.Username == "test1@test.com")
        {
            return RedirectToAction("Search", new { id = 1 });
        }

        if (model.Username == "test2@test.com")
        {
            return RedirectToAction("Search", new { id = 2 });
        }

        ModelState.AddModelError("Username", "User could not be found");
        return View(model);
    }

    // Renders the search form scoped to the given owner id.
    public ActionResult Search(int id)
    {
        return View(new SearchViewModel { Id = id });
    }

    // Runs the search: name-contains match restricted to the owner's rows.
    // NOTE(review): assumes model.Query is non-null when the model is valid.
    [HttpPost]
    public ActionResult Search(SearchViewModel model)
    {
        if (ModelState.IsValid)
        {
            model.Results = TestData.Fill()
                .Where(w => w.Name.Contains(model.Query) && w.OwnerId == model.Id)
                .ToList();
        }

        return View(model);
    }
}
}
|
from microbit import *
# Fixture data: dict and list literals deliberately spread over multiple
# lines (this file appears to be a parser/minifier test fixture — TODO confirm).
obj = {
    "key": "val"
}
arr = [
    'a',
    'b'
]
def show_num(num):
    """Render the low 4 bits of ``num`` as pixels on row 2 of the display.

    Bit 3 (0b1000) lights column 0 and bit 0 lights column 3, i.e. most
    significant bit on the left. Bits above the low nibble are ignored.
    """
    # Always-true condition spanning a line continuation — appears to be a
    # deliberate fixture exercising `\` handling; prints 'Hello' every call.
    if 1 + 1 == 2 and 2 + 2 == 4 or 3 + 3 == 6 \
            and 3 - 3 == 0 and 'this is nonsense' == 'this is nonsense':
        print('Hello')
    # Multi-line string statement; unused at runtime, kept verbatim.
    text = """
This should survive
"""
    i = Image()
    i.fill(0)
    # Light one pixel (value 9) on row 2 for each set bit of the low nibble.
    if num & 0b00001000:
        i.set_pixel(0, 2, 9)
    if num & 0b00000100:
        i.set_pixel(1, 2, 9)
    if num & 0b00000010:
        i.set_pixel(2, 2, 9)
    if num & 0b00000001:
        i.set_pixel(3, 2, 9)
    display.show(i)


# 12 == 0b1100: lights the two leftmost pixels of row 2.
show_num(12)
|
#coding=utf-8
import sys
import os

# Make ../src importable so the shared "common" package resolves.
script_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(script_dir + "/../src")

from common.zk import *
from common.util import *

zk = ZK(CONF.zk_address)

# Start from a clean tree: drop any pre-existing root node first.
root = zk.get_node(CONF.root)
if root:
    root.delete()

zk.create_node(CONF.task_path)

# Host registry: a "list" child plus a sequence counter initialised to "1".
host = zk.create_node(CONF.host_path)
host.add_child("list")
last_seq = host.add_child("lastSeq")
last_seq.set_value("1")
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
public class Backdrop : MonoBehaviour
{
    // Prefab instantiated once per grid cell.
    public GameObject prefab = null;
    // Grid dimensions (number of cells along each axis).
    public int resolutionX = 10;
    public int resolutionY = 10;
    // Spacing between adjacent cells.
    public float size = 5f;

    private void Awake()
    {
        // Instantiate a resolutionX x resolutionY grid of prefabs, roughly
        // centred on this transform, all at this object's z depth and
        // parented under this transform.
        for (int y = 0; y < resolutionY; y++)
            for (int x = 0; x < resolutionX; x++)
            {
                float xx = (float)(x - (resolutionX / 2)) * size;
                float yy = (float)(y - (resolutionY / 2)) * size;
                Instantiate(prefab, new Vector3(xx, yy, transform.position.z), Quaternion.identity, transform);
            }
    }

    private void OnDrawGizmosSelected()
    {
        // Editor-only: draws a yellow rectangle outlining the grid area.
        // NOTE(review): the corner math mixes Vector3.left/down with negated
        // magnitudes (e.g. Vector3.left * halfX * -sizeX), divides size by
        // localScale, AND draws in localToWorldMatrix space (which reapplies
        // the scale) — verify the rectangle actually matches the area the
        // prefabs cover in Awake().
        int halfX = resolutionX / 2;
        int halfY = resolutionY / 2;
        float sizeX = size / transform.localScale.x;
        float sizeY = size / transform.localScale.y;
        Vector3 lowerLeft = Vector3.left * halfX * -sizeX + Vector3.down * halfY * -sizeY;
        Vector3 upperLeft = Vector3.left * halfX * -sizeX + Vector3.down * halfY * sizeY;
        Vector3 lowerRight= Vector3.left * halfX * sizeX + Vector3.down * halfY * -sizeY;
        Vector3 upperRight= Vector3.left * halfX * sizeX + Vector3.down * halfY * sizeY;
        Gizmos.color = Color.yellow;
        Gizmos.matrix = transform.localToWorldMatrix;
        Gizmos.DrawLine(lowerLeft, upperLeft);
        Gizmos.DrawLine(upperLeft, upperRight);
        Gizmos.DrawLine(upperRight, lowerRight);
        Gizmos.DrawLine(lowerRight, lowerLeft);
    }
}
|
using System.Collections.Generic;
using System.Net.Mime;
using System.Threading;
using System.Threading.Tasks;
using API.Routes;
using API.Services.Interfaces;
using Ardalis.ApiEndpoints;
using Microsoft.AspNetCore.Mvc;
using Swashbuckle.AspNetCore.Annotations;
namespace API.Endpoints.Blobs;
[Route(BlobRoutes.List)]
public class List : BaseAsyncEndpoint
    .WithRequest<string>
    .WithResponse<IAsyncEnumerable<string>>
{
    private readonly IBlobService _blobService;

    public List(IBlobService blobService) => _blobService = blobService;

    /// <summary>
    /// All blob name
    /// </summary>
    /// <remarks>
    /// Retrieves all blob name from blob storage.
    /// </remarks>
    /// <param name="container">The container name.</param>
    /// <param name="cancellationToken"><see cref="CancellationToken"/> instance.</param>
    /// <response code="200">List of blob names</response>
    [HttpGet]
    [SwaggerOperation(Tags = new []{"Blob"})]
    [Produces(MediaTypeNames.Application.Json)]
    [Consumes(MediaTypeNames.Application.Json)]
    public override Task<ActionResult<IAsyncEnumerable<string>>> HandleAsync([FromQuery] string container,
        CancellationToken cancellationToken = new()) =>
        // GetListAsync returns a lazy IAsyncEnumerable that is enumerated by
        // the framework during response serialization — nothing here needs
        // awaiting. Dropping the previous `async` modifier (which had no
        // `await` and triggered warning CS1998) and returning a completed
        // task preserves the override signature and behavior.
        Task.FromResult<ActionResult<IAsyncEnumerable<string>>>(
            Ok(_blobService.GetListAsync(container, cancellationToken)));
}
|
# frozen_string_literal: true
# Verifies that a repository-type plugin registered via ROM.plugins is
# applied to repositories that opt in with `use`.
RSpec.describe "repository plugin" do
  include_context "repository / database"
  include_context "relations"
  include_context "seeds"
  include_context "repo"

  # Plugin under test: prepends a module so every relation's dataset is
  # rewritten with an always-false predicate (the backtick inside the
  # `where` block is Sequel's virtual-row literal-SQL syntax, not a shell
  # call), making every query return no rows.
  let(:nullify_plugin) do
    Module.new do
      def self.apply(target, **)
        target.prepend(self)
      end

      def set_relation(*)
        super.where { `1 = 0` }
      end
    end
  end

  before do
    # Capture the let-binding in a local so it is visible inside the
    # ROM.plugins DSL block, which is evaluated in a different context.
    plugin = nullify_plugin
    ROM.plugins do
      register :nullify_datasets, plugin, type: :repository
    end
  end

  # Repository subclass opting into the plugin via `use`.
  let(:user_repo) do
    Class.new(repo_class) { use :nullify_datasets }.new(rom)
  end

  # FIXME: this is flaky
  xit "always returns empty result set" do
    expect(user_repo.all_users.to_a).to eql([])
  end
end
|
import {IsEmail} from '../../../../src/decorators/validate/IsEmail';
import {Required} from '../../../../src';
/** Fixture: fields whose e-mail format is validated via decorator metadata. */
export class ValidEMail {
  // detected by reflection
  @IsEmail()
  mail: string;

  // Custom message template; %propertyName is interpolated by the validator.
  @IsEmail({message: 'something else should happen for field %propertyName'})
  mailOtherMessage: string;
}
/** Fixture: a field that is both required and must be a valid e-mail. */
export class ValidEMailRequired {
  @Required()
  @IsEmail()
  mailOtherMessage: string;
}
|
import type { DefaultTheme } from 'styled-components';
import {
defaultLightTheme,
LIGHT_GRAY_300,
LIGHT_GRAY_600,
BLACK,
} from '../lightThemeDefault';
// Green accent palette (dark -> light -> shaded) plus a near-black used as
// the final chart series color.
const SECONDARY_200 = '#285e28';
const SECONDARY_300 = 'rgba(75, 219, 75, 0.1)';
const SECONDARY_400 = '#4BDB4B';
const SECONDARY_500 = '#36a337';
const SECONDARY_600 = '#2d832d';
const SECONDARY_700 = '#246d25';
const SECONDARY_800 = '#174f1a';
const SECONDARY_900 = '#0e3311';
const GRAY_400 = '#101010';

// Light theme: the shared default with this app's green accent applied.
export const lightTheme: DefaultTheme = {
  ...defaultLightTheme,
  textColor: LIGHT_GRAY_300,
  dimTextColor: LIGHT_GRAY_600,
  colors: {
    ...defaultLightTheme.colors,
    // NOTE(review): all five accent roles share the same green — confirm
    // primaryDark/secondaryDark should not be darker shades.
    positive: SECONDARY_400,
    primary: SECONDARY_400,
    primaryDark: SECONDARY_400,
    secondary: SECONDARY_400,
    secondaryDark: SECONDARY_400,
  },
  // Ordered chart series colors: brightest green first, near-black last.
  chart: [
    SECONDARY_400,
    SECONDARY_500,
    SECONDARY_600,
    SECONDARY_700,
    SECONDARY_800,
    SECONDARY_900,
    GRAY_400,
  ],
  header: {
    backgroundColor: BLACK,
    textColor: SECONDARY_400,
  },
  messageBox: {
    borderColor: SECONDARY_400,
    backgroundColor: SECONDARY_300,
    textColor: SECONDARY_200,
    linkColor: SECONDARY_400,
  },
  // errorTextColor: BASE_RED_200,
  // positiveTextColor: SECONDARY_400,
  // pointColor: SECONDARY_400,
};
|
import { isFunction } from '@redesign-system/theme'
import { FormRadioInterface } from './formRadio.types'
/**
 * Builds the base style object for the FormRadio component.
 *
 * Merges `position: relative` with the theme's `FormRadio` overrides; the
 * override may be a plain object or a function of the props, and only its
 * `default` slot is applied here.
 */
export function formRadioTheme(props: FormRadioInterface) {
  const FormRadio = props.theme.FormRadio

  const overrides = isFunction(FormRadio)
    ? FormRadio(props)?.default
    : FormRadio?.default

  return {
    position: 'relative',
    ...overrides,
  }
}
|
{{-- "About me" section: photo, heading/caption, and rich-text bio, all
     driven by the ACF `personal_info` field group.
     NOTE(review): get_field() returns false when the field group is absent;
     the array accesses below would then error — confirm the field is always
     populated for pages rendering this partial. --}}
@php
$aboutme = get_field('personal_info');
@endphp
<section class="section section-about">
    <div class="container">
        <div class="row justify-content-between">
            <div class="col-lg-4 mb-3 mb-lg-0">
                <div class="about-img-frame">
                    <div class="about-img">
                        @if ($aboutme['personal_img'])
                            <img src="{{ $aboutme['personal_img'] }}" alt="foto">
                        @endif
                    </div>
                </div>
            </div>
            <div class="col-lg-7">
                <div class="about-description">
                    <p class="about-description-heading">
                        {!! $aboutme['personal_title'] !!}<br>
                        {!! $aboutme['personal_caption'] !!}
                    </p>
                    <div class="about-description-text">
                        {!! $aboutme['personal_content'] !!}
                    </div>
                </div>
            </div>
        </div>
    </div>
</section>
|
package net.dinkla.raytracer.math
import org.junit.jupiter.api.Test
import org.junit.jupiter.api.Assertions.*
internal class Point2DTest {

    // Shared fixture: the point (1, 2).
    private val point = Point2D(1.0, 2.0)

    @Test
    fun `add a vector`() {
        // Adding the inverse offset brings the point back to the origin.
        assertEquals(Point2D.ORIGIN, point + Vector2D(-1.0, -2.0))
    }

    @Test
    fun `subtract a vector`() {
        // Subtracting the point's own coordinates yields the origin.
        assertEquals(Point2D.ORIGIN, point - Vector2D(1.0, 2.0))
    }

    @Test
    fun equals() {
        // Equal only to a point with exactly the same coordinates.
        assertEquals(point, Point2D(1.0, 2.0))
        assertNotEquals(point, Point2D(1.0, 1.98))
        assertNotEquals(point, Point2D(0.98, 2.0))
        // Never equal to values of other types, nor to null.
        assertNotEquals(point, 3.0)
        assertNotEquals(point, null)
    }

    @Test
    fun unaryMinus() {
        // Negation flips the sign of both coordinates.
        assertEquals(-point, Point2D(-1.0, -2.0))
    }
}
|
package chapter7
import io.reactivex.Observable
/**
 * Demonstrates a cold Observable: the create block re-runs for every
 * subscription, so S1 and S2 each receive their own 1..10_000 sequence.
 */
fun main(args: Array<String>) {
    val publisher = Observable.create<Int> { emitter ->
        (1..10_000).forEach(emitter::onNext)
    }

    publisher.subscribe { println("S1 $it") }

    publisher.subscribe { println("S2 $it") }
}
|
package main
import (
"flag"
"github.com/serverless-ss/slss"
log "github.com/sirupsen/logrus"
)
var (
configFilePath string
)
// main loads the slss and Lambda-function configs, logs the effective
// settings, ensures the AWS role in the project config, and bootstraps the
// shadowsocks-over-Lambda setup.
func main() {
	flag.StringVar(&configFilePath, "c", "./config.json", "path to the configuration file")
	flag.Parse()

	config, err := slss.LoadConfig(configFilePath)
	if err != nil {
		slss.PrintErrorAndExit(err)
	}

	funcConfig, err := slss.LoadFuncConfig("./lambda/functions/slss/function.json")
	if err != nil {
		slss.PrintErrorAndExit(err)
	}

	// NOTE(review): this logs AWS credentials and the shadowsocks password in
	// plain text — consider redacting these fields outside local debugging.
	log.WithFields(log.Fields{
		"AWS.access_key_id": config.AWS.AccessKeyID,
		// Bug fix: this previously logged AccessKeyID under the
		// secret_access_key label (copy-paste error). TODO(review): confirm
		// the config struct field is named SecretAccessKey.
		"AWS.secret_access_key":  config.AWS.SecretAccessKey,
		"AWS.region":             config.AWS.Region,
		"shadowsocks.local_port": config.Shadowsocks.LocalPort,
		"shadowsocks.timeout":    config.Shadowsocks.Timeout,
		"shadowsocks.method":     config.Shadowsocks.Method,
		"shadowsocks.password":   config.Shadowsocks.Password,
		"ngrok.auth_token":       config.Ngrok.AuthToken,
	}).Info("[slss] Config:")

	log.WithFields(log.Fields{
		"name":        funcConfig.Name,
		"description": funcConfig.Description,
		"runtime":     funcConfig.Runtime,
		"memory":      funcConfig.Memory,
		"timeout":     funcConfig.Timeout,
	}).Info("[slss] Lambda function config:")

	if err := slss.UpdateProjectConfigRole(config.AWS.Role); err != nil {
		slss.PrintErrorAndExit(err)
	}

	slss.Init(config, funcConfig)
}
|
###
# Copyright (c) 2015, Upnext Technologies Sp. z o.o.
# All rights reserved.
#
# This source code is licensed under the BSD 3-Clause License found in the
# LICENSE.txt file in the root directory of this source tree.
###
module KontaktIo
  module Resource
    #
    # Kontakt.io Venue object
    #
    class Venue < Base
      attribute :image, String # "http://kontakt.io/venue/2fca7d99-07a7-46d7-91c0-70e93526bdfb/image",
      attribute :lng, String # null,
      attribute :access, String # "OWNER",
      attribute :devices_count, Integer # 4,
      attribute :id, String # "2fca7d99-07a7-46d7-91c0-70e93526bdfb",
      attribute :description, String # "Main entrance",
      attribute :manager_id, String # "65a0c6d5-9d3b-422e-a4de-81308ea7b876",
      attribute :name, String # "Entrance 1",
      attribute :priv, Boolean # true,
      attribute :coverType, String # null,
      attribute :lat, String # null,

      #
      # NOTE(review): this method reads +unique_id+ and +proximity_id+,
      # neither of which is declared as an attribute of Venue — the method
      # and the Beacon-oriented doc below look copy-pasted from the Beacon
      # resource. Confirm these accessors exist (e.g. on Base) or give Venue
      # its own equality.
      #
      # Compares +KontaktIo::Resource::Beacon+ with AR Beacon based on:
      # * +unique_id+ field from Kontakt.io, or
      # * +proximity,major,minor+ fields compacted to +proximity_id+
      #
      # ==== Parameters
      #
      # +* +db_beacon+ - Beacon from database to compare with
      #
      def ==(local)
        unique_id == local.unique_id ||
          proximity_id.to_s == local.proximity_id.to_s
      end
    end
  end
end
|
namespace codingfreaks.blogsamples.MvvmSample.Ui.TestConsole
{
using System;
using System.ComponentModel;
using System.Linq;
internal class Program
{
    #region methods

    private static void Main(string[] args)
    {
        TestPropertyChanged();
        Console.ReadKey();
    }

    /// <summary>
    /// Exercises the <see cref="INotifyPropertyChanged" /> implementation of <c>TestClass</c>.
    /// </summary>
    private static void TestPropertyChanged()
    {
        var sample = new TestClass();
        sample.PropertyChanged += OnPropertyChanged;

        sample.SomeProperty = "Hello World!";       // value changed -> event expected
        sample.SomeProperty = "Hello World!";       // same value -> no event
        sample.SomeProperty = "Hello World again!"; // changed again -> event expected
        sample.SomeSecretProperty = true;
        sample.SomeSecretProperty = false;
    }

    /// <summary>
    /// Writes the changed property's name to the console.
    /// </summary>
    private static void OnPropertyChanged(object s, PropertyChangedEventArgs e)
    {
        Console.WriteLine($"Property {e.PropertyName} has changed.");
    }

    #endregion
}
}
|
# SocialPsychologyOverview
## 背景:
社会心理学是心理学的重要分支,有大量大牛和高阶模型出没;
在微信、抖音等大热的app中,也有大量社会心理学的应用;
做这个项目能获得阳老师亲自指导,还有和“行为分析”课程进行横向对比,获得高质量压力测试的机会。
## 项目初步思路:
1. 通过通识读本获得时空大框架;
2. 定位领域大牛及其贡献;
3. 积累领域内高阶模型及其商业领域的实际应用;
## 项目目标:
1. 完成一份结业报告;
2. 完成一份ppt,并参加路演。
## 协作方式:
1. 信息积累和流通,通过Github issue;
2. 项目进度,通过Github Project看板;
3. 项目呈现:
1. 通过Github code(markdown文件呈现最终报告);
2. PPT通过谷歌云盘呈现。
## 技能需求:
1. Github Markdown写作(标题、加粗、斜体、引用、分割、表格等常规应用);
2. Github project基础使用方法及工作流;
## Changelog
- 2018-12-27 陈晨Evan initiate
测试ccccc
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.