text
stringlengths 2
97.5k
| meta
dict |
|---|---|
/*BEGIN_LEGAL
Intel Open Source License
Copyright (c) 2002-2015 Intel Corporation. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer. Redistributions
in binary form must reproduce the above copyright notice, this list of
conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution. Neither the name of
the Intel Corporation nor the names of its contributors may be used to
endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL OR
ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
END_LEGAL */
#include "pin.H"
#include <iostream>
#include <fstream>
// Output stream for the per-instruction operand trace.
ofstream dis("operand.out");

// Pin instrumentation callback, invoked once per static instruction.
// For each of the first 5 operands it prints a classification tag
// (AGN/MEM/REG/IMM/DSP, or XXX when none of the predicates match),
// appending ":IMP" for implicit operands, then the full disassembly.
// Instrumentation-time only: no analysis code is inserted.
VOID Instruction(INS ins, VOID *v)
{
    INT32 count = INS_OperandCount(ins);
    // NOTE(review): operand columns are hard-coded to 5; operands beyond
    // the fifth are silently dropped.
    for (INT32 i = 0; i < 5; i++)
    {
        if (i >= count)
        {
            dis << " "; // placeholder for an absent operand column
            continue;
        }
        else if (INS_OperandIsAddressGenerator(ins, i))
            dis << "AGN";
        else if (INS_OperandIsMemory(ins, i))
        {
            dis << "MEM";
            // Memory operands additionally show their base register.
            dis << " " << REG_StringShort(INS_OperandMemoryBaseReg(ins, i));
        }
        else if (INS_OperandIsReg(ins, i))
            dis << "REG";
        else if (INS_OperandIsImmediate(ins, i))
            dis << "IMM";
        else if (INS_OperandIsBranchDisplacement(ins, i))
            dis << "DSP";
        else
            dis << "XXX"; // operand kind not recognized by the checks above
        if (INS_OperandIsImplicit(ins, i))
            dis << ":IMP ";
        else
            dis << " ";
    }
    dis << INS_Disassemble(ins) << endl;
}
int main(int argc, char * argv[])
{
PIN_Init(argc, argv);
INS_AddInstrumentFunction(Instruction, 0);
// Never returns
PIN_StartProgram();
return 0;
}
|
{
"pile_set_name": "Github"
}
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
using Microsoft.Xml;
namespace System.ServiceModel.Security
{
/// <summary>
/// Identifies a WS-Trust protocol version by its namespace URI and the
/// preferred XML prefix. Concrete versions are exposed as singletons.
/// </summary>
public abstract class TrustVersion
{
    private readonly XmlDictionaryString _trustNamespace;
    private readonly XmlDictionaryString _prefix;

    internal TrustVersion(XmlDictionaryString ns, XmlDictionaryString prefix)
    {
        _trustNamespace = ns;
        _prefix = prefix;
    }

    /// <summary>The namespace URI of this trust version.</summary>
    public XmlDictionaryString Namespace => _trustNamespace;

    /// <summary>The preferred XML prefix of this trust version.</summary>
    public XmlDictionaryString Prefix => _prefix;

    /// <summary>The default trust version (WS-Trust Feb 2005).</summary>
    public static TrustVersion Default => WSTrustFeb2005;

    public static TrustVersion WSTrustFeb2005 => WSTrustVersionFeb2005.Instance;

    public static TrustVersion WSTrust13 => WSTrustVersion13.Instance;

    internal class WSTrustVersionFeb2005 : TrustVersion
    {
        private static readonly WSTrustVersionFeb2005 s_instance = new WSTrustVersionFeb2005();

        protected WSTrustVersionFeb2005()
            : base(XD.TrustFeb2005Dictionary.Namespace, XD.TrustFeb2005Dictionary.Prefix)
        {
        }

        public static TrustVersion Instance => s_instance;
    }

    internal class WSTrustVersion13 : TrustVersion
    {
        private static readonly WSTrustVersion13 s_instance = new WSTrustVersion13();

        protected WSTrustVersion13()
            : base(DXD.TrustDec2005Dictionary.Namespace, DXD.TrustDec2005Dictionary.Prefix)
        {
        }

        public static TrustVersion Instance => s_instance;
    }
}
}
|
{
"pile_set_name": "Github"
}
|
package as3hx.parsers;
import as3hx.As3;
import as3hx.Tokenizer;
import as3hx.Parser;
/**
 * Parses AS3 statement-level structures (if/for/while/switch/try/var, and a
 * number of Flash API calls that are rewritten to their Haxe equivalents).
 * Part of the as3hx AS3-to-Haxe converter.
 */
class StructureParser {

    /**
     * Parse the structure introduced by keyword `kwd`, returning the
     * resulting Expr, or null when the keyword is not handled here.
     */
    public static function parse(tokenizer:Tokenizer, types:Types, cfg:Config, kwd:String) : Expr {
        // Pre-bind the sub-parsers to this parse context.
        var parseExpr = ExprParser.parse.bind(tokenizer, types, cfg);
        var parseExprList = ExprParser.parseList.bind(tokenizer, types, cfg);
        var parseType = TypeParser.parse.bind(tokenizer, types, cfg);
        var parseFunction = FunctionParser.parse.bind(tokenizer, types, cfg);
        var parseCaseBlock = CaseBlockParser.parse.bind(tokenizer, types, cfg);
        Debug.dbgln("parseStructure(" + kwd + ")", tokenizer.line);
        return switch(kwd) {
        case "if":
            // f wraps a branch expression in an EBlock (unwrapping newline
            // nodes first) so both branches of the Haxe if are blocks.
            var f:Expr->Expr = null;
            f = function(ex) {
                return switch(ex) {
                    case ENL(e): f(e);
                    case EBlock(_): ex;
                    default: EBlock([ex]);
                }
            }
            tokenizer.ensure(TPOpen);
            var cond = parseExpr(false);
            tokenizer.ensure(TPClose);
            var e1 = parseExpr(false);
            e1 = f(e1);
            tokenizer.end();
            var elseExpr = if(ParserUtils.opt(tokenizer, TId("else"), true)) parseExpr(false) else null;
            if(elseExpr != null) elseExpr = f(elseExpr);
            switch(cond) {
                case ECondComp(v, e, e2):
                    //corner case, the condition is an AS3 preprocessor
                    //directive, it must contain the block to wrap it
                    //in Haxe #if #end preprocessor directive
                    ECondComp(v, e1, elseExpr);
                default:
                    //regular if statement,,check for an "else" block
                    EIf(cond, e1, elseExpr);
            }
        case "var", "const":
            // Comma-separated declaration list; each entry may carry a type
            // annotation and/or an initializer.
            var vars = [];
            while( true ) {
                var name = tokenizer.id(), t = null, val = null;
                name = ParserUtils.escapeName(name);
                if( ParserUtils.opt(tokenizer, TColon) )
                    t = parseType();
                if( ParserUtils.opt(tokenizer, TOp("=")) )
                    val = ETypedExpr(parseExpr(false), t);
                vars.push( { name : name, t : t, val : val } );
                if( !ParserUtils.opt(tokenizer, TComma) )
                    break;
            }
            EVars(vars);
        case "while":
            tokenizer.ensure(TPOpen);
            var econd = parseExpr(false);
            tokenizer.ensure(TPClose);
            var e = parseExpr(false);
            EWhile(econd,e, false);
        case "for":
            // Three AS3 forms: "for each (a in b)", "for (a in b)" and the
            // classic C-style "for (init; cond; incr)".
            if( ParserUtils.opt(tokenizer, TId("each")) ) {
                tokenizer.ensure(TPOpen);
                var ev = parseExpr(false);
                switch(ev) {
                    case EBinop(op, e1, e2, n):
                        if(op == "in") {
                            tokenizer.ensure(TPClose);
                            return EForEach(e1, e2, parseExpr(false));
                        }
                        ParserUtils.unexpected(TId(op));
                    default:
                        ParserUtils.unexpected(TId(Std.string(ev)));
                }
            } else {
                tokenizer.ensure(TPOpen);
                var inits = [];
                if( !ParserUtils.opt(tokenizer, TSemicolon) ) {
                    var e = parseExpr(false);
                    switch(e) {
                        case EBinop(op, e1, e2, n):
                            if(op == "in") {
                                tokenizer.ensure(TPClose);
                                return EForIn(e1, e2, parseExpr(false));
                            }
                        default:
                    }
                    if( ParserUtils.opt(tokenizer, TComma) ) {
                        inits = parseExprList(TSemicolon);
                        inits.unshift(e);
                    } else {
                        tokenizer.ensure(TSemicolon);
                        inits = [e];
                    }
                }
                var conds = parseExprList(TSemicolon);
                var incrs = parseExprList(TPClose);
                EFor(inits, conds, incrs, parseExpr(false));
            }
        case "break":
            // Optional label after "break".
            var label = switch( tokenizer.peek() ) {
                case TId(n): tokenizer.token(); n;
                default: null;
            };
            EBreak(label);
        case "continue": EContinue;
        case "else": ParserUtils.unexpected(TId(kwd));
        case "function":
            // Optional function name (anonymous functions have none).
            var name = switch(tokenizer.peek()) {
                case TId(n):
                    tokenizer.token();
                    n;
                default: null;
            };
            EFunction(parseFunction(false), name);
        case "return":
            // Bare "return;" or "return}" produces EReturn(null).
            var t = tokenizer.peek();
            var e = switch(t) {
                case TSemicolon | TBrClose: null;
                case _: parseExpr(false);
            }
            EReturn(e);
        case "new":
            if(ParserUtils.opt(tokenizer, TOp("<"))) {
                // o = new <VectorType>[a,b,c..]
                var t = parseType();
                tokenizer.ensure(TOp(">"));
                if(tokenizer.peek() != TBkOpen)
                    ParserUtils.unexpected(tokenizer.peek());
                ECall(EVector(t), [parseExpr(false)]);
            } else {
                var t = parseType();
                // o = new (iconOrLabel as Class)() as DisplayObject
                var cc = switch(t) {
                    case TComplex(e1) :
                        switch (e1) {
                            case EBinop(op, e2, e3, n):
                                if (op == "as") {
                                    switch (e2) {
                                        case ECall(e4, a):
                                            EBinop(op, ECall(EField(EIdent("Type"), "createInstance"), [e4, EArrayDecl(a)]), e3, n);
                                        default: null;
                                    }
                                }
                                // NOTE(review): this return discards the switch
                                // result above; "new (x as Class)()" therefore
                                // yields null here — TODO confirm intended.
                                return null;
                            default: null;
                        }
                    default: null;
                }
                if (cc != null) cc; else ENew(t,if( ParserUtils.opt(tokenizer, TPOpen) ) parseExprList(TPClose) else []);
            }
        case "throw":
            EThrow( parseExpr(false) );
        case "try":
            var e = parseExpr(false);
            var catches = new Array();
            while( ParserUtils.opt(tokenizer, TId("catch")) ) {
                tokenizer.ensure(TPOpen);
                var name = tokenizer.id();
                tokenizer.ensure(TColon);
                var t = parseType();
                tokenizer.ensure(TPClose);
                var e = parseExpr(false);
                catches.push( { name : name, t : t, e : e } );
            }
            ETry(e, catches);
        case "switch":
            tokenizer.ensure(TPOpen);
            var e = EParent(parseExpr(false));
            tokenizer.ensure(TPClose);
            // meta accumulates newlines/comments seen before the next
            // case/default so they can be re-emitted with it.
            var def:SwitchDefault = null, cl = [], meta = [];
            tokenizer.ensure(TBrOpen);
            //parse all "case" and "default"
            while(true) {
                var tk = tokenizer.token();
                switch (tk) {
                    case TBrClose: //end of switch
                        break;
                    case TId(s):
                        if (s == "default") {
                            tokenizer.ensure(TColon);
                            def = { el : parseCaseBlock(), meta : meta, before: null };
                            meta = [];
                        }
                        else if (s == "case"){
                            var val = parseExpr(false);
                            tokenizer.ensure(TColon);
                            var el = parseCaseBlock();
                            // default already set, and is empty
                            // we assign this case to default
                            if(def != null && def.el.length == 0) {
                                def.el = el;
                                def.meta = def.meta.concat(meta);
                                if(def.vals == null) def.vals = [];
                                def.vals.push(val);
                            }
                            // default already set, and has same
                            // content as this case
                            else if(def != null && def.el == el){
                                def.meta = def.meta.concat(meta);
                                def.el = el;
                                if(def.vals == null) def.vals = [];
                                def.vals.push(val);
                            }
                            // normal case, default not set yet, or differs
                            else {
                                var caseObj = { val : val, el : el, meta : meta }
                                // default already set, but case follows it
                                // mark that default is before this case
                                if(def != null && def.before == null) {
                                    def.before = caseObj;
                                }
                                cl.push(caseObj);
                            }
                            //reset for next case or default
                            meta = [];
                        }
                        else {
                            ParserUtils.unexpected(tk);
                        }
                    case TNL(t): //keep newline as meta for a case/default
                        tokenizer.add(t);
                        meta.push(ENL(null));
                    case TCommented(s,b,t): //keep comment as meta for a case/default
                        tokenizer.add(t);
                        meta.push(ECommented(s,b,false,null));
                    default:
                        ParserUtils.unexpected(tk);
                }
            }
            ESwitch(e, cl, def);
        case "do":
            var e = parseExpr(false);
            tokenizer.ensure(TId("while"));
            var cond = parseExpr(false);
            EWhile(cond, e, true);
        case "typeof":
            var e = parseExpr(false);
            // Only a restricted set of operand shapes is accepted.
            switch(e) {
                case EBinop(_, _, _, _):
                    //if(op != "==" && op != "!=")
                    //    ParserUtils.unexpected(TOp(op));
                case EParent(_):
                case EIdent(_):
                case EConst(_):
                default: ParserUtils.unexpected(TId(Std.string(e)));
            }
            ETypeof(e);
        case "delete":
            var e = parseExpr(false);
            tokenizer.end();
            EDelete(e);
        // The remaining cases rewrite Flash API calls to Haxe equivalents.
        case "getQualifiedClassName":
            tokenizer.ensure(TPOpen);
            var e = parseExpr(false);
            e = switch(e) {
                case EIdent(v) if(v == "this"): ECall(EField(EIdent("Type"), "getClass"), [e]);
                default: e;
            }
            tokenizer.ensure(TPClose);
            ECall(EField(EIdent("Type"), "getClassName"), [e]);
        case "getQualifiedSuperclassName":
            tokenizer.ensure(TPOpen);
            var e = parseExpr(false);
            tokenizer.ensure(TPClose);
            ECall(EField(EIdent("Type"), "getClassName"), [ECall(EField(EIdent("Type"), "getSuperClass"), [e])]);
        case "getDefinitionByName":
            tokenizer.ensure(TPOpen);
            var e = parseExpr(false);
            tokenizer.ensure(TPClose);
            ECall(EField(EIdent("Type"), "resolveClass"), [e]);
        case "getTimer":
            //consume the parenthesis from the getTimer AS3 call
            while(!ParserUtils.opt(tokenizer, TPClose)) {
                tokenizer.token();
            }
            ECall(EField(EIdent("Math"), "round"), [EBinop("*", ECall(EField(EIdent("haxe.Timer"), "stamp"), []), EConst(CInt("1000")), false)]);
        case "setTimeout" | "setInterval":
            var params = getParams(tokenizer, parseExpr);
            if(params != null) return ECall(EField(EIdent("as3hx.Compat"), kwd), params);
            return null;
        case "clearTimeout" | "clearInterval":
            tokenizer.ensure(TPOpen);
            var e = parseExpr(false);
            tokenizer.ensure(TPClose);
            ECall(EField(EIdent("as3hx.Compat"), kwd), [e]);
        case "parseInt" | "parseFloat" if(cfg.useCompat):
            var params = getParams(tokenizer, parseExpr);
            if(params != null) return ECall(EField(EIdent("as3hx.Compat"), kwd), params);
            null;
        case "parseInt" | "parseFloat":
            tokenizer.ensure(TPOpen);
            var e = parseExpr(false);
            tokenizer.ensure(TPClose);
            ECall(EField(EIdent("Std"), kwd), [e]);
        case "navigateToURL":
            var params = getParams(tokenizer, parseExpr);
            if(params != null) return ECall(EField(EIdent("flash.Lib"), "getURL"), params);
            return null;
        default: null;
        }
    }

    /**
     * Consume a parenthesized argument list, tracking nested parentheses.
     * Arguments beyond the second are collected into a trailing EArrayDecl
     * (used for the varargs of setTimeout/setInterval). Returns null when
     * the next token is not an opening parenthesis.
     */
    static function getParams(tokenizer:Tokenizer, parseExpr) {
        return switch(tokenizer.token()) {
            case TPOpen:
                var params = [];
                var parCount = 1; // depth of nested parentheses
                while(parCount > 0) {
                    var t = tokenizer.token();
                    switch(t) {
                        case TPOpen: parCount++;
                        case TPClose:
                            parCount--;
                            if(params.length > 0) params[params.length - 1] = EParent(params[params.length - 1]);
                        case TComma:
                        case TOp(op) if(params.length > 0):
                            params[params.length - 1] = ParserUtils.makeBinop(tokenizer, op, params[params.length - 1], parseExpr(false));
                        case _:
                            tokenizer.add(t);
                            if(params.length < 2) params.push(parseExpr(false));
                            else {
                                if(params.length == 2) params.push(EArrayDecl([]));
                                switch(params[2]) {
                                    case EArrayDecl(e): e.push(parseExpr(false));
                                    case _:
                                }
                            }
                    }
                }
                params;
            case _: null;
        }
    }
}
|
{
"pile_set_name": "Github"
}
|
/*
Copyright (C) 2014, The University of Texas at Austin
This file is part of libflame and is available under the 3-Clause
BSD license, which can be found in the LICENSE file at the top-level
directory, or at http://opensource.org/licenses/BSD-3-Clause
*/
#include "FLAME.h"
/*
 * Dispatch a triangular matrix-matrix multiply (right side, lower
 * triangular, (conjugate-)transposed A) to the variant selected by the
 * control tree node.
 *
 *   diag  - whether A's diagonal is treated as unit or non-unit
 *   alpha - scalar applied to the product
 *   A     - triangular matrix operand
 *   B     - dense matrix operand, overwritten with the result
 *   cntl  - control tree node naming the blocked/unblocked variant
 *
 * Returns the variant's result, or the result of FLA_Check_error_code()
 * when the requested variant is not implemented.
 */
FLA_Error FLA_Trmm_rlh( FLA_Diag diag, FLA_Obj alpha, FLA_Obj A, FLA_Obj B, fla_trmm_t* cntl )
{
  FLA_Error r_val = FLA_SUCCESS;
  /* Read the requested variant once instead of re-querying the control
     tree node in every branch of the dispatch chain. */
  int       variant = FLA_Cntl_variant( cntl );

  if ( variant == FLA_SUBPROBLEM )
  {
    r_val = FLA_Trmm_rlh_task( diag, alpha, A, B, cntl );
  }
#ifdef FLA_ENABLE_NON_CRITICAL_CODE
  else if ( variant == FLA_BLOCKED_VARIANT1 )
  {
    r_val = FLA_Trmm_rlh_blk_var1( diag, alpha, A, B, cntl );
  }
#endif
  else if ( variant == FLA_BLOCKED_VARIANT2 )
  {
    r_val = FLA_Trmm_rlh_blk_var2( diag, alpha, A, B, cntl );
  }
  else if ( variant == FLA_BLOCKED_VARIANT3 )
  {
    r_val = FLA_Trmm_rlh_blk_var3( diag, alpha, A, B, cntl );
  }
#ifdef FLA_ENABLE_NON_CRITICAL_CODE
  else if ( variant == FLA_BLOCKED_VARIANT4 )
  {
    r_val = FLA_Trmm_rlh_blk_var4( diag, alpha, A, B, cntl );
  }
#endif
#ifdef FLA_ENABLE_NON_CRITICAL_CODE
  else if ( variant == FLA_UNBLOCKED_VARIANT1 )
  {
    r_val = FLA_Trmm_rlh_unb_var1( diag, alpha, A, B );
  }
  else if ( variant == FLA_UNBLOCKED_VARIANT2 )
  {
    r_val = FLA_Trmm_rlh_unb_var2( diag, alpha, A, B );
  }
  else if ( variant == FLA_UNBLOCKED_VARIANT3 )
  {
    r_val = FLA_Trmm_rlh_unb_var3( diag, alpha, A, B );
  }
  else if ( variant == FLA_UNBLOCKED_VARIANT4 )
  {
    r_val = FLA_Trmm_rlh_unb_var4( diag, alpha, A, B );
  }
#endif
  else
  {
    r_val = FLA_Check_error_code( FLA_NOT_YET_IMPLEMENTED );
  }

  return r_val;
}
|
{
"pile_set_name": "Github"
}
|
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1beta1

// NodeMetricsExpansion is an extension point for hand-written methods on the
// generated NodeMetrics client (client-gen expansion convention).
type NodeMetricsExpansion interface{}

// PodMetricsExpansion is an extension point for hand-written methods on the
// generated PodMetrics client (client-gen expansion convention).
type PodMetricsExpansion interface{}
|
{
"pile_set_name": "Github"
}
|
<TS language="sah" version="2.0">
<context>
<name>AddressBookPage</name>
</context>
<context>
<name>AddressTableModel</name>
</context>
<context>
<name>AskPassphraseDialog</name>
</context>
<context>
<name>BitcoinGUI</name>
</context>
<context>
<name>ClientModel</name>
</context>
<context>
<name>CoinControlDialog</name>
</context>
<context>
<name>EditAddressDialog</name>
</context>
<context>
<name>FreespaceChecker</name>
</context>
<context>
<name>HelpMessageDialog</name>
</context>
<context>
<name>Intro</name>
</context>
<context>
<name>OpenURIDialog</name>
</context>
<context>
<name>OptionsDialog</name>
</context>
<context>
<name>OverviewPage</name>
</context>
<context>
<name>PaymentServer</name>
</context>
<context>
<name>PeerTableModel</name>
</context>
<context>
<name>QObject</name>
</context>
<context>
<name>QRImageWidget</name>
</context>
<context>
<name>RPCConsole</name>
</context>
<context>
<name>ReceiveCoinsDialog</name>
</context>
<context>
<name>ReceiveRequestDialog</name>
</context>
<context>
<name>RecentRequestsTableModel</name>
</context>
<context>
<name>SendCoinsDialog</name>
</context>
<context>
<name>SendCoinsEntry</name>
</context>
<context>
<name>ShutdownWindow</name>
</context>
<context>
<name>SignVerifyMessageDialog</name>
</context>
<context>
<name>SplashScreen</name>
</context>
<context>
<name>TrafficGraphWidget</name>
</context>
<context>
<name>TransactionDesc</name>
</context>
<context>
<name>TransactionDescDialog</name>
</context>
<context>
<name>TransactionTableModel</name>
</context>
<context>
<name>TransactionView</name>
</context>
<context>
<name>UnitDisplayStatusBarControl</name>
</context>
<context>
<name>WalletFrame</name>
</context>
<context>
<name>WalletModel</name>
</context>
<context>
<name>WalletView</name>
</context>
<context>
<name>bitcoin-core</name>
</context>
</TS>
|
{
"pile_set_name": "Github"
}
|
// Processing example: splitTokens() splits at runs of *any* of the given
// delimiter characters, so repeated commas and stray spaces never produce
// empty tokens.
String s = "a, b c ,,d "; // Despite the bad formatting,
String[] p = splitTokens(s, ", "); // the data is parsed correctly
println(p[0]); // Prints "a"
println(p[1]); // Prints "b"
println(p[2]); // Prints "c"
println(p[3]); // Prints "d"
{
"pile_set_name": "Github"
}
|
import 'package:sqflite_crud/models/todo.dart';
import 'package:sqflite_crud/repository/database_creator.dart';
/// CRUD helpers for rows of the todo table.
///
/// All statements use `?` placeholders with a parameter list so values are
/// bound by sqflite rather than interpolated into the SQL text.
class RepositoryServiceTodo {
  /// Returns every todo that has not been soft-deleted.
  static Future<List<Todo>> getAllTodos() async {
    final sql = '''SELECT * FROM ${DatabaseCreator.todoTable}
    WHERE ${DatabaseCreator.isDeleted} = 0''';
    final data = await db.rawQuery(sql);
    final todos = <Todo>[];
    for (final node in data) {
      todos.add(Todo.fromJson(node));
    }
    return todos;
  }

  /// Returns the todo with primary key [id].
  ///
  /// Throws if no row matches, because `data.first` fails on an empty list.
  static Future<Todo> getTodo(int id) async {
    final sql = '''SELECT * FROM ${DatabaseCreator.todoTable}
    WHERE ${DatabaseCreator.id} = ?''';
    final data = await db.rawQuery(sql, <dynamic>[id]);
    return Todo.fromJson(data.first);
  }

  /// Inserts [todo] as a new row and logs the statement.
  static Future<void> addTodo(Todo todo) async {
    final sql = '''INSERT INTO ${DatabaseCreator.todoTable}
    (
      ${DatabaseCreator.id},
      ${DatabaseCreator.name},
      ${DatabaseCreator.info},
      ${DatabaseCreator.isDeleted}
    )
    VALUES (?,?,?,?)''';
    final params = <dynamic>[
      todo.id,
      todo.name,
      todo.info,
      todo.isDeleted ? 1 : 0
    ];
    final result = await db.rawInsert(sql, params);
    DatabaseCreator.databaseLog('Add todo', sql, null, result, params);
  }

  /// Soft-deletes [todo] by setting its isDeleted flag to 1.
  static Future<void> deleteTodo(Todo todo) async {
    final sql = '''UPDATE ${DatabaseCreator.todoTable}
    SET ${DatabaseCreator.isDeleted} = 1
    WHERE ${DatabaseCreator.id} = ?
    ''';
    final params = <dynamic>[todo.id];
    final result = await db.rawUpdate(sql, params);
    DatabaseCreator.databaseLog('Delete todo', sql, null, result, params);
  }

  /// Writes [todo]'s current name back to its row.
  static Future<void> updateTodo(Todo todo) async {
    final sql = '''UPDATE ${DatabaseCreator.todoTable}
    SET ${DatabaseCreator.name} = ?
    WHERE ${DatabaseCreator.id} = ?
    ''';
    final params = <dynamic>[todo.name, todo.id];
    final result = await db.rawUpdate(sql, params);
    DatabaseCreator.databaseLog('Update todo', sql, null, result, params);
  }

  /// Returns the total number of rows (including soft-deleted ones), which
  /// callers use as the id for the next new item — ids are assigned
  /// sequentially from 0, so the count is the next free id.
  static Future<int> todosCount() async {
    final data = await db
        .rawQuery('''SELECT COUNT(*) FROM ${DatabaseCreator.todoTable}''');
    // BUG FIX: the original wrote `int idForNewItem = count++;` — a dead
    // post-increment whose value is simply `count`. Return it directly.
    final int count = data[0].values.elementAt(0);
    return count;
  }
}
|
{
"pile_set_name": "Github"
}
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import json
import unittest
import os
from contextlib import contextmanager
from hashlib import md5
import time
import pickle
import mock
from six.moves import urllib
from swift.common import direct_client
from swift.common.direct_client import DirectClientException
from swift.common.exceptions import ClientException
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.utils import Timestamp, quote
from swift.common.swob import RESPONSE_REASONS
from swift.common.storage_policy import POLICIES
from six.moves.http_client import HTTPException
from test.unit import patch_policies, debug_logger
class FakeConn(object):
    """Stand-in for a bufferedhttp raw connection.

    Plays back a canned status/headers/body, records the positional args
    that ``http_connect_raw`` was called with (via
    ``_update_raw_call_args``), and accumulates an md5 etag from data passed
    to ``send()``.  If ``status`` is an Exception instance it is raised from
    ``getresponse()``.
    """

    def __init__(self, status, headers=None, body='', **kwargs):
        self.status = status
        try:
            self.reason = RESPONSE_REASONS[self.status][0]
        except Exception:
            # status may be an Exception or an unknown code
            self.reason = 'Fake'
        self.body = body
        self.resp_headers = HeaderKeyDict()
        if headers:
            self.resp_headers.update(headers)
        self.etag = None

    def _update_raw_call_args(self, *args, **kwargs):
        # Capture the positional args http_connect_raw received so tests can
        # assert on host/port/method/path/headers/query string.
        capture_attrs = ('host', 'port', 'method', 'path', 'req_headers',
                         'query_string')
        for attr, value in zip(capture_attrs, args[:len(capture_attrs)]):
            setattr(self, attr, value)
        return self

    def getresponse(self):
        if self.etag:
            self.resp_headers['etag'] = str(self.etag.hexdigest())
        if isinstance(self.status, Exception):
            raise self.status
        return self

    def getheader(self, header, default=None):
        return self.resp_headers.get(header, default)

    def getheaders(self):
        return self.resp_headers.items()

    def read(self, amt=None):
        if isinstance(self.body, io.BytesIO):
            return self.body.read(amt)
        elif amt is None:
            return self.body
        else:
            # BUG FIX: the original *returned* the Exception instance here
            # instead of raising it, silently handing callers an Exception
            # object when a sized read was attempted on a non-BytesIO body.
            raise Exception('Not a StringIO entry')

    def send(self, data):
        if not self.etag:
            self.etag = md5()
        self.etag.update(data)
@contextmanager
def mocked_http_conn(*args, **kwargs):
    """Patch bufferedhttp.http_connect_raw to return a canned FakeConn.

    Yields the FakeConn so the test can inspect the captured call args
    after the request has been made.
    """
    fake_conn = FakeConn(*args, **kwargs)

    def _capture_call(*call_args, **call_kwargs):
        # Record the raw connect arguments on the shared FakeConn.
        return fake_conn._update_raw_call_args(*call_args, **call_kwargs)

    with mock.patch('swift.common.bufferedhttp.http_connect_raw',
                    new=_capture_call):
        yield fake_conn
@patch_policies
class TestDirectClient(unittest.TestCase):
def setUp(self):
    # Build a ring-like device node; the json roundtrip mimics what real
    # ring deserialization produces (note: port is a string).
    self.node = json.loads(json.dumps({  # json roundtrip to ring-like
        'ip': '1.2.3.4', 'port': '6200', 'device': 'sda',
        'replication_ip': '1.2.3.5', 'replication_port': '7000'}))
    self.part = '0'
    # Non-ASCII names exercise the utf-8 quoting of request paths.
    self.account = u'\u062a account'
    self.container = u'\u062a container'
    self.obj = u'\u062a obj/name'
    self.account_path = '/sda/0/%s' % urllib.parse.quote(
        self.account.encode('utf-8'))
    self.container_path = '/sda/0/%s/%s' % tuple(
        urllib.parse.quote(p.encode('utf-8')) for p in (
            self.account, self.container))
    self.obj_path = '/sda/0/%s/%s/%s' % tuple(
        urllib.parse.quote(p.encode('utf-8')) for p in (
            self.account, self.container, self.obj))
    self.user_agent = 'direct-client %s' % os.getpid()

    # Neutralize direct_client's eventlet Timeout for the duration of each
    # test; the fake context manager does nothing on enter/exit.
    class FakeTimeout(BaseException):
        def __enter__(self):
            return self

        def __exit__(self, typ, value, tb):
            pass

    patcher = mock.patch.object(direct_client, 'Timeout', FakeTimeout)
    patcher.start()
    self.addCleanup(patcher.stop)
def test_gen_headers(self):
    # gen_headers() must always stamp User-Agent (unless caller overrides
    # it) and X-Backend-Allow-Reserved-Names, and add X-Timestamp only when
    # add_ts is true and the caller did not supply one.
    stub_user_agent = 'direct-client %s' % os.getpid()

    headers = direct_client.gen_headers(add_ts=False)
    self.assertEqual(dict(headers), {
        'User-Agent': stub_user_agent,
        'X-Backend-Allow-Reserved-Names': 'true',
    })

    with mock.patch('swift.common.utils.Timestamp.now',
                    return_value=Timestamp('123.45')):
        headers = direct_client.gen_headers()
    self.assertEqual(dict(headers), {
        'User-Agent': stub_user_agent,
        'X-Backend-Allow-Reserved-Names': 'true',
        'X-Timestamp': '0000000123.45000',
    })

    # A caller-supplied x-timestamp must be preserved verbatim.
    headers = direct_client.gen_headers(hdrs_in={'x-timestamp': '15'})
    self.assertEqual(dict(headers), {
        'User-Agent': stub_user_agent,
        'X-Backend-Allow-Reserved-Names': 'true',
        'X-Timestamp': '15',
    })

    with mock.patch('swift.common.utils.Timestamp.now',
                    return_value=Timestamp('12345.6789')):
        headers = direct_client.gen_headers(hdrs_in={'foo-bar': '63'})
    self.assertEqual(dict(headers), {
        'User-Agent': stub_user_agent,
        'Foo-Bar': '63',
        'X-Backend-Allow-Reserved-Names': 'true',
        'X-Timestamp': '0000012345.67890',
    })

    hdrs_in = {'foo-bar': '55'}
    headers = direct_client.gen_headers(hdrs_in, add_ts=False)
    self.assertEqual(dict(headers), {
        'User-Agent': stub_user_agent,
        'Foo-Bar': '55',
        'X-Backend-Allow-Reserved-Names': 'true',
    })

    # A caller-supplied user-agent overrides the default.
    with mock.patch('swift.common.utils.Timestamp.now',
                    return_value=Timestamp('12345')):
        headers = direct_client.gen_headers(hdrs_in={'user-agent': '32'})
    self.assertEqual(dict(headers), {
        'User-Agent': '32',
        'X-Backend-Allow-Reserved-Names': 'true',
        'X-Timestamp': '0000012345.00000',
    })

    hdrs_in = {'user-agent': '47'}
    headers = direct_client.gen_headers(hdrs_in, add_ts=False)
    self.assertEqual(dict(headers), {
        'User-Agent': '47',
        'X-Backend-Allow-Reserved-Names': 'true',
    })

    # Storage policy index must pass through (stringified) for every
    # policy, with and without timestamping.
    for policy in POLICIES:
        for add_ts in (True, False):
            with mock.patch('swift.common.utils.Timestamp.now',
                            return_value=Timestamp('123456789')):
                headers = direct_client.gen_headers(
                    {'X-Backend-Storage-Policy-Index': policy.idx},
                    add_ts=add_ts)
            expected = {
                'User-Agent': stub_user_agent,
                'X-Backend-Storage-Policy-Index': str(policy.idx),
                'X-Backend-Allow-Reserved-Names': 'true',
            }
            if add_ts:
                expected['X-Timestamp'] = '0123456789.00000'
            self.assertEqual(dict(headers), expected)
def test_direct_get_account(self):
    # Exercise every combination of listing query parameters and verify
    # each one is forwarded to (or absent from) the query string.
    def do_test(req_params):
        stub_headers = HeaderKeyDict({
            'X-Account-Container-Count': '1',
            'X-Account-Object-Count': '1',
            'X-Account-Bytes-Used': '1',
            'X-Timestamp': '1234567890',
            'X-PUT-Timestamp': '1234567890'})
        body = b'[{"count": 1, "bytes": 20971520, "name": "c1"}]'
        with mocked_http_conn(200, stub_headers, body) as conn:
            resp_headers, resp = direct_client.direct_get_account(
                self.node, self.part, self.account, **req_params)
        try:
            self.assertEqual(conn.method, 'GET')
            self.assertEqual(conn.path, self.account_path)
            self.assertEqual(conn.req_headers['user-agent'],
                             self.user_agent)
            self.assertEqual(resp_headers, stub_headers)
            self.assertEqual(json.loads(body), resp)
            self.assertIn('format=json', conn.query_string)
            for k, v in req_params.items():
                if v is None:
                    self.assertNotIn('&%s' % k, conn.query_string)
                else:
                    self.assertIn('&%s=%s' % (k, v), conn.query_string)
        except AssertionError as err:
            # Re-raise with the parameter combination for diagnosis.
            self.fail('Failed with params %s: %s' % (req_params, err))

    # Cartesian product of present/absent values for each listing param.
    test_params = (dict(marker=marker, prefix=prefix, delimiter=delimiter,
                        limit=limit, end_marker=end_marker, reverse=reverse)
                   for marker in (None, 'my-marker')
                   for prefix in (None, 'my-prefix')
                   for delimiter in (None, 'my-delimiter')
                   for limit in (None, 1000)
                   for end_marker in (None, 'my-endmarker')
                   for reverse in (None, 'on'))
    for params in test_params:
        do_test(params)
def test_direct_client_exception(self):
    # A 5xx response surfaces as a ClientException carrying full
    # request/response context (host, port, device, status, reason,
    # headers) and a descriptive message.
    stub_headers = {'X-Trans-Id': 'txb5f59485c578460f8be9e-0053478d09'}
    body = 'a server error has occurred'
    with mocked_http_conn(500, stub_headers, body):
        with self.assertRaises(ClientException) as raised:
            direct_client.direct_get_account(self.node, self.part,
                                             self.account)
    self.assertEqual(raised.exception.http_status, 500)
    expected_err_msg_parts = (
        'Account server %s:%s' % (self.node['ip'], self.node['port']),
        'GET %r' % self.account_path,
        'status 500',
    )
    for item in expected_err_msg_parts:
        self.assertIn(item, str(raised.exception))
    self.assertEqual(raised.exception.http_host, self.node['ip'])
    self.assertEqual(raised.exception.http_port, self.node['port'])
    self.assertEqual(raised.exception.http_device, self.node['device'])
    self.assertEqual(raised.exception.http_status, 500)
    self.assertEqual(raised.exception.http_reason, 'Internal Error')
    self.assertEqual(raised.exception.http_headers, stub_headers)
def test_direct_get_account_no_content_does_not_parse_body(self):
    # A 204 (no content) must yield an empty listing without attempting
    # to json-parse a (nonexistent) body.
    headers = {
        'X-Account-Container-Count': '1',
        'X-Account-Object-Count': '1',
        'X-Account-Bytes-Used': '1',
        'X-Timestamp': '1234567890',
        'X-Put-Timestamp': '1234567890'}
    with mocked_http_conn(204, headers) as conn:
        resp_headers, resp = direct_client.direct_get_account(
            self.node, self.part, self.account)
    self.assertEqual(conn.method, 'GET')
    self.assertEqual(conn.path, self.account_path)
    self.assertEqual(conn.req_headers['user-agent'], self.user_agent)
    self.assertDictEqual(resp_headers, headers)
    self.assertEqual([], resp)
def test_direct_get_account_error(self):
    # A 500 from the account server raises ClientException with the
    # request method in the message.
    with mocked_http_conn(500) as conn:
        with self.assertRaises(ClientException) as raised:
            direct_client.direct_get_account(
                self.node, self.part, self.account)
    self.assertEqual(conn.method, 'GET')
    self.assertEqual(conn.path, self.account_path)
    self.assertEqual(raised.exception.http_status, 500)
    self.assertTrue('GET' in str(raised.exception))
def test_direct_delete_account(self):
    # direct_delete_account should issue DELETE to the node's normal
    # ip/port with X-Timestamp and User-Agent headers set.
    part = '0'
    account = 'a'

    mock_path = 'swift.common.bufferedhttp.http_connect_raw'
    with mock.patch(mock_path) as fake_connect:
        fake_connect.return_value.getresponse.return_value.status = 200
        direct_client.direct_delete_account(self.node, part, account)
    args, kwargs = fake_connect.call_args
    # Positional args of http_connect_raw: ip, port, method, path, headers.
    ip = args[0]
    self.assertEqual(self.node['ip'], ip)
    port = args[1]
    self.assertEqual(self.node['port'], port)
    method = args[2]
    self.assertEqual('DELETE', method)
    path = args[3]
    self.assertEqual('/sda/0/a', path)
    headers = args[4]
    self.assertIn('X-Timestamp', headers)
    self.assertIn('User-Agent', headers)
def test_direct_delete_account_replication_net(self):
    # With X-Backend-Use-Replication-Network set, the request must go to
    # the node's replication ip/port rather than the client-facing ones.
    part = '0'
    account = 'a'

    mock_path = 'swift.common.bufferedhttp.http_connect_raw'
    with mock.patch(mock_path) as fake_connect:
        fake_connect.return_value.getresponse.return_value.status = 200
        direct_client.direct_delete_account(
            self.node, part, account,
            headers={'X-Backend-Use-Replication-Network': 't'})
    args, kwargs = fake_connect.call_args
    ip = args[0]
    self.assertEqual(self.node['replication_ip'], ip)
    self.assertNotEqual(self.node['ip'], ip)
    port = args[1]
    self.assertEqual(self.node['replication_port'], port)
    self.assertNotEqual(self.node['port'], port)
    method = args[2]
    self.assertEqual('DELETE', method)
    path = args[3]
    self.assertEqual('/sda/0/a', path)
    headers = args[4]
    self.assertIn('X-Timestamp', headers)
    self.assertIn('User-Agent', headers)
def test_direct_delete_account_failure(self):
    # A 500 on DELETE raises ClientException; the request itself must
    # still have been well-formed.
    part = '0'
    account = 'a'

    with mocked_http_conn(500) as conn:
        with self.assertRaises(ClientException) as raised:
            direct_client.direct_delete_account(self.node, part, account)
    self.assertEqual(self.node['ip'], conn.host)
    self.assertEqual(self.node['port'], conn.port)
    self.assertEqual('DELETE', conn.method)
    self.assertEqual('/sda/0/a', conn.path)
    self.assertIn('X-Timestamp', conn.req_headers)
    self.assertIn('User-Agent', conn.req_headers)
    self.assertEqual(raised.exception.http_status, 500)
def test_direct_head_container(self):
    # HEAD on a container returns the response headers on success.
    headers = HeaderKeyDict(key='value')

    with mocked_http_conn(200, headers) as conn:
        resp = direct_client.direct_head_container(
            self.node, self.part, self.account, self.container)
    self.assertEqual(conn.host, self.node['ip'])
    self.assertEqual(conn.port, self.node['port'])
    self.assertEqual(conn.method, 'HEAD')
    self.assertEqual(conn.path, self.container_path)
    self.assertEqual(conn.req_headers['user-agent'],
                     self.user_agent)
    self.assertEqual(headers, resp)
def test_direct_head_container_replication_net(self):
headers = HeaderKeyDict(key='value')
with mocked_http_conn(200, headers) as conn:
resp = direct_client.direct_head_container(
self.node, self.part, self.account, self.container,
headers={'X-Backend-Use-Replication-Network': 'on'})
self.assertEqual(conn.host, self.node['replication_ip'])
self.assertEqual(conn.port, self.node['replication_port'])
self.assertNotEqual(conn.host, self.node['ip'])
self.assertNotEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'HEAD')
self.assertEqual(conn.path, self.container_path)
self.assertEqual(conn.req_headers['user-agent'],
self.user_agent)
self.assertEqual(headers, resp)
def test_direct_head_container_error(self):
headers = HeaderKeyDict(key='value')
with mocked_http_conn(503, headers) as conn:
with self.assertRaises(ClientException) as raised:
direct_client.direct_head_container(
self.node, self.part, self.account, self.container)
# check request
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'HEAD')
self.assertEqual(conn.path, self.container_path)
self.assertEqual(conn.req_headers['user-agent'], self.user_agent)
self.assertEqual(raised.exception.http_status, 503)
self.assertEqual(raised.exception.http_headers, headers)
self.assertTrue('HEAD' in str(raised.exception))
def test_direct_head_container_deleted(self):
important_timestamp = Timestamp.now().internal
headers = HeaderKeyDict({'X-Backend-Important-Timestamp':
important_timestamp})
with mocked_http_conn(404, headers) as conn:
with self.assertRaises(ClientException) as raised:
direct_client.direct_head_container(
self.node, self.part, self.account, self.container)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'HEAD')
self.assertEqual(conn.path, self.container_path)
self.assertEqual(conn.req_headers['user-agent'], self.user_agent)
self.assertEqual(raised.exception.http_status, 404)
self.assertEqual(raised.exception.http_headers, headers)
    def test_direct_get_container(self):
        """GET container with every combination of listing query params."""
        def do_test(req_params):
            # canned listing: headers plus a one-row JSON body
            headers = HeaderKeyDict({'key': 'value'})
            body = (b'[{"hash": "8f4e3", "last_modified": "317260", '
                    b'"bytes": 209}]')
            with mocked_http_conn(200, headers, body) as conn:
                resp_headers, resp = direct_client.direct_get_container(
                    self.node, self.part, self.account, self.container,
                    **req_params)
            try:
                self.assertEqual(conn.method, 'GET')
                self.assertEqual(conn.path, self.container_path)
                self.assertEqual(conn.req_headers['user-agent'],
                                 self.user_agent)
                self.assertEqual(headers, resp_headers)
                self.assertEqual(json.loads(body), resp)
                self.assertIn('format=json', conn.query_string)
                # params passed as None must NOT appear in the query string
                for k, v in req_params.items():
                    if v is None:
                        self.assertNotIn('&%s' % k, conn.query_string)
                    else:
                        self.assertIn('&%s=%s' % (k, v), conn.query_string)
            except AssertionError as err:
                # include the parameter combination in the failure output
                self.fail('Failed with params %s: %s' % (req_params, err))
        # generator over the full cross-product of supported parameters
        test_params = (dict(marker=marker, prefix=prefix, delimiter=delimiter,
                            limit=limit, end_marker=end_marker, reverse=reverse)
                       for marker in (None, 'my-marker')
                       for prefix in (None, 'my-prefix')
                       for delimiter in (None, 'my-delimiter')
                       for limit in (None, 1000)
                       for end_marker in (None, 'my-endmarker')
                       for reverse in (None, 'on'))
        for params in test_params:
            do_test(params)
def test_direct_get_container_no_content_does_not_decode_body(self):
headers = {}
body = ''
with mocked_http_conn(204, headers, body) as conn:
resp_headers, resp = direct_client.direct_get_container(
self.node, self.part, self.account, self.container)
self.assertEqual(conn.req_headers['user-agent'], self.user_agent)
self.assertEqual(headers, resp_headers)
self.assertEqual([], resp)
def test_direct_delete_container(self):
with mocked_http_conn(200) as conn:
direct_client.direct_delete_container(
self.node, self.part, self.account, self.container)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'DELETE')
self.assertEqual(conn.path, self.container_path)
def test_direct_delete_container_replication_net(self):
with mocked_http_conn(200) as conn:
direct_client.direct_delete_container(
self.node, self.part, self.account, self.container,
headers={'X-Backend-Use-Replication-Network': '1'})
self.assertEqual(conn.host, self.node['replication_ip'])
self.assertEqual(conn.port, self.node['replication_port'])
self.assertNotEqual(conn.host, self.node['ip'])
self.assertNotEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'DELETE')
self.assertEqual(conn.path, self.container_path)
def test_direct_delete_container_with_timestamp(self):
# ensure timestamp is different from any that might be auto-generated
timestamp = Timestamp(time.time() - 100)
headers = {'X-Timestamp': timestamp.internal}
with mocked_http_conn(200) as conn:
direct_client.direct_delete_container(
self.node, self.part, self.account, self.container,
headers=headers)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'DELETE')
self.assertEqual(conn.path, self.container_path)
self.assertTrue('X-Timestamp' in conn.req_headers)
self.assertEqual(timestamp, conn.req_headers['X-Timestamp'])
def test_direct_delete_container_error(self):
with mocked_http_conn(500) as conn:
with self.assertRaises(ClientException) as raised:
direct_client.direct_delete_container(
self.node, self.part, self.account, self.container)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'DELETE')
self.assertEqual(conn.path, self.container_path)
self.assertEqual(raised.exception.http_status, 500)
self.assertTrue('DELETE' in str(raised.exception))
def test_direct_put_container(self):
body = b'Let us begin with a quick introduction'
headers = {'x-foo': 'bar', 'Content-Length': str(len(body)),
'Content-Type': 'application/json',
'User-Agent': 'my UA'}
with mocked_http_conn(204) as conn:
rv = direct_client.direct_put_container(
self.node, self.part, self.account, self.container,
contents=body, headers=headers)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'PUT')
self.assertEqual(conn.path, self.container_path)
self.assertEqual(conn.req_headers['Content-Length'],
str(len(body)))
self.assertEqual(conn.req_headers['Content-Type'],
'application/json')
self.assertEqual(conn.req_headers['User-Agent'], 'my UA')
self.assertTrue('x-timestamp' in conn.req_headers)
self.assertEqual('bar', conn.req_headers.get('x-foo'))
self.assertEqual(md5(body).hexdigest(), conn.etag.hexdigest())
self.assertIsNone(rv)
def test_direct_put_container_chunked(self):
body = b'Let us begin with a quick introduction'
headers = {'x-foo': 'bar', 'Content-Type': 'application/json'}
with mocked_http_conn(204) as conn:
rv = direct_client.direct_put_container(
self.node, self.part, self.account, self.container,
contents=body, headers=headers)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'PUT')
self.assertEqual(conn.path, self.container_path)
self.assertEqual(conn.req_headers['Transfer-Encoding'], 'chunked')
self.assertEqual(conn.req_headers['Content-Type'],
'application/json')
self.assertTrue('x-timestamp' in conn.req_headers)
self.assertEqual('bar', conn.req_headers.get('x-foo'))
self.assertNotIn('Content-Length', conn.req_headers)
expected_sent = b'%0x\r\n%s\r\n0\r\n\r\n' % (len(body), body)
self.assertEqual(md5(expected_sent).hexdigest(),
conn.etag.hexdigest())
self.assertIsNone(rv)
def test_direct_put_container_fail(self):
with mock.patch('swift.common.bufferedhttp.http_connect_raw',
side_effect=Exception('conn failed')):
with self.assertRaises(Exception) as cm:
direct_client.direct_put_container(
self.node, self.part, self.account, self.container)
self.assertEqual('conn failed', str(cm.exception))
with mocked_http_conn(Exception('resp failed')):
with self.assertRaises(Exception) as cm:
direct_client.direct_put_container(
self.node, self.part, self.account, self.container)
self.assertEqual('resp failed', str(cm.exception))
def test_direct_put_container_object(self):
headers = {'x-foo': 'bar'}
with mocked_http_conn(204) as conn:
rv = direct_client.direct_put_container_object(
self.node, self.part, self.account, self.container, self.obj,
headers=headers)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'PUT')
self.assertEqual(conn.path, self.obj_path)
self.assertTrue('x-timestamp' in conn.req_headers)
self.assertEqual('bar', conn.req_headers.get('x-foo'))
self.assertIsNone(rv)
def test_direct_put_container_object_error(self):
with mocked_http_conn(500) as conn:
with self.assertRaises(ClientException) as raised:
direct_client.direct_put_container_object(
self.node, self.part, self.account, self.container,
self.obj)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'PUT')
self.assertEqual(conn.path, self.obj_path)
self.assertEqual(raised.exception.http_status, 500)
self.assertTrue('PUT' in str(raised.exception))
def test_direct_delete_container_object(self):
with mocked_http_conn(204) as conn:
rv = direct_client.direct_delete_container_object(
self.node, self.part, self.account, self.container, self.obj)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'DELETE')
self.assertEqual(conn.path, self.obj_path)
self.assertIsNone(rv)
def test_direct_delete_container_obj_error(self):
with mocked_http_conn(500) as conn:
with self.assertRaises(ClientException) as raised:
direct_client.direct_delete_container_object(
self.node, self.part, self.account, self.container,
self.obj)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'DELETE')
self.assertEqual(conn.path, self.obj_path)
self.assertEqual(raised.exception.http_status, 500)
self.assertTrue('DELETE' in str(raised.exception))
def test_direct_head_object(self):
headers = HeaderKeyDict({'x-foo': 'bar'})
with mocked_http_conn(200, headers) as conn:
resp = direct_client.direct_head_object(
self.node, self.part, self.account, self.container,
self.obj, headers=headers)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'HEAD')
self.assertEqual(conn.path, self.obj_path)
self.assertEqual(conn.req_headers['user-agent'], self.user_agent)
self.assertEqual('bar', conn.req_headers.get('x-foo'))
self.assertIn('x-timestamp', conn.req_headers)
self.assertEqual(headers, resp)
def test_direct_head_object_error(self):
with mocked_http_conn(500) as conn:
with self.assertRaises(ClientException) as raised:
direct_client.direct_head_object(
self.node, self.part, self.account, self.container,
self.obj)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'HEAD')
self.assertEqual(conn.path, self.obj_path)
self.assertEqual(raised.exception.http_status, 500)
self.assertTrue('HEAD' in str(raised.exception))
    def test_direct_head_object_not_found(self):
        """A 404 on object HEAD carries the backend timestamp header."""
        important_timestamp = Timestamp.now().internal
        stub_headers = {'X-Backend-Important-Timestamp': important_timestamp}
        with mocked_http_conn(404, headers=stub_headers) as conn:
            with self.assertRaises(ClientException) as raised:
                direct_client.direct_head_object(
                    self.node, self.part, self.account, self.container,
                    self.obj)
        self.assertEqual(conn.host, self.node['ip'])
        self.assertEqual(conn.port, self.node['port'])
        self.assertEqual(conn.method, 'HEAD')
        self.assertEqual(conn.path, self.obj_path)
        self.assertEqual(raised.exception.http_status, 404)
        # note: the header was set mixed-case above but looked up lower-case,
        # so the exception's header dict must support case-insensitive keys
        self.assertEqual(
            raised.exception.http_headers['x-backend-important-timestamp'],
            important_timestamp)
def test_direct_get_object(self):
contents = io.BytesIO(b'123456')
with mocked_http_conn(200, body=contents) as conn:
resp_header, obj_body = direct_client.direct_get_object(
self.node, self.part, self.account, self.container, self.obj)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'GET')
self.assertEqual(conn.path, self.obj_path)
self.assertEqual(obj_body, contents.getvalue())
def test_direct_get_object_error(self):
with mocked_http_conn(500) as conn:
with self.assertRaises(ClientException) as raised:
direct_client.direct_get_object(
self.node, self.part,
self.account, self.container, self.obj)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'GET')
self.assertEqual(conn.path, self.obj_path)
self.assertEqual(raised.exception.http_status, 500)
self.assertTrue('GET' in str(raised.exception))
def test_direct_get_object_chunks(self):
contents = io.BytesIO(b'123456')
with mocked_http_conn(200, body=contents) as conn:
resp_header, obj_body = direct_client.direct_get_object(
self.node, self.part, self.account, self.container, self.obj,
resp_chunk_size=2)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual('GET', conn.method)
self.assertEqual(self.obj_path, conn.path)
self.assertEqual([b'12', b'34', b'56'], list(obj_body))
def test_direct_post_object(self):
headers = {'Key': 'value'}
resp_headers = []
with mocked_http_conn(200, resp_headers) as conn:
direct_client.direct_post_object(
self.node, self.part, self.account, self.container, self.obj,
headers)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'POST')
self.assertEqual(conn.path, self.obj_path)
for header in headers:
self.assertEqual(conn.req_headers[header], headers[header])
def test_direct_post_object_error(self):
headers = {'Key': 'value'}
with mocked_http_conn(500) as conn:
with self.assertRaises(ClientException) as raised:
direct_client.direct_post_object(
self.node, self.part, self.account, self.container,
self.obj, headers)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'POST')
self.assertEqual(conn.path, self.obj_path)
for header in headers:
self.assertEqual(conn.req_headers[header], headers[header])
self.assertEqual(conn.req_headers['user-agent'], self.user_agent)
self.assertTrue('x-timestamp' in conn.req_headers)
self.assertEqual(raised.exception.http_status, 500)
self.assertTrue('POST' in str(raised.exception))
def test_direct_delete_object(self):
with mocked_http_conn(200) as conn:
resp = direct_client.direct_delete_object(
self.node, self.part, self.account, self.container, self.obj)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'DELETE')
self.assertEqual(conn.path, self.obj_path)
self.assertIsNone(resp)
def test_direct_delete_object_with_timestamp(self):
# ensure timestamp is different from any that might be auto-generated
timestamp = Timestamp(time.time() - 100)
headers = {'X-Timestamp': timestamp.internal}
with mocked_http_conn(200) as conn:
direct_client.direct_delete_object(
self.node, self.part, self.account, self.container, self.obj,
headers=headers)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'DELETE')
self.assertEqual(conn.path, self.obj_path)
self.assertTrue('X-Timestamp' in conn.req_headers)
self.assertEqual(timestamp, conn.req_headers['X-Timestamp'])
def test_direct_delete_object_error(self):
with mocked_http_conn(503) as conn:
with self.assertRaises(ClientException) as raised:
direct_client.direct_delete_object(
self.node, self.part, self.account, self.container,
self.obj)
self.assertEqual(conn.method, 'DELETE')
self.assertEqual(conn.path, self.obj_path)
self.assertEqual(raised.exception.http_status, 503)
self.assertTrue('DELETE' in str(raised.exception))
def test_direct_get_suffix_hashes(self):
data = {'a83': 'c130a2c17ed45102aada0f4eee69494ff'}
body = pickle.dumps(data)
with mocked_http_conn(200, {}, body) as conn:
resp = direct_client.direct_get_suffix_hashes(self.node,
self.part, ['a83'])
self.assertEqual(conn.method, 'REPLICATE')
self.assertEqual(conn.path, '/sda/0/a83')
self.assertEqual(conn.host, self.node['replication_ip'])
self.assertEqual(conn.port, self.node['replication_port'])
self.assertEqual(data, resp)
    def _test_direct_get_suffix_hashes_fail(self, status_code):
        # Shared helper: a failing REPLICATE response must raise
        # DirectClientException whose message names the verb, the (quoted)
        # suffix path and the replication endpoint.
        with mocked_http_conn(status_code):
            with self.assertRaises(DirectClientException) as cm:
                direct_client.direct_get_suffix_hashes(
                    self.node, self.part, ['a83', 'b52'])
        self.assertIn('REPLICATE', cm.exception.args[0])
        self.assertIn(quote('/%s/%s/a83-b52'
                            % (self.node['device'], self.part)),
                      cm.exception.args[0])
        self.assertIn(self.node['replication_ip'], cm.exception.args[0])
        self.assertIn(self.node['replication_port'], cm.exception.args[0])
        # structured attributes mirror the message contents
        self.assertEqual(self.node['replication_ip'], cm.exception.http_host)
        self.assertEqual(self.node['replication_port'], cm.exception.http_port)
        self.assertEqual(self.node['device'], cm.exception.http_device)
        self.assertEqual(status_code, cm.exception.http_status)
    def test_direct_get_suffix_hashes_503(self):
        # server error -> DirectClientException
        self._test_direct_get_suffix_hashes_fail(503)
    def test_direct_get_suffix_hashes_507(self):
        # insufficient storage -> DirectClientException
        self._test_direct_get_suffix_hashes_fail(507)
def test_direct_put_object_with_content_length(self):
contents = io.BytesIO(b'123456')
with mocked_http_conn(200) as conn:
resp = direct_client.direct_put_object(
self.node, self.part, self.account, self.container, self.obj,
contents, 6)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'PUT')
self.assertEqual(conn.path, self.obj_path)
self.assertEqual(md5(b'123456').hexdigest(), resp)
def test_direct_put_object_fail(self):
contents = io.BytesIO(b'123456')
with mocked_http_conn(500) as conn:
with self.assertRaises(ClientException) as raised:
direct_client.direct_put_object(
self.node, self.part, self.account, self.container,
self.obj, contents)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'PUT')
self.assertEqual(conn.path, self.obj_path)
self.assertEqual(raised.exception.http_status, 500)
def test_direct_put_object_chunked(self):
contents = io.BytesIO(b'123456')
with mocked_http_conn(200) as conn:
resp = direct_client.direct_put_object(
self.node, self.part, self.account, self.container, self.obj,
contents)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'PUT')
self.assertEqual(conn.path, self.obj_path)
self.assertEqual(md5(b'6\r\n123456\r\n0\r\n\r\n').hexdigest(), resp)
def test_direct_put_object_args(self):
# One test to cover all missing checks
contents = ""
with mocked_http_conn(200) as conn:
resp = direct_client.direct_put_object(
self.node, self.part, self.account, self.container, self.obj,
contents, etag="testing-etag", content_type='Text')
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual('PUT', conn.method)
self.assertEqual(self.obj_path, conn.path)
self.assertEqual(conn.req_headers['Content-Length'], '0')
self.assertEqual(conn.req_headers['Content-Type'], 'Text')
self.assertEqual(md5(b'0\r\n\r\n').hexdigest(), resp)
def test_direct_put_object_header_content_length(self):
contents = io.BytesIO(b'123456')
stub_headers = HeaderKeyDict({
'Content-Length': '6'})
with mocked_http_conn(200) as conn:
resp = direct_client.direct_put_object(
self.node, self.part, self.account, self.container, self.obj,
contents, headers=stub_headers)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual('PUT', conn.method)
self.assertEqual(conn.req_headers['Content-length'], '6')
self.assertEqual(md5(b'123456').hexdigest(), resp)
def test_retry(self):
headers = HeaderKeyDict({'key': 'value'})
with mocked_http_conn(200, headers) as conn:
attempts, resp = direct_client.retry(
direct_client.direct_head_object, self.node, self.part,
self.account, self.container, self.obj)
self.assertEqual(conn.method, 'HEAD')
self.assertEqual(conn.path, self.obj_path)
self.assertEqual(conn.req_headers['user-agent'], self.user_agent)
self.assertEqual(headers, resp)
self.assertEqual(attempts, 1)
    def test_retry_client_exception(self):
        # retry() should sleep between attempts and, after exhausting
        # retries, re-raise the last ClientException with full node details.
        logger = debug_logger('direct-client-test')
        with mock.patch('swift.common.direct_client.sleep') as mock_sleep, \
                mocked_http_conn(500) as conn:
            with self.assertRaises(direct_client.ClientException) as err_ctx:
                direct_client.retry(direct_client.direct_delete_object,
                                    self.node, self.part,
                                    self.account, self.container, self.obj,
                                    retries=2, error_log=logger.error)
        self.assertEqual('DELETE', conn.method)
        self.assertEqual(err_ctx.exception.http_status, 500)
        # the message names the verb, path and endpoint...
        self.assertIn('DELETE', err_ctx.exception.args[0])
        self.assertIn(self.obj_path,
                      err_ctx.exception.args[0])
        self.assertIn(self.node['ip'], err_ctx.exception.args[0])
        self.assertIn(self.node['port'], err_ctx.exception.args[0])
        # ...and the structured attributes mirror it
        self.assertEqual(self.node['ip'], err_ctx.exception.http_host)
        self.assertEqual(self.node['port'], err_ctx.exception.http_port)
        self.assertEqual(self.node['device'], err_ctx.exception.http_device)
        self.assertEqual(500, err_ctx.exception.http_status)
        # back-off grows linearly: sleep(1) then sleep(2)
        self.assertEqual([mock.call(1), mock.call(2)],
                         mock_sleep.call_args_list)
        # initial attempt + 2 retries = 3 logged errors
        error_lines = logger.get_lines_for_level('error')
        self.assertEqual(3, len(error_lines))
        for line in error_lines:
            self.assertIn('500 Internal Error', line)
    def test_retry_http_exception(self):
        # Unlike ClientException, an HTTPException is re-raised unchanged
        # after retries are exhausted; back-off and logging still happen.
        logger = debug_logger('direct-client-test')
        with mock.patch('swift.common.direct_client.sleep') as mock_sleep, \
                mocked_http_conn(HTTPException('Kaboom!')) as conn:
            with self.assertRaises(HTTPException) as err_ctx:
                direct_client.retry(direct_client.direct_delete_object,
                                    self.node, self.part,
                                    self.account, self.container, self.obj,
                                    retries=2, error_log=logger.error)
        self.assertEqual('DELETE', conn.method)
        self.assertEqual('Kaboom!', str(err_ctx.exception))
        # back-off grows linearly: sleep(1) then sleep(2)
        self.assertEqual([mock.call(1), mock.call(2)],
                         mock_sleep.call_args_list)
        # initial attempt + 2 retries = 3 logged errors
        error_lines = logger.get_lines_for_level('error')
        self.assertEqual(3, len(error_lines))
        for line in error_lines:
            self.assertIn('Kaboom!', line)
class TestUTF8DirectClient(TestDirectClient):
    """Re-run the whole TestDirectClient suite with account/container/object
    names as UTF-8-encoded bytes instead of text."""
    def setUp(self):
        super(TestUTF8DirectClient, self).setUp()
        # exercise the bytes path through direct_client; the base setUp
        # presumably sets these as text — TODO confirm in TestDirectClient
        self.account = self.account.encode('utf-8')
        self.container = self.container.encode('utf-8')
        self.obj = self.obj.encode('utf-8')
if __name__ == '__main__':
    # allow running this test module directly
    unittest.main()
|
{
"pile_set_name": "Github"
}
|
/* apps/apps.h */
/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
* All rights reserved.
*
* This package is an SSL implementation written
* by Eric Young (eay@cryptsoft.com).
* The implementation was written so as to conform with Netscapes SSL.
*
* This library is free for commercial and non-commercial use as long as
* the following conditions are aheared to. The following conditions
* apply to all code found in this distribution, be it the RC4, RSA,
* lhash, DES, etc., code; not just the SSL code. The SSL documentation
* included with this distribution is covered by the same copyright terms
* except that the holder is Tim Hudson (tjh@cryptsoft.com).
*
* Copyright remains Eric Young's, and as such any Copyright notices in
* the code are not to be removed.
* If this package is used in a product, Eric Young should be given attribution
* as the author of the parts of the library used.
* This can be in the form of a textual message at program startup or
* in documentation (online or textual) provided with the package.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* "This product includes cryptographic software written by
* Eric Young (eay@cryptsoft.com)"
* The word 'cryptographic' can be left out if the rouines from the library
* being used are not cryptographic related :-).
* 4. If you include any Windows specific code (or a derivative thereof) from
* the apps directory (application code) you must include an acknowledgement:
* "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
*
* THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* The licence and distribution terms for any publically available version or
* derivative of this code cannot be changed. i.e. this code cannot simply be
* copied and put under another distribution licence
* [including the GNU Public Licence.]
*/
/* ====================================================================
* Copyright (c) 1998-2001 The OpenSSL Project. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* 3. All advertising materials mentioning features or use of this
* software must display the following acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
*
* 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
* endorse or promote products derived from this software without
* prior written permission. For written permission, please contact
* openssl-core@openssl.org.
*
* 5. Products derived from this software may not be called "OpenSSL"
* nor may "OpenSSL" appear in their names without prior written
* permission of the OpenSSL Project.
*
* 6. Redistributions of any form whatsoever must retain the following
* acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit (http://www.openssl.org/)"
*
* THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
* EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
* ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
* ====================================================================
*
* This product includes cryptographic software written by Eric Young
* (eay@cryptsoft.com). This product includes software written by Tim
* Hudson (tjh@cryptsoft.com).
*
*/
#ifndef HEADER_APPS_H
# define HEADER_APPS_H
# include "e_os.h"
# include <openssl/bio.h>
# include <openssl/x509.h>
# include <openssl/lhash.h>
# include <openssl/conf.h>
# include <openssl/txt_db.h>
# ifndef OPENSSL_NO_ENGINE
# include <openssl/engine.h>
# endif
# ifndef OPENSSL_NO_OCSP
# include <openssl/ocsp.h>
# endif
# include <openssl/ossl_typ.h>
/* RAND seed-file helpers shared by the apps (definitions elsewhere in the
 * apps sources); `bio_e' receives error output throughout. */
int app_RAND_load_file(const char *file, BIO *bio_e, int dont_warn);
int app_RAND_write_file(const char *file, BIO *bio_e);
/*
 * When `file' is NULL, use defaults. `bio_e' is for error messages.
 */
void app_RAND_allow_write_file(void);
long app_RAND_load_files(char *file); /* `file' is a list of files to read,
                                       * separated by LIST_SEPARATOR_CHAR
                                       * (see e_os.h). The string is
                                       * destroyed! */
/* Build-mode plumbing: stand-alone apps get a real main() and define the
 * `config'/`bio_err' globals themselves (unless NON_MAIN); in a MONOLITH
 * build each app becomes PROG() inside the single `openssl' binary and
 * the globals are defined centrally. */
# ifndef MONOLITH
# define MAIN(a,v) main(a,v)
# ifndef NON_MAIN
CONF *config = NULL;
BIO *bio_err = NULL;
# else
extern CONF *config;
extern BIO *bio_err;
# endif
# else
# define MAIN(a,v) PROG(a,v)
extern CONF *config;
extern char *default_config_file;
extern BIO *bio_err;
# endif
/* <signal.h> is unavailable on NetWare. */
# ifndef OPENSSL_SYS_NETWARE
# include <signal.h>
# endif
/* Ignore SIGPIPE where it exists, so writes to closed pipes/sockets
 * return an error instead of killing the process; no-op otherwise. */
# ifdef SIGPIPE
# define do_pipe_sig() signal(SIGPIPE,SIG_IGN)
# else
# define do_pipe_sig()
# endif
/* zlib teardown is only meaningful when compression is compiled in. */
# ifdef OPENSSL_NO_COMP
# define zlib_cleanup()
# else
# define zlib_cleanup() COMP_zlib_cleanup()
# endif
/* Per-app library init/teardown.  In a MONOLITH build the shared entry
 * point does the full init once, so these collapse to (almost) nothing;
 * stand-alone builds load error strings, algorithms, the UI method and
 * (when enabled) the built-in ENGINEs, and undo all of it on shutdown. */
# if defined(MONOLITH) && !defined(OPENSSL_C)
# define apps_startup() \
do_pipe_sig()
# define apps_shutdown()
# else
# ifndef OPENSSL_NO_ENGINE
# define apps_startup() \
do { do_pipe_sig(); CRYPTO_malloc_init(); \
ERR_load_crypto_strings(); OpenSSL_add_all_algorithms(); \
ENGINE_load_builtin_engines(); setup_ui_method(); } while(0)
# define apps_shutdown() \
do { CONF_modules_unload(1); destroy_ui_method(); \
OBJ_cleanup(); EVP_cleanup(); ENGINE_cleanup(); \
CRYPTO_cleanup_all_ex_data(); ERR_remove_thread_state(NULL); \
RAND_cleanup(); \
ERR_free_strings(); zlib_cleanup();} while(0)
# else
# define apps_startup() \
do { do_pipe_sig(); CRYPTO_malloc_init(); \
ERR_load_crypto_strings(); OpenSSL_add_all_algorithms(); \
setup_ui_method(); } while(0)
# define apps_shutdown() \
do { CONF_modules_unload(1); destroy_ui_method(); \
OBJ_cleanup(); EVP_cleanup(); \
CRYPTO_cleanup_all_ex_data(); ERR_remove_thread_state(NULL); \
RAND_cleanup(); \
ERR_free_strings(); zlib_cleanup(); } while(0)
# endif
# endif
/* Windows' FD_SET takes an unsigned socket handle, so cast only there. */
# if defined(OPENSSL_SYSNAME_WIN32) || defined(OPENSSL_SYSNAME_WINCE)
# define openssl_fdset(a,b) FD_SET((unsigned int)a, b)
# else
# define openssl_fdset(a,b) FD_SET(a, b)
# endif
/* argv-style argument list (filled in by chopup_args()). */
typedef struct args_st {
    char **data;    /* array of argument strings */
    int count;      /* number of entries in data */
} ARGS;
/* Minimum password length accepted by password_callback(). */
# define PW_MIN_LENGTH 4
/* Context passed through to password_callback(). */
typedef struct pw_cb_data {
    const void *password;    /* preset password; NULL presumably means
                              * "prompt the user" — verify in apps.c */
    const char *prompt_info; /* extra text for the prompt */
} PW_CB_DATA;
/* UI / password plumbing. */
int password_callback(char *buf, int bufsiz, int verify, PW_CB_DATA *cb_data);
int setup_ui_method(void);
void destroy_ui_method(void);
int should_retry(int i);
/* Command-line and string utilities. */
int args_from_file(char *file, int *argc, char **argv[]);
int str2fmt(char *s);
void program_name(char *in, char *out, int size);
int chopup_args(ARGS *arg, char *buf, int *argc, char **argv[]);
# ifdef HEADER_X509_H
int dump_cert_text(BIO *out, X509 *x);
void print_name(BIO *out, const char *title, X509_NAME *nm,
                unsigned long lflags);
# endif
int set_cert_ex(unsigned long *flags, const char *arg);
int set_name_ex(unsigned long *flags, const char *arg);
int set_ext_copy(int *copy_type, const char *arg);
int copy_extensions(X509 *x, X509_REQ *req, int copy_type);
int app_passwd(BIO *err, char *arg1, char *arg2, char **pass1, char **pass2);
int add_oid_section(BIO *err, CONF *conf);
/* Loaders for certs, CRLs and keys shared by the apps. */
X509 *load_cert(BIO *err, const char *file, int format,
                const char *pass, ENGINE *e, const char *cert_descrip);
X509_CRL *load_crl(const char *infile, int format);
int load_cert_crl_http(const char *url, BIO *err,
                       X509 **pcert, X509_CRL **pcrl);
EVP_PKEY *load_key(BIO *err, const char *file, int format, int maybe_stdin,
                   const char *pass, ENGINE *e, const char *key_descrip);
EVP_PKEY *load_pubkey(BIO *err, const char *file, int format, int maybe_stdin,
                      const char *pass, ENGINE *e, const char *key_descrip);
STACK_OF(X509) *load_certs(BIO *err, const char *file, int format,
                           const char *pass, ENGINE *e,
                           const char *cert_descrip);
STACK_OF(X509_CRL) *load_crls(BIO *err, const char *file, int format,
                              const char *pass, ENGINE *e,
                              const char *cert_descrip);
X509_STORE *setup_verify(BIO *bp, char *CAfile, char *CApath);
/* ENGINE setup/teardown. */
ENGINE *setup_engine(BIO *err, const char *engine, int debug);
void release_engine(ENGINE *e);
# ifndef OPENSSL_NO_OCSP
OCSP_RESPONSE *process_responder(BIO *err, OCSP_REQUEST *req,
                                 const char *host, const char *path,
                                 const char *port, int use_ssl,
                                 const STACK_OF(CONF_VALUE) *headers,
                                 int req_timeout);
# endif
/* Configuration-file handling. */
int load_config(BIO *err, CONF *cnf);
char *make_config_name(void);
/* Functions defined in ca.c and also used in ocsp.c */
int unpack_revinfo(ASN1_TIME **prevtm, int *preason, ASN1_OBJECT **phold,
                   ASN1_GENERALIZEDTIME **pinvtm, const char *str);
# define DB_type 0
# define DB_exp_date 1
# define DB_rev_date 2
# define DB_serial 3 /* index - unique */
# define DB_file 4
# define DB_name 5 /* index - unique when active and not
* disabled */
# define DB_NUMBER 6
# define DB_TYPE_REV 'R'
# define DB_TYPE_EXP 'E'
# define DB_TYPE_VAL 'V'
/* Attributes of a CA index database (see CA_DB below). */
typedef struct db_attr_st {
    int unique_subject;         /* presumably nonzero enforces unique subject names — confirm in ca.c */
} DB_ATTR;
/* CA index database: the underlying TXT_DB plus its attributes
 * (loaded/saved via load_index()/save_index()). */
typedef struct ca_db_st {
    DB_ATTR attributes;
    TXT_DB *db;                 /* underlying text database */
} CA_DB;
BIGNUM *load_serial(char *serialfile, int create, ASN1_INTEGER **retai);
int save_serial(char *serialfile, char *suffix, BIGNUM *serial,
ASN1_INTEGER **retai);
int rotate_serial(char *serialfile, char *new_suffix, char *old_suffix);
int rand_serial(BIGNUM *b, ASN1_INTEGER *ai);
CA_DB *load_index(char *dbfile, DB_ATTR *dbattr);
int index_index(CA_DB *db);
int save_index(const char *dbfile, const char *suffix, CA_DB *db);
int rotate_index(const char *dbfile, const char *new_suffix,
const char *old_suffix);
void free_index(CA_DB *db);
# define index_name_cmp_noconst(a, b) \
index_name_cmp((const OPENSSL_CSTRING *)CHECKED_PTR_OF(OPENSSL_STRING, a), \
(const OPENSSL_CSTRING *)CHECKED_PTR_OF(OPENSSL_STRING, b))
int index_name_cmp(const OPENSSL_CSTRING *a, const OPENSSL_CSTRING *b);
int parse_yesno(const char *str, int def);
X509_NAME *parse_name(char *str, long chtype, int multirdn);
int args_verify(char ***pargs, int *pargc,
int *badarg, BIO *err, X509_VERIFY_PARAM **pm);
void policies_print(BIO *out, X509_STORE_CTX *ctx);
int bio_to_mem(unsigned char **out, int maxlen, BIO *in);
int pkey_ctrl_string(EVP_PKEY_CTX *ctx, const char *value);
int init_gen_str(BIO *err, EVP_PKEY_CTX **pctx,
const char *algname, ENGINE *e, int do_param);
int do_X509_sign(BIO *err, X509 *x, EVP_PKEY *pkey, const EVP_MD *md,
STACK_OF(OPENSSL_STRING) *sigopts);
int do_X509_REQ_sign(BIO *err, X509_REQ *x, EVP_PKEY *pkey, const EVP_MD *md,
STACK_OF(OPENSSL_STRING) *sigopts);
int do_X509_CRL_sign(BIO *err, X509_CRL *x, EVP_PKEY *pkey, const EVP_MD *md,
STACK_OF(OPENSSL_STRING) *sigopts);
# ifndef OPENSSL_NO_PSK
extern char *psk_key;
# endif
# ifndef OPENSSL_NO_JPAKE
void jpake_client_auth(BIO *out, BIO *conn, const char *secret);
void jpake_server_auth(BIO *out, BIO *conn, const char *secret);
# endif
# ifndef OPENSSL_NO_TLSEXT
unsigned char *next_protos_parse(unsigned short *outlen, const char *in);
# endif /* ndef OPENSSL_NO_TLSEXT */
void print_cert_checks(BIO *bio, X509 *x,
const char *checkhost,
const char *checkemail, const char *checkip);
void store_setup_crl_download(X509_STORE *st);
# define FORMAT_UNDEF 0
# define FORMAT_ASN1 1
# define FORMAT_TEXT 2
# define FORMAT_PEM 3
# define FORMAT_NETSCAPE 4
# define FORMAT_PKCS12 5
# define FORMAT_SMIME 6
# define FORMAT_ENGINE 7
# define FORMAT_IISSGC 8 /* XXX this stupid macro helps us to avoid
* adding yet another param to load_*key() */
# define FORMAT_PEMRSA 9 /* PEM RSAPublicKey format */
# define FORMAT_ASN1RSA 10 /* DER RSAPublicKey format */
# define FORMAT_MSBLOB 11 /* MS Key blob format */
# define FORMAT_PVK 12 /* MS PVK file format */
# define FORMAT_HTTP 13 /* Download using HTTP */
# define EXT_COPY_NONE 0
# define EXT_COPY_ADD 1
# define EXT_COPY_ALL 2
# define NETSCAPE_CERT_HDR "certificate"
# define APP_PASS_LEN 1024
# define SERIAL_RAND_BITS 64
int app_isdir(const char *);
int fileno_stdin(void);
int fileno_stdout(void);
int raw_read_stdin(void *, int);
int raw_write_stdout(const void *, int);
# define TM_START 0
# define TM_STOP 1
double app_tminterval(int stop, int usertime);
# define OPENSSL_NO_SSL_INTERN
#endif
|
{
"pile_set_name": "Github"
}
|
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Name: sfp_phishstats
# Purpose: Spiderfoot plugin to search PhishStats API
# to determine if an IP is malicious
#
# Author: Krishnasis Mandal <krishnasis@hotmail.com>
#
# Created: 18/05/2020
# Copyright: (c) Steve Micallef
# Licence: GPL
# -------------------------------------------------------------------------------
import json
import urllib.error
import urllib.parse
import urllib.request
from netaddr import IPNetwork
from spiderfoot import SpiderFootEvent, SpiderFootPlugin
class sfp_phishstats(SpiderFootPlugin):
    """SpiderFoot plugin that queries the PhishStats API to determine
    whether an IP address has been reported as hosting phishing content."""

    meta = {
        'name': "PhishStats",
        'summary': "Determine if an IP Address is malicious",
        # FIX: was [""] — a single empty-string flag; this module has no flags.
        'flags': [],
        'useCases': ["Investigate", "Passive"],
        'categories': ["Reputation Systems"],
        'dataSource': {
            'website': "https://phishstats.info/",
            'model': "FREE_NOAUTH_UNLIMITED",
            'references': [
                "https://phishstats.info/#apidoc"
            ],
            'favIcon': "https://phishstats.info/phish.ico",
            'logo': "",
            'description': "PhishStats - is a real time Phishing database that gathers phishing URLs from several sources.",
        }
    }

    opts = {
        'checkaffiliates': True,
        'subnetlookup': False,
        'netblocklookup': True,
        'maxnetblock': 24,
        'maxsubnet': 24
    }

    # Option descriptions. Delete any options not applicable to this module.
    optdescs = {
        'checkaffiliates': "Check affiliates?",
        'subnetlookup': "Look up all IPs on subnets which your target is a part of?",
        'netblocklookup': "Look up all IPs on netblocks deemed to be owned by your target for possible blacklisted hosts on the same target subdomain/domain?",
        'maxnetblock': "If looking up owned netblocks, the maximum netblock size to look up all IPs within (CIDR value, 24 = /24, 16 = /16, etc.)",
        'maxsubnet': "If looking up subnets, the maximum subnet size to look up all the IPs within (CIDR value, 24 = /24, 16 = /16, etc.)"
    }

    results = None
    errorState = False

    def setup(self, sfc, userOpts=dict()):
        """Initialise the module with the SpiderFoot context and user options."""
        self.sf = sfc
        self.results = self.tempStorage()

        for opt in list(userOpts.keys()):
            self.opts[opt] = userOpts[opt]

    # What events is this module interested in for input
    # For a list of all events, check sfdb.py.
    def watchedEvents(self):
        return [
            "IP_ADDRESS",
            "NETBLOCK_OWNER",
            "NETBLOCK_MEMBER",
            "AFFILIATE_IPADDR"
        ]

    # What events this module produces
    def producedEvents(self):
        return [
            "IP_ADDRESS",
            "MALICIOUS_IPADDR",
            "RAW_RIR_DATA",
            "MALICIOUS_AFFILIATE_IPADDR"
        ]

    def queryIPAddress(self, qry):
        """Query the PhishStats API (https://phishstats.info/) for an IP address.

        Returns the parsed JSON response (a list of phishing records, at
        most one due to _size=1), or None on HTTP failure or bad JSON.
        """
        params = {
            '_where': "(ip,eq," + qry.encode('raw_unicode_escape').decode("ascii", errors='replace') + ")",
            '_size': 1
        }

        headers = {
            'Accept': "application/json",
        }

        res = self.sf.fetchUrl(
            'https://phishstats.info:2096/api/phishing?' + urllib.parse.urlencode(params),
            headers=headers,
            timeout=15,
            useragent=self.opts['_useragent']
        )

        # fetchUrl reports the HTTP status code as a string.
        if res['code'] != "200":
            self.sf.debug("No information found from Phishstats for IP Address")
            return None

        try:
            return json.loads(res['content'])
        except Exception as e:
            self.sf.error(f"Error processing JSON response: {e}")

        return None

    def handleEvent(self, event):
        """Process an incoming event, querying PhishStats for each relevant IP."""
        eventName = event.eventType
        srcModuleName = event.module
        eventData = event.data

        if self.errorState:
            return

        self.sf.debug(f"Received event, {eventName}, from {srcModuleName}")

        # Don't look up stuff twice
        if eventData in self.results:
            self.sf.debug(f"Skipping {eventData}, already checked.")
            return

        self.results[eventData] = True

        if eventName == 'NETBLOCK_OWNER':
            if not self.opts['netblocklookup']:
                return
            if IPNetwork(eventData).prefixlen < self.opts['maxnetblock']:
                self.sf.debug("Network size bigger than permitted: "
                              + str(IPNetwork(eventData).prefixlen) + " > "
                              + str(self.opts['maxnetblock']))
                return

        if eventName == 'NETBLOCK_MEMBER':
            if not self.opts['subnetlookup']:
                return
            if IPNetwork(eventData).prefixlen < self.opts['maxsubnet']:
                self.sf.debug("Network size bigger than permitted: "
                              + str(IPNetwork(eventData).prefixlen) + " > "
                              + str(self.opts['maxsubnet']))
                return

        qrylist = list()
        if eventName.startswith("NETBLOCK_"):
            # Expand the netblock and mark every address as already checked.
            for ipaddr in IPNetwork(eventData):
                qrylist.append(str(ipaddr))
                self.results[str(ipaddr)] = True
        else:
            # If user has enabled affiliate checking
            if eventName == "AFFILIATE_IPADDR" and not self.opts['checkaffiliates']:
                return
            qrylist.append(eventData)

        for addr in qrylist:
            if self.checkForStop():
                return

            data = self.queryIPAddress(addr)

            # A failed query likely means the API is unavailable; stop
            # querying the remaining addresses rather than hammering it.
            if data is None:
                break

            try:
                maliciousIP = data[0].get('ip')
            except Exception:
                # An empty result list means no record exists for this address.
                continue

            if maliciousIP is None:
                continue

            if addr != maliciousIP:
                self.sf.error("Reported address doesn't match requested, skipping")
                continue

            # Data is reported about the IP Address.
            # (FIX: merged two identical adjacent startswith("NETBLOCK_") checks.)
            if eventName.startswith("NETBLOCK_"):
                ipEvt = SpiderFootEvent("IP_ADDRESS", addr, self.__name__, event)
                self.notifyListeners(ipEvt)
                evt = SpiderFootEvent("RAW_RIR_DATA", str(data), self.__name__, ipEvt)
                self.notifyListeners(evt)
            else:
                evt = SpiderFootEvent("RAW_RIR_DATA", str(data), self.__name__, event)
                self.notifyListeners(evt)

            maliciousIPDesc = f"Phishstats [{maliciousIP}]\n"
            maliciousIPDescHash = self.sf.hashstring(maliciousIPDesc)
            if maliciousIPDescHash in self.results:
                continue
            self.results[maliciousIPDescHash] = True

            if eventName.startswith("NETBLOCK_"):
                evt = SpiderFootEvent("MALICIOUS_IPADDR", maliciousIPDesc, self.__name__, ipEvt)
            elif eventName.startswith("AFFILIATE_"):
                evt = SpiderFootEvent("MALICIOUS_AFFILIATE_IPADDR", maliciousIPDesc, self.__name__, event)
            else:
                evt = SpiderFootEvent("MALICIOUS_IPADDR", maliciousIPDesc, self.__name__, event)

            self.notifyListeners(evt)

# End of sfp_phishstats class
|
{
"pile_set_name": "Github"
}
|
/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
*
* Copyright 1997-2014 Oracle and/or its affiliates. All rights reserved.
*
* Oracle and Java are registered trademarks of Oracle and/or its affiliates.
* Other names may be trademarks of their respective owners.
*
* The contents of this file are subject to the terms of either the GNU
* General Public License Version 2 only ("GPL") or the Common
* Development and Distribution License("CDDL") (collectively, the
* "License"). You may not use this file except in compliance with the
* License. You can obtain a copy of the License at
* http://www.netbeans.org/cddl-gplv2.html
* or nbbuild/licenses/CDDL-GPL-2-CP. See the License for the
* specific language governing permissions and limitations under the
* License. When distributing the software, include this License Header
* Notice in each file and include the License file at
* nbbuild/licenses/CDDL-GPL-2-CP. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the GPL Version 2 section of the License file that
* accompanied this code. If applicable, add the following below the
* License Header, with the fields enclosed by brackets [] replaced by
* your own identifying information:
* "Portions Copyrighted [year] [name of copyright owner]"
*
* Contributor(s):
* The Original Software is NetBeans. The Initial Developer of the Original
* Software is Sun Microsystems, Inc. Portions Copyright 1997-2006 Sun
* Microsystems, Inc. All Rights Reserved.
*
* If you wish your version of this file to be governed by only the CDDL
* or only the GPL Version 2, indicate your decision by adding
* "[Contributor] elects to include this software in this distribution
* under the [CDDL or GPL Version 2] license." If you do not indicate a
* single choice of license, a recipient has the option to distribute
* your version of this file under either the CDDL, the GPL Version 2 or
* to extend the choice of license to its licensees as provided above.
* However, if you add GPL Version 2 code and therefore, elected the GPL
* Version 2 license, then the option applies only if the new code is
* made subject to such option by the copyright holder.
*/
package org.graalvm.visualvm.lib.ui.swing;
import java.awt.Component;
import java.awt.Graphics;
import java.awt.Insets;
import javax.swing.Action;
import javax.swing.Icon;
import javax.swing.JButton;
import javax.swing.JToolBar;
import org.graalvm.visualvm.lib.ui.UIUtils;
/**
*
* @author Jiri Sedlacek
*/
public class SmallButton extends JButton {

    /** Placeholder icon: zero width, 16px tall, paints nothing. */
    protected static final Icon NO_ICON = new Icon() {
        public int getIconWidth() { return 0; }
        public int getIconHeight() { return 16; }
        public void paintIcon(Component c, Graphics g, int x, int y) {}
    };

    // Instance initializer: runs for every constructor variant.
    {
        setDefaultCapable(false);
        if (UIUtils.isWindowsLookAndFeel()) setOpaque(false);
    }

    public SmallButton() { this(null, null); }

    public SmallButton(Icon icon) { this(null, icon); }

    public SmallButton(String text) { this(text, null); }

    public SmallButton(Action a) { super(a); }

    public SmallButton(String text, Icon icon) { setText(text); setIcon(icon); }

    /**
     * Sets the button icon, substituting NO_ICON (and removing the
     * icon/text gap) when null is passed.
     */
    public void setIcon(Icon defaultIcon) {
        Icon resolved = defaultIcon;
        if (resolved == null) {
            resolved = NO_ICON;
            setIconTextGap(0);
        }
        super.setIcon(resolved);
        if (defaultIcon != null) putClientProperty("JComponent.sizeVariant", "regular"); // NOI18N
    }

    /**
     * Returns margins tweaked for the current look and feel; horizontal
     * margins are derived from the top margin.
     */
    public Insets getMargin() {
        Insets m = super.getMargin();
        if (m == null) return null;

        boolean nimbus = UIUtils.isNimbus();
        if (getParent() instanceof JToolBar) {
            if (nimbus) {
                m.left = m.top + 3;
                m.right = m.top + 3;
            }
        } else if (nimbus) {
            m.left = m.top - 6;
            m.right = m.top - 6;
        } else {
            m.left = m.top + 3;
            m.right = m.top + 3;
        }
        return m;
    }

}
|
{
"pile_set_name": "Github"
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.type;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import lombok.extern.slf4j.Slf4j;
import org.apache.gobblin.metadata.types.Metadata;
/**
* Utilities to work with MIME content-types
*/
@Slf4j
public class ContentTypeUtils {

  private static final ContentTypeUtils INSTANCE = new ContentTypeUtils();

  public static ContentTypeUtils getInstance() {
    return INSTANCE;
  }

  /** Mapping of content-type / transfer-encoding name to charset name. */
  private ConcurrentHashMap<String, String> knownCharsets;

  /**
   * Check which character set a given content-type corresponds to.
   * @param contentType Content-type to check
   * @return Charset the mimetype represents. "BINARY" if binary data.
   */
  public String getCharset(String contentType) {
    String charSet = knownCharsets.get(contentType);
    if (charSet != null) {
      return charSet;
    }

    // Special cases: textual MIME families default to UTF-8.
    if (contentType.startsWith("text/") || contentType.endsWith("+json") || contentType.endsWith("+xml")) {
      return "UTF-8";
    }

    return "BINARY";
  }

  /**
   * Heuristic to infer if content is printable from metadata.
   * Prefers the outermost transfer-encoding, falling back to content-type.
   */
  public boolean inferPrintableFromMetadata(Metadata md) {
    String inferredCharset = "BINARY";
    List<String> transferEncoding = md.getGlobalMetadata().getTransferEncoding();
    if (transferEncoding != null) {
      inferredCharset = getCharset(transferEncoding.get(transferEncoding.size() - 1));
    } else if (md.getGlobalMetadata().getContentType() != null) {
      inferredCharset = getCharset(md.getGlobalMetadata().getContentType());
    }

    return inferredCharset.equals("UTF-8");
  }

  /**
   * Register a new contentType to charSet mapping.
   * @param contentType Content-type to register
   * @param charSet charSet associated with the content-type
   */
  public void registerCharsetMapping(String contentType, String charSet) {
    // FIX: was knownCharsets.contains(contentType) — on ConcurrentHashMap,
    // contains() is a legacy alias for containsValue(); key presence must
    // be tested with containsKey().
    if (knownCharsets.containsKey(contentType)) {
      // FIX: the message had a {} placeholder but no argument was passed.
      log.warn("{} is already registered; re-registering", contentType);
    }
    knownCharsets.put(contentType, charSet);
  }

  private ContentTypeUtils() {
    knownCharsets = new ConcurrentHashMap<>();
    knownCharsets.put("base64", "UTF-8");
    knownCharsets.put("aes_rotating", "UTF-8");
    knownCharsets.put("gzip", "BINARY");
    knownCharsets.put("application/xml", "UTF-8");
    knownCharsets.put("application/json", "UTF-8");
  }
}
|
{
"pile_set_name": "Github"
}
|
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
/*
* Routines for decoding protocol buffer data to construct in-memory representations.
*/
import (
"errors"
"fmt"
"io"
"os"
"reflect"
)
// errOverflow is returned when an integer is too large to be represented.
var errOverflow = errors.New("proto: integer overflow")
// ErrInternalBadWireType is returned by generated code when an incorrect
// wire type is encountered. It does not get returned to user code.
var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
// The fundamental decoders that interpret bytes on the wire.
// Those that take integer types all return uint64 and are
// therefore of type valueDecoder.
// DecodeVarint reads a varint-encoded integer from the slice.
// It returns the integer and the number of bytes consumed, or
// zero if there is not enough.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func DecodeVarint(buf []byte) (x uint64, n int) {
	// Accumulate 7 payload bits per byte, least-significant group first.
	for shift := uint(0); shift < 64; shift += 7 {
		if n == len(buf) {
			return 0, 0 // ran out of input mid-varint
		}
		cur := buf[n]
		n++
		x |= uint64(cur&0x7F) << shift
		if cur&0x80 == 0 {
			return x, n // continuation bit clear: value complete
		}
	}
	// More than ten continuation bytes: cannot fit in 64 bits.
	return 0, 0
}
// decodeVarintSlow is the bounds-checked varint reader used when fewer
// than ten bytes remain in the buffer. p.index advances only on success.
func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
	pos, limit := p.index, len(p.buf)
	for shift := uint(0); shift < 64; shift += 7 {
		if pos >= limit {
			err = io.ErrUnexpectedEOF
			return
		}
		cur := p.buf[pos]
		pos++
		x |= (uint64(cur) & 0x7F) << shift
		if cur < 0x80 {
			p.index = pos
			return
		}
	}
	// Value does not fit in 64 bits.
	err = errOverflow
	return
}
// DecodeVarint reads a varint-encoded integer from the Buffer.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func (p *Buffer) DecodeVarint() (x uint64, err error) {
	i := p.index
	buf := p.buf

	if i >= len(buf) {
		return 0, io.ErrUnexpectedEOF
	} else if buf[i] < 0x80 {
		// Common case: single-byte varint.
		p.index++
		return uint64(buf[i]), nil
	} else if len(buf)-i < 10 {
		// Fewer bytes remain than the 10-byte worst case; take the
		// bounds-checked slow path.
		return p.decodeVarintSlow()
	}

	// Fully unrolled fast path: each step speculatively adds the next
	// byte shifted into place, then subtracts the continuation bit back
	// out (x -= 0x80 << k) once another byte is known to follow.
	var b uint64
	// we already checked the first byte
	x = uint64(buf[i]) - 0x80
	i++

	b = uint64(buf[i])
	i++
	x += b << 7
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 7

	b = uint64(buf[i])
	i++
	x += b << 14
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 14

	b = uint64(buf[i])
	i++
	x += b << 21
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 21

	b = uint64(buf[i])
	i++
	x += b << 28
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 28

	b = uint64(buf[i])
	i++
	x += b << 35
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 35

	b = uint64(buf[i])
	i++
	x += b << 42
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 42

	b = uint64(buf[i])
	i++
	x += b << 49
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 49

	b = uint64(buf[i])
	i++
	x += b << 56
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 56

	b = uint64(buf[i])
	i++
	x += b << 63
	if b&0x80 == 0 {
		goto done
	}
	// x -= 0x80 << 63 // Always zero.

	// Tenth byte still had the continuation bit set: overflow.
	return 0, errOverflow

done:
	p.index = i
	return x, nil
}
// DecodeFixed64 reads a 64-bit integer from the Buffer.
// This is the format for the
// fixed64, sfixed64, and double protocol buffer types.
// DecodeFixed64 consumes exactly eight bytes, little-endian.
func (p *Buffer) DecodeFixed64() (x uint64, err error) {
	end := p.index + 8
	// end < 0 guards against integer overflow of the index.
	if end < 0 || end > len(p.buf) {
		return 0, io.ErrUnexpectedEOF
	}
	p.index = end
	for k := uint(0); k < 8; k++ {
		x |= uint64(p.buf[end-8+int(k)]) << (8 * k)
	}
	return x, nil
}
// DecodeFixed32 reads a 32-bit integer from the Buffer.
// This is the format for the
// fixed32, sfixed32, and float protocol buffer types.
// DecodeFixed32 consumes exactly four bytes, little-endian,
// widening the result to uint64.
func (p *Buffer) DecodeFixed32() (x uint64, err error) {
	end := p.index + 4
	// end < 0 guards against integer overflow of the index.
	if end < 0 || end > len(p.buf) {
		return 0, io.ErrUnexpectedEOF
	}
	p.index = end
	for k := uint(0); k < 4; k++ {
		x |= uint64(p.buf[end-4+int(k)]) << (8 * k)
	}
	return x, nil
}
// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
// from the Buffer.
// This is the format used for the sint64 protocol buffer type.
// DecodeZigzag64 reads a varint and undoes the zigzag mapping
// (0,1,2,3,... back to 0,-1,1,-2,...), returned as a uint64 bit pattern.
func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
	v, e := p.DecodeVarint()
	if e != nil {
		return 0, e
	}
	return (v >> 1) ^ uint64((int64(v&1)<<63)>>63), nil
}
// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
// from the Buffer.
// This is the format used for the sint32 protocol buffer type.
// DecodeZigzag32 reads a varint, zigzag-decodes it in 32-bit arithmetic,
// then widens the result back to uint64.
func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
	v, e := p.DecodeVarint()
	if e != nil {
		return 0, e
	}
	return uint64((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)), nil
}
// These are not ValueDecoders: they produce an array of bytes or a string.
// bytes, embedded messages
// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
// This is the format used for the bytes protocol buffer
// type and for embedded messages.
// DecodeRawBytes reads a length-prefixed byte span. With alloc=true the
// caller receives a private copy; with alloc=false it gets a view into
// the Buffer's backing array (valid only until the buffer is reused).
func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
	n, err := p.DecodeVarint()
	if err != nil {
		return nil, err
	}

	size := int(n)
	if size < 0 {
		return nil, fmt.Errorf("proto: bad byte length %d", size)
	}
	end := p.index + size
	// end < p.index guards against integer overflow of the index.
	if end < p.index || end > len(p.buf) {
		return nil, io.ErrUnexpectedEOF
	}

	if alloc {
		out := make([]byte, size)
		copy(out, p.buf[p.index:])
		p.index = end
		return out, nil
	}

	// todo: check if can get more uses of alloc=false
	out := p.buf[p.index:end]
	p.index = end
	return out, nil
}
// DecodeStringBytes reads an encoded string from the Buffer.
// This is the format used for the proto2 string type.
// DecodeStringBytes reads an encoded string: strings share the wire
// format of bytes, so decode the span and convert.
func (p *Buffer) DecodeStringBytes() (s string, err error) {
	raw, e := p.DecodeRawBytes(false)
	if e != nil {
		return "", e
	}
	return string(raw), nil
}
// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
// If the protocol buffer has extensions, and the field matches, add it as an extension.
// Otherwise, if the XXX_unrecognized field exists, append the skipped data there.
func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error {
	oi := o.index // start of the field's payload (header already consumed)
	err := o.skip(t, tag, wire)
	if err != nil {
		return err
	}

	// No XXX_unrecognized field to save into; the skipped data is dropped.
	if !unrecField.IsValid() {
		return nil
	}

	ptr := structPointer_Bytes(base, unrecField)

	// Add the skipped field to struct field
	obuf := o.buf

	// Temporarily swap in the unrecognized-bytes slice so EncodeVarint
	// appends the re-encoded tag/wire header to it, then append the raw
	// payload (obuf[oi:o.index]) that skip() just walked over.
	o.buf = *ptr
	o.EncodeVarint(uint64(tag<<3 | wire))
	*ptr = append(o.buf, obuf[oi:o.index]...)

	o.buf = obuf
	return nil
}
// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
// skip advances past the next item in the buffer given its wire type,
// recursing into groups until the matching end-group tag.
func (o *Buffer) skip(t reflect.Type, tag, wire int) error {
	switch wire {
	case WireVarint:
		_, err := o.DecodeVarint()
		return err
	case WireFixed64:
		_, err := o.DecodeFixed64()
		return err
	case WireBytes:
		_, err := o.DecodeRawBytes(false)
		return err
	case WireFixed32:
		_, err := o.DecodeFixed32()
		return err
	case WireStartGroup:
		for {
			u, err := o.DecodeVarint()
			if err != nil {
				return err
			}
			fwire := int(u & 0x7)
			if fwire == WireEndGroup {
				return nil
			}
			if err := o.skip(t, int(u>>3), fwire); err != nil {
				return err
			}
		}
	default:
		return fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t)
	}
}
// Unmarshaler is the interface representing objects that can
// unmarshal themselves. The method should reset the receiver before
// decoding starts. The argument points to data that may be
// overwritten, so implementations should not keep references to the
// buffer.
type Unmarshaler interface {
	// Unmarshal decodes the wire-format bytes into the receiver; the
	// byte slice may be reused by the caller after this returns.
	Unmarshal([]byte) error
}
// Unmarshal parses the protocol buffer representation in buf and places the
// decoded result in pb. If the struct underlying pb does not match
// the data in buf, the results can be unpredictable.
//
// Unmarshal resets pb before starting to unmarshal, so any
// existing data in pb is always removed. Use UnmarshalMerge
// to preserve and append to existing data.
func Unmarshal(buf []byte, pb Message) error {
	// Reset first so decoded data replaces, rather than merges into,
	// whatever pb already holds.
	pb.Reset()
	return UnmarshalMerge(buf, pb)
}
// UnmarshalMerge parses the protocol buffer representation in buf and
// writes the decoded result to pb. If the struct underlying pb does not match
// the data in buf, the results can be unpredictable.
//
// UnmarshalMerge merges into existing data in pb.
// Most code should use Unmarshal instead.
func UnmarshalMerge(buf []byte, pb Message) error {
	// If the object can unmarshal itself, let it.
	// NOTE(review): a self-unmarshaler may reset rather than merge —
	// merge semantics here depend on the generated implementation.
	if u, ok := pb.(Unmarshaler); ok {
		return u.Unmarshal(buf)
	}
	return NewBuffer(buf).Unmarshal(pb)
}
// DecodeMessage reads a count-delimited message from the Buffer.
// DecodeMessage reads a count-delimited span and recursively unmarshals
// it into pb via a fresh Buffer.
func (p *Buffer) DecodeMessage(pb Message) error {
	raw, err := p.DecodeRawBytes(false)
	if err != nil {
		return err
	}
	return NewBuffer(raw).Unmarshal(pb)
}
// DecodeGroup reads a tag-delimited group from the Buffer.
func (p *Buffer) DecodeGroup(pb Message) error {
	typ, base, err := getbase(pb)
	if err != nil {
		return err
	}
	// Groups carry no length prefix; unmarshalType (is_group=true) stops
	// at the matching end-group tag.
	return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base)
}
// Unmarshal parses the protocol buffer representation in the
// Buffer and places the decoded result in pb. If the struct
// underlying pb does not match the data in the buffer, the results can be
// unpredictable.
//
// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
func (p *Buffer) Unmarshal(pb Message) error {
	// If the object can unmarshal itself, let it.
	if u, ok := pb.(Unmarshaler); ok {
		err := u.Unmarshal(p.buf[p.index:])
		// The remainder of the buffer is considered consumed either way.
		p.index = len(p.buf)
		return err
	}

	typ, base, err := getbase(pb)
	if err != nil {
		return err
	}

	err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base)

	if collectStats {
		stats.Decode++ // NOTE(review): unsynchronized increment — presumably stats are best-effort; confirm
	}

	return err
}
// unmarshalType does the work of unmarshaling a structure.
func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error {
	var state errorState
	// required counts mandatory fields not yet seen; reqFields is a
	// bitmap (tags 1..64) used to avoid double-counting repeated
	// occurrences of the same required field.
	required, reqFields := prop.reqCount, uint64(0)

	var err error
	for err == nil && o.index < len(o.buf) {
		oi := o.index // start of this field's header, kept for extension capture
		var u uint64
		u, err = o.DecodeVarint()
		if err != nil {
			break
		}
		wire := int(u & 0x7)
		if wire == WireEndGroup {
			if is_group {
				if required > 0 {
					// Not enough information to determine the exact field.
					// (See below.)
					return &RequiredNotSetError{"{Unknown}"}
				}
				return nil // input is satisfied
			}
			return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
		}
		tag := int(u >> 3)
		if tag <= 0 {
			return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire)
		}
		fieldnum, ok := prop.decoderTags.get(tag)
		if !ok {
			// Maybe it's an extension?
			if prop.extendable {
				if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) {
					// Skip the field, then stash its raw bytes (header
					// included, from oi) into the extension map entry.
					if err = o.skip(st, tag, wire); err == nil {
						extmap := e.extensionsWrite()
						ext := extmap[int32(tag)] // may be missing
						ext.enc = append(ext.enc, o.buf[oi:o.index]...)
						extmap[int32(tag)] = ext
					}
					continue
				}
			}
			// Maybe it's a oneof?
			if prop.oneofUnmarshaler != nil {
				m := structPointer_Interface(base, st).(Message)
				// First return value indicates whether tag is a oneof field.
				ok, err = prop.oneofUnmarshaler(m, tag, wire, o)
				if err == ErrInternalBadWireType {
					// Map the error to something more descriptive.
					// Do the formatting here to save generated code space.
					err = fmt.Errorf("bad wiretype for oneof field in %T", m)
				}
				if ok {
					continue
				}
			}
			// Unknown field: preserve its bytes via XXX_unrecognized.
			err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
			continue
		}
		p := prop.Prop[fieldnum]

		if p.dec == nil {
			fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name)
			continue
		}
		dec := p.dec
		if wire != WireStartGroup && wire != p.WireType {
			if wire == WireBytes && p.packedDec != nil {
				// a packable field
				dec = p.packedDec
			} else {
				err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType)
				continue
			}
		}
		decErr := dec(o, p, base)
		if decErr != nil && !state.shouldContinue(decErr, p) {
			err = decErr
		}
		if err == nil && p.Required {
			// Successfully decoded a required field.
			if tag <= 64 {
				// use bitmap for fields 1-64 to catch field reuse.
				var mask uint64 = 1 << uint64(tag-1)
				if reqFields&mask == 0 {
					// new required field
					reqFields |= mask
					required--
				}
			} else {
				// This is imprecise. It can be fooled by a required field
				// with a tag > 64 that is encoded twice; that's very rare.
				// A fully correct implementation would require allocating
				// a data structure, which we would like to avoid.
				required--
			}
		}
	}
	if err == nil {
		if is_group {
			// Group input ended without its end-group marker.
			return io.ErrUnexpectedEOF
		}
		if state.err != nil {
			return state.err
		}
		if required > 0 {
			// Not enough information to determine the exact field. If we use extra
			// CPU, we could determine the field only if the missing required field
			// has a tag <= 64 and we check reqFields.
			return &RequiredNotSetError{"{Unknown}"}
		}
	}
	return err
}
// Individual type decoders
// For each,
// u is the decoded value,
// v is a pointer to the field (pointer) in the struct
// Sizes of the pools to allocate inside the Buffer.
// The goal is modest amortization and allocation
// on at least 16-byte boundaries.
const (
boolPoolSize = 16
uint32PoolSize = 8
uint64PoolSize = 4
)
// Decode a bool.
func (o *Buffer) dec_bool(p *Properties, base structPointer) error {
	u, err := p.valDec(o)
	if err != nil {
		return err
	}
	// Bools are handed out of a small pool so each decoded value gets a
	// stable address without a separate allocation per field.
	if len(o.bools) == 0 {
		o.bools = make([]bool, boolPoolSize)
	}
	o.bools[0] = u != 0
	*structPointer_Bool(base, p.field) = &o.bools[0]
	o.bools = o.bools[1:]
	return nil
}
// dec_proto3_bool decodes a varint-encoded bool into a proto3 (non-pointer)
// bool field.
func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error {
	u, err := p.valDec(o)
	if err != nil {
		return err
	}
	*structPointer_BoolVal(base, p.field) = u != 0
	return nil
}
// dec_int32 decodes a 32-bit integer into an optional (pointer) field.
// p.valDec is the wire-type-specific value decoder chosen at setup time.
func (o *Buffer) dec_int32(p *Properties, base structPointer) error {
	u, err := p.valDec(o)
	if err != nil {
		return err
	}
	word32_Set(structPointer_Word32(base, p.field), o, uint32(u))
	return nil
}
// dec_proto3_int32 decodes a 32-bit integer into a proto3 (non-pointer) field.
func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error {
	u, err := p.valDec(o)
	if err != nil {
		return err
	}
	word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u))
	return nil
}
// dec_int64 decodes a 64-bit integer into an optional (pointer) field.
// o is passed along for its internal allocation pools (see the pool-size
// constants above).
func (o *Buffer) dec_int64(p *Properties, base structPointer) error {
	u, err := p.valDec(o)
	if err != nil {
		return err
	}
	word64_Set(structPointer_Word64(base, p.field), o, u)
	return nil
}
// dec_proto3_int64 decodes a 64-bit integer into a proto3 (non-pointer) field.
func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error {
	u, err := p.valDec(o)
	if err != nil {
		return err
	}
	word64Val_Set(structPointer_Word64Val(base, p.field), o, u)
	return nil
}
// dec_string decodes a length-delimited string into an optional (pointer)
// string field.
func (o *Buffer) dec_string(p *Properties, base structPointer) error {
	s, err := o.DecodeStringBytes()
	if err != nil {
		return err
	}
	*structPointer_String(base, p.field) = &s
	return nil
}
// dec_proto3_string decodes a length-delimited string into a proto3
// (non-pointer) string field.
func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error {
	s, err := o.DecodeStringBytes()
	if err != nil {
		return err
	}
	*structPointer_StringVal(base, p.field) = s
	return nil
}
// dec_slice_byte decodes a length-delimited bytes field into a []byte field.
// The 'true' asks DecodeRawBytes for a copy of the data rather than a
// sub-slice of the read buffer (contrast dec_new_map's 'false').
func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error {
	b, err := o.DecodeRawBytes(true)
	if err != nil {
		return err
	}
	*structPointer_Bytes(base, p.field) = b
	return nil
}
// dec_slice_bool decodes one unpacked bool and appends it to a []bool field.
func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error {
	u, err := p.valDec(o)
	if err != nil {
		return err
	}
	v := structPointer_BoolSlice(base, p.field)
	*v = append(*v, u != 0)
	return nil
}
// dec_slice_packed_bool decodes a packed []bool field: a varint byte count
// followed by that many bytes of encoded bools.
func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error {
	v := structPointer_BoolSlice(base, p.field)

	nn, err := o.DecodeVarint()
	if err != nil {
		return err
	}
	nb := int(nn) // number of bytes of encoded bools
	fin := o.index + nb
	if fin < o.index {
		// The addition wrapped around: the declared length is absurd.
		return errOverflow
	}

	y := *v
	for o.index < fin {
		u, err := p.valDec(o)
		if err != nil {
			return err
		}
		y = append(y, u != 0)
	}

	*v = y
	return nil
}
// dec_slice_int32 decodes one unpacked 32-bit integer and appends it to an
// []int32 field.
func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error {
	u, err := p.valDec(o)
	if err != nil {
		return err
	}
	structPointer_Word32Slice(base, p.field).Append(uint32(u))
	return nil
}
// dec_slice_packed_int32 decodes a packed []int32 field: a varint byte count
// followed by that many bytes of encoded values.
func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error {
	v := structPointer_Word32Slice(base, p.field)

	nn, err := o.DecodeVarint()
	if err != nil {
		return err
	}
	nb := int(nn) // number of bytes of encoded int32s

	fin := o.index + nb
	if fin < o.index {
		// The addition wrapped around: the declared length is absurd.
		return errOverflow
	}
	for o.index < fin {
		u, err := p.valDec(o)
		if err != nil {
			return err
		}
		v.Append(uint32(u))
	}
	return nil
}
// dec_slice_int64 decodes one unpacked 64-bit integer and appends it to an
// []int64 field.
func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error {
	u, err := p.valDec(o)
	if err != nil {
		return err
	}
	structPointer_Word64Slice(base, p.field).Append(u)
	return nil
}
// dec_slice_packed_int64 decodes a packed []int64 field: a varint byte count
// followed by that many bytes of encoded values.
func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error {
	v := structPointer_Word64Slice(base, p.field)

	nn, err := o.DecodeVarint()
	if err != nil {
		return err
	}
	nb := int(nn) // number of bytes of encoded int64s

	fin := o.index + nb
	if fin < o.index {
		// The addition wrapped around: the declared length is absurd.
		return errOverflow
	}
	for o.index < fin {
		u, err := p.valDec(o)
		if err != nil {
			return err
		}
		v.Append(u)
	}
	return nil
}
// dec_slice_string decodes one string and appends it to a []string field.
func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error {
	s, err := o.DecodeStringBytes()
	if err != nil {
		return err
	}
	v := structPointer_StringSlice(base, p.field)
	*v = append(*v, s)
	return nil
}
// dec_slice_slice_byte decodes one bytes value (copied) and appends it to a
// [][]byte field.
func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error {
	b, err := o.DecodeRawBytes(true)
	if err != nil {
		return err
	}
	v := structPointer_BytesSlice(base, p.field)
	*v = append(*v, b)
	return nil
}
// dec_new_map decodes one map entry — a length-delimited message whose field 1
// is the key and field 2 is the value (see enc_new_map for the format) — and
// inserts it into the map field, allocating the map itself on first use.
func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
	raw, err := o.DecodeRawBytes(false)
	if err != nil {
		return err
	}
	oi := o.index       // index at the end of this map entry
	o.index -= len(raw) // move buffer back to start of map entry

	mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V
	if mptr.Elem().IsNil() {
		mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
	}
	v := mptr.Elem() // map[K]V

	// Prepare addressable doubly-indirect placeholders for the key and value types.
	// See enc_new_map for why.
	keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K
	keybase := toStructPointer(keyptr.Addr())                  // **K

	var valbase structPointer
	var valptr reflect.Value
	switch p.mtype.Elem().Kind() {
	case reflect.Slice:
		// []byte
		var dummy []byte
		valptr = reflect.ValueOf(&dummy)  // *[]byte
		valbase = toStructPointer(valptr) // *[]byte
	case reflect.Ptr:
		// message; valptr is **Msg; need to allocate the intermediate pointer
		valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
		valptr.Set(reflect.New(valptr.Type().Elem()))
		valbase = toStructPointer(valptr)
	default:
		// everything else
		valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
		valbase = toStructPointer(valptr.Addr())                   // **V
	}

	// Decode.
	// This parses a restricted wire format, namely the encoding of a message
	// with two fields. See enc_new_map for the format.
	for o.index < oi {
		// tagcode for key and value properties are always a single byte
		// because they have tags 1 and 2.
		tagcode := o.buf[o.index]
		o.index++
		switch tagcode {
		case p.mkeyprop.tagcode[0]:
			if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil {
				return err
			}
		case p.mvalprop.tagcode[0]:
			if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil {
				return err
			}
		default:
			// Report the tag byte we actually choked on; raw[0] is merely the
			// first byte of the whole entry, not the offending tag.
			// TODO: Should we silently skip this instead?
			return fmt.Errorf("proto: bad map data tag %d", tagcode)
		}
	}
	keyelem, valelem := keyptr.Elem(), valptr.Elem()
	if !keyelem.IsValid() {
		keyelem = reflect.Zero(p.mtype.Key())
	}
	if !valelem.IsValid() {
		valelem = reflect.Zero(p.mtype.Elem())
	}

	v.SetMapIndex(keyelem, valelem)
	return nil
}
// dec_struct_group decodes a group field into a nested message struct,
// allocating it on first use. Groups are not length-delimited, so the nested
// unmarshal runs on the current buffer with is_group=true.
func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error {
	bas := structPointer_GetStructPointer(base, p.field)
	if structPointer_IsNil(bas) {
		// allocate new nested message
		bas = toStructPointer(reflect.New(p.stype))
		structPointer_SetStructPointer(base, p.field, bas)
	}
	return o.unmarshalType(p.stype, p.sprop, true, bas)
}
// dec_struct_message decodes an embedded (length-delimited) message field,
// allocating the nested struct on first use.
func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) {
	raw, e := o.DecodeRawBytes(false)
	if e != nil {
		return e
	}

	bas := structPointer_GetStructPointer(base, p.field)
	if structPointer_IsNil(bas) {
		// allocate new nested message
		bas = toStructPointer(reflect.New(p.stype))
		structPointer_SetStructPointer(base, p.field, bas)
	}

	// If the object can unmarshal itself, let it.
	if p.isUnmarshaler {
		iv := structPointer_Interface(bas, p.stype)
		return iv.(Unmarshaler).Unmarshal(raw)
	}

	// Temporarily narrow the buffer to just this message's bytes, recurse,
	// then restore the outer decoding position.
	obuf := o.buf
	oi := o.index
	o.buf = raw
	o.index = 0

	err = o.unmarshalType(p.stype, p.sprop, false, bas)

	o.buf = obuf
	o.index = oi

	return err
}
// dec_slice_struct_message decodes one embedded message and appends it to a
// []*struct field.
func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {
	return o.dec_slice_struct(p, false, base)
}
// dec_slice_struct_group decodes one group and appends it to a []*struct field.
func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {
	return o.dec_slice_struct(p, true, base)
}
// dec_slice_struct decodes one message or group, appending a pointer to the
// freshly allocated struct to a []*struct field before filling it in.
func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error {
	v := reflect.New(p.stype)
	bas := toStructPointer(v)
	structPointer_StructPointerSlice(base, p.field).Append(bas)

	if is_group {
		// Groups are not length-delimited; decode in place on this buffer.
		err := o.unmarshalType(p.stype, p.sprop, is_group, bas)
		return err
	}

	raw, err := o.DecodeRawBytes(false)
	if err != nil {
		return err
	}

	// If the object can unmarshal itself, let it.
	if p.isUnmarshaler {
		iv := v.Interface()
		return iv.(Unmarshaler).Unmarshal(raw)
	}

	// Swap in the sub-message bytes, recurse, then restore the outer state.
	obuf := o.buf
	oi := o.index
	o.buf = raw
	o.index = 0

	err = o.unmarshalType(p.stype, p.sprop, is_group, bas)

	o.buf = obuf
	o.index = oi

	return err
}
|
{
"pile_set_name": "Github"
}
|
package types

// bank module event types
const (
	// EventTypeTransfer is the event emitted for a bank transfer.
	EventTypeTransfer = "transfer"

	// AttributeKeyRecipient and AttributeKeySender carry the transfer's
	// destination and source addresses.
	AttributeKeyRecipient = "recipient"
	AttributeKeySender    = "sender"

	// AttributeValueCategory tags events as originating from this module.
	AttributeValueCategory = ModuleName
)
|
{
"pile_set_name": "Github"
}
|
/*
* Copyright 2012-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.context.event;
import org.springframework.boot.ConfigurableBootstrapContext;
import org.springframework.boot.SpringApplication;
import org.springframework.core.env.ConfigurableEnvironment;
import org.springframework.core.env.Environment;
/**
 * Event published when a {@link SpringApplication} is starting up and the
 * {@link Environment} is first available for inspection and modification.
 *
 * @author Dave Syer
 * @since 1.0.0
 */
@SuppressWarnings("serial")
public class ApplicationEnvironmentPreparedEvent extends SpringApplicationEvent {

	private final ConfigurableBootstrapContext bootstrapContext;

	private final ConfigurableEnvironment environment;

	/**
	 * Create a new {@link ApplicationEnvironmentPreparedEvent} instance.
	 * @param application the current application
	 * @param args the arguments the application is running with
	 * @param environment the environment that was just created
	 * @deprecated since 2.4.0 in favor of
	 * {@link #ApplicationEnvironmentPreparedEvent(ConfigurableBootstrapContext, SpringApplication, String[], ConfigurableEnvironment)}
	 */
	@Deprecated
	public ApplicationEnvironmentPreparedEvent(SpringApplication application, String[] args,
			ConfigurableEnvironment environment) {
		this(null, application, args, environment);
	}

	/**
	 * Create a new {@link ApplicationEnvironmentPreparedEvent} instance.
	 * @param bootstrapContext the bootstrap context
	 * @param application the current application
	 * @param args the arguments the application is running with
	 * @param environment the environment that was just created
	 */
	public ApplicationEnvironmentPreparedEvent(ConfigurableBootstrapContext bootstrapContext,
			SpringApplication application, String[] args, ConfigurableEnvironment environment) {
		super(application, args);
		this.bootstrapContext = bootstrapContext;
		this.environment = environment;
	}

	/**
	 * Return the bootstrap context.
	 * @return the bootstrap context
	 * @since 2.4.0
	 */
	public ConfigurableBootstrapContext getBootstrapContext() {
		return this.bootstrapContext;
	}

	/**
	 * Return the environment.
	 * @return the environment
	 */
	public ConfigurableEnvironment getEnvironment() {
		return this.environment;
	}

}
|
{
"pile_set_name": "Github"
}
|
var assert = require('assert');
var Traverse = require('traverse');

// forEach with this.update() mutates the traversed object in place and
// returns the same (mutated) structure.
exports.mutate = function () {
    var obj = { a : 1, b : 2, c : [ 3, 4 ] };
    var res = Traverse(obj).forEach(function (x) {
        if (typeof x === 'number' && x % 2 === 0) {
            this.update(x * 10);
        }
    });
    assert.deepEqual(obj, res);
    assert.deepEqual(obj, { a : 1, b : 20, c : [ 3, 40 ] });
};

// Same as mutate, via the static Traverse.forEach form.
exports.mutateT = function () {
    var obj = { a : 1, b : 2, c : [ 3, 4 ] };
    var res = Traverse.forEach(obj, function (x) {
        if (typeof x === 'number' && x % 2 === 0) {
            this.update(x * 10);
        }
    });
    assert.deepEqual(obj, res);
    assert.deepEqual(obj, { a : 1, b : 20, c : [ 3, 40 ] });
};

// map leaves the original untouched and returns a modified copy.
exports.map = function () {
    var obj = { a : 1, b : 2, c : [ 3, 4 ] };
    var res = Traverse(obj).map(function (x) {
        if (typeof x === 'number' && x % 2 === 0) {
            this.update(x * 10);
        }
    });
    assert.deepEqual(obj, { a : 1, b : 2, c : [ 3, 4 ] });
    assert.deepEqual(res, { a : 1, b : 20, c : [ 3, 40 ] });
};

// Same as map, via the static Traverse.map form.
exports.mapT = function () {
    var obj = { a : 1, b : 2, c : [ 3, 4 ] };
    var res = Traverse.map(obj, function (x) {
        if (typeof x === 'number' && x % 2 === 0) {
            this.update(x * 10);
        }
    });
    assert.deepEqual(obj, { a : 1, b : 2, c : [ 3, 4 ] });
    assert.deepEqual(res, { a : 1, b : 20, c : [ 3, 40 ] });
};

// clone returns a deep copy sharing no nested structure with the original.
exports.clone = function () {
    var obj = { a : 1, b : 2, c : [ 3, 4 ] };
    var res = Traverse(obj).clone();
    assert.deepEqual(obj, res);
    assert.ok(obj !== res);
    obj.a ++;
    assert.deepEqual(res.a, 1);
    obj.c.push(5);
    assert.deepEqual(res.c, [ 3, 4 ]);
};

// Same as clone, via the static Traverse.clone form.
exports.cloneT = function () {
    var obj = { a : 1, b : 2, c : [ 3, 4 ] };
    var res = Traverse.clone(obj);
    assert.deepEqual(obj, res);
    assert.ok(obj !== res);
    obj.a ++;
    assert.deepEqual(res.a, 1);
    obj.c.push(5);
    assert.deepEqual(res.c, [ 3, 4 ]);
};

// reduce with an explicit initial accumulator: collect all leaf values.
exports.reduce = function () {
    var obj = { a : 1, b : 2, c : [ 3, 4 ] };
    var res = Traverse(obj).reduce(function (acc, x) {
        if (this.isLeaf) acc.push(x);
        return acc;
    }, []);
    assert.deepEqual(obj, { a : 1, b : 2, c : [ 3, 4 ] });
    assert.deepEqual(res, [ 1, 2, 3, 4 ]);
};

// With no initial accumulator the root node itself seeds the accumulator,
// so the callback must never be invoked with isRoot set.
exports.reduceInit = function () {
    var obj = { a : 1, b : 2, c : [ 3, 4 ] };
    var res = Traverse(obj).reduce(function (acc, x) {
        if (this.isRoot) assert.fail('got root');
        return acc;
    });
    assert.deepEqual(obj, { a : 1, b : 2, c : [ 3, 4 ] });
    assert.deepEqual(res, obj);
};
// remove() on an array element compacts the array (no hole is left).
exports.remove = function () {
    var obj = { a : 1, b : 2, c : [ 3, 4 ] };
    Traverse(obj).forEach(function (x) {
        if (this.isLeaf && x % 2 == 0) this.remove();
    });
    assert.deepEqual(obj, { a : 1, c : [ 3 ] });
};

// remove() inside map affects only the returned copy, not the original.
exports.removeMap = function () {
    var obj = { a : 1, b : 2, c : [ 3, 4 ] };
    var res = Traverse(obj).map(function (x) {
        if (this.isLeaf && x % 2 == 0) this.remove();
    });
    assert.deepEqual(obj, { a : 1, b : 2, c : [ 3, 4 ] });
    assert.deepEqual(res, { a : 1, c : [ 3 ] });
};

// delete() leaves a sparse hole. Traverse.deepEqual treats a trailing hole
// like a shorter array, but distinguishes holes from undefined and null.
exports.delete = function () {
    var obj = { a : 1, b : 2, c : [ 3, 4 ] };
    Traverse(obj).forEach(function (x) {
        if (this.isLeaf && x % 2 == 0) this.delete();
    });

    assert.ok(!Traverse.deepEqual(
        obj, { a : 1, c : [ 3, undefined ] }
    ));

    assert.ok(Traverse.deepEqual(
        obj, { a : 1, c : [ 3 ] }
    ));

    assert.ok(!Traverse.deepEqual(
        obj, { a : 1, c : [ 3, null ] }
    ));
};

// A non-trailing hole is significant: [3,,5] differs from [3, undefined, 5],
// [3, null, 5] and [3, 5].
exports.deleteRedux = function () {
    var obj = { a : 1, b : 2, c : [ 3, 4, 5 ] };
    Traverse(obj).forEach(function (x) {
        if (this.isLeaf && x % 2 == 0) this.delete();
    });

    assert.ok(!Traverse.deepEqual(
        obj, { a : 1, c : [ 3, undefined, 5 ] }
    ));

    assert.ok(Traverse.deepEqual(
        obj, { a : 1, c : [ 3 ,, 5 ] }
    ));

    assert.ok(!Traverse.deepEqual(
        obj, { a : 1, c : [ 3, null, 5 ] }
    ));

    assert.ok(!Traverse.deepEqual(
        obj, { a : 1, c : [ 3, 5 ] }
    ));
};

// delete() inside map: original untouched, copy gets the (trailing) hole.
exports.deleteMap = function () {
    var obj = { a : 1, b : 2, c : [ 3, 4 ] };
    var res = Traverse(obj).map(function (x) {
        if (this.isLeaf && x % 2 == 0) this.delete();
    });

    assert.ok(Traverse.deepEqual(
        obj,
        { a : 1, b : 2, c : [ 3, 4 ] }
    ));

    var xs = [ 3, 4 ];
    delete xs[1];

    assert.ok(Traverse.deepEqual(
        res, { a : 1, c : xs }
    ));

    assert.ok(Traverse.deepEqual(
        res, { a : 1, c : [ 3, ] }
    ));

    assert.ok(Traverse.deepEqual(
        res, { a : 1, c : [ 3 ] }
    ));
};

// delete() inside map with a non-trailing hole in the copy.
exports.deleteMapRedux = function () {
    var obj = { a : 1, b : 2, c : [ 3, 4, 5 ] };
    var res = Traverse(obj).map(function (x) {
        if (this.isLeaf && x % 2 == 0) this.delete();
    });

    assert.ok(Traverse.deepEqual(
        obj,
        { a : 1, b : 2, c : [ 3, 4, 5 ] }
    ));

    var xs = [ 3, 4, 5 ];
    delete xs[1];

    assert.ok(Traverse.deepEqual(
        res, { a : 1, c : xs }
    ));

    assert.ok(!Traverse.deepEqual(
        res, { a : 1, c : [ 3, 5 ] }
    ));

    assert.ok(Traverse.deepEqual(
        res, { a : 1, c : [ 3 ,, 5 ] }
    ));
};
|
{
"pile_set_name": "Github"
}
|
<?xml version="1.0" encoding="utf-8"?>
<!-- Automatically generated file. Do not edit. -->
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>Линия по 2 точкам</title>
<link rel="stylesheet" type="text/css" href="../../../../../style.css" />
<meta http-equiv="content-type" content="text/html; charset=UTF-8" /></head>
<body style="font-family:Arial, Helvetica, sans-serif">
<p style="text-align:right;"><a href="/doc/qcad/latest/reference/ru/index.php?page=scripts/Draw/Line/Line2P/doc/Line2P">      </a></p>
<p style="font-style: italic;">Это автоматический перевод.</p>
<div class="nobreak">
<h2>Линия по 2 точкам</h2>
<p class="toolinfo">
<b>Панель инструментов / Иконка:</b>
<br /><img src="../../doc/Line.png" width="40" height="40" />
   
<img src="../doc/Line2P.png" width="40" height="40" />
   
<br/>
<b>Меню:</b> <font face="courier new">Начертить > Линия > Линия по 2 точкам</font>
<br /><b>Горячая клавиша:</b> <font face="courier new">L, I</font>
<br /><b>Команды:</b> <font face="courier new">line | ln | li | l</font>
</p>
</div>
<h3>Описание</h3>
<p>This tool lets you draw a sequence of one or more straight lines.</p>
<h3>Использование</h3>
<ol>
<li>Specify the start point of the first line segment. You can use the mouse
or enter a coordinate in the console.</li>
<li>Specify the endpoint of the first line segment.</li>
<li>Specify the endpoints of additional line segments. Click the 'Close'
button in the options tool bar to close the sequence:
<br />
<img width="40" height="40" src="Close.png" />
<br />If you need to undo a single line segment, you can do so by clicking
the 'Undo' button:
<br />
<img width="40" height="40" src="Undo.png" /></li>
</ol>
</body>
</html>
|
{
"pile_set_name": "Github"
}
|
/**
* Copyright (c) 2010-2020 Contributors to the openHAB project
*
* See the NOTICE file(s) distributed with this work for additional
* information.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
*/
package org.openhab.core.config.core.internal.normalization;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Common base class for all normalizers, doing the specific type conversion.
*
* @author Simon Kaufmann - Initial contribution
* @author Thomas Höfer - renamed normalizer interface and added javadoc
*/
abstract class AbstractNormalizer implements Normalizer {

    protected final Logger logger = LoggerFactory.getLogger(AbstractNormalizer.class);

    @Override
    public final Object normalize(Object value) {
        // Pass-through cases that no concrete normalizer needs to see.
        if (value == null) {
            return null;
        }
        boolean isEmptyString = value instanceof String && "".equals(value);
        return isEmptyString ? "" : doNormalize(value);
    }

    /**
     * Executes the concrete normalization of the given value.
     *
     * @param value the value to be normalized (never null, never the empty string)
     * @return the normalized value or the given value, if it was not possible to normalize it
     */
    abstract Object doNormalize(Object value);

}
|
{
"pile_set_name": "Github"
}
|
#
# Copyright 2020 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package storage::emc::unisphere::restapi::mode::pools;
use base qw(centreon::plugins::templates::counter);
use strict;
use warnings;
use storage::emc::unisphere::restapi::mode::components::resources qw($health_status);
use centreon::plugins::templates::catalog_functions qw(catalog_status_threshold catalog_status_calc);
# Build the human-readable health-status line for one pool.
sub custom_status_output {
    my ($self, %options) = @_;

    my $msg = 'status : ' . $self->{result_values}->{status};
    return $msg;
}
# Build the space-usage line (total / used / free with percentages),
# scaling byte counts to human-readable units.
sub custom_usage_output {
    my ($self, %options) = @_;

    my ($total_size_value, $total_size_unit) = $self->{perfdata}->change_bytes(value => $self->{result_values}->{total_space});
    my ($total_used_value, $total_used_unit) = $self->{perfdata}->change_bytes(value => $self->{result_values}->{used_space});
    my ($total_free_value, $total_free_unit) = $self->{perfdata}->change_bytes(value => $self->{result_values}->{free_space});
    my $msg = sprintf('space usage total: %s used: %s (%.2f%%) free: %s (%.2f%%)',
        $total_size_value . " " . $total_size_unit,
        $total_used_value . " " . $total_used_unit, $self->{result_values}->{prct_used_space},
        $total_free_value . " " . $total_free_unit, $self->{result_values}->{prct_free_space}
    );
    return $msg;
}
# Build the subscribed-usage line. When the pool is oversubscribed the free
# figures go negative (subscribed > total), so they are clamped to 0 for
# display.
sub custom_subscribed_output {
    my ($self, %options) = @_;

    my ($total_size_value, $total_size_unit) = $self->{perfdata}->change_bytes(value => $self->{result_values}->{total_space});
    my ($total_used_value, $total_used_unit) = $self->{perfdata}->change_bytes(value => $self->{result_values}->{used_sub});
    $self->{result_values}->{free_sub} = 0 if ($self->{result_values}->{free_sub} < 0);
    $self->{result_values}->{prct_free_sub} = 0 if ($self->{result_values}->{prct_free_sub} < 0);
    my ($total_free_value, $total_free_unit) = $self->{perfdata}->change_bytes(value => $self->{result_values}->{free_sub});
    my $msg = sprintf('subscribed usage total: %s used: %s (%.2f%%) free: %s (%.2f%%)',
        $total_size_value . " " . $total_size_unit,
        $total_used_value . " " . $total_used_unit, $self->{result_values}->{prct_used_sub},
        $total_free_value . " " . $total_free_unit, $self->{result_values}->{prct_free_sub}
    );
    return $msg;
}
# Declare the counter tree: one 'pool' instance group with status, space
# usage (bytes / free bytes / percent) and subscription (bytes / percent)
# counters. Values are filled in by manage_selection().
sub set_counters {
    my ($self, %options) = @_;

    $self->{maps_counters_type} = [
        { name => 'pool', type => 1, cb_prefix_output => 'prefix_pool_output', message_multiple => 'All pools are ok' },
    ];

    $self->{maps_counters}->{pool} = [
        { label => 'status', threshold => 0, set => {
                key_values => [ { name => 'status' }, { name => 'display' } ],
                closure_custom_calc => \&catalog_status_calc,
                closure_custom_output => $self->can('custom_status_output'),
                closure_custom_perfdata => sub { return 0; },
                closure_custom_threshold_check => \&catalog_status_threshold,
            }
        },
        { label => 'usage', nlabel => 'pool.space.usage.bytes', set => {
                key_values => [ { name => 'used_space' }, { name => 'free_space' }, { name => 'prct_used_space' }, { name => 'prct_free_space' }, { name => 'total_space' }, { name => 'display' }, ],
                closure_custom_output => $self->can('custom_usage_output'),
                perfdatas => [
                    { value => 'used_space', template => '%d', min => 0, max => 'total_space',
                      unit => 'B', cast_int => 1, label_extra_instance => 1, instance_use => 'display' },
                ],
            }
        },
        { label => 'usage-free', nlabel => 'pool.space.free.bytes', display_ok => 0, set => {
                key_values => [ { name => 'free_space' }, { name => 'used_space' }, { name => 'prct_used_space' }, { name => 'prct_free_space' }, { name => 'total_space' }, { name => 'display' }, ],
                closure_custom_output => $self->can('custom_usage_output'),
                perfdatas => [
                    { value => 'free_space', template => '%d', min => 0, max => 'total_space',
                      unit => 'B', cast_int => 1, label_extra_instance => 1, instance_use => 'display' },
                ],
            }
        },
        { label => 'usage-prct', nlabel => 'pool.space.usage.percentage', display_ok => 0, set => {
                key_values => [ { name => 'prct_used_space' }, { name => 'display' } ],
                output_template => 'used : %.2f %%',
                perfdatas => [
                    { value => 'prct_used_space', template => '%.2f', min => 0, max => 100,
                      unit => '%', label_extra_instance => 1, instance_use => 'display' },
                ],
            }
        },
        { label => 'subscribed', nlabel => 'pool.subscribed.usage.bytes', display_ok => 0, set => {
                key_values => [ { name => 'used_sub' }, { name => 'free_sub' }, { name => 'prct_used_sub' }, { name => 'prct_free_sub' }, { name => 'total_space' }, { name => 'display' }, ],
                closure_custom_output => $self->can('custom_subscribed_output'),
                perfdatas => [
                    { value => 'used_sub', template => '%d', min => 0, max => 'total_space',
                      unit => 'B', cast_int => 1, label_extra_instance => 1, instance_use => 'display' },
                ],
            }
        },
        { label => 'subscribed-prct', display_ok => 0, nlabel => 'pool.subscribed.usage.percentage', set => {
                key_values => [ { name => 'prct_used_sub' }, { name => 'display' } ],
                # fixed typo: 'subcribed' -> 'subscribed'
                output_template => 'subscribed used : %.2f %%',
                perfdatas => [
                    { value => 'prct_used_sub', template => '%.2f', min => 0, max => 100,
                      unit => '%', label_extra_instance => 1, instance_use => 'display' },
                ],
            }
        },
    ];
}
# Constructor: register the plugin's command-line options and their default
# status thresholds.
sub new {
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options, force_new_perfdata => 1);
    bless $self, $class;

    $options{options}->add_options(arguments => {
        'filter-name:s'     => { name => 'filter_name' },
        'unknown-status:s'  => { name => 'unknown_status', default => '%{status} =~ /unknown/i' },
        'warning-status:s'  => { name => 'warning_status', default => '%{status} =~ /ok_but|degraded|minor/i' },
        # The previous default '/major|criticalnon_recoverable/i' was missing
        # an alternation pipe, so neither a plain 'critical' nor a
        # 'non_recoverable' health state could ever trigger CRITICAL.
        'critical-status:s' => { name => 'critical_status', default => '%{status} =~ /major|critical|non_recoverable/i' },
    });

    return $self;
}
# Expand %{...} macros inside the user-supplied (or default) status
# threshold expressions after standard option validation.
sub check_options {
    my ($self, %options) = @_;
    $self->SUPER::check_options(%options);

    $self->change_macros(macros => ['warning_status', 'critical_status', 'unknown_status']);
}
# Prefix every counter line with the pool's name.
sub prefix_pool_output {
    my ($self, %options) = @_;

    return "Pool '" . $options{instance_value}->{display} . "' ";
}
# Fetch all pools from the Unisphere REST API and derive the values the
# counters declared in set_counters() consume. Exits early if no pool
# survives the --filter-name filter.
sub manage_selection {
    my ($self, %options) = @_;

    my $results = $options{custom}->request_api(url_path => '/api/types/pool/instances?fields=name,sizeFree,sizeSubscribed,sizeTotal,health');

    $self->{pool} = {};
    foreach (@{$results->{entries}}) {
        if (defined($self->{option_results}->{filter_name}) && $self->{option_results}->{filter_name} ne '' &&
            $_->{content}->{name} !~ /$self->{option_results}->{filter_name}/) {
            $self->{output}->output_add(long_msg => "skipping pool '" . $_->{content}->{name} . "': no matching filter.", debug => 1);
            next;
        }

        # NOTE(review): the percentage computations divide by sizeTotal;
        # a pool reported with sizeTotal == 0 would die here — confirm the
        # API never returns that.
        my $used = $_->{content}->{sizeTotal} - $_->{content}->{sizeFree};
        $self->{pool}->{$_->{content}->{id}} = {
            display => $_->{content}->{name},
            # Numeric health code mapped to a symbolic status string.
            status => $health_status->{ $_->{content}->{health}->{value} },
            total_space => $_->{content}->{sizeTotal},
            used_space => $used,
            free_space => $_->{content}->{sizeFree},
            prct_used_space => $used * 100 / $_->{content}->{sizeTotal},
            prct_free_space => $_->{content}->{sizeFree} * 100 / $_->{content}->{sizeTotal},
            used_sub => $_->{content}->{sizeSubscribed},
            # Can go negative when oversubscribed; clamped at display time.
            free_sub => $_->{content}->{sizeTotal} - $_->{content}->{sizeSubscribed},
            prct_used_sub => $_->{content}->{sizeSubscribed} * 100 / $_->{content}->{sizeTotal},
            prct_free_sub => 100 - ($_->{content}->{sizeSubscribed} * 100 / $_->{content}->{sizeTotal}),
        };
    }

    if (scalar(keys %{$self->{pool}}) <= 0) {
        $self->{output}->add_option_msg(short_msg => "No pool found");
        $self->{output}->option_exit();
    }
}
1;
__END__
=head1 MODE
Check pool usages.
=over 8
=item B<--filter-counters>
Only display some counters (regexp can be used).
Example: --filter-counters='^usage$'
=item B<--filter-name>
Filter pool name (can be a regexp).
=item B<--unknown-status>

Set unknown threshold for status (Default: '%{status} =~ /unknown/i').
Can use special variables like: %{status}, %{display}
=item B<--warning-status>
Set warning threshold for status (Default: '%{status} =~ /ok_but|degraded|minor/i').
Can use special variables like: %{status}, %{display}
=item B<--critical-status>
Set critical threshold for status (Default: '%{status} =~ /major|criticalnon_recoverable/i').
Can use special variables like: %{status}, %{display}
=item B<--warning-*> B<--critical-*>
Thresholds.
Can be: 'usage' (B), 'usage-free' (B), 'usage-prct' (%),
'subscribed', 'subscribed-prct'.
=back
=cut
|
{
"pile_set_name": "Github"
}
|
kernel:
# image: "linuxkit/kernel:4.9.x"
image: "rancher/os-kernel:4.9.26-rancher"
# cmdline: "debug ignore_loglevel log_buf_len=10M print_fatal_signals=1 LOGLEVEL=8 earlyprintk sched_debug initcall_debug option.debug=Y break=y console=ttyS0 console=tty0 console=tty1 page_poison=1 printk.devkmsg=on rancher.debug=true rancher.password=rancher rancher.autologin=ttyS0 rancher.autologin=tty0 rancher.autologin=tty1"
cmdline: 'printk.devkmsg=on rancher.debug=true rancher.password=rancher console=ttyS0 rancher.autologin=ttyS0 console=tty0 rancher.autologin=tty0 console=tty1 rancher.autologin=tty1 rancher.state.dev=LABEL=RANCHER_STATE rancher.state.autoformat=[/dev/sda,/dev/vda] rancher.rm_usr'
init:
- zombie/os
# - rancher/os-installer
# - rancher/os-initrd
#outputs:
# - format: kernel+initrd
# - format: iso-bios
# - format: iso-efi
# - format: gcp-img
|
{
"pile_set_name": "Github"
}
|
#!/usr/bin/elfsh
# Load the target binary into the ELFsh workspace.
load /bin/ls
# Load the remapping module, then locate relocation information.
modload modremap
findrel
# Rebase the binary to 0x11223344.
remap 0x11223344
# Write the remapped binary out and run it as a smoke test.
save /tmp/ls.remapped
exec /tmp/ls.remapped
quit
|
{
"pile_set_name": "Github"
}
|
/*
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Top-level class holding two levels of private static nested classes;
// Inner2 extends B, which is declared elsewhere (not in this file).
public class A {
    static private class Inner {
        static private class Inner2 extends B {
        }
    }
}
|
{
"pile_set_name": "Github"
}
|
const exprParser = require('../../../util/exprParser');

exports = module.exports = function() {
    // Handle the template AST attribute `:style` (dynamic style binding).
    this.register('template-parse-ast-attr-:style', function parseBindStyle({ item, expr }) {
        // Parse the binding expression into { styleName: valueExpression }.
        let exprObj = exprParser.str2obj(expr);
        item.bindStyle = Object.keys(exprObj).map(name => {
            // Escape quotes so the expression can be embedded in the
            // generated single-quoted string below.
            // eslint-disable-next-line
            let exp = exprObj[name].replace(/\'/gi, '\\\'').replace(/\"/gi, '\\"');
            // add brackets to fix priority of "+" operator.
            if (/^\(.*\)$/.test(exp) === false) {
                exp = `(${exp})`;
            }
            // eslint-disable-next-line
            name = name.replace(/\'/gi, '\\\'').replace(/\"/gi, '\\"');
            // hyphenate: presumably camelCase -> kebab-case CSS property
            // names — confirm in util/exprParser.
            name = exprParser.hyphenate(name);
            return `'${name}:' + ${exp} + ';'`;
        });
        // Returning empty attrs removes the original :style attribute.
        return { attrs: {} };
    });
};
|
{
"pile_set_name": "Github"
}
|
<?php
/*
* This file is part of CacheTool.
*
* (c) Samuel Gordalina <samuel.gordalina@gmail.com>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace CacheTool\Adapter\Http;
class FileGetContents extends AbstractHttp
{
    /**
     * Fetches $filename from the adapter's base URL.
     *
     * On failure returns a serialized error structure (same shape the remote
     * endpoint would produce) instead of throwing.
     *
     * @param string $filename
     * @return string the response body, or a serialized error array
     */
    public function fetch($filename)
    {
        $url = "{$this->baseUrl}/{$filename}";
        // Errors are suppressed; a false return is converted into the
        // serialized error payload below.
        $contents = @file_get_contents($url);

        if (false === $contents) {
            return serialize([
                'result' => false,
                'errors' => [
                    [
                        'no' => 0,
                        // {$url}: the legacy ${url} interpolation form is
                        // deprecated as of PHP 8.2; output is identical.
                        'str' => "file_get_contents() call failed with url: {$url}",
                    ],
                ],
            ]);
        }

        return $contents;
    }
}
|
{
"pile_set_name": "Github"
}
|
{
"system": {
},
"modules": {
"Core": {
"SiteName": "Afterlogic WebMail Lite PHP",
"ProductName": "Afterlogic WebMail Lite PHP"
},
"CoreWebclient": {
"HeaderModulesOrder": ["mail", "contacts"],
"DefaultAnonymScreenHash": "login",
"DefaultUserScreenHash": "mail",
"ThemeList": ["Default", "DeepForest", "Funny", "Sand"]
},
"SessionTimeoutWebclient": {
"Disabled": true
},
"Dropbox": {
"Disabled": true
},
"DropboxAuthWebclient": {
"Scopes": "auth"
},
"Mail": {
"MessagesSortBy": {
"Allow": true,
"List": [],
"DefaultSortBy": "arrival",
"DefaultSortOrder": "desc"
},
"AllowDefaultAccountForUser": true,
"AutocreateMailAccountOnNewUserFirstLogin": true
},
"OAuthIntegratorWebclient": {
"AllowNewUsersRegister": false
},
"ChangePasswordWebclient": {
"Disabled": true
},
"MailChangePasswordPoppassdPlugin": {
"Disabled": true
},
"MailSaveMessageAsPdfPlugin": {
"Disabled": true
},
"MailTnefWebclientPlugin": {
"Disabled": true
},
"MailAuthCpanel": {
"Disabled": true
},
"CpanelIntegrator": {
"Disabled": true
},
"StandardLoginFormWebclient": {
"UseDropdownLanguagesView": true,
"BottomInfoHtmlText": "Powered by <a href=\"https:\/\/afterlogic.org\/webmail-lite\" target=\"_blank\">Afterlogic WebMail Lite</a>"
},
"RecaptchaWebclientPlugin": {
"Disabled": true
},
"MailMasterPassword": {
"Disabled": true
},
"MailNotesPlugin": {
"Disabled": true
},
"MailLoginFormWebclient": {
"Disabled": true,
"UseDropdownLanguagesView": true,
"BottomInfoHtmlText": "Powered by <a href=\"https:\/\/afterlogic.com\/webmail-lite\" target=\"_blank\">Afterlogic WebMail Lite</a>"
},
"OverrideUserSettings": {
"Disabled": true
},
"Contacts": {
"ImportContactsLink": "https://afterlogic.com/docs/webmail-lite-8/frequently-asked-questions/importing-contacts"
}
}
}
|
{
"pile_set_name": "Github"
}
|
#pragma once
namespace slade
{
class ParseTreeNode;
namespace game
{
// One named value for an Arg with a custom value/flag list: maps a
// display [name] to the numeric [value] it represents.
struct ArgValue
{
string name;
int value;
};
// Definition of a single numeric argument: its name, description, how the
// raw value is interpreted (Type) and, for custom types, the named
// values/flags it can take.
struct Arg
{
// Interpretation of the argument's raw integer value
enum Type
{
Number = 0, // Plain number (default)
YesNo,
NoYes,
Angle,
Choice, // One of a set of named values (see custom_values)
Flags, // Combination of named flags (see custom_flags)
Speed,
};
// Maps an arg name to a shared Arg definition (used by parse)
typedef std::map<string, Arg> SpecialMap;
string name;
string desc;
int type = Number; // One of the Type enum values above
vector<ArgValue> custom_values; // Named values for a custom value type
vector<ArgValue> custom_flags; // Named flags for a custom flag type
Arg() {}
Arg(string_view name) : name{ name } {}
// Returns a display string for [value] according to this arg's type
// (implemented elsewhere)
string valueString(int value) const;
// Returns a label describing [value] as a speed
// (NOTE(review): semantics assumed from the name -- confirm in the
// implementation)
string speedLabel(int value) const;
// Reads this arg's definition from [node]; [shared_args] supplies arg
// definitions shared between specials
void parse(ParseTreeNode* node, SpecialMap* shared_args);
};
// A fixed set of (up to) 5 args for a special or thing type, with [count]
// indicating how many are actually meaningful (defaults to 0; the args
// themselves default to names "Arg1".."Arg5").
struct ArgSpec
{
Arg args[5];
int count;
ArgSpec() : args{ { "Arg1" }, { "Arg2" }, { "Arg3" }, { "Arg4" }, { "Arg5" } }, count{ 0 } {}
// Unchecked access: [index] must be in 0..4
Arg& operator[](int index) { return args[index]; }
const Arg& operator[](int index) const { return args[index]; }
// Returns a string describing the given arg [values]
// (NOTE(review): role of values_str[2] not visible here -- confirm in
// the implementation)
string stringDesc(const int values[5], string values_str[2]) const;
};
} // namespace game
} // namespace slade
|
{
"pile_set_name": "Github"
}
|
#pragma once
#include "bindings.h"

// Binding-layer representation of an openNURBS ON_UUID: a pybind11 object
// when building the Python bindings, otherwise a std::string
// (presumably the UUID's text form -- confirm in the implementation).
#if defined(ON_PYTHON_COMPILE)
typedef pybind11::object BND_UUID;
#else
typedef std::string BND_UUID;
#endif

// Convert an ON_UUID to its binding representation.
BND_UUID ON_UUID_to_Binding(const ON_UUID& id);
// Convert a binding value back to an ON_UUID.
ON_UUID Binding_to_ON_UUID(const BND_UUID& id);
|
{
"pile_set_name": "Github"
}
|
<?xml version="1.0"?>
<!--
Any copyright is dedicated to the Public Domain.
http://creativecommons.org/publicdomain/zero/1.0/
-->
<window xmlns="http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul">
<style xmlns="http://www.w3.org/1999/xhtml"><![CDATA[
window { padding: 8px; }
image {
border: 1px dashed gray;
padding: 1px;
object-fit: fill;
image-rendering: -moz-crisp-edges;
float: left;
}
.bigWide {
width: 52px;
height: 36px;
}
.bigTall {
width: 36px;
height: 52px;
}
.small {
width: 12px;
height: 12px;
}
br { clear: both; }
.tr { object-position: top right }
.bl { object-position: bottom left }
.tl { object-position: top 25% left 25% }
.br { object-position: bottom 1px right 2px }
.tc { object-position: top 3px center }
.cr { object-position: center right 25% }
]]></style>
<hbox>
<!-- big/wide: -->
<image src="colors-16x8.png" class="bigWide tr"/>
<image src="colors-16x8.png" class="bigWide bl"/>
<image src="colors-16x8.png" class="bigWide tl"/>
<image src="colors-16x8.png" class="bigWide br"/>
<image src="colors-16x8.png" class="bigWide tc"/>
<image src="colors-16x8.png" class="bigWide cr"/>
<image src="colors-16x8.png" class="bigWide"/>
</hbox>
<hbox>
<!-- big/tall: -->
<image src="colors-16x8.png" class="bigTall tr"/>
<image src="colors-16x8.png" class="bigTall bl"/>
<image src="colors-16x8.png" class="bigTall tl"/>
<image src="colors-16x8.png" class="bigTall br"/>
<image src="colors-16x8.png" class="bigTall tc"/>
<image src="colors-16x8.png" class="bigTall cr"/>
<image src="colors-16x8.png" class="bigTall"/>
</hbox>
<hbox>
<!-- small: -->
<image src="colors-16x8.png" class="small tr"/>
<image src="colors-16x8.png" class="small bl"/>
<image src="colors-16x8.png" class="small tl"/>
<image src="colors-16x8.png" class="small br"/>
<image src="colors-16x8.png" class="small tc"/>
<image src="colors-16x8.png" class="small cr"/>
<image src="colors-16x8.png" class="small"/>
</hbox>
</window>
|
{
"pile_set_name": "Github"
}
|
/**
* Markdown styles copied from Github
*/
.markdown-body {
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Helvetica, Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol";
font-size: 16px;
line-height: 1.5;
word-wrap: break-word
}
.markdown-body::before {
display: table;
content: ""
}
.markdown-body::after {
display: table;
clear: both;
content: ""
}
.markdown-body > *:first-child {
margin-top: 0 !important
}
.markdown-body > *:last-child {
margin-bottom: 0 !important
}
.markdown-body a:not([href]) {
color: inherit;
text-decoration: none
}
.markdown-body .absent {
color: #cb2431
}
.markdown-body .anchor {
float: left;
padding-right: 4px;
margin-left: -20px;
line-height: 1
}
.markdown-body .anchor:focus {
outline: none
}
.markdown-body p, .markdown-body blockquote, .markdown-body ul, .markdown-body ol, .markdown-body dl, .markdown-body table, .markdown-body pre {
margin-top: 0;
margin-bottom: 16px
}
.markdown-body hr {
height: .25em;
padding: 0;
margin: 24px 0;
background-color: #e1e4e8;
border: 0
}
.markdown-body blockquote {
padding: 0 1em;
color: #6a737d;
border-left: 0.25em solid #dfe2e5
}
.markdown-body blockquote > :first-child {
margin-top: 0
}
.markdown-body blockquote > :last-child {
margin-bottom: 0
}
.markdown-body kbd {
display: inline-block;
padding: 3px 5px;
font-size: 11px;
line-height: 10px;
color: #444d56;
vertical-align: middle;
background-color: #fafbfc;
border: solid 1px #c6cbd1;
border-bottom-color: #959da5;
border-radius: 3px;
box-shadow: inset 0 -1px 0 #959da5
}
.markdown-body h1, .markdown-body h2, .markdown-body h3, .markdown-body h4, .markdown-body h5, .markdown-body h6 {
margin-top: 24px;
margin-bottom: 16px;
font-weight: 600;
line-height: 1.25
}
.markdown-body h1 .octicon-link, .markdown-body h2 .octicon-link, .markdown-body h3 .octicon-link, .markdown-body h4 .octicon-link, .markdown-body h5 .octicon-link, .markdown-body h6 .octicon-link {
color: #1b1f23;
vertical-align: middle;
visibility: hidden
}
.markdown-body h1:hover .anchor, .markdown-body h2:hover .anchor, .markdown-body h3:hover .anchor, .markdown-body h4:hover .anchor, .markdown-body h5:hover .anchor, .markdown-body h6:hover .anchor {
text-decoration: none
}
.markdown-body h1:hover .anchor .octicon-link, .markdown-body h2:hover .anchor .octicon-link, .markdown-body h3:hover .anchor .octicon-link, .markdown-body h4:hover .anchor .octicon-link, .markdown-body h5:hover .anchor .octicon-link, .markdown-body h6:hover .anchor .octicon-link {
visibility: visible
}
.markdown-body h1 tt, .markdown-body h1 code, .markdown-body h2 tt, .markdown-body h2 code, .markdown-body h3 tt, .markdown-body h3 code, .markdown-body h4 tt, .markdown-body h4 code, .markdown-body h5 tt, .markdown-body h5 code, .markdown-body h6 tt, .markdown-body h6 code {
font-size: inherit
}
.markdown-body h1 {
padding-bottom: 0.3em;
font-size: 2em;
border-bottom: 1px solid #eaecef
}
.markdown-body h2 {
padding-bottom: 0.3em;
font-size: 1.5em;
border-bottom: 1px solid #eaecef
}
.markdown-body h3 {
font-size: 1.25em
}
.markdown-body h4 {
font-size: 1em
}
.markdown-body h5 {
font-size: 0.875em
}
.markdown-body h6 {
font-size: 0.85em;
color: #6a737d
}
.markdown-body ul, .markdown-body ol {
padding-left: 2em
}
.markdown-body ul.no-list, .markdown-body ol.no-list {
padding: 0;
list-style-type: none
}
.markdown-body ul ul, .markdown-body ul ol, .markdown-body ol ol, .markdown-body ol ul {
margin-top: 0;
margin-bottom: 0
}
.markdown-body li {
/* Was `word-wrap: break-all`: `break-all` is not a valid value for
word-wrap/overflow-wrap (which accept normal | break-word), so browsers
ignored the declaration entirely. `word-break: break-all` is the valid
property/value pair matching the written value. */
word-break: break-all;
}
.markdown-body li > p {
margin-top: 16px
}
.markdown-body li + li {
margin-top: .25em
}
.markdown-body dl {
padding: 0
}
.markdown-body dl dt {
padding: 0;
margin-top: 16px;
font-size: 1em;
font-style: italic;
font-weight: 600
}
.markdown-body dl dd {
padding: 0 16px;
margin-bottom: 16px
}
.markdown-body table {
display: block;
width: 100%;
overflow: auto
}
.markdown-body table th {
font-weight: 600
}
.markdown-body table th, .markdown-body table td {
padding: 6px 13px;
border: 1px solid #dfe2e5
}
.markdown-body table tr {
background-color: #fff;
border-top: 1px solid #c6cbd1
}
.markdown-body table tr:nth-child(2n) {
background-color: #f6f8fa
}
.markdown-body table img {
background-color: transparent
}
.markdown-body img {
max-width: 100%;
box-sizing: content-box;
background-color: #fff
}
.markdown-body img[align=right] {
padding-left: 20px
}
.markdown-body img[align=left] {
padding-right: 20px
}
.markdown-body .emoji {
max-width: none;
vertical-align: text-top;
background-color: transparent
}
.markdown-body span.frame {
display: block;
overflow: hidden
}
.markdown-body span.frame > span {
display: block;
float: left;
width: auto;
padding: 7px;
margin: 13px 0 0;
overflow: hidden;
border: 1px solid #dfe2e5
}
.markdown-body span.frame span img {
display: block;
float: left
}
.markdown-body span.frame span span {
display: block;
padding: 5px 0 0;
clear: both;
color: #24292e
}
.markdown-body span.align-center {
display: block;
overflow: hidden;
clear: both
}
.markdown-body span.align-center > span {
display: block;
margin: 13px auto 0;
overflow: hidden;
text-align: center
}
.markdown-body span.align-center span img {
margin: 0 auto;
text-align: center
}
.markdown-body span.align-right {
display: block;
overflow: hidden;
clear: both
}
.markdown-body span.align-right > span {
display: block;
margin: 13px 0 0;
overflow: hidden;
text-align: right
}
.markdown-body span.align-right span img {
margin: 0;
text-align: right
}
.markdown-body span.float-left {
display: block;
float: left;
margin-right: 13px;
overflow: hidden
}
.markdown-body span.float-left span {
margin: 13px 0 0
}
.markdown-body span.float-right {
display: block;
float: right;
margin-left: 13px;
overflow: hidden
}
.markdown-body span.float-right > span {
display: block;
margin: 13px auto 0;
overflow: hidden;
text-align: right
}
.markdown-body code, .markdown-body tt {
padding: 0.2em 0.4em;
margin: 0;
font-size: 85%;
background-color: rgba(27, 31, 35, 0.05);
border-radius: 3px
}
.markdown-body code br, .markdown-body tt br {
display: none
}
.markdown-body del code {
text-decoration: inherit
}
.markdown-body pre {
word-wrap: normal
}
.markdown-body pre > code {
padding: 0;
margin: 0;
font-size: 100%;
word-break: normal;
white-space: pre;
background: transparent;
border: 0
}
.markdown-body .highlight {
margin-bottom: 16px
}
.markdown-body .highlight pre {
margin-bottom: 0;
word-break: normal
}
.markdown-body .highlight pre, .markdown-body pre {
padding: 16px;
overflow: auto;
font-size: 85%;
line-height: 1.45;
background-color: #f6f8fa;
border-radius: 3px
}
.markdown-body pre code, .markdown-body pre tt {
display: inline;
/* Was `max-width: auto`, which is not a valid max-width value and was
ignored; `none` (no maximum) is the intended effect. */
max-width: none;
padding: 0;
margin: 0;
overflow: visible;
line-height: inherit;
word-wrap: normal;
background-color: transparent;
border: 0
}
.markdown-body .csv-data td, .markdown-body .csv-data th {
padding: 5px;
overflow: hidden;
font-size: 12px;
line-height: 1;
text-align: left;
white-space: nowrap
}
.markdown-body .csv-data .blob-num {
padding: 10px 8px 9px;
text-align: right;
background: #fff;
border: 0
}
.markdown-body .csv-data tr {
border-top: 0
}
.markdown-body .csv-data th {
font-weight: 600;
background: #f6f8fa;
border-top: 0
}
|
{
"pile_set_name": "Github"
}
|
/*===-- llvm-c/Analysis.h - Analysis Library C Interface --------*- C++ -*-===*\
|* *|
|* The LLVM Compiler Infrastructure *|
|* *|
|* This file is distributed under the University of Illinois Open Source *|
|* License. See LICENSE.TXT for details. *|
|* *|
|*===----------------------------------------------------------------------===*|
|* *|
|* This header declares the C interface to libLLVMAnalysis.a, which *|
|* implements various analyses of the LLVM IR. *|
|* *|
|* Many exotic languages can interoperate with C code but have a harder time *|
|* with C++ due to name mangling. So in addition to C, this interface enables *|
|* tools written in such languages. *|
|* *|
\*===----------------------------------------------------------------------===*/
#ifndef LLVM_C_ANALYSIS_H
#define LLVM_C_ANALYSIS_H
#include "llvm-c/Core.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* @defgroup LLVMCAnalysis Analysis
* @ingroup LLVMC
*
* @{
*/
/* Action for the verifier to take when a broken module or function is
found. */
typedef enum {
LLVMAbortProcessAction, /* verifier will print to stderr and abort() */
LLVMPrintMessageAction, /* verifier will print to stderr and return 1 */
LLVMReturnStatusAction /* verifier will just return 1 */
} LLVMVerifierFailureAction;
/* Verifies that a module is valid, taking the specified action if not.
Optionally returns a human-readable description of any invalid constructs.
OutMessage must be disposed with LLVMDisposeMessage. */
LLVMBool LLVMVerifyModule(LLVMModuleRef M, LLVMVerifierFailureAction Action,
char **OutMessage);
/* Verifies that a single function is valid, taking the specified action. Useful
for debugging. */
LLVMBool LLVMVerifyFunction(LLVMValueRef Fn, LLVMVerifierFailureAction Action);
/* Open up a ghostview window that displays the CFG of the current function.
Useful for debugging. */
void LLVMViewFunctionCFG(LLVMValueRef Fn);
/* As LLVMViewFunctionCFG, but showing only the block graph without the
instructions inside each basic block. */
void LLVMViewFunctionCFGOnly(LLVMValueRef Fn);
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif
|
{
"pile_set_name": "Github"
}
|
// cgo -godefs types_openbsd.go | go run mkpost.go
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build 386,openbsd
package unix
const (
SizeofPtr = 0x4
SizeofShort = 0x2
SizeofInt = 0x4
SizeofLong = 0x4
SizeofLongLong = 0x8
)
type (
_C_short int16
_C_int int32
_C_long int32
_C_long_long int64
)
type Timespec struct {
Sec int64
Nsec int32
}
type Timeval struct {
Sec int64
Usec int32
}
type Rusage struct {
Utime Timeval
Stime Timeval
Maxrss int32
Ixrss int32
Idrss int32
Isrss int32
Minflt int32
Majflt int32
Nswap int32
Inblock int32
Oublock int32
Msgsnd int32
Msgrcv int32
Nsignals int32
Nvcsw int32
Nivcsw int32
}
type Rlimit struct {
Cur uint64
Max uint64
}
type _Gid_t uint32
type Stat_t struct {
Mode uint32
Dev int32
Ino uint64
Nlink uint32
Uid uint32
Gid uint32
Rdev int32
Atim Timespec
Mtim Timespec
Ctim Timespec
Size int64
Blocks int64
Blksize uint32
Flags uint32
Gen uint32
X__st_birthtim Timespec
}
type Statfs_t struct {
F_flags uint32
F_bsize uint32
F_iosize uint32
F_blocks uint64
F_bfree uint64
F_bavail int64
F_files uint64
F_ffree uint64
F_favail int64
F_syncwrites uint64
F_syncreads uint64
F_asyncwrites uint64
F_asyncreads uint64
F_fsid Fsid
F_namemax uint32
F_owner uint32
F_ctime uint64
F_fstypename [16]int8
F_mntonname [90]int8
F_mntfromname [90]int8
F_mntfromspec [90]int8
Pad_cgo_0 [2]byte
Mount_info [160]byte
}
type Flock_t struct {
Start int64
Len int64
Pid int32
Type int16
Whence int16
}
type Dirent struct {
Fileno uint64
Off int64
Reclen uint16
Type uint8
Namlen uint8
X__d_padding [4]uint8
Name [256]int8
}
type Fsid struct {
Val [2]int32
}
const (
PathMax = 0x400
)
type RawSockaddrInet4 struct {
Len uint8
Family uint8
Port uint16
Addr [4]byte /* in_addr */
Zero [8]int8
}
type RawSockaddrInet6 struct {
Len uint8
Family uint8
Port uint16
Flowinfo uint32
Addr [16]byte /* in6_addr */
Scope_id uint32
}
type RawSockaddrUnix struct {
Len uint8
Family uint8
Path [104]int8
}
type RawSockaddrDatalink struct {
Len uint8
Family uint8
Index uint16
Type uint8
Nlen uint8
Alen uint8
Slen uint8
Data [24]int8
}
type RawSockaddr struct {
Len uint8
Family uint8
Data [14]int8
}
type RawSockaddrAny struct {
Addr RawSockaddr
Pad [92]int8
}
type _Socklen uint32
type Linger struct {
Onoff int32
Linger int32
}
type Iovec struct {
Base *byte
Len uint32
}
type IPMreq struct {
Multiaddr [4]byte /* in_addr */
Interface [4]byte /* in_addr */
}
type IPv6Mreq struct {
Multiaddr [16]byte /* in6_addr */
Interface uint32
}
type Msghdr struct {
Name *byte
Namelen uint32
Iov *Iovec
Iovlen uint32
Control *byte
Controllen uint32
Flags int32
}
type Cmsghdr struct {
Len uint32
Level int32
Type int32
}
type Inet6Pktinfo struct {
Addr [16]byte /* in6_addr */
Ifindex uint32
}
type IPv6MTUInfo struct {
Addr RawSockaddrInet6
Mtu uint32
}
type ICMPv6Filter struct {
Filt [8]uint32
}
const (
SizeofSockaddrInet4 = 0x10
SizeofSockaddrInet6 = 0x1c
SizeofSockaddrAny = 0x6c
SizeofSockaddrUnix = 0x6a
SizeofSockaddrDatalink = 0x20
SizeofLinger = 0x8
SizeofIPMreq = 0x8
SizeofIPv6Mreq = 0x14
SizeofMsghdr = 0x1c
SizeofCmsghdr = 0xc
SizeofInet6Pktinfo = 0x14
SizeofIPv6MTUInfo = 0x20
SizeofICMPv6Filter = 0x20
)
const (
PTRACE_TRACEME = 0x0
PTRACE_CONT = 0x7
PTRACE_KILL = 0x8
)
type Kevent_t struct {
Ident uint32
Filter int16
Flags uint16
Fflags uint32
Data int64
Udata *byte
}
type FdSet struct {
Bits [32]uint32
}
const (
SizeofIfMsghdr = 0xec
SizeofIfData = 0xd4
SizeofIfaMsghdr = 0x18
SizeofIfAnnounceMsghdr = 0x1a
SizeofRtMsghdr = 0x60
SizeofRtMetrics = 0x38
)
type IfMsghdr struct {
Msglen uint16
Version uint8
Type uint8
Hdrlen uint16
Index uint16
Tableid uint16
Pad1 uint8
Pad2 uint8
Addrs int32
Flags int32
Xflags int32
Data IfData
}
type IfData struct {
Type uint8
Addrlen uint8
Hdrlen uint8
Link_state uint8
Mtu uint32
Metric uint32
Pad uint32
Baudrate uint64
Ipackets uint64
Ierrors uint64
Opackets uint64
Oerrors uint64
Collisions uint64
Ibytes uint64
Obytes uint64
Imcasts uint64
Omcasts uint64
Iqdrops uint64
Noproto uint64
Capabilities uint32
Lastchange Timeval
Mclpool [7]Mclpool
}
type IfaMsghdr struct {
Msglen uint16
Version uint8
Type uint8
Hdrlen uint16
Index uint16
Tableid uint16
Pad1 uint8
Pad2 uint8
Addrs int32
Flags int32
Metric int32
}
type IfAnnounceMsghdr struct {
Msglen uint16
Version uint8
Type uint8
Hdrlen uint16
Index uint16
What uint16
Name [16]int8
}
type RtMsghdr struct {
Msglen uint16
Version uint8
Type uint8
Hdrlen uint16
Index uint16
Tableid uint16
Priority uint8
Mpls uint8
Addrs int32
Flags int32
Fmask int32
Pid int32
Seq int32
Errno int32
Inits uint32
Rmx RtMetrics
}
type RtMetrics struct {
Pksent uint64
Expire int64
Locks uint32
Mtu uint32
Refcnt uint32
Hopcount uint32
Recvpipe uint32
Sendpipe uint32
Ssthresh uint32
Rtt uint32
Rttvar uint32
Pad uint32
}
type Mclpool struct {
Grown int32
Alive uint16
Hwm uint16
Cwm uint16
Lwm uint16
}
const (
SizeofBpfVersion = 0x4
SizeofBpfStat = 0x8
SizeofBpfProgram = 0x8
SizeofBpfInsn = 0x8
SizeofBpfHdr = 0x14
)
type BpfVersion struct {
Major uint16
Minor uint16
}
type BpfStat struct {
Recv uint32
Drop uint32
}
type BpfProgram struct {
Len uint32
Insns *BpfInsn
}
type BpfInsn struct {
Code uint16
Jt uint8
Jf uint8
K uint32
}
type BpfHdr struct {
Tstamp BpfTimeval
Caplen uint32
Datalen uint32
Hdrlen uint16
Pad_cgo_0 [2]byte
}
type BpfTimeval struct {
Sec uint32
Usec uint32
}
type Termios struct {
Iflag uint32
Oflag uint32
Cflag uint32
Lflag uint32
Cc [20]uint8
Ispeed int32
Ospeed int32
}
type Winsize struct {
Row uint16
Col uint16
Xpixel uint16
Ypixel uint16
}
const (
AT_FDCWD = -0x64
AT_SYMLINK_NOFOLLOW = 0x2
)
type PollFd struct {
Fd int32
Events int16
Revents int16
}
const (
POLLERR = 0x8
POLLHUP = 0x10
POLLIN = 0x1
POLLNVAL = 0x20
POLLOUT = 0x4
POLLPRI = 0x2
POLLRDBAND = 0x80
POLLRDNORM = 0x40
POLLWRBAND = 0x100
POLLWRNORM = 0x4
)
type Utsname struct {
Sysname [256]byte
Nodename [256]byte
Release [256]byte
Version [256]byte
Machine [256]byte
}
const SizeofUvmexp = 0x158
type Uvmexp struct {
Pagesize int32
Pagemask int32
Pageshift int32
Npages int32
Free int32
Active int32
Inactive int32
Paging int32
Wired int32
Zeropages int32
Reserve_pagedaemon int32
Reserve_kernel int32
Anonpages int32
Vnodepages int32
Vtextpages int32
Freemin int32
Freetarg int32
Inactarg int32
Wiredmax int32
Anonmin int32
Vtextmin int32
Vnodemin int32
Anonminpct int32
Vtextminpct int32
Vnodeminpct int32
Nswapdev int32
Swpages int32
Swpginuse int32
Swpgonly int32
Nswget int32
Nanon int32
Nanonneeded int32
Nfreeanon int32
Faults int32
Traps int32
Intrs int32
Swtch int32
Softs int32
Syscalls int32
Pageins int32
Obsolete_swapins int32
Obsolete_swapouts int32
Pgswapin int32
Pgswapout int32
Forks int32
Forks_ppwait int32
Forks_sharevm int32
Pga_zerohit int32
Pga_zeromiss int32
Zeroaborts int32
Fltnoram int32
Fltnoanon int32
Fltnoamap int32
Fltpgwait int32
Fltpgrele int32
Fltrelck int32
Fltrelckok int32
Fltanget int32
Fltanretry int32
Fltamcopy int32
Fltnamap int32
Fltnomap int32
Fltlget int32
Fltget int32
Flt_anon int32
Flt_acow int32
Flt_obj int32
Flt_prcopy int32
Flt_przero int32
Pdwoke int32
Pdrevs int32
Pdswout int32
Pdfreed int32
Pdscans int32
Pdanscan int32
Pdobscan int32
Pdreact int32
Pdbusy int32
Pdpageouts int32
Pdpending int32
Pddeact int32
Pdreanon int32
Pdrevnode int32
Pdrevtext int32
Fpswtch int32
Kmapent int32
}
|
{
"pile_set_name": "Github"
}
|
/* Copyright (C) 2005 Analog Devices */
/**
@file vq_bfin.h
@author Jean-Marc Valin
@brief Blackfin-optimized vq routine
*/
/*
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of the Xiph.org Foundation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define OVERRIDE_VQ_NBEST
/* Finds the N codebook entries closest to the input vector and writes their
indices to nbest[] and distances to best_dist[], sorted best-first.
The distance computed per entry i is E[i]/2 - sum(in[j]*codebook[i][j])
(see the asm: E value loaded, shifted right by 1 into A0, then the
products are subtracted with the MAC unit), so smaller is better.
Blackfin-optimized with hand-written inline assembly using hardware
loops. [stack] is the scratch allocator; unused here. */
void vq_nbest(spx_word16_t *in, const spx_word16_t *codebook, int len, int entries, spx_word32_t *E, int N, int *nbest, spx_word32_t *best_dist, char *stack)
{
if (N==1)
{
/* Fast path: single best entry. The whole search over all entries runs
inside one asm block; the running minimum (best_dist[0]) and its index
(nbest[0]) are kept in registers via the "2"/"3" tied operands, and the
input vector is read through I0 configured as a circular buffer
(B0 = in, L0 = len<<1 bytes) so it rewinds for each entry. */
best_dist[0] = 2147483647;
{
spx_word32_t dist;
__asm__ __volatile__
(
"LC0 = %8;\n\t"
"R2 = 0;\n\t"
"I0 = %6;\n\t"
"B0 = %6;\n\t"
"L0 = %9;\n\t"
"LOOP entries_loop%= LC0;\n\t"
"LOOP_BEGIN entries_loop%=;\n\t"
"%0 = [%4++];\n\t"
"%0 >>= 1;\n\t"
"A0 = %0;\n\t"
"R0.L = W[%1++%7] || R1.L = W[I0++];\n\t"
"LOOP vq_loop%= LC1 = %5;\n\t"
"LOOP_BEGIN vq_loop%=;\n\t"
"%0 = (A0 -= R0.L*R1.L) (IS) || R0.L = W[%1++%7] || R1.L = W[I0++];\n\t"
"LOOP_END vq_loop%=;\n\t"
"%0 = (A0 -= R0.L*R1.L) (IS);\n\t"
"cc = %0 < %2;\n\t"
"if cc %2 = %0;\n\t"
"if cc %3 = R2;\n\t"
"R2 += 1;\n\t"
"LOOP_END entries_loop%=;\n\t"
: "=&D" (dist), "=&a" (codebook), "=&d" (best_dist[0]), "=&d" (nbest[0]), "=&a" (E)
: "a" (len-1), "a" (in), "a" (2), "d" (entries), "d" (len<<1), "1" (codebook), "4" (E), "2" (best_dist[0]), "3" (nbest[0])
: "R0", "R1", "R2", "I0", "L0", "B0", "A0", "cc", "memory"
);
}
} else {
/* General path: compute each entry's distance with a small asm kernel,
then insert it into the sorted best_dist[]/nbest[] lists in C.
[used] counts entries inserted so far, so the first N entries are
always placed even before the lists are full. */
int i,k,used;
used = 0;
for (i=0;i<entries;i++)
{
spx_word32_t dist;
/* dist = E[i]/2 - <in, codebook row i>; L0 = 0 disables circular
addressing for I0 here since the input is re-pointed each entry. */
__asm__
(
"%0 >>= 1;\n\t"
"A0 = %0;\n\t"
"I0 = %3;\n\t"
"L0 = 0;\n\t"
"R0.L = W[%1++%4] || R1.L = W[I0++];\n\t"
"LOOP vq_loop%= LC0 = %2;\n\t"
"LOOP_BEGIN vq_loop%=;\n\t"
"%0 = (A0 -= R0.L*R1.L) (IS) || R0.L = W[%1++%4] || R1.L = W[I0++];\n\t"
"LOOP_END vq_loop%=;\n\t"
"%0 = (A0 -= R0.L*R1.L) (IS);\n\t"
: "=D" (dist), "=a" (codebook)
: "a" (len-1), "a" (in), "a" (2), "1" (codebook), "0" (E[i])
: "R0", "R1", "I0", "L0", "A0"
);
if (i<N || dist<best_dist[N-1])
{
/* Shift worse entries down to make room, then insert at slot k. */
for (k=N-1; (k >= 1) && (k > used || dist < best_dist[k-1]); k--)
{
best_dist[k]=best_dist[k-1];
nbest[k] = nbest[k-1];
}
best_dist[k]=dist;
nbest[k]=i;
used++;
}
}
}
}
|
{
"pile_set_name": "Github"
}
|
<?php include(erLhcoreClassDesign::designtpl('lhchatbox/getstatus/options_variable_page.tpl.php')); ?>
// Embedded chatbox page widget: builds the chat iframe URL (server-side
// values are injected via PHP), inserts the iframe into the page, and
// syncs state (nick, sound) with the iframe via postMessage + localStorage.
var lhc_ChatboxPage = {
// JSON shim: first available of native JSON, Prototype/MooTools-style
// evalJSON/toJSON, or jQuery helpers
JSON : {
parse: window.JSON && (window.JSON.parse || window.JSON.decode) || String.prototype.evalJSON && function(str){return String(str).evalJSON();} || $.parseJSON || $.evalJSON,
stringify: Object.toJSON || window.JSON && (window.JSON.stringify || window.JSON.encode) || $.toJSON
},
// Per-visitor state persisted in localStorage under 'lhc_chb'
// (observed keys: s = sound, n/nick = nickname)
cookieData : {},
// Builds the chat widget iframe and injects it into
// #lhc_chatbox_embed_container
showVotingForm : function() {
// Current page URL without the protocol, passed as the referer
var locationCurrent = encodeURIComponent(window.location.href.substring(window.location.protocol.length));
this.initial_iframe_url = "<?php echo erLhcoreClassModelChatConfig::fetch('explicit_http_mode')->current_value?>//<?php echo $_SERVER['HTTP_HOST']?><?php echo erLhcoreClassDesign::baseurl('chatbox/chatwidget')?>/(chat_height)/<?php echo $heightchatcontent;?><?php $theme !== false ? print '/(theme)/'.$theme : ''?>/(mode)/embed/(identifier)/"+<?php echo $chatboxOptionsVariablePage;?>.identifier+'/(hashchatbox)/'+<?php echo $chatboxOptionsVariablePage;?>.hashchatbox+this.getAppendCookieArguments()+'?URLReferer='+locationCurrent+this.getAppendRequestArguments();
this.iframe_html = '<iframe id="lhc_sizing_chatbox_page" allowTransparency="true" scrolling="no" frameborder="0" ' +
( this.initial_iframe_url != '' ? ' src="' + this.initial_iframe_url + '"' : '' ) +
' width="100%"' +
' height="300"' +
' style="width: 100%; height: 300px;"></iframe>';
document.getElementById('lhc_chatbox_embed_container').innerHTML = this.iframe_html;
},
// Query-string arguments from page-level options (with cookie fallback
// for the nick)
getAppendRequestArguments : function() {
var nickOption = (typeof <?php echo $chatboxOptionsVariablePage;?>.nick !== 'undefined') ? '&nick='+encodeURIComponent(<?php echo $chatboxOptionsVariablePage;?>.nick) : (this.cookieData.nick ? '&nick='+encodeURIComponent(this.cookieData.nick) : '');
var disableOption = (typeof <?php echo $chatboxOptionsVariablePage;?>.disable_nick_change !== 'undefined') ? '&dnc=true' : '';
var chatboxName = (typeof <?php echo $chatboxOptionsVariablePage;?>.chatbox_name !== 'undefined') ? '&chtbx_name='+encodeURIComponent(<?php echo $chatboxOptionsVariablePage;?>.chatbox_name) : '';
return nickOption+disableOption+chatboxName;
},
// URL path segments derived from persisted cookie data
getAppendCookieArguments : function() {
var soundOption = this.cookieData.s ? '/(sound)/'+this.cookieData.s : '';
var nickOption = this.cookieData.n ? '/(nick)/'+this.cookieData.n : '';
return soundOption+nickOption;
},
// postMessage handler for messages from the chat iframe; message format
// is 'action:arg1:arg2'
handleMessage : function(e) {
if (typeof e.data !== 'string') { return; }
var action = e.data.split(':')[0];
if (action == 'lhc_sizing_chatbox_page') {
// Iframe reports its content height; resize to match
var height = e.data.split(':')[1];
var elementObject = document.getElementById('lhc_sizing_chatbox_page');
elementObject.height = height;
elementObject.style.height = height+'px';
} else if (action == 'lhc_ch') {
var parts = e.data.split(':');
if (parts[1] != '' && parts[2] != '') {
lhc_ChatboxPage.addCookieAttribute(parts[1],parts[2]);
}
} else if (action == 'lhc_chb') {
// NOTE(review): identical to the 'lhc_ch' branch above -- possibly
// intentional (two message names, same handling); confirm before
// merging
var parts = e.data.split(':');
if (parts[1] != '' && parts[2] != '') {
lhc_ChatboxPage.addCookieAttribute(parts[1],parts[2]);
}
}
},
// Removes a persisted attribute and re-saves the store
removeCookieAttr : function(attr){
if (this.cookieData[attr]) {
delete this.cookieData[attr];
this.storeSesCookie();
}
},
// Persists cookieData to localStorage (no-op where unavailable)
storeSesCookie : function(){
if (localStorage) {
localStorage.setItem('lhc_chb',this.JSON.stringify(this.cookieData));
}
},
// Restores cookieData from localStorage, if previously saved
initSessionStorage : function(){
if (localStorage && localStorage.getItem('lhc_chb')) {
this.cookieData = this.JSON.parse(localStorage.getItem('lhc_chb'));
}
},
// Sets a persisted attribute, saving only when the value changed
addCookieAttribute : function(attr, value){
if (!this.cookieData[attr] || this.cookieData[attr] != value){
this.cookieData[attr] = value;
this.storeSesCookie();
}
}
};
// Restore persisted state, then build and insert the iframe
lhc_ChatboxPage.initSessionStorage();
lhc_ChatboxPage.showVotingForm();
// Register the cross-frame message handler on every event API the
// browser supports (legacy IE uses attachEvent)
if ( window.attachEvent ) {
// IE
window.attachEvent("onmessage",function(e){lhc_ChatboxPage.handleMessage(e);});
};
if ( document.attachEvent ) {
// IE
document.attachEvent("onmessage",function(e){lhc_ChatboxPage.handleMessage(e);});
};
if ( window.addEventListener ){
// FF
window.addEventListener("message",function(e){lhc_ChatboxPage.handleMessage(e);}, false);
};
|
{
"pile_set_name": "Github"
}
|
package org.hotswap.agent.plugin.mojarra;
/**
 * Fully-qualified Mojarra/JSF class and annotation names used by the
 * plugin. Kept as string constants so the agent can reference them
 * without a compile-time dependency on the JSF libraries.
 */
public class MojarraConstants {
private MojarraConstants() {
// prevent instantiation..
}
/** Annotation that marks a JSF managed bean. */
public static final String MANAGED_BEAN_ANNOTATION = "javax.faces.bean.ManagedBean";
/** Mojarra's managed-bean manager class. */
public static final String BEAN_MANAGER_CLASS = "com.sun.faces.mgbean.BeanManager";
/** Mojarra's JSF lifecycle implementation class. */
public static final String LIFECYCLE_IMPL_CLASS = "com.sun.faces.lifecycle.LifecycleImpl";
}
|
{
"pile_set_name": "Github"
}
|
<?php
namespace Tests\Behat\Gherkin\Keywords;
use Behat\Gherkin\Keywords\ArrayKeywords;
use Behat\Gherkin\Keywords\KeywordsDumper;
use PHPUnit\Framework\TestCase;
class KeywordsDumperTest extends TestCase
{
private $keywords;
protected function setUp()
{
$this->keywords = new ArrayKeywords(array(
'en' => array(
'feature' => 'Feature',
'background' => 'Background',
'scenario' => 'Scenario',
'scenario_outline' => 'Scenario Outline|Scenario Template',
'examples' => 'Examples|Scenarios',
'given' => 'Given',
'when' => 'When',
'then' => 'Then',
'and' => 'And',
'but' => 'But'
),
'ru' => array(
'feature' => 'Функционал|Фича',
'background' => 'Предыстория|Бэкграунд',
'scenario' => 'Сценарий|История',
'scenario_outline' => 'Структура сценария|Аутлайн',
'examples' => 'Примеры',
'given' => 'Допустим',
'when' => 'Если|@',
'then' => 'То',
'and' => 'И',
'but' => 'Но'
)
));
}
public function testEnKeywordsDumper()
{
$dumper = new KeywordsDumper($this->keywords);
$dumped = $dumper->dump('en');
$etalon = <<<GHERKIN
Feature: Internal operations
In order to stay secret
As a secret organization
We need to be able to erase past agents' memory
Background:
Given there is agent A
And there is agent B
Scenario: Erasing agent memory
Given there is agent J
And there is agent K
When I erase agent K's memory
Then there should be agent J
But there should not be agent K
(Scenario Outline|Scenario Template): Erasing other agents' memory
Given there is agent <agent1>
And there is agent <agent2>
When I erase agent <agent2>'s memory
Then there should be agent <agent1>
But there should not be agent <agent2>
(Examples|Scenarios):
| agent1 | agent2 |
| D | M |
GHERKIN;
$this->assertEquals($etalon, $dumped);
}
public function testRuKeywordsDumper()
{
$dumper = new KeywordsDumper($this->keywords);
$dumped = $dumper->dump('ru');
$etalon = <<<GHERKIN
# language: ru
(Функционал|Фича): Internal operations
In order to stay secret
As a secret organization
We need to be able to erase past agents' memory
(Предыстория|Бэкграунд):
Допустим there is agent A
И there is agent B
(Сценарий|История): Erasing agent memory
Допустим there is agent J
И there is agent K
(Если|@) I erase agent K's memory
То there should be agent J
Но there should not be agent K
(Структура сценария|Аутлайн): Erasing other agents' memory
Допустим there is agent <agent1>
И there is agent <agent2>
(Если|@) I erase agent <agent2>'s memory
То there should be agent <agent1>
Но there should not be agent <agent2>
Примеры:
| agent1 | agent2 |
| D | M |
GHERKIN;
$this->assertEquals($etalon, $dumped);
}
public function testRuKeywordsCustomKeywordsDumper()
{
$dumper = new KeywordsDumper($this->keywords);
$dumper->setKeywordsDumperFunction(function ($keywords) {
return '<keyword>'.implode(', ', $keywords).'</keyword>';
});
$dumped = $dumper->dump('ru');
$etalon = <<<GHERKIN
# language: ru
<keyword>Функционал, Фича</keyword>: Internal operations
In order to stay secret
As a secret organization
We need to be able to erase past agents' memory
<keyword>Предыстория, Бэкграунд</keyword>:
<keyword>Допустим</keyword> there is agent A
<keyword>И</keyword> there is agent B
<keyword>Сценарий, История</keyword>: Erasing agent memory
<keyword>Допустим</keyword> there is agent J
<keyword>И</keyword> there is agent K
<keyword>Если, @</keyword> I erase agent K's memory
<keyword>То</keyword> there should be agent J
<keyword>Но</keyword> there should not be agent K
<keyword>Структура сценария, Аутлайн</keyword>: Erasing other agents' memory
<keyword>Допустим</keyword> there is agent <agent1>
<keyword>И</keyword> there is agent <agent2>
<keyword>Если, @</keyword> I erase agent <agent2>'s memory
<keyword>То</keyword> there should be agent <agent1>
<keyword>Но</keyword> there should not be agent <agent2>
<keyword>Примеры</keyword>:
| agent1 | agent2 |
| D | M |
GHERKIN;
$this->assertEquals($etalon, $dumped);
}
// Dumps the 'ru' locale in extended mode (second argument false): instead
// of one combined document, the dumper returns an array with one complete
// document per alternative feature keyword, and within each document every
// alternative scenario/outline keyword produces its own section.
public function testExtendedVersionDumper()
{
$dumper = new KeywordsDumper($this->keywords);
$dumped = $dumper->dump('ru', false);
// NOTE(review): the heredocs below are the byte-exact expected documents
// (one per feature keyword) -- do not re-indent or reflow them.
$etalon = array(
<<<GHERKIN
# language: ru
Функционал: Internal operations
In order to stay secret
As a secret organization
We need to be able to erase past agents' memory
Предыстория:
Допустим there is agent A
И there is agent B
Сценарий: Erasing agent memory
Допустим there is agent J
И there is agent K
Если I erase agent K's memory
@ I erase agent K's memory
То there should be agent J
Но there should not be agent K
История: Erasing agent memory
Допустим there is agent J
И there is agent K
Если I erase agent K's memory
@ I erase agent K's memory
То there should be agent J
Но there should not be agent K
Структура сценария: Erasing other agents' memory
Допустим there is agent <agent1>
И there is agent <agent2>
Если I erase agent <agent2>'s memory
@ I erase agent <agent2>'s memory
То there should be agent <agent1>
Но there should not be agent <agent2>
Примеры:
| agent1 | agent2 |
| D | M |
Аутлайн: Erasing other agents' memory
Допустим there is agent <agent1>
И there is agent <agent2>
Если I erase agent <agent2>'s memory
@ I erase agent <agent2>'s memory
То there should be agent <agent1>
Но there should not be agent <agent2>
Примеры:
| agent1 | agent2 |
| D | M |
GHERKIN
, <<<GHERKIN
# language: ru
Фича: Internal operations
In order to stay secret
As a secret organization
We need to be able to erase past agents' memory
Предыстория:
Допустим there is agent A
И there is agent B
Сценарий: Erasing agent memory
Допустим there is agent J
И there is agent K
Если I erase agent K's memory
@ I erase agent K's memory
То there should be agent J
Но there should not be agent K
История: Erasing agent memory
Допустим there is agent J
И there is agent K
Если I erase agent K's memory
@ I erase agent K's memory
То there should be agent J
Но there should not be agent K
Структура сценария: Erasing other agents' memory
Допустим there is agent <agent1>
И there is agent <agent2>
Если I erase agent <agent2>'s memory
@ I erase agent <agent2>'s memory
То there should be agent <agent1>
Но there should not be agent <agent2>
Примеры:
| agent1 | agent2 |
| D | M |
Аутлайн: Erasing other agents' memory
Допустим there is agent <agent1>
И there is agent <agent2>
Если I erase agent <agent2>'s memory
@ I erase agent <agent2>'s memory
То there should be agent <agent1>
Но there should not be agent <agent2>
Примеры:
| agent1 | agent2 |
| D | M |
GHERKIN
);
$this->assertEquals($etalon, $dumped);
}
}
|
{
"pile_set_name": "Github"
}
|
/*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "memcmp16.h"
// This is linked against by assembly stubs, only.
#pragma GCC diagnostic ignored "-Wunused-function"
int32_t memcmp16_generic_static(const uint16_t* s0, const uint16_t* s1, size_t count);
// Reference (portable, unoptimized) comparison of two uint16_t buffers.
// Returns 0 when the first `count` elements are equal, otherwise the
// signed difference of the first mismatching pair (s0[i] - s1[i]).
int32_t memcmp16_generic_static(const uint16_t* s0, const uint16_t* s1, size_t count) {
  const uint16_t* end = s0 + count;
  while (s0 != end) {
    const int32_t diff = static_cast<int32_t>(*s0) - static_cast<int32_t>(*s1);
    if (diff != 0) {
      return diff;
    }
    ++s0;
    ++s1;
  }
  return 0;
}
namespace art {
namespace testing {

// Test-only entry point: exposes MemCmp16 (declared in memcmp16.h, and
// possibly an assembly implementation) through a plain C++ symbol so
// unit tests can call it directly.
int32_t MemCmp16Testing(const uint16_t* s0, const uint16_t* s1, size_t count) {
  return MemCmp16(s0, s1, count);
}

}  // namespace testing
}  // namespace art
#pragma GCC diagnostic warning "-Wunused-function"
|
{
"pile_set_name": "Github"
}
|
# .NET Metadata Dumper
This sample uses the [.NET Metadata Reader][MDReader] to dump the contents
of the [ECMA-335] metadata contained in a .NET assembly (or module).
[MDReader]: http://www.nuget.org/packages/Microsoft.Bcl.Metadata
[ECMA-335]: http://www.ecma-international.org/publications/standards/Ecma-335.htm
|
{
"pile_set_name": "Github"
}
|
/*
* ProGuard -- shrinking, optimization, obfuscation, and preverification
* of Java bytecode.
*
* Copyright (c) 2002-2017 Eric Lafortune @ GuardSquare
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
package proguard.io;
import proguard.classfile.ClassConstants;
import java.io.IOException;
import java.util.Map;
/**
* This DataEntryReader delegates to another DataEntryReader, renaming the
* data entries based on the given map. Entries whose name does not appear
* in the map may be passed to an alternative DataEntryReader.
*
* @author Eric Lafortune
*/
public class DataEntryRenamer implements DataEntryReader
{
    private final Map             nameMap;
    private final DataEntryReader renamedDataEntryReader;
    private final DataEntryReader missingDataEntryReader;


    /**
     * Creates a new DataEntryRenamer without a reader for missing entries.
     * @param nameMap                the map from old names to new names.
     * @param renamedDataEntryReader the DataEntryReader to which renamed data
     *                               entries will be passed.
     */
    public DataEntryRenamer(Map             nameMap,
                            DataEntryReader renamedDataEntryReader)
    {
        this(nameMap, renamedDataEntryReader, null);
    }


    /**
     * Creates a new DataEntryRenamer.
     * @param nameMap                the map from old names to new names.
     * @param renamedDataEntryReader the DataEntryReader to which renamed data
     *                               entries will be passed.
     * @param missingDataEntryReader the optional DataEntryReader to which data
     *                               entries that can't be renamed will be
     *                               passed.
     */
    public DataEntryRenamer(Map             nameMap,
                            DataEntryReader renamedDataEntryReader,
                            DataEntryReader missingDataEntryReader)
    {
        this.nameMap                = nameMap;
        this.renamedDataEntryReader = renamedDataEntryReader;
        this.missingDataEntryReader = missingDataEntryReader;
    }


    // Implementations for DataEntryReader.

    public void read(DataEntry dataEntry) throws IOException
    {
        String  entryName   = dataEntry.getName();
        boolean isDirectory = dataEntry.isDirectory();

        // Directory entries are keyed in the map with a trailing separator.
        // NOTE(review): ClassConstants.PACKAGE_SEPARATOR is presumably '/';
        // confirm against ClassConstants.
        String key = isDirectory && entryName.length() > 0 ?
            entryName + ClassConstants.PACKAGE_SEPARATOR :
            entryName;

        String newName = (String)nameMap.get(key);
        if (newName == null)
        {
            // No mapping for this entry; divert it if a fallback reader is set.
            if (missingDataEntryReader != null)
            {
                missingDataEntryReader.read(dataEntry);
            }
            return;
        }

        // Strip the separator again for directory entries.
        if (isDirectory && newName.length() > 0)
        {
            newName = newName.substring(0, newName.length() - 1);
        }

        renamedDataEntryReader.read(new RenamedDataEntry(dataEntry, newName));
    }
}
|
{
"pile_set_name": "Github"
}
|
import firebase from 'firebase/app'
// Side-effect import: registers the auth module on the firebase namespace.
import 'firebase/auth'

// Firebase client configuration.
// NOTE(review): the apiKey here is a public client identifier, not a
// secret, but access rules must be enforced in the Firebase console.
const config = {
  apiKey: 'AIzaSyADubq8Pf-_nC6cM52PGZJLAJ_yT4UkWto',
  authDomain: 'reactzzaria-12649.firebaseapp.com',
  databaseURL: 'https://reactzzaria-12649.firebaseio.com',
  projectId: 'reactzzaria-12649',
  storageBucket: 'reactzzaria-12649.appspot.com',
  messagingSenderId: '190402590347'
}

// Initialize the default app once at module load and export the
// configured SDK instance for the rest of the application.
firebase.initializeApp(config)

export default firebase
|
{
"pile_set_name": "Github"
}
|
<!DOCTYPE html>
<html>
<head lang="en">
<meta charset="UTF-8">
<title></title>
<script src="http://ajax.googleapis.com/ajax/libs/jquery/1.11.3/jquery.min.js"></script>
<script src="angular_before.js"></script>
<script src="../../js/execute.js"></script>
<script src="../../js/jstat.min.js"></script>
</head>
<body>
<span></span>
<div id="message"></div>
<div ng-app="myApp" ng-controller="ExampleController">
</div>
</body>
<script>
// Micro-benchmark: measures AngularJS $scope.$eval() performance on
// expressions whose property names are the reserved words 'null' and
// 'undefined', then POSTs the mean timing to a local collector.
var app = angular.module('myApp', []);
app.controller('ExampleController', function ($scope, $compile) {
    // 'null' used deliberately as a plain property name on the scope.
    $scope.null = {a: 42};
    var f1 = function () {
        for (var i = 0; i < 50000; i++) {
            $scope.$eval("a.null.undefined.b", {a:{null:{undefined:{b: 1}}}});
            $scope.$eval('null.a', {null: {a: 42}});
            $scope.$eval('this.null.a');
        }
    };
    // execute() and jStat() come from the scripts included in <head>;
    // presumably execute(f, 10) runs f ten times and returns the timings.
    var a = execute(f1, 10);
    var mean = jStat(a).mean();
    console.log(mean);
    // Report the result to the local benchmark server.
    $.ajax({
        url: 'http://localhost:8081',
        data: JSON.stringify({'mark': 0, 'mean': mean}),
        type: 'POST',
        contentType: 'application/json',
        success: function (data) {
            $('#message').html('done, close browser');
            console.log('Success: ')
        },
        error: function (xhr, status, error) {
            console.log('Error: ' + error.message);
        }
    });
});
</script>
</html>
|
{
"pile_set_name": "Github"
}
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
*
* Generic Bluetooth HCI UART driver
*
* Copyright (C) 2015-2018 Intel Corporation
*/
#include <asm/unaligned.h>
/* Describes one H:4 packet type for h4_recv_buf(): how to recognise it
 * by its indicator byte and where to find the payload-length field in
 * its header. */
struct h4_recv_pkt {
	u8  type;	/* Packet type */
	u8  hlen;	/* Header length */
	u8  loff;	/* Data length offset in header */
	u8  lsize;	/* Data length field size */
	u16 maxlen;	/* Max overall packet length */
	/* Called with each completed frame; h4_recv_buf() drops its own
	 * reference afterwards, so the callback owns the skb. */
	int (*recv)(struct hci_dev *hdev, struct sk_buff *skb);
};
/* Designated-initialiser fragments for struct h4_recv_pkt covering the
 * three core HCI packet types.
 *
 * Fix: H4_RECV_ACL previously ended with a stray backslash
 * line-continuation directly before "#define H4_RECV_SCO", which spliced
 * that directive into H4_RECV_ACL's replacement list and left
 * H4_RECV_SCO undefined. */
#define H4_RECV_ACL \
	.type = HCI_ACLDATA_PKT, \
	.hlen = HCI_ACL_HDR_SIZE, \
	.loff = 2, \
	.lsize = 2, \
	.maxlen = HCI_MAX_FRAME_SIZE

#define H4_RECV_SCO \
	.type = HCI_SCODATA_PKT, \
	.hlen = HCI_SCO_HDR_SIZE, \
	.loff = 2, \
	.lsize = 1, \
	.maxlen = HCI_MAX_SCO_SIZE

#define H4_RECV_EVENT \
	.type = HCI_EVENT_PKT, \
	.hlen = HCI_EVENT_HDR_SIZE, \
	.loff = 1, \
	.lsize = 1, \
	.maxlen = HCI_MAX_EVENT_SIZE
/* Incrementally reassembles complete HCI frames from a raw H:4 byte
 * stream.
 *
 * @skb:    partial frame returned by the previous call (NULL or an
 *          ERR_PTR both mean "start a new frame").
 * @buffer: @count fresh bytes from the transport.
 * @pkts:   table of @pkts_count recognised packet types.
 *
 * Each completed frame is handed to the matching pkts[i].recv callback.
 * Returns the (possibly still incomplete) skb to feed back in on the
 * next call, or ERR_PTR(-ENOMEM / -EILSEQ / -EMSGSIZE) on failure.
 */
static inline struct sk_buff *h4_recv_buf(struct hci_dev *hdev,
					  struct sk_buff *skb,
					  const unsigned char *buffer,
					  int count,
					  const struct h4_recv_pkt *pkts,
					  int pkts_count)
{
	/* Check for error from previous call */
	if (IS_ERR(skb))
		skb = NULL;

	while (count) {
		int i, len;

		if (!skb) {
			/* First byte of a frame is the H:4 packet-type
			 * indicator; look it up in the descriptor table. */
			for (i = 0; i < pkts_count; i++) {
				if (buffer[0] != (&pkts[i])->type)
					continue;

				skb = bt_skb_alloc((&pkts[i])->maxlen,
						   GFP_ATOMIC);
				if (!skb)
					return ERR_PTR(-ENOMEM);

				hci_skb_pkt_type(skb) = (&pkts[i])->type;
				hci_skb_expect(skb) = (&pkts[i])->hlen;
				break;
			}

			/* Check for invalid packet type */
			if (!skb)
				return ERR_PTR(-EILSEQ);

			/* Consume the indicator byte itself. */
			count -= 1;
			buffer += 1;
		}

		/* Copy no more than is still expected for this frame. */
		len = min_t(uint, hci_skb_expect(skb) - skb->len, count);
		skb_put_data(skb, buffer, len);

		count -= len;
		buffer += len;

		/* Check for partial packet */
		if (skb->len < hci_skb_expect(skb))
			continue;

		/* Re-find the descriptor for the frame's packet type. */
		for (i = 0; i < pkts_count; i++) {
			if (hci_skb_pkt_type(skb) == (&pkts[i])->type)
				break;
		}

		if (i >= pkts_count) {
			kfree_skb(skb);
			return ERR_PTR(-EILSEQ);
		}

		if (skb->len == (&pkts[i])->hlen) {
			/* Header is complete: read the payload length and
			 * extend the expected frame size accordingly. */
			u16 dlen;

			switch ((&pkts[i])->lsize) {
			case 0:
				/* No variable data length */
				dlen = 0;
				break;
			case 1:
				/* Single octet variable length */
				dlen = skb->data[(&pkts[i])->loff];
				hci_skb_expect(skb) += dlen;

				if (skb_tailroom(skb) < dlen) {
					kfree_skb(skb);
					return ERR_PTR(-EMSGSIZE);
				}
				break;
			case 2:
				/* Double octet variable length */
				dlen = get_unaligned_le16(skb->data +
							  (&pkts[i])->loff);
				hci_skb_expect(skb) += dlen;

				if (skb_tailroom(skb) < dlen) {
					kfree_skb(skb);
					return ERR_PTR(-EMSGSIZE);
				}
				break;
			default:
				/* Unsupported variable length */
				kfree_skb(skb);
				return ERR_PTR(-EILSEQ);
			}

			if (!dlen) {
				/* No more data, complete frame */
				(&pkts[i])->recv(hdev, skb);
				skb = NULL;
			}
		} else {
			/* Complete frame */
			(&pkts[i])->recv(hdev, skb);
			skb = NULL;
		}
	}

	return skb;
}
|
{
"pile_set_name": "Github"
}
|
# mach: bpf
# as: --EB
# ld: --EB
# sim: -E big
# output: pass\nexit 0 (0x0)\n
;;; endbe.s
;;; Tests for BPF endianness-conversion instructions in simulator
;;; running in BIG ENDIAN
;;;
;;; Both 'be' and 'le' ISAs have both endbe and endle instructions.
.include "testutils.inc"

.text
.global main
.type main, @function
main:
;; Load a 64-bit immediate with distinct bytes so each swap is visible.
lddw %r1, 0x12345678deadbeef
;; On this big-endian host, endle byte-swaps to/from little-endian.
endle %r1, 64
fail_ne %r1, 0xefbeadde78563412
;; Swapping twice restores the original value.
endle %r1, 64
fail_ne %r1, 0x12345678deadbeef
;; `bitsize` < 64 will truncate
endle %r1, 32
fail_ne %r1, 0xefbeadde
endle %r1, 32
fail_ne %r1, 0xdeadbeef
endle %r1, 16
fail_ne %r1, 0xefbe
endle %r1, 16
fail_ne %r1, 0xbeef
;; endbe on be should be noop (except truncate)
lddw %r1, 0x12345678deadbeef
endbe %r1, 64
fail_ne %r1, 0x12345678deadbeef
endbe %r1, 32
fail_ne %r1, 0xdeadbeef
endbe %r1, 16
fail_ne %r1, 0xbeef
pass
|
{
"pile_set_name": "Github"
}
|
<?xml version="1.0" encoding="UTF-8"?>
<project basedir="." default="test" name="Voxforge Tests">
<description>
This file is used to run the voxforge performance tests.
NOTE: Before running the tests, you must have already built
the sphinx-4 sources by typing "ant" in the top level
sphinx4 directory.
Run ant with -Dskip=5 to do the standard 'quick' version of a test.
</description>
<!-- All properties below can be overridden on the command line with -Dname=value. -->
<property name="classpath" value="../../../sphinx4-core/target/classes"/>
<!-- Decode every Nth utterance of the batch; lower means a longer, fuller test. -->
<property name="skip" value="40"/>
<property name="logger_props" value=""/>
<property name="initial_heap_size" value="512m"/>
<property name="maximum_heap_size" value="2048m"/>
<property name="jit" value="server"/>
<property name="gc_log_file" value="gc.txt"/>
<!-- Intentionally empty: compilation is done in the top-level sphinx4 build. -->
<target name="compile" />
<target name="test"
description="trigram decoding of Voxforge test data">
<!-- Runs the batch recognizer in a forked JVM with the heap/JIT settings above. -->
<java classpath="${classpath}"
classname="edu.cmu.sphinx.tools.batch.BatchModeRecognizer"
fork="true">
<jvmarg value="-${jit}"/>
<jvmarg value="-ms${initial_heap_size}"/>
<jvmarg value="-mx${maximum_heap_size}"/>
<arg value="voxforge.config.xml"/>
<arg value="voxforge_en_test.batch"/>
<sysproperty key="batch[skip]" value="${skip}"/>
</java>
</target>
</project>
|
{
"pile_set_name": "Github"
}
|
<Type Name="DefaultSignalHandlerAttribute" FullName="GLib.DefaultSignalHandlerAttribute">
<TypeSignature Language="C#" Maintainer="auto" Value="public sealed class DefaultSignalHandlerAttribute : Attribute" />
<TypeSignature Language="ILAsm" Value=".class public auto ansi sealed beforefieldinit DefaultSignalHandlerAttribute extends System.Attribute" />
<AssemblyInfo>
<AssemblyName>glib-sharp</AssemblyName>
<AssemblyPublicKey>
</AssemblyPublicKey>
</AssemblyInfo>
<ThreadSafetyStatement />
<Base>
<BaseTypeName>System.Attribute</BaseTypeName>
</Base>
<Interfaces />
<Docs>
<summary>Identifies a virtual class method on a GLib.Object subclass.</summary>
<remarks>
<para>
When a virtual method tagged with this attribute is overridden in a subclass, the method is automatically hooked into the native object's vtable. For the most part, this is an internal implementation detail, but it can be used by binding code to manually identify GObject virtual methods that can be overridden by subclasses.
</para>
<para>
The following code identifies the ForAll method as an overridable native method on the <see cref="T:Gtk.Container" /> class. When a managed subclass of Container overrides the ForAll method, at type registration time, the OverrideForall method is invoked to connect up a delegate to the native GtkContainerClass::forall vtable slot.
</para>
<example>
<code lang="C#">
static void Forall_cb (IntPtr container, bool include_internals, IntPtr cb, IntPtr data)
{
Container obj = GLib.Object.GetObject (container, false) as Container;
CallbackInvoker invoker = new CallbackInvoker (cb, data);
obj.ForAll (include_internals, invoker);
}
static void OverrideForall (GLib.GType gtype)
{
if (ForallCallback == null)
ForallCallback = new ForallDelegate (Forall_cb);
gtksharp_container_override_forall (gtype, ForallCallback);
}
[GLib.DefaultSignalHandler (Type=typeof(Gtk.Container), ConnectionMethod="OverrideForall")]
protected virtual void ForAll (bool include_internals, CallbackInvoker invoker)
{
gtksharp_container_base_forall (Handle, include_internals, invoker.Callback, invoker.Data);
} </code>
</example>
</remarks>
</Docs>
<Members>
<Member MemberName=".ctor">
<MemberSignature Language="C#" Value="public DefaultSignalHandlerAttribute ();" />
<MemberSignature Language="ILAsm" Value=".method public hidebysig specialname rtspecialname instance void .ctor() cil managed" />
<MemberType>Constructor</MemberType>
<ReturnValue />
<Parameters />
<Docs>
<summary>Public Constructor.</summary>
<remarks />
</Docs>
</Member>
<Member MemberName="ConnectionMethod">
<MemberSignature Language="C#" Value="public string ConnectionMethod { get; set; }" />
<MemberSignature Language="ILAsm" Value=".property instance string ConnectionMethod" />
<MemberType>Property</MemberType>
<ReturnValue>
<ReturnType>System.String</ReturnType>
</ReturnValue>
<Parameters />
<Docs>
<summary>The method to invoke to hook into the native object's vtable.</summary>
<value>a <see cref="T:System.String" /> representing the method name to invoke.</value>
<remarks>This method is invoked during type registration to hook up a callback delegate into the native object's vtable for virtual methods.</remarks>
</Docs>
</Member>
<Member MemberName="Type">
<MemberSignature Language="C#" Value="public Type Type { get; set; }" />
<MemberSignature Language="ILAsm" Value=".property instance class System.Type Type" />
<MemberType>Property</MemberType>
<ReturnValue>
<ReturnType>System.Type</ReturnType>
</ReturnValue>
<Parameters />
<Docs>
<summary>The Type of the object which exposes the virtual method.</summary>
<value>a <see cref="T:System.Type" /></value>
<remarks>The type registration code reflects on this type for the <see cref="P:GLib.DefaultSignalHandlerAttribute.ConnectionMethod" /> to invoke.</remarks>
</Docs>
</Member>
</Members>
</Type>
|
{
"pile_set_name": "Github"
}
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import copy
from ..engine import Layer
from ..engine import InputSpec
from ..engine.topology import _object_list_uid
from ..utils.generic_utils import has_arg
from .. import backend as K
class Wrapper(Layer):
    """Abstract wrapper base class.

    Wrappers take another layer and augment it in various ways.
    Do not use this class as a layer, it is only an abstract base class.
    Two usable wrappers are the `TimeDistributed` and `Bidirectional` wrappers.

    # Arguments
        layer: The layer to be wrapped.
    """

    def __init__(self, layer, **kwargs):
        self.layer = layer
        # Maps a uid of the wrapper's inputs to the (possibly transformed)
        # tensors actually fed to the inner layer, so update ops that depend
        # on the inner inputs can be resolved in `get_updates_for`.
        self._input_map = {}
        super(Wrapper, self).__init__(**kwargs)

    def build(self, input_shape=None):
        self.built = True

    @property
    def activity_regularizer(self):
        # Fall back to None when the wrapped layer has no regularizer.
        return getattr(self.layer, 'activity_regularizer', None)

    @property
    def trainable_weights(self):
        return self.layer.trainable_weights

    @property
    def non_trainable_weights(self):
        return self.layer.non_trainable_weights

    @property
    def updates(self):
        return getattr(self.layer, 'updates', [])

    def get_updates_for(self, inputs=None):
        # If the wrapper transformed its inputs before feeding the inner
        # layer, query the inner layer with the transformed tensors.
        if inputs is None:
            inner_inputs = None
        else:
            inner_inputs = self._input_map.get(_object_list_uid(inputs),
                                               inputs)
        updates = self.layer.get_updates_for(inner_inputs)
        updates += super(Wrapper, self).get_updates_for(inputs)
        return updates

    @property
    def losses(self):
        return getattr(self.layer, 'losses', [])

    def get_losses_for(self, inputs=None):
        if inputs is None:
            # Unconditional losses: combine the inner layer's with our own.
            inner_losses = self.layer.get_losses_for(None)
            return inner_losses + super(Wrapper, self).get_losses_for(None)
        return super(Wrapper, self).get_losses_for(inputs)

    @property
    def constraints(self):
        return self.layer.constraints

    def get_weights(self):
        return self.layer.get_weights()

    def set_weights(self, weights):
        self.layer.set_weights(weights)

    def get_config(self):
        # Serialize the wrapped layer by class name + config so it can be
        # reconstructed in `from_config`.
        config = {'layer': {'class_name': self.layer.__class__.__name__,
                            'config': self.layer.get_config()}}
        base_config = super(Wrapper, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    @classmethod
    def from_config(cls, config, custom_objects=None):
        from . import deserialize as deserialize_layer
        layer = deserialize_layer(config.pop('layer'),
                                  custom_objects=custom_objects)
        return cls(layer, **config)
class TimeDistributed(Wrapper):
    """This wrapper applies a layer to every temporal slice of an input.

    The input should be at least 3D, and the dimension of index one
    will be considered to be the temporal dimension.

    Consider a batch of 32 samples,
    where each sample is a sequence of 10 vectors of 16 dimensions.
    The batch input shape of the layer is then `(32, 10, 16)`,
    and the `input_shape`, not including the samples dimension, is `(10, 16)`.

    You can then use `TimeDistributed` to apply a `Dense` layer
    to each of the 10 timesteps, independently:

    ```python
        # as the first layer in a model
        model = Sequential()
        model.add(TimeDistributed(Dense(8), input_shape=(10, 16)))
        # now model.output_shape == (None, 10, 8)
    ```

    The output will then have shape `(32, 10, 8)`.

    In subsequent layers, there is no need for the `input_shape`:

    ```python
        model.add(TimeDistributed(Dense(32)))
        # now model.output_shape == (None, 10, 32)
    ```

    The output will then have shape `(32, 10, 32)`.

    `TimeDistributed` can be used with arbitrary layers, not just `Dense`,
    for instance with a `Conv2D` layer:

    ```python
        model = Sequential()
        model.add(TimeDistributed(Conv2D(64, (3, 3)),
                                  input_shape=(10, 299, 299, 3)))
    ```

    # Arguments
        layer: a layer instance.
    """

    def __init__(self, layer, **kwargs):
        super(TimeDistributed, self).__init__(layer, **kwargs)
        self.supports_masking = True

    def build(self, input_shape):
        # Need at least (samples, timesteps, ...).
        assert len(input_shape) >= 3
        self.input_spec = InputSpec(shape=input_shape)
        # The child layer sees the input with the time axis removed.
        child_input_shape = (input_shape[0],) + input_shape[2:]
        if not self.layer.built:
            self.layer.build(child_input_shape)
            self.layer.built = True
        super(TimeDistributed, self).build()

    def compute_output_shape(self, input_shape):
        # Child output shape with the timesteps axis re-inserted at index 1.
        child_input_shape = (input_shape[0],) + input_shape[2:]
        child_output_shape = self.layer.compute_output_shape(child_input_shape)
        timesteps = input_shape[1]
        return (child_output_shape[0], timesteps) + child_output_shape[1:]

    def call(self, inputs, training=None, mask=None):
        kwargs = {}
        if has_arg(self.layer.call, 'training'):
            kwargs['training'] = training
        # One-element list used as a mutable cell: the nested `step` closure
        # below must be able to set this flag.  The previous code declared
        # `global uses_learning_phase` inside `step`, which binds a
        # *module-level* name and never updated this function's local, so
        # the learning-phase flag was silently lost on the rnn code path.
        # (`nonlocal` would be the natural fix, but this file still
        # supports Python 2.)
        uses_learning_phase = [False]

        input_shape = K.int_shape(inputs)
        if input_shape[0]:
            # batch size matters, use rnn-based implementation
            def step(x, _):
                output = self.layer.call(x, **kwargs)
                if hasattr(output, '_uses_learning_phase'):
                    uses_learning_phase[0] = (output._uses_learning_phase or
                                              uses_learning_phase[0])
                return output, []

            _, outputs, _ = K.rnn(step, inputs,
                                  initial_states=[],
                                  input_length=input_shape[1],
                                  unroll=False)
            y = outputs
        else:
            # No batch size specified, therefore the layer will be able
            # to process batches of any size.
            # We can go with reshape-based implementation for performance.
            input_length = input_shape[1]
            if not input_length:
                input_length = K.shape(inputs)[1]
            # Shape: (num_samples * timesteps, ...). And track the
            # transformation in self._input_map.
            input_uid = _object_list_uid(inputs)
            inputs = K.reshape(inputs, (-1,) + input_shape[2:])
            self._input_map[input_uid] = inputs
            # (num_samples * timesteps, ...)
            y = self.layer.call(inputs, **kwargs)
            if hasattr(y, '_uses_learning_phase'):
                uses_learning_phase[0] = y._uses_learning_phase
            # Shape: (num_samples, timesteps, ...)
            output_shape = self.compute_output_shape(input_shape)
            y = K.reshape(y, (-1, input_length) + output_shape[2:])

        # Apply activity regularizer if any:
        if (hasattr(self.layer, 'activity_regularizer') and
                self.layer.activity_regularizer is not None):
            regularization_loss = self.layer.activity_regularizer(y)
            self.add_loss(regularization_loss, inputs)

        if uses_learning_phase[0]:
            y._uses_learning_phase = True
        return y
class Bidirectional(Wrapper):
    """Bidirectional wrapper for RNNs.

    # Arguments
        layer: `Recurrent` instance.
        merge_mode: Mode by which outputs of the
            forward and backward RNNs will be combined.
            One of {'sum', 'mul', 'concat', 'ave', None}.
            If None, the outputs will not be combined,
            they will be returned as a list.

    # Raises
        ValueError: In case of invalid `merge_mode` argument.

    # Examples

    ```python
        model = Sequential()
        model.add(Bidirectional(LSTM(10, return_sequences=True),
                                input_shape=(5, 10)))
        model.add(Bidirectional(LSTM(10)))
        model.add(Dense(5))
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    ```
    """

    def __init__(self, layer, merge_mode='concat', weights=None, **kwargs):
        super(Bidirectional, self).__init__(layer, **kwargs)
        if merge_mode not in ['sum', 'mul', 'ave', 'concat', None]:
            raise ValueError('Invalid merge mode. '
                             'Merge mode should be one of '
                             '{"sum", "mul", "ave", "concat", None}')
        # A shallow copy of the wrapped layer serves as the forward layer;
        # the backward layer is rebuilt from config with `go_backwards`
        # inverted so it consumes the sequence in reverse.
        self.forward_layer = copy.copy(layer)
        config = layer.get_config()
        config['go_backwards'] = not config['go_backwards']
        self.backward_layer = layer.__class__.from_config(config)
        self.forward_layer.name = 'forward_' + self.forward_layer.name
        self.backward_layer.name = 'backward_' + self.backward_layer.name
        self.merge_mode = merge_mode
        if weights:
            # First half of the flat weight list goes to the forward layer,
            # second half to the backward layer (mirrors get/set_weights).
            nw = len(weights)
            self.forward_layer.initial_weights = weights[:nw // 2]
            self.backward_layer.initial_weights = weights[nw // 2:]
        self.stateful = layer.stateful
        self.return_sequences = layer.return_sequences
        self.supports_masking = True

    def get_weights(self):
        return self.forward_layer.get_weights() + self.backward_layer.get_weights()

    def set_weights(self, weights):
        # Split the flat weight list evenly between the two directions.
        nw = len(weights)
        self.forward_layer.set_weights(weights[:nw // 2])
        self.backward_layer.set_weights(weights[nw // 2:])

    def compute_output_shape(self, input_shape):
        if self.merge_mode in ['sum', 'ave', 'mul']:
            return self.forward_layer.compute_output_shape(input_shape)
        elif self.merge_mode == 'concat':
            # Concatenation doubles the feature (last) dimension.
            shape = list(self.forward_layer.compute_output_shape(input_shape))
            shape[-1] *= 2
            return tuple(shape)
        elif self.merge_mode is None:
            return [self.forward_layer.compute_output_shape(input_shape)] * 2

    def call(self, inputs, training=None, mask=None):
        kwargs = {}
        if has_arg(self.layer.call, 'training'):
            kwargs['training'] = training
        if has_arg(self.layer.call, 'mask'):
            kwargs['mask'] = mask
        y = self.forward_layer.call(inputs, **kwargs)
        y_rev = self.backward_layer.call(inputs, **kwargs)
        if self.return_sequences:
            # The backward layer produced its output time-reversed; flip it
            # back so both outputs align along the time axis.
            y_rev = K.reverse(y_rev, 1)
        if self.merge_mode == 'concat':
            output = K.concatenate([y, y_rev])
        elif self.merge_mode == 'sum':
            output = y + y_rev
        elif self.merge_mode == 'ave':
            output = (y + y_rev) / 2
        elif self.merge_mode == 'mul':
            output = y * y_rev
        elif self.merge_mode is None:
            output = [y, y_rev]
        # Properly set learning phase
        # NOTE(review): assumes the wrapped layer exposes `dropout` and
        # `recurrent_dropout` attributes (true for Keras recurrent layers).
        if 0 < self.layer.dropout + self.layer.recurrent_dropout:
            if self.merge_mode is None:
                for out in output:
                    out._uses_learning_phase = True
            else:
                output._uses_learning_phase = True
        return output

    def reset_states(self):
        self.forward_layer.reset_states()
        self.backward_layer.reset_states()

    def build(self, input_shape):
        # Build each direction under its own name scope for clean graphs.
        with K.name_scope(self.forward_layer.name):
            self.forward_layer.build(input_shape)
        with K.name_scope(self.backward_layer.name):
            self.backward_layer.build(input_shape)
        self.built = True

    def compute_mask(self, inputs, mask):
        if self.return_sequences:
            # With merge_mode None the two outputs each carry the mask.
            if not self.merge_mode:
                return [mask, mask]
            else:
                return mask
        else:
            return None

    @property
    def trainable_weights(self):
        if hasattr(self.forward_layer, 'trainable_weights'):
            return (self.forward_layer.trainable_weights +
                    self.backward_layer.trainable_weights)
        return []

    @property
    def non_trainable_weights(self):
        if hasattr(self.forward_layer, 'non_trainable_weights'):
            return (self.forward_layer.non_trainable_weights +
                    self.backward_layer.non_trainable_weights)
        return []

    @property
    def updates(self):
        if hasattr(self.forward_layer, 'updates'):
            return self.forward_layer.updates + self.backward_layer.updates
        return []

    @property
    def losses(self):
        if hasattr(self.forward_layer, 'losses'):
            return self.forward_layer.losses + self.backward_layer.losses
        return []

    @property
    def constraints(self):
        constraints = {}
        if hasattr(self.forward_layer, 'constraints'):
            constraints.update(self.forward_layer.constraints)
            constraints.update(self.backward_layer.constraints)
        return constraints

    def get_config(self):
        config = {'merge_mode': self.merge_mode}
        base_config = super(Bidirectional, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
|
{
"pile_set_name": "Github"
}
|
<!--
@license
Copyright (c) 2015 The Polymer Project Authors. All rights reserved.
This code may only be used under the BSD style license found at http://polymer.github.io/LICENSE.txt
The complete set of authors may be found at http://polymer.github.io/AUTHORS.txt
The complete set of contributors may be found at http://polymer.github.io/CONTRIBUTORS.txt
Code distributed by Google as part of the polymer project is also
subject to an additional IP rights grant found at http://polymer.github.io/PATENTS.txt
-->
<script>
  // Warn consumers at import time that this stylesheet shim is deprecated.
  console.warn('This file is deprecated. Please use `iron-flex-layout/iron-flex-layout-classes.html`, and one of the specific dom-modules instead');
</script>
<style>
/*******************************
Flex Layout
*******************************/
html /deep/ .layout.horizontal,
html /deep/ .layout.horizontal-reverse,
html /deep/ .layout.vertical,
html /deep/ .layout.vertical-reverse {
display: -ms-flexbox;
display: -webkit-flex;
display: flex;
}
html /deep/ .layout.inline {
display: -ms-inline-flexbox;
display: -webkit-inline-flex;
display: inline-flex;
}
html /deep/ .layout.horizontal {
-ms-flex-direction: row;
-webkit-flex-direction: row;
flex-direction: row;
}
html /deep/ .layout.horizontal-reverse {
-ms-flex-direction: row-reverse;
-webkit-flex-direction: row-reverse;
flex-direction: row-reverse;
}
html /deep/ .layout.vertical {
-ms-flex-direction: column;
-webkit-flex-direction: column;
flex-direction: column;
}
html /deep/ .layout.vertical-reverse {
-ms-flex-direction: column-reverse;
-webkit-flex-direction: column-reverse;
flex-direction: column-reverse;
}
html /deep/ .layout.wrap {
-ms-flex-wrap: wrap;
-webkit-flex-wrap: wrap;
flex-wrap: wrap;
}
html /deep/ .layout.wrap-reverse {
-ms-flex-wrap: wrap-reverse;
-webkit-flex-wrap: wrap-reverse;
flex-wrap: wrap-reverse;
}
html /deep/ .flex-auto {
-ms-flex: 1 1 auto;
-webkit-flex: 1 1 auto;
flex: 1 1 auto;
}
html /deep/ .flex-none {
-ms-flex: none;
-webkit-flex: none;
flex: none;
}
html /deep/ .flex,
html /deep/ .flex-1 {
-ms-flex: 1;
-webkit-flex: 1;
flex: 1;
}
html /deep/ .flex-2 {
-ms-flex: 2;
-webkit-flex: 2;
flex: 2;
}
html /deep/ .flex-3 {
-ms-flex: 3;
-webkit-flex: 3;
flex: 3;
}
html /deep/ .flex-4 {
-ms-flex: 4;
-webkit-flex: 4;
flex: 4;
}
html /deep/ .flex-5 {
-ms-flex: 5;
-webkit-flex: 5;
flex: 5;
}
html /deep/ .flex-6 {
-ms-flex: 6;
-webkit-flex: 6;
flex: 6;
}
html /deep/ .flex-7 {
-ms-flex: 7;
-webkit-flex: 7;
flex: 7;
}
html /deep/ .flex-8 {
-ms-flex: 8;
-webkit-flex: 8;
flex: 8;
}
html /deep/ .flex-9 {
-ms-flex: 9;
-webkit-flex: 9;
flex: 9;
}
html /deep/ .flex-10 {
-ms-flex: 10;
-webkit-flex: 10;
flex: 10;
}
html /deep/ .flex-11 {
-ms-flex: 11;
-webkit-flex: 11;
flex: 11;
}
html /deep/ .flex-12 {
-ms-flex: 12;
-webkit-flex: 12;
flex: 12;
}
/* alignment in cross axis */
html /deep/ .layout.start {
-ms-flex-align: start;
-webkit-align-items: flex-start;
align-items: flex-start;
}
html /deep/ .layout.center,
html /deep/ .layout.center-center {
-ms-flex-align: center;
-webkit-align-items: center;
align-items: center;
}
html /deep/ .layout.end {
-ms-flex-align: end;
-webkit-align-items: flex-end;
align-items: flex-end;
}
/* alignment in main axis */
html /deep/ .layout.start-justified {
-ms-flex-pack: start;
-webkit-justify-content: flex-start;
justify-content: flex-start;
}
html /deep/ .layout.center-justified,
html /deep/ .layout.center-center {
-ms-flex-pack: center;
-webkit-justify-content: center;
justify-content: center;
}
html /deep/ .layout.end-justified {
-ms-flex-pack: end;
-webkit-justify-content: flex-end;
justify-content: flex-end;
}
html /deep/ .layout.around-justified {
-ms-flex-pack: around;
-webkit-justify-content: space-around;
justify-content: space-around;
}
html /deep/ .layout.justified {
-ms-flex-pack: justify;
-webkit-justify-content: space-between;
justify-content: space-between;
}
/* self alignment */
html /deep/ .self-start {
-ms-align-self: flex-start;
-webkit-align-self: flex-start;
align-self: flex-start;
}
html /deep/ .self-center {
-ms-align-self: center;
-webkit-align-self: center;
align-self: center;
}
html /deep/ .self-end {
-ms-align-self: flex-end;
-webkit-align-self: flex-end;
align-self: flex-end;
}
html /deep/ .self-stretch {
-ms-align-self: stretch;
-webkit-align-self: stretch;
align-self: stretch;
}
/*******************************
Other Layout
*******************************/
html /deep/ .block {
display: block;
}
/* IE 10 support for HTML5 hidden attr */
html /deep/ [hidden] {
display: none !important;
}
html /deep/ .invisible {
visibility: hidden !important;
}
html /deep/ .relative {
position: relative;
}
html /deep/ .fit {
position: absolute;
top: 0;
right: 0;
bottom: 0;
left: 0;
}
body.fullbleed {
margin: 0;
height: 100vh;
}
html /deep/ .scroll {
-webkit-overflow-scrolling: touch;
overflow: auto;
}
.fixed-bottom,
.fixed-left,
.fixed-right,
.fixed-top {
position: fixed;
}
html /deep/ .fixed-top {
top: 0;
left: 0;
right: 0;
}
html /deep/ .fixed-right {
top: 0;
right: 0;
bottom: 0;
}
html /deep/ .fixed-bottom {
right: 0;
bottom: 0;
left: 0;
}
html /deep/ .fixed-left {
top: 0;
bottom: 0;
left: 0;
}
</style>
|
{
"pile_set_name": "Github"
}
|
/*
* SonarQube PHP Plugin
* Copyright (C) 2010-2020 SonarSource SA
* mailto:info AT sonarsource DOT com
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 3 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
package org.sonar.php.symbols;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.stream.Collectors;
import org.sonar.plugins.php.api.symbols.QualifiedName;
import static java.util.Collections.emptyList;
import static java.util.Collections.singletonList;
import static org.sonar.php.symbols.UnknownLocationInFile.UNKNOWN_LOCATION;
import static org.sonar.plugins.php.api.symbols.QualifiedName.qualifiedName;
/**
 * Singleton holder for symbol data of a few PHP builtin classes
 * (Throwable, Exception, RuntimeException), looked up by qualified name.
 */
public enum BuiltinSymbolData {

  BUILTINS;

  private final Map<QualifiedName, ClassSymbolData> classSymbolsByQualifiedName = init();

  private Map<QualifiedName, ClassSymbolData> init() {
    // Builtins have no real source location, hence UNKNOWN_LOCATION.
    ClassSymbolData throwable = new ClassSymbolData(
      UNKNOWN_LOCATION, qualifiedName("Throwable"), null, emptyList(), true, Collections.emptyList());
    ClassSymbolData exception = new ClassSymbolData(
      UNKNOWN_LOCATION, qualifiedName("Exception"), null, singletonList(qualifiedName("Throwable")), Collections.emptyList());
    ClassSymbolData runtimeException = new ClassSymbolData(
      UNKNOWN_LOCATION, qualifiedName("RuntimeException"), qualifiedName("Exception"), emptyList(), Collections.emptyList());
    return Arrays.asList(throwable, exception, runtimeException)
      .stream()
      .collect(Collectors.toMap(ClassSymbolData::qualifiedName, symbolData -> symbolData));
  }

  /**
   * @param qualifiedName fully qualified class name to look up
   * @return the builtin class data, or empty if the name is not a known builtin
   */
  public Optional<ClassSymbolData> classSymbolData(QualifiedName qualifiedName) {
    return Optional.ofNullable(classSymbolsByQualifiedName.get(qualifiedName));
  }
}
|
{
"pile_set_name": "Github"
}
|
// RUN: %empty-directory(%t)
// RUN: not %target-swift-frontend -c %s -index-store-path %t

// Regression test: running the frontend with -index-store-path on this file
// must fail gracefully (note the `not` on the RUN line — compilation is
// expected to fail) rather than crash the indexer.  The declarations below
// are intentionally ill-formed.
struct X : Z {
  func b(_ : Y) {}
}
protocol Z {
  associatedtype a
  typealias Y = a.c
}
|
{
"pile_set_name": "Github"
}
|
/* time.h -- An implementation of the standard Unix <sys/time.h> file.
   Written by Geoffrey Noer <noer@cygnus.com>
   Public domain; no rights reserved. */

#ifndef _SYS_TIME_H_
#define _SYS_TIME_H_

#include <_ansi.h>
#include <sys/types.h>

#ifdef __cplusplus
extern "C" {
#endif

/* winsock.h defines timeval itself; avoid a duplicate definition. */
#ifndef _WINSOCK_H
struct timeval {
  long tv_sec;   /* seconds */
  long tv_usec;  /* microseconds */
};

struct timezone {
  int tz_minuteswest;  /* minutes west of Greenwich */
  int tz_dsttime;      /* type of DST correction */
};

#ifdef __CYGWIN__
#include <sys/select.h>
#endif /* __CYGWIN__ */
#endif /* _WINSOCK_H */

/* Interval-timer kinds for getitimer()/setitimer(). */
#define ITIMER_REAL     0  /* decrements in real time */
#define ITIMER_VIRTUAL  1  /* decrements in process virtual time */
#define ITIMER_PROF     2  /* decrements in process virtual time and when the system runs for the process */

struct itimerval {
  struct timeval it_interval;  /* reload value after expiry */
  struct timeval it_value;     /* time until next expiry */
};

/* BSD time macros used by RTEMS code */
#if defined (__rtems__) || defined (__CYGWIN__)

/* Convenience macros for operations on timevals.
   NOTE: `timercmp' does not work for >= or <=.  */
#define timerisset(tvp)   ((tvp)->tv_sec || (tvp)->tv_usec)
#define timerclear(tvp)   ((tvp)->tv_sec = (tvp)->tv_usec = 0)
#define timercmp(a, b, CMP)       \
  (((a)->tv_sec == (b)->tv_sec) ?   \
   ((a)->tv_usec CMP (b)->tv_usec) : \
   ((a)->tv_sec CMP (b)->tv_sec))
/* Both macros below perform a single carry/borrow: inputs are assumed to be
   normalized (0 <= tv_usec < 1000000). */
#define timeradd(a, b, result)        \
  do {            \
    (result)->tv_sec = (a)->tv_sec + (b)->tv_sec;   \
    (result)->tv_usec = (a)->tv_usec + (b)->tv_usec;  \
    if ((result)->tv_usec >= 1000000)     \
      {           \
  ++(result)->tv_sec;       \
  (result)->tv_usec -= 1000000;     \
      }           \
  } while (0)
#define timersub(a, b, result)        \
  do {            \
    (result)->tv_sec = (a)->tv_sec - (b)->tv_sec;   \
    (result)->tv_usec = (a)->tv_usec - (b)->tv_usec;  \
    if ((result)->tv_usec < 0) {      \
      --(result)->tv_sec;       \
      (result)->tv_usec += 1000000;     \
    }           \
  } while (0)
#endif /* defined (__rtems__) || defined (__CYGWIN__) */

int _EXFUN(gettimeofday, (struct timeval *__p, struct timezone *__z));
int _EXFUN(settimeofday, (const struct timeval *, const struct timezone *));
int _EXFUN(utimes, (const char *__path, struct timeval *__tvp));
int _EXFUN(getitimer, (int __which, struct itimerval *__value));
int _EXFUN(setitimer, (int __which, const struct itimerval *__value,
          struct itimerval *__ovalue));

#ifdef __cplusplus
}
#endif
#endif /* _SYS_TIME_H_ */
|
{
"pile_set_name": "Github"
}
|
---
# Jinja2 template: emits one NFS-backed PersistentVolume document per name in
# pv_list (the trailing "---" inside the loop separates the YAML documents).
{% for pv in pv_list %}
apiVersion: v1
kind: PersistentVolume
metadata:
  name: {{ pv }}
spec:
  capacity:
    storage: {{pv_size}}
  accessModes:
    - ReadWriteOnce
  nfs:
    # Each volume gets its own subdirectory under the NFS export.
    path: {{ nfs_export_path }}/{{pv}}
    # NFS server address is taken from the nfs_host's EC2 inventory facts.
    server: {{hostvars[nfs_host]['ec2_private_dns_name']}}
  persistentVolumeReclaimPolicy: {{persistentVolumeReclaimPolicy}}
---
{% endfor %}
|
{
"pile_set_name": "Github"
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.netbeans.core.network.proxy.gnome;
import java.io.BufferedReader;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.netbeans.core.network.proxy.NetworkProxySettings;
import static org.netbeans.core.network.proxy.gnome.GnomeNetworkProxy.executeCommand;
/**
 * Retrieves system network proxy settings from the GNOME GConf registry by
 * invoking the {@code gconftool-2} command-line client and parsing its
 * recursive-list ({@code -R}) output.
 *
 * @author lfischme
 */
public class GconfNetworkProxy {

    private final static Logger LOGGER = Logger.getLogger(GconfNetworkProxy.class.getName());

    private final static String EQUALS = "="; //NOI18N
    private final static String COMMA = ","; //NOI18N
    private final static String SQ_BRACKET_LEFT = "["; //NOI18N
    private final static String SQ_BRACKET_RIGHT = "]"; //NOI18N

    protected final static String GCONF_PATH = "/usr/bin/gconftool-2"; //NOI18N
    private final static String GCONF_ARGUMENT_LIST_RECURSIVELY = " -R "; //NOI18N

    private final static String GCONF_NODE_PROXY = "/system/proxy"; //NOI18N
    private final static String GCONF_NODE_HTTP_PROXY = "/system/http_proxy"; //NOI18N

    private final static String GCONF_KEY_MODE = "mode"; //NOI18N
    private final static String GCONF_KEY_PAC_URL = "autoconfig_url"; //NOI18N
    private final static String GCONF_KEY_HTTP_ALL = "use_http_proxy"; //NOI18N
    private final static String GCONF_KEY_HTTP_HOST = "host"; //NOI18N
    private final static String GCONF_KEY_HTTP_PORT = "port"; //NOI18N
    private final static String GCONF_KEY_HTTPS_HOST = "secure_host"; //NOI18N
    private final static String GCONF_KEY_HTTPS_PORT = "secure_port"; //NOI18N
    private final static String GCONF_KEY_SOCKS_HOST = "socks_host"; //NOI18N
    private final static String GCONF_KEY_SOCKS_PORT = "socks_port"; //NOI18N
    private final static String GCONF_KEY_IGNORE_HOSTS = "ignore_hosts"; //NOI18N

    private final static String GCONF_VALUE_NONE = "none"; //NOI18N
    private final static String GCONF_VALUE_AUTO = "auto"; //NOI18N
    private final static String GCONF_VALUE_MANUAL = "manual"; //NOI18N

    /**
     * Returns network proxy settings - retrieved via gconftool.
     *
     * @return network proxy settings via GSettings.
     */
    protected static NetworkProxySettings getNetworkProxySettings() {
        LOGGER.log(Level.FINE, "GConf system proxy resolver started."); //NOI18N
        Map<String, String> proxyProperties = getGconfMap(GCONF_NODE_PROXY);

        String proxyMode = proxyProperties.get(GCONF_KEY_MODE);
        if (proxyMode == null) {
            LOGGER.log(Level.SEVERE, "GConf proxy mode is null.");
            return new NetworkProxySettings(false);
        }

        // mode == "none": no proxy configured, use a direct connection.
        if (proxyMode.equals(GCONF_VALUE_NONE)) {
            LOGGER.log(Level.INFO, "GConf system proxy resolver: direct connection"); //NOI18N
            return new NetworkProxySettings();
        }

        // mode == "auto": a PAC script URL configures the proxy.
        if (proxyMode.equals(GCONF_VALUE_AUTO)) {
            String pacUrl = proxyProperties.get(GCONF_KEY_PAC_URL);

            LOGGER.log(Level.INFO, "GConf system proxy resolver: auto - PAC ({0})", pacUrl); //NOI18N

            if (pacUrl != null) {
                return new NetworkProxySettings(pacUrl);
            } else {
                return new NetworkProxySettings("");
            }
        }

        // mode == "manual": explicit host/port entries, stored partly under
        // /system/http_proxy, so merge that node in as well.
        if (proxyMode.equals(GCONF_VALUE_MANUAL)) {
            proxyProperties.putAll(getGconfMap(GCONF_NODE_HTTP_PROXY));

            String httpProxyAll = proxyProperties.get(GCONF_KEY_HTTP_ALL);
            String httpProxyHost = proxyProperties.get(GCONF_KEY_HTTP_HOST);
            String httpProxyPort = proxyProperties.get(GCONF_KEY_HTTP_PORT);
            String noProxyHosts = proxyProperties.get(GCONF_KEY_IGNORE_HOSTS);

            LOGGER.log(Level.INFO, "GConf system proxy resolver: manual - http for all ({0})", httpProxyAll); //NOI18N
            LOGGER.log(Level.INFO, "GConf system proxy resolver: manual - http host ({0})", httpProxyHost); //NOI18N
            LOGGER.log(Level.INFO, "GConf system proxy resolver: manual - http port ({0})", httpProxyPort); //NOI18N
            LOGGER.log(Level.INFO, "GConf system proxy resolver: manual - no proxy hosts ({0})", noProxyHosts); //NOI18N

            // "use_http_proxy" set: the HTTP proxy is used for all protocols.
            if (httpProxyAll != null && Boolean.parseBoolean(httpProxyAll)) {
                return new NetworkProxySettings(httpProxyHost, httpProxyPort, getNoProxyHosts(noProxyHosts));
            } else {
                String httpsProxyHost = proxyProperties.get(GCONF_KEY_HTTPS_HOST);
                String httpsProxyPort = proxyProperties.get(GCONF_KEY_HTTPS_PORT);
                String socksProxyHost = proxyProperties.get(GCONF_KEY_SOCKS_HOST);
                String socksProxyPort = proxyProperties.get(GCONF_KEY_SOCKS_PORT);

                LOGGER.log(Level.INFO, "GConf system proxy resolver: manual - https host ({0})", httpsProxyHost); //NOI18N
                LOGGER.log(Level.INFO, "GConf system proxy resolver: manual - https port ({0})", httpsProxyPort); //NOI18N
                LOGGER.log(Level.INFO, "GConf system proxy resolver: manual - socks host ({0})", socksProxyHost); //NOI18N
                LOGGER.log(Level.INFO, "GConf system proxy resolver: manual - socks port ({0})", socksProxyPort); //NOI18N

                return new NetworkProxySettings(httpProxyHost, httpProxyPort,
                        httpsProxyHost, httpsProxyPort,
                        socksProxyHost, socksProxyPort, getNoProxyHosts(noProxyHosts));
            }
        }

        // Unknown mode value: report failure.
        return new NetworkProxySettings(false);
    }

    /**
     * Checks if gconftool returns suitable response
     *
     * @return true if gconftool returns suitable response
     */
    protected static boolean isGconfValid() {
        String command = GCONF_PATH + GCONF_ARGUMENT_LIST_RECURSIVELY + GCONF_NODE_PROXY;

        try {
            BufferedReader reader = executeCommand(command);
            if (reader.ready()) {
                return true;
            }
        } catch (IOException ioe) {
            LOGGER.log(Level.SEVERE, "Cannot read line: " + command, ioe); //NOI18N
        }

        LOGGER.log(Level.WARNING, "GConf return empty list"); //NOI18N
        return false;
    }

    /**
     * Returns map of properties retrieved from gconftool-2.
     *
     * Executes the command "/usr/bin/gconftool-2 -R [node]".
     *
     * @param gconfNode Node for which the properties should be returned.
     * @return Map of properties retrieved from gconftool-2 (keys lower-cased).
     */
    private static Map<String, String> getGconfMap(String gconfNode) {
        Map<String, String> map = new HashMap<String, String>();

        String command = GCONF_PATH + GCONF_ARGUMENT_LIST_RECURSIVELY + gconfNode;

        try {
            BufferedReader reader = executeCommand(command);
            if (reader == null) {
                return map;
            }
            try {
                String line = reader.readLine();
                while (line != null) {
                    // "gconftool-2 -R" output mixes "key = value" pairs with
                    // node header lines (no "=").  Headers previously caused a
                    // StringIndexOutOfBoundsException in getKey(); skip them.
                    if (line.contains(EQUALS)) {
                        String key = getKey(line).toLowerCase();
                        if (!key.isEmpty()) {
                            map.put(key, getValue(line));
                        }
                    }
                    line = reader.readLine();
                }
            } finally {
                reader.close();
            }
        } catch (IOException ioe) {
            LOGGER.log(Level.SEVERE, "Cannot read line: " + command, ioe); //NOI18N
        }

        return map;
    }

    /**
     * Returns the key for one line response from gconftool-2.
     *
     * @param line Line from gconftool-2 response.
     * @return The key (text before the first "="), trimmed; empty string if
     *         the line contains no "=" (defensive - callers filter such lines).
     */
    private static String getKey(String line) {
        int delimiterIndex = line.indexOf(EQUALS);
        if (delimiterIndex < 0) {
            return ""; //NOI18N
        }
        return line.substring(0, delimiterIndex).trim();
    }

    /**
     * Returns the value for one line response from gconftool-2.
     *
     * @param line Line from gconftool-2 response.
     * @return The value (text after the first "="), trimmed.
     */
    private static String getValue(String line) {
        return line.substring(line.indexOf(EQUALS) + 1).trim();
    }

    /**
     * Returns array of Strings of no proxy hosts.
     *
     * The value responding to "ignore_hosts" key.
     *
     * Parses the value returned from gconftool-2.
     * Usually [host1,host2,host3]
     *
     * @param noProxyHostsString The value returned from gconftool-2.
     * @return Array of Strings of no proxy hosts.
     */
    private static String[] getNoProxyHosts(String noProxyHostsString) {
        if (noProxyHostsString != null && !noProxyHostsString.isEmpty()) {
            // Strip the surrounding [ ] of the GConf list literal, if present.
            if (noProxyHostsString.startsWith(SQ_BRACKET_LEFT)) {
                noProxyHostsString = noProxyHostsString.substring(1);
            }
            if (noProxyHostsString.endsWith(SQ_BRACKET_RIGHT)) {
                noProxyHostsString = noProxyHostsString.substring(0, noProxyHostsString.length() - 1);
            }
            return noProxyHostsString.split(COMMA);
        }
        return new String[0];
    }
}
|
{
"pile_set_name": "Github"
}
|
/*******************************************************************************
* Copyright (c) 2010, 2013 SAP AG and others.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License 2.0
* which accompanies this distribution, and is available at
* https://www.eclipse.org/legal/epl-2.0/
*
* SPDX-License-Identifier: EPL-2.0
*
* Contributors:
* Mathias Kinzler (SAP AG) - initial implementation
*******************************************************************************/
package org.eclipse.egit.ui.test.team.actions;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import org.eclipse.egit.ui.common.LocalRepositoryTestCase;
import org.eclipse.egit.ui.internal.repository.RepositoriesView;
import org.eclipse.egit.ui.test.ContextMenuHelper;
import org.eclipse.egit.ui.test.TestUtil;
import org.eclipse.swtbot.eclipse.finder.widgets.SWTBotView;
import org.eclipse.swtbot.swt.finder.junit.SWTBotJunit4ClassRunner;
import org.eclipse.swtbot.swt.finder.widgets.SWTBotTree;
import org.eclipse.team.ui.history.IHistoryView;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
/**
 * Tests for the Team->Show History and Team->Show in Repositories View actions
 */
@RunWith(SWTBotJunit4ClassRunner.class)
public class ShowInTest extends LocalRepositoryTestCase {

	@Before
	public void setup() throws Exception {
		// Every test operates on a project that already has a commit.
		createProjectAndCommitToRepository();
	}

	@Test
	public void testOpenHistory() throws Exception {
		// Close a pre-existing History view so the action under test is what
		// opens it; viewById throws if the view is absent, hence the catch.
		try {
			SWTBotView view = bot.viewById(IHistoryView.VIEW_ID);
			view.close();
		} catch (Exception e) {
			// ignore - view was not open
		}
		SWTBotTree projectExplorerTree = TestUtil.getExplorerTree();
		getProjectItem(projectExplorerTree, PROJ1).select();
		String menuString = util
				.getPluginLocalizedValue("ShowResourceInHistoryAction_label");
		// Invoke Team -> Show in History on the selected project.
		ContextMenuHelper.clickContextMenuSync(projectExplorerTree, "Team",
				menuString);
		// Closing succeeds only if the action actually opened the view.
		bot.viewById(IHistoryView.VIEW_ID).close();
	}

	@Test
	public void testOpenHistoryMultiSelection() throws Exception {
		SWTBotTree projectExplorerTree = TestUtil.getExplorerTree();
		// Select two items at once.
		projectExplorerTree.select(0, 1);
		String menuString = util
				.getPluginLocalizedValue("ShowResourceInHistoryAction_label");
		// Team->show in history must be enabled on a multiple selection
		assertTrue(ContextMenuHelper.isContextMenuItemEnabled(projectExplorerTree, "Team",
				menuString));
	}

	@Test
	public void testOpenRepoView() throws Exception {
		// Close a pre-existing Repositories view so the action under test is
		// what opens it; viewById throws if the view is absent.
		try {
			SWTBotView view = bot.viewById(RepositoriesView.VIEW_ID);
			view.close();
		} catch (Exception e) {
			// ignore - view was not open
		}
		SWTBotTree projectExplorerTree = TestUtil.getExplorerTree();
		getProjectItem(projectExplorerTree, PROJ1).select();
		String menuString = util
				.getPluginLocalizedValue("ShowRepositoryAction_label");
		// Invoke Team -> Show in Repositories View on the selected project.
		ContextMenuHelper.clickContextMenuSync(projectExplorerTree, "Team",
				menuString);
		// Closing succeeds only if the action actually opened the view.
		bot.viewById(RepositoriesView.VIEW_ID).close();
	}

	@Test
	public void testOpenRepoViewMultiSelection() throws Exception {
		SWTBotTree projectExplorerTree = TestUtil.getExplorerTree();
		// Select two items at once.
		projectExplorerTree.select(0, 1);
		String menuString = util
				.getPluginLocalizedValue("ShowRepositoryAction_label");
		// Team->show in repository must be disabled on a multiple selection
		assertFalse(ContextMenuHelper.isContextMenuItemEnabled(projectExplorerTree, "Team",
				menuString));
	}
}
|
{
"pile_set_name": "Github"
}
|
version https://git-lfs.github.com/spec/v1
oid sha256:17bda811cef1a12565143038c253f0599947b3a13799795e9406821a4ac5273f
size 10663
|
{
"pile_set_name": "Github"
}
|
{{! BEGIN-SNIPPET opacity-demo-snippet.hbs }}
{{!-- The checkbox toggles `fadeMessage`; animated-if then inserts/removes the
      message block inside an AnimatedContainer using `transition`. --}}
<div class="controls">
  <label>Show Message<input type="checkbox" checked={{fadeMessage}} onchange={{action (mut fadeMessage) (not fadeMessage)}}></label>
</div>
<div class="scenario-transitions clearfix">
  <AnimatedContainer>
    {{#animated-if fadeMessage use=transition }}
      <div class="message">
        {{mail}}
      </div>
    {{/animated-if}}
  </AnimatedContainer>
</div>
{{! END-SNIPPET }}
|
{
"pile_set_name": "Github"
}
|
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package google
import (
"crypto/rsa"
"fmt"
"time"
"golang.org/x/oauth2"
"golang.org/x/oauth2/internal"
"golang.org/x/oauth2/jws"
)
// JWTAccessTokenSourceFromJSON uses a Google Developers service account JSON
// key file to read the credentials that authorize and authenticate the
// requests, and returns a TokenSource that does not use any OAuth2 flow but
// instead creates a JWT and sends that as the access token.
// The audience is typically a URL that specifies the scope of the credentials.
//
// Note that this is not a standard OAuth flow, but rather an
// optimization supported by a few Google services.
// Unless you know otherwise, you should use JWTConfigFromJSON instead.
func JWTAccessTokenSourceFromJSON(jsonKey []byte, audience string) (oauth2.TokenSource, error) {
	// Parse the service-account JSON to obtain email and private key.
	config, err := JWTConfigFromJSON(jsonKey)
	if err != nil {
		return nil, fmt.Errorf("google: could not parse JSON key: %v", err)
	}
	privateKey, err := internal.ParseKey(config.PrivateKey)
	if err != nil {
		return nil, fmt.Errorf("google: could not parse key: %v", err)
	}
	source := &jwtAccessTokenSource{
		email:    config.Email,
		audience: audience,
		pk:       privateKey,
		pkID:     config.PrivateKeyID,
	}
	// Mint a first token eagerly so configuration errors surface here,
	// then let ReuseTokenSource cache it until expiry.
	initialToken, err := source.Token()
	if err != nil {
		return nil, err
	}
	return oauth2.ReuseTokenSource(initialToken, source), nil
}
// jwtAccessTokenSource mints self-signed JWTs used directly as access tokens.
type jwtAccessTokenSource struct {
	email, audience string          // issuer/subject and "aud" claim
	pk              *rsa.PrivateKey // service-account signing key
	pkID            string          // key id, emitted in the JWT header
}
// Token implements oauth2.TokenSource: it signs a fresh JWT (valid for one
// hour, issuer == subject == the service-account email) with the account's
// RSA key and returns it wrapped as a Bearer oauth2.Token.
func (ts *jwtAccessTokenSource) Token() (*oauth2.Token, error) {
	iat := time.Now()
	exp := iat.Add(time.Hour)
	cs := &jws.ClaimSet{
		Iss: ts.email,
		Sub: ts.email,
		Aud: ts.audience,
		Iat: iat.Unix(),
		Exp: exp.Unix(),
	}
	hdr := &jws.Header{
		Algorithm: "RS256",
		Typ:       "JWT",
		// pkID is already a string; the previous string(...) conversion
		// was redundant.
		KeyID: ts.pkID,
	}
	msg, err := jws.Encode(hdr, cs, ts.pk)
	if err != nil {
		return nil, fmt.Errorf("google: could not encode JWT: %v", err)
	}
	return &oauth2.Token{AccessToken: msg, TokenType: "Bearer", Expiry: exp}, nil
}
|
{
"pile_set_name": "Github"
}
|
<!DOCTYPE html>
<meta charset="utf-8">
<title>Selector: pseudo-classes (:read-write, :read-only) input type change</title>
<link rel="author" title="Rune Lillesveen" href="mailto:rune@opera.com">
<link rel="help" href="https://html.spec.whatwg.org/multipage/#pseudo-classes">
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<style>
/* Red/pink is the failure state; the :required / :not(:optional) rules below
   turn the sibling green/lime once the input participates in constraint
   validation. */
span {
  color: red;
  background-color: pink;
}
:required + span {
  color: green;
}
:not(:optional) + span {
  background-color: lime;
}
</style>
<input id="hiddenInput" type="hidden" required>
<span id="sibling">This text should be green on lime background.</span>
<script>
test(() => {
  // While type=hidden, the input must match neither :required nor
  // :not(:optional), so the failure colors apply.
  assert_equals(getComputedStyle(sibling).color, "rgb(255, 0, 0)",
                "Not matching :required for type=hidden");
  assert_equals(getComputedStyle(sibling).backgroundColor, "rgb(255, 192, 203)",
                "Matching :optional for type=hidden");
  // Switching to type=text must re-evaluate the pseudo-classes dynamically.
  hiddenInput.type = "text";
  assert_equals(getComputedStyle(sibling).color, "rgb(0, 128, 0)",
                "Matching :required for type=text");
  assert_equals(getComputedStyle(sibling).backgroundColor, "rgb(0, 255, 0)",
                "Matching :not(:optional) for type=text");
}, "Evaluation of :required and :optional changes for input type change.");
</script>
|
{
"pile_set_name": "Github"
}
|
/*
* Swap block device support for MTDs
* Turns an MTD device into a swap device with block wear leveling
*
* Copyright © 2007,2011 Nokia Corporation. All rights reserved.
*
* Authors: Jarkko Lavinen <jarkko.lavinen@nokia.com>
*
* Based on Richard Purdie's earlier implementation in 2007. Background
* support and lock-less operation written by Adrian Hunter.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/blktrans.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/genhd.h>
#include <linux/swap.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/math64.h>
#define MTDSWAP_PREFIX "mtdswap"
/*
* The number of free eraseblocks when GC should stop
*/
#define CLEAN_BLOCK_THRESHOLD 20
/*
* Number of free eraseblocks below which GC can also collect low frag
* blocks.
*/
#define LOW_FRAG_GC_TRESHOLD 5
/*
* Wear level cost amortization. We want to do wear leveling on the background
* without disturbing gc too much. This is made by defining max GC frequency.
* Frequency value 6 means 1/6 of the GC passes will pick an erase block based
* on the biggest wear difference rather than the biggest dirtiness.
*
* The lower freq2 should be chosen so that it makes sure the maximum erase
* difference will decrease even if a malicious application is deliberately
* trying to make erase differences large.
*/
#define MAX_ERASE_DIFF 4000
#define COLLECT_NONDIRTY_BASE MAX_ERASE_DIFF
#define COLLECT_NONDIRTY_FREQ1 6
#define COLLECT_NONDIRTY_FREQ2 4
#define PAGE_UNDEF UINT_MAX
#define BLOCK_UNDEF UINT_MAX
#define BLOCK_ERROR (UINT_MAX - 1)
#define BLOCK_MAX (UINT_MAX - 2)
#define EBLOCK_BAD (1 << 0)
#define EBLOCK_NOMAGIC (1 << 1)
#define EBLOCK_BITFLIP (1 << 2)
#define EBLOCK_FAILED (1 << 3)
#define EBLOCK_READERR (1 << 4)
#define EBLOCK_IDX_SHIFT 5
/* Per-eraseblock bookkeeping. */
struct swap_eb {
	struct rb_node rb;	/* node in one of the state rb-trees */
	struct rb_root *root;	/* tree this block currently lives in, or NULL */
	unsigned int flags;	/* EBLOCK_* bits; scan state above EBLOCK_IDX_SHIFT */
	unsigned int active_count;	/* pages in this block holding live data */
	unsigned int erase_count;	/* lifetime erase count (wear) */
	unsigned int pad;		/* speeds up pointer decrement */
};
#define MTDSWAP_ECNT_MIN(rbroot) (rb_entry(rb_first(rbroot), struct swap_eb, \
rb)->erase_count)
#define MTDSWAP_ECNT_MAX(rbroot) (rb_entry(rb_last(rbroot), struct swap_eb, \
rb)->erase_count)
/* An rb-tree of eraseblocks (ordered by erase count) plus its size. */
struct mtdswap_tree {
	struct rb_root root;
	unsigned int count;
};

/* Indexes into mtdswap_dev.trees - one tree per eraseblock state. */
enum {
	MTDSWAP_CLEAN,		/* erased, ready for writing */
	MTDSWAP_USED,		/* completely full of live data */
	MTDSWAP_LOWFRAG,	/* mostly live data */
	MTDSWAP_HIFRAG,		/* mostly stale data */
	MTDSWAP_DIRTY,		/* no live data, needs erase */
	MTDSWAP_BITFLIP,	/* read with correctable bitflips */
	MTDSWAP_FAILING,	/* had I/O errors, avoid for new data */
	MTDSWAP_TREE_CNT,
};
/* Per-device state for one mtdswap block-translation instance. */
struct mtdswap_dev {
	struct mtd_blktrans_dev *mbd_dev;
	struct mtd_info *mtd;
	struct device *dev;

	unsigned int *page_data;	/* logical page -> flash page map */
	unsigned int *revmap;		/* flash page -> logical page map */

	unsigned int eblks;		/* total eraseblocks managed */
	unsigned int spare_eblks;	/* blocks reserved for GC headroom */
	unsigned int pages_per_eblk;
	unsigned int max_erase_count;	/* highest erase count seen (wear ceiling) */
	struct swap_eb *eb_data;	/* per-eraseblock state array */

	struct mtdswap_tree trees[MTDSWAP_TREE_CNT];

	/* I/O statistics, exported via debugfs */
	unsigned long long sect_read_count;
	unsigned long long sect_write_count;
	unsigned long long mtd_write_count;
	unsigned long long mtd_read_count;
	unsigned long long discard_count;
	unsigned long long discard_page_count;

	unsigned int curr_write_pos;	/* next free page in curr_write */
	struct swap_eb *curr_write;	/* block currently being filled, or NULL */

	char *page_buf;		/* scratch buffer, one flash page */
	char *oob_buf;		/* scratch buffer for OOB reads */

	struct dentry *debugfs_root;
};
/* On-flash OOB marker, stored little-endian (__packed: no padding). */
struct mtdswap_oobdata {
	__le16 magic;	/* MTDSWAP_MAGIC_CLEAN or MTDSWAP_MAGIC_DIRTY */
	__le32 count;	/* erase count recorded when the block was erased */
} __packed;
#define MTDSWAP_MAGIC_CLEAN 0x2095
#define MTDSWAP_MAGIC_DIRTY (MTDSWAP_MAGIC_CLEAN + 1)
#define MTDSWAP_TYPE_CLEAN 0
#define MTDSWAP_TYPE_DIRTY 1
#define MTDSWAP_OOBSIZE sizeof(struct mtdswap_oobdata)
#define MTDSWAP_ERASE_RETRIES 3 /* Before marking erase block bad */
#define MTDSWAP_IO_RETRIES 3
enum {
MTDSWAP_SCANNED_CLEAN,
MTDSWAP_SCANNED_DIRTY,
MTDSWAP_SCANNED_BITFLIP,
MTDSWAP_SCANNED_BAD,
};
/*
* In the worst case mtdswap_writesect() has allocated the last clean
* page from the current block and is then pre-empted by the GC
* thread. The thread can consume a full erase block when moving a
* block.
*/
#define MIN_SPARE_EBLOCKS 2
#define MIN_ERASE_BLOCKS (MIN_SPARE_EBLOCKS + 1)
#define TREE_ROOT(d, name) (&d->trees[MTDSWAP_ ## name].root)
#define TREE_EMPTY(d, name) (TREE_ROOT(d, name)->rb_node == NULL)
#define TREE_NONEMPTY(d, name) (!TREE_EMPTY(d, name))
#define TREE_COUNT(d, name) (d->trees[MTDSWAP_ ## name].count)
#define MTDSWAP_MBD_TO_MTDSWAP(dev) ((struct mtdswap_dev *)dev->priv)
/* Module parameters (all read-only after load: perm 0444). */
static char partitions[128] = "";
module_param_string(partitions, partitions, sizeof(partitions), 0444);
MODULE_PARM_DESC(partitions, "MTD partition numbers to use as swap "
		"partitions=\"1,3,5\"");

static unsigned int spare_eblocks = 10;
module_param(spare_eblocks, uint, 0444);
MODULE_PARM_DESC(spare_eblocks, "Percentage of spare erase blocks for "
		"garbage collection (default 10%)");

static bool header; /* false */
module_param(header, bool, 0444);
MODULE_PARM_DESC(header,
		"Include builtin swap header (default 0, without header)");

/* Forward declaration: GC is called from several paths below. */
static int mtdswap_gc(struct mtdswap_dev *d, unsigned int background);
/* Byte offset of eraseblock @eb on the MTD, derived from its array index. */
static loff_t mtdswap_eb_offset(struct mtdswap_dev *d, struct swap_eb *eb)
{
	return (loff_t)(eb - d->eb_data) * d->mtd->erasesize;
}
/*
 * Unlink @eb from the rb-tree it currently lives in (if any) and drop that
 * tree's element count.  eb->root itself is not cleared here.
 */
static void mtdswap_eb_detach(struct mtdswap_dev *d, struct swap_eb *eb)
{
	unsigned int oldidx;
	struct mtdswap_tree *tp;

	if (eb->root) {
		/* Recover the tree index from the embedded rb_root pointer. */
		tp = container_of(eb->root, struct mtdswap_tree, root);
		oldidx = tp - &d->trees[0];
		d->trees[oldidx].count--;
		rb_erase(&eb->rb, eb->root);
	}
}
/* Insert @eb into @root, keeping the tree ordered by ascending erase_count. */
static void __mtdswap_rb_add(struct rb_root *root, struct swap_eb *eb)
{
	struct rb_node **p, *parent = NULL;
	struct swap_eb *cur;

	p = &root->rb_node;
	while (*p) {
		parent = *p;
		cur = rb_entry(parent, struct swap_eb, rb);
		if (eb->erase_count > cur->erase_count)
			p = &(*p)->rb_right;
		else
			p = &(*p)->rb_left;
	}

	rb_link_node(&eb->rb, parent, p);
	rb_insert_color(&eb->rb, root);
}
/*
 * Move @eb into tree d->trees[idx]: detach from its current tree, insert
 * ordered by erase count, and update counters.  No-op if the block is
 * already in the target tree.
 */
static void mtdswap_rb_add(struct mtdswap_dev *d, struct swap_eb *eb, int idx)
{
	struct rb_root *root;

	if (eb->root == &d->trees[idx].root)
		return;

	mtdswap_eb_detach(d, eb);
	root = &d->trees[idx].root;
	__mtdswap_rb_add(root, eb);
	eb->root = root;
	d->trees[idx].count++;
}
/*
 * Return the @idx'th node of @root in sorted order (0-based), or NULL if
 * the tree holds fewer than idx + 1 nodes.
 */
static struct rb_node *mtdswap_rb_index(struct rb_root *root, unsigned int idx)
{
	struct rb_node *node = rb_first(root);
	unsigned int pos;

	for (pos = 0; pos < idx && node; pos++)
		node = rb_next(node);

	return node;
}
/*
 * Retire @eb: remove it from its tree, account the lost spare block, and
 * (when the MTD supports bad-block marking) mark it bad on flash.
 * Returns 1 when the block was retired, or the mtd_block_markbad() error.
 */
static int mtdswap_handle_badblock(struct mtdswap_dev *d, struct swap_eb *eb)
{
	int ret;
	loff_t offset;

	d->spare_eblks--;
	eb->flags |= EBLOCK_BAD;
	mtdswap_eb_detach(d, eb);
	eb->root = NULL;

	/* badblocks not supported */
	if (!mtd_can_have_bb(d->mtd))
		return 1;

	offset = mtdswap_eb_offset(d, eb);
	dev_warn(d->dev, "Marking bad block at %08llx\n", offset);
	ret = mtd_block_markbad(d->mtd, offset);

	if (ret) {
		dev_warn(d->dev, "Mark block bad failed for block at %08llx "
			"error %d\n", offset, ret);
		return ret;
	}

	return 1;
}
/*
 * Handle a write failure on @eb.  The first failure on the block currently
 * being written (with data already in it) only moves the block to the
 * FAILING tree and returns 0; repeated failures, or failures elsewhere,
 * retire the block via mtdswap_handle_badblock().
 */
static int mtdswap_handle_write_error(struct mtdswap_dev *d, struct swap_eb *eb)
{
	unsigned int marked = eb->flags & EBLOCK_FAILED;	/* already failed once? */
	struct swap_eb *curr_write = d->curr_write;

	eb->flags |= EBLOCK_FAILED;
	if (curr_write == eb) {
		d->curr_write = NULL;

		if (!marked && d->curr_write_pos != 0) {
			mtdswap_rb_add(d, eb, MTDSWAP_FAILING);
			return 0;
		}
	}

	return mtdswap_handle_badblock(d, eb);
}
/*
 * Read OOB data at @from.  A bitflip result is passed straight back to the
 * caller; hard errors are logged and returned; a short read is converted
 * to -EIO.  Returns 0 on a full, clean read.
 */
static int mtdswap_read_oob(struct mtdswap_dev *d, loff_t from,
			struct mtd_oob_ops *ops)
{
	int ret = mtd_read_oob(d->mtd, from, ops);

	if (mtd_is_bitflip(ret))
		return ret;

	if (ret) {
		dev_warn(d->dev, "Read OOB failed %d for block at %08llx\n",
			ret, from);
		return ret;
	}

	if (ops->oobretlen < ops->ooblen) {
		dev_warn(d->dev, "Read OOB return short read (%zd bytes not "
			"%zd) for block at %08llx\n",
			ops->oobretlen, ops->ooblen, from);
		return -EIO;
	}

	return 0;
}
/*
 * Classify eraseblock @eb from its on-flash OOB markers.  Reads two marker
 * slots (clean marker in the first page's OOB, dirty marker in the second)
 * and returns one of the MTDSWAP_SCANNED_* states, or a negative error.
 * On a valid clean marker the stored erase count is restored into @eb.
 */
static int mtdswap_read_markers(struct mtdswap_dev *d, struct swap_eb *eb)
{
	struct mtdswap_oobdata *data, *data2;
	int ret;
	loff_t offset;
	struct mtd_oob_ops ops;

	offset = mtdswap_eb_offset(d, eb);

	/* Check first if the block is bad. */
	if (mtd_can_have_bb(d->mtd) && mtd_block_isbad(d->mtd, offset))
		return MTDSWAP_SCANNED_BAD;

	/* Read both marker slots in one OOB transfer. */
	ops.ooblen = 2 * d->mtd->ecclayout->oobavail;
	ops.oobbuf = d->oob_buf;
	ops.ooboffs = 0;
	ops.datbuf = NULL;
	ops.mode = MTD_OPS_AUTO_OOB;

	ret = mtdswap_read_oob(d, offset, &ops);

	if (ret && !mtd_is_bitflip(ret))
		return ret;

	data = (struct mtdswap_oobdata *)d->oob_buf;
	data2 = (struct mtdswap_oobdata *)
		(d->oob_buf + d->mtd->ecclayout->oobavail);

	if (le16_to_cpu(data->magic) == MTDSWAP_MAGIC_CLEAN) {
		eb->erase_count = le32_to_cpu(data->count);
		if (mtd_is_bitflip(ret))
			ret = MTDSWAP_SCANNED_BITFLIP;
		else {
			if (le16_to_cpu(data2->magic) == MTDSWAP_MAGIC_DIRTY)
				ret = MTDSWAP_SCANNED_DIRTY;
			else
				ret = MTDSWAP_SCANNED_CLEAN;
		}
	} else {
		/* No clean marker at all - e.g. power cut after erase. */
		eb->flags |= EBLOCK_NOMAGIC;
		ret = MTDSWAP_SCANNED_DIRTY;
	}

	return ret;
}
/*
 * Write an OOB marker for @eb.  A CLEAN marker (with the erase count) goes
 * into the first page's OOB; a DIRTY marker (magic only) goes into the
 * second page's OOB.  Returns 0 on success or a negative error; write
 * failures are additionally routed through mtdswap_handle_write_error().
 */
static int mtdswap_write_marker(struct mtdswap_dev *d, struct swap_eb *eb,
			u16 marker)
{
	struct mtdswap_oobdata n;
	int ret;
	loff_t offset;
	struct mtd_oob_ops ops;

	ops.ooboffs = 0;
	ops.oobbuf = (uint8_t *)&n;
	ops.mode = MTD_OPS_AUTO_OOB;
	ops.datbuf = NULL;

	if (marker == MTDSWAP_TYPE_CLEAN) {
		n.magic = cpu_to_le16(MTDSWAP_MAGIC_CLEAN);
		n.count = cpu_to_le32(eb->erase_count);
		ops.ooblen = MTDSWAP_OOBSIZE;
		offset = mtdswap_eb_offset(d, eb);
	} else {
		n.magic = cpu_to_le16(MTDSWAP_MAGIC_DIRTY);
		ops.ooblen = sizeof(n.magic);
		offset = mtdswap_eb_offset(d, eb) + d->mtd->writesize;
	}

	ret = mtd_write_oob(d->mtd, offset, &ops);

	if (ret) {
		dev_warn(d->dev, "Write OOB failed for block at %08llx "
			"error %d\n", offset, ret);
		if (ret == -EIO || mtd_is_eccerr(ret))
			mtdswap_handle_write_error(d, eb);
		return ret;
	}

	if (ops.oobretlen != ops.ooblen) {
		dev_warn(d->dev, "Short OOB write for block at %08llx: "
			"%zd not %zd\n",
			offset, ops.oobretlen, ops.ooblen);
		/*
		 * Previously this returned ret (== 0 here), silently
		 * reporting success after warning about a short write.
		 * Return -EIO instead, mirroring the short-read handling
		 * in mtdswap_read_oob().
		 */
		return -EIO;
	}

	return 0;
}
/*
 * Are there any erase blocks without MAGIC_CLEAN header, presumably
 * because power was cut off after erase but before header write? We
 * need to guestimate the erase count.
 */
static void mtdswap_check_counts(struct mtdswap_dev *d)
{
	struct rb_root hist_root = RB_ROOT;
	struct rb_node *medrb;
	struct swap_eb *eb;
	unsigned int i, cnt, median;

	/* Build a temporary tree of blocks with trustworthy erase counts. */
	cnt = 0;
	for (i = 0; i < d->eblks; i++) {
		eb = d->eb_data + i;

		if (eb->flags & (EBLOCK_NOMAGIC | EBLOCK_BAD | EBLOCK_READERR))
			continue;

		__mtdswap_rb_add(&hist_root, eb);
		cnt++;
	}

	if (cnt == 0)
		return;

	/* Use the median of the known counts as the estimate. */
	medrb = mtdswap_rb_index(&hist_root, cnt / 2);
	median = rb_entry(medrb, struct swap_eb, rb)->erase_count;

	d->max_erase_count = MTDSWAP_ECNT_MAX(&hist_root);

	for (i = 0; i < d->eblks; i++) {
		eb = d->eb_data + i;

		if (eb->flags & (EBLOCK_NOMAGIC | EBLOCK_READERR))
			eb->erase_count = median;

		/* Tear the temporary tree down again (skip blocks not in it). */
		if (eb->flags & (EBLOCK_NOMAGIC | EBLOCK_BAD | EBLOCK_READERR))
			continue;

		rb_erase(&eb->rb, &hist_root);
	}
}
/*
 * Scan every eraseblock at attach time: read its markers, record a
 * preliminary state in the flags (above EBLOCK_IDX_SHIFT), fix up unknown
 * erase counts, then file each non-bad block into its state tree.
 */
static void mtdswap_scan_eblks(struct mtdswap_dev *d)
{
	int status;
	unsigned int i, idx;
	struct swap_eb *eb;

	for (i = 0; i < d->eblks; i++) {
		eb = d->eb_data + i;

		status = mtdswap_read_markers(d, eb);
		if (status < 0)
			eb->flags |= EBLOCK_READERR;
		else if (status == MTDSWAP_SCANNED_BAD) {
			eb->flags |= EBLOCK_BAD;
			continue;
		}

		switch (status) {
		case MTDSWAP_SCANNED_CLEAN:
			idx = MTDSWAP_CLEAN;
			break;
		case MTDSWAP_SCANNED_DIRTY:
		case MTDSWAP_SCANNED_BITFLIP:
			idx = MTDSWAP_DIRTY;
			break;
		default:
			idx = MTDSWAP_FAILING;
		}

		/* Stash the target tree index in the flags for the 2nd pass. */
		eb->flags |= (idx << EBLOCK_IDX_SHIFT);
	}

	/* Estimate erase counts for blocks that lost their header. */
	mtdswap_check_counts(d);

	for (i = 0; i < d->eblks; i++) {
		eb = d->eb_data + i;

		if (eb->flags & EBLOCK_BAD)
			continue;

		idx = eb->flags >> EBLOCK_IDX_SHIFT;
		mtdswap_rb_add(d, eb, idx);
	}
}
/*
 * Place eblk into a tree corresponding to its number of active blocks
 * it contains.  Blocks with bitflips or previous I/O errors take
 * priority and go to the BITFLIP/FAILING trees regardless of weight.
 */
static void mtdswap_store_eb(struct mtdswap_dev *d, struct swap_eb *eb)
{
	unsigned int weight = eb->active_count;
	unsigned int maxweight = d->pages_per_eblk;

	/* The block being filled is tracked via d->curr_write, not a tree. */
	if (eb == d->curr_write)
		return;

	/*
	 * This must be a single else-if chain: mtdswap_rb_add() moves the
	 * block out of its previous tree, so falling through from the
	 * BITFLIP/FAILING cases into the weight cases would immediately
	 * re-file the block and leave those trees permanently empty.
	 */
	if (eb->flags & EBLOCK_BITFLIP)
		mtdswap_rb_add(d, eb, MTDSWAP_BITFLIP);
	else if (eb->flags & (EBLOCK_READERR | EBLOCK_FAILED))
		mtdswap_rb_add(d, eb, MTDSWAP_FAILING);
	else if (weight == maxweight)
		mtdswap_rb_add(d, eb, MTDSWAP_USED);
	else if (weight == 0)
		mtdswap_rb_add(d, eb, MTDSWAP_DIRTY);
	else if (weight > (maxweight/2))
		mtdswap_rb_add(d, eb, MTDSWAP_LOWFRAG);
	else
		mtdswap_rb_add(d, eb, MTDSWAP_HIFRAG);
}
/* Completion callback for mtd_erase(): wake the waiter stored in priv. */
static void mtdswap_erase_callback(struct erase_info *done)
{
	wake_up((wait_queue_head_t *)done->priv);
}
/*
 * Erase @eb, retrying up to MTDSWAP_ERASE_RETRIES times and marking the
 * block bad on persistent failure.
 *
 * Returns 0 on success, -EIO on unrecoverable erase failure, or -EINTR
 * when the wait for erase completion is interrupted.
 */
static int mtdswap_erase_block(struct mtdswap_dev *d, struct swap_eb *eb)
{
	struct mtd_info *mtd = d->mtd;
	struct erase_info erase;
	wait_queue_head_t wq;
	unsigned int retries = 0;
	int ret;

	/* Track wear for the wear-levelling heuristics. */
	eb->erase_count++;
	if (eb->erase_count > d->max_erase_count)
		d->max_erase_count = eb->erase_count;

retry:
	init_waitqueue_head(&wq);
	memset(&erase, 0, sizeof(struct erase_info));

	erase.mtd = mtd;
	erase.callback = mtdswap_erase_callback;
	erase.addr = mtdswap_eb_offset(d, eb);
	erase.len = mtd->erasesize;
	erase.priv = (u_long)&wq;	/* handed back to the callback */

	ret = mtd_erase(mtd, &erase);
	if (ret) {
		if (retries++ < MTDSWAP_ERASE_RETRIES) {
			dev_warn(d->dev,
				"erase of erase block %#llx on %s failed",
				erase.addr, mtd->name);
			yield();
			goto retry;
		}

		dev_err(d->dev, "Cannot erase erase block %#llx on %s\n",
			erase.addr, mtd->name);

		mtdswap_handle_badblock(d, eb);
		return -EIO;
	}

	/* Wait for the callback to report completion or failure. */
	ret = wait_event_interruptible(wq, erase.state == MTD_ERASE_DONE ||
				       erase.state == MTD_ERASE_FAILED);
	if (ret) {
		/* Fixed typo in the log message: "erassure" -> "erasure". */
		dev_err(d->dev, "Interrupted erase block %#llx erasure on %s",
			erase.addr, mtd->name);
		return -EINTR;
	}

	if (erase.state == MTD_ERASE_FAILED) {
		if (retries++ < MTDSWAP_ERASE_RETRIES) {
			dev_warn(d->dev,
				"erase of erase block %#llx on %s failed",
				erase.addr, mtd->name);
			yield();
			goto retry;
		}

		mtdswap_handle_badblock(d, eb);
		return -EIO;
	}

	return 0;
}
/*
 * Map logical swap page @page onto a free physical block, returned in
 * @*block. When the current write block is full (or none is selected),
 * a fresh block is pulled from the CLEAN tree first.
 */
static int mtdswap_map_free_block(struct mtdswap_dev *d, unsigned int page,
				unsigned int *block)
{
	int ret;
	struct swap_eb *old_eb = d->curr_write;
	struct rb_root *clean_root;
	struct swap_eb *eb;

	if (old_eb == NULL || d->curr_write_pos >= d->pages_per_eblk) {
		do {
			if (TREE_EMPTY(d, CLEAN))
				return -ENOSPC;

			/* Take the least-worn block from the CLEAN tree. */
			clean_root = TREE_ROOT(d, CLEAN);
			eb = rb_entry(rb_first(clean_root), struct swap_eb, rb);
			rb_erase(&eb->rb, clean_root);
			eb->root = NULL;
			TREE_COUNT(d, CLEAN)--;

			/*
			 * Mark the block dirty on flash; on I/O or ECC
			 * error try the next clean block.
			 */
			ret = mtdswap_write_marker(d, eb, MTDSWAP_TYPE_DIRTY);
		} while (ret == -EIO || mtd_is_eccerr(ret));

		if (ret)
			return ret;

		d->curr_write_pos = 0;
		d->curr_write = eb;

		/* Re-file the previous write block by its state. */
		if (old_eb)
			mtdswap_store_eb(d, old_eb);
	}

	/* Physical block number = block index * pages/eblk + offset. */
	*block = (d->curr_write - d->eb_data) * d->pages_per_eblk +
		d->curr_write_pos;

	d->curr_write->active_count++;
	d->revmap[*block] = page;
	d->curr_write_pos++;

	return 0;
}
/*
 * Free-page estimate: all pages of the CLEAN-tree blocks plus whatever
 * remains unwritten in the current write block.
 */
static unsigned int mtdswap_free_page_cnt(struct mtdswap_dev *d)
{
	return TREE_COUNT(d, CLEAN) * d->pages_per_eblk +
		d->pages_per_eblk - d->curr_write_pos;
}
/* True when more than one erase block worth of free pages remains. */
static unsigned int mtdswap_enough_free_pages(struct mtdswap_dev *d)
{
	return mtdswap_free_page_cnt(d) > d->pages_per_eblk;
}
/*
 * Write one PAGE_SIZE buffer for logical page @page to a freshly mapped
 * block (returned in @*bp). Outside of GC context, garbage collection is
 * run first until enough free pages exist. I/O and ECC errors roll back
 * the mapping and retry with another block; other errors are fatal.
 */
static int mtdswap_write_block(struct mtdswap_dev *d, char *buf,
			unsigned int page, unsigned int *bp, int gc_context)
{
	struct mtd_info *mtd = d->mtd;
	struct swap_eb *eb;
	size_t retlen;
	loff_t writepos;
	int ret;

retry:
	/* Avoid recursing into GC when called from GC itself. */
	if (!gc_context)
		while (!mtdswap_enough_free_pages(d))
			if (mtdswap_gc(d, 0) > 0)
				return -ENOSPC;

	ret = mtdswap_map_free_block(d, page, bp);
	eb = d->eb_data + (*bp / d->pages_per_eblk);

	if (ret == -EIO || mtd_is_eccerr(ret)) {
		/* Marker write failed: drop the block and map a new one. */
		d->curr_write = NULL;
		eb->active_count--;
		d->revmap[*bp] = PAGE_UNDEF;
		goto retry;
	}

	if (ret < 0)
		return ret;

	writepos = (loff_t)*bp << PAGE_SHIFT;
	ret = mtd_write(mtd, writepos, PAGE_SIZE, &retlen, buf);
	if (ret == -EIO || mtd_is_eccerr(ret)) {
		/* Data write failed: undo the mapping and retry elsewhere. */
		d->curr_write_pos--;
		eb->active_count--;
		d->revmap[*bp] = PAGE_UNDEF;

		mtdswap_handle_write_error(d, eb);
		goto retry;
	}

	if (ret < 0) {
		dev_err(d->dev, "Write to MTD device failed: %d (%zd written)",
			ret, retlen);
		goto err;
	}

	if (retlen != PAGE_SIZE) {
		dev_err(d->dev, "Short write to MTD device: %zd written",
			retlen);
		ret = -EIO;
		goto err;
	}

	return ret;

err:
	/* Fatal error: release the just-claimed page slot. */
	d->curr_write_pos--;
	eb->active_count--;
	d->revmap[*bp] = PAGE_UNDEF;

	return ret;
}
/*
 * Copy the page stored at @oldblock into a freshly mapped block (returned
 * in @*newblock) and update the page<->block maps. On read failure the
 * page is marked BLOCK_ERROR and the old mapping is dropped.
 */
static int mtdswap_move_block(struct mtdswap_dev *d, unsigned int oldblock,
			unsigned int *newblock)
{
	struct mtd_info *mtd = d->mtd;
	struct swap_eb *eb, *oldeb;
	int ret;
	size_t retlen;
	unsigned int page, retries;
	loff_t readpos;

	page = d->revmap[oldblock];
	readpos = (loff_t) oldblock << PAGE_SHIFT;
	retries = 0;

retry:
	ret = mtd_read(mtd, readpos, PAGE_SIZE, &retlen, d->page_buf);

	/* Bitflips are correctable; only hard read errors are retried. */
	if (ret < 0 && !mtd_is_bitflip(ret)) {
		oldeb = d->eb_data + oldblock / d->pages_per_eblk;
		oldeb->flags |= EBLOCK_READERR;

		dev_err(d->dev, "Read Error: %d (block %u)\n", ret,
			oldblock);
		retries++;
		if (retries < MTDSWAP_IO_RETRIES)
			goto retry;

		goto read_error;
	}

	if (retlen != PAGE_SIZE) {
		dev_err(d->dev, "Short read: %zd (block %u)\n", retlen,
			oldblock);
		ret = -EIO;
		goto read_error;
	}

	/* gc_context = 1: we are already collecting, don't recurse. */
	ret = mtdswap_write_block(d, d->page_buf, page, newblock, 1);
	if (ret < 0) {
		d->page_data[page] = BLOCK_ERROR;
		dev_err(d->dev, "Write error: %d\n", ret);
		return ret;
	}

	/*
	 * Point the page at its new home and release the old block.
	 * (A dead store of eb from *newblock, immediately overwritten
	 * below, was removed here.)
	 */
	d->page_data[page] = *newblock;
	d->revmap[oldblock] = PAGE_UNDEF;
	eb = d->eb_data + oldblock / d->pages_per_eblk;
	eb->active_count--;

	return 0;

read_error:
	d->page_data[page] = BLOCK_ERROR;
	d->revmap[oldblock] = PAGE_UNDEF;
	return ret;
}
/*
 * Move every mapped page out of erase block @eb so it can be erased.
 * Returns 0 on success, -ENOSPC when the spare pool runs too low, or the
 * first per-page move error encountered (remaining pages are still tried).
 */
static int mtdswap_gc_eblock(struct mtdswap_dev *d, struct swap_eb *eb)
{
	unsigned int i, block, eblk_base, newblock;
	int ret, errcode;

	errcode = 0;
	eblk_base = (eb - d->eb_data) * d->pages_per_eblk;

	for (i = 0; i < d->pages_per_eblk; i++) {
		if (d->spare_eblks < MIN_SPARE_EBLOCKS)
			return -ENOSPC;

		block = eblk_base + i;
		/* Unmapped slots need no migration. */
		if (d->revmap[block] == PAGE_UNDEF)
			continue;

		ret = mtdswap_move_block(d, block, &newblock);
		if (ret < 0 && !errcode)
			errcode = ret;
	}

	return errcode;
}
/*
 * Pick the most urgent non-empty tree to collect from, scanning from
 * BITFLIP downwards. Normally stops at HIFRAG; when clean blocks are
 * scarce, LOWFRAG blocks are considered too. Returns the tree index, or
 * -1 when there is nothing worth collecting.
 */
static int __mtdswap_choose_gc_tree(struct mtdswap_dev *d)
{
	int idx, stopat;

	if (TREE_COUNT(d, CLEAN) < LOW_FRAG_GC_TRESHOLD)
		stopat = MTDSWAP_LOWFRAG;
	else
		stopat = MTDSWAP_HIFRAG;

	for (idx = MTDSWAP_BITFLIP; idx >= stopat; idx--)
		if (d->trees[idx].root.rb_node != NULL)
			return idx;

	return -1;
}
/*
 * Wear-levelling pick frequency for a given erase-count spread @maxdiff:
 * ramps linearly from COLLECT_NONDIRTY_FREQ1 down to
 * COLLECT_NONDIRTY_FREQ2 as the spread grows.
 */
static int mtdswap_wlfreq(unsigned int maxdiff)
{
	unsigned int h, x, y, dist, base;

	/*
	 * Calculate linear ramp down from f1 to f2 when maxdiff goes from
	 * MAX_ERASE_DIFF to MAX_ERASE_DIFF + COLLECT_NONDIRTY_BASE. Similar
	 * to triangle with height f1 - f2 and width COLLECT_NONDIRTY_BASE.
	 */
	dist = maxdiff - MAX_ERASE_DIFF;
	if (dist > COLLECT_NONDIRTY_BASE)
		dist = COLLECT_NONDIRTY_BASE;

	/*
	 * Modelling the slop as right angular triangle with base
	 * COLLECT_NONDIRTY_BASE and height freq1 - freq2. The ratio y/x is
	 * equal to the ratio h/base.
	 *
	 * NOTE(review): x = dist - base underflows (unsigned) whenever
	 * dist < base; the math appears to rely on modular wraparound
	 * behaving like signed arithmetic -- verify the intended ramp.
	 */
	h = COLLECT_NONDIRTY_FREQ1 - COLLECT_NONDIRTY_FREQ2;
	base = COLLECT_NONDIRTY_BASE;

	x = dist - base;
	y = (x * h + base / 2) / base;

	return COLLECT_NONDIRTY_FREQ2 + y;
}
/*
 * Wear-levelling tree selection: find the tree whose least-worn block is
 * furthest behind the overall maximum erase count. Triggers only every
 * mtdswap_wlfreq() calls (throttled via the static pick counter) and only
 * when the spread exceeds MAX_ERASE_DIFF. Returns the tree index or -1.
 */
static int mtdswap_choose_wl_tree(struct mtdswap_dev *d)
{
	static unsigned int pick_cnt;
	unsigned int i, idx = -1, wear, max;	/* idx = -1 wraps; returned as int -1 */
	struct rb_root *root;

	max = 0;
	for (i = 0; i <= MTDSWAP_DIRTY; i++) {
		root = &d->trees[i].root;
		if (root->rb_node == NULL)
			continue;

		wear = d->max_erase_count - MTDSWAP_ECNT_MIN(root);
		if (wear > max) {
			max = wear;
			idx = i;
		}
	}

	if (max > MAX_ERASE_DIFF && pick_cnt >= mtdswap_wlfreq(max) - 1) {
		pick_cnt = 0;
		return idx;
	}

	pick_cnt++;
	return -1;
}
/*
 * Choose the tree to garbage-collect from. FAILING blocks are drained
 * first in background mode, or when no clean/dirty blocks remain; then
 * wear-levelling gets a chance; finally the fragmentation-based choice.
 * Returns a tree index or -1 when nothing needs collecting.
 */
static int mtdswap_choose_gc_tree(struct mtdswap_dev *d,
				unsigned int background)
{
	int idx;

	if (TREE_NONEMPTY(d, FAILING) &&
		(background || (TREE_EMPTY(d, CLEAN) && TREE_EMPTY(d, DIRTY))))
		return MTDSWAP_FAILING;

	idx = mtdswap_choose_wl_tree(d);
	if (idx >= MTDSWAP_CLEAN)
		return idx;

	return __mtdswap_choose_gc_tree(d);
}
/*
 * Detach and return the next erase block to garbage-collect, or NULL when
 * no collection is needed. In background mode collection is skipped while
 * plenty of clean blocks exist and nothing is dirty or failing.
 */
static struct swap_eb *mtdswap_pick_gc_eblk(struct mtdswap_dev *d,
					unsigned int background)
{
	struct rb_root *rp = NULL;
	struct swap_eb *eb = NULL;
	int idx;

	if (background && TREE_COUNT(d, CLEAN) > CLEAN_BLOCK_THRESHOLD &&
		TREE_EMPTY(d, DIRTY) && TREE_EMPTY(d, FAILING))
		return NULL;

	idx = mtdswap_choose_gc_tree(d, background);
	if (idx < 0)
		return NULL;

	/* Take the least-worn (leftmost) block from the chosen tree. */
	rp = &d->trees[idx].root;
	eb = rb_entry(rb_first(rp), struct swap_eb, rb);

	rb_erase(&eb->rb, rp);
	eb->root = NULL;
	d->trees[idx].count--;
	return eb;
}
/* Alternating 0x55/0xAA fill patterns for the block verification pass. */
static unsigned int mtdswap_test_patt(unsigned int i)
{
	return (i & 1) ? 0x55555555 : 0xAAAAAAAA;
}
/*
 * Stress-test a suspect erase block: write alternating patterns to data
 * and OOB areas, read them back, verify, and erase -- twice. Returns 1 if
 * the block passes (READERR flag is cleared), 0 if it is retired as bad.
 */
static unsigned int mtdswap_eblk_passes(struct mtdswap_dev *d,
					struct swap_eb *eb)
{
	struct mtd_info *mtd = d->mtd;
	unsigned int test, i, j, patt, mtd_pages;
	loff_t base, pos;
	unsigned int *p1 = (unsigned int *)d->page_buf;
	unsigned char *p2 = (unsigned char *)d->oob_buf;
	struct mtd_oob_ops ops;
	int ret;

	ops.mode = MTD_OPS_AUTO_OOB;
	ops.len = mtd->writesize;
	ops.ooblen = mtd->ecclayout->oobavail;
	ops.ooboffs = 0;
	ops.datbuf = d->page_buf;
	ops.oobbuf = d->oob_buf;

	base = mtdswap_eb_offset(d, eb);
	mtd_pages = d->pages_per_eblk * PAGE_SIZE / mtd->writesize;

	/* Two passes so each flash page sees both patterns. */
	for (test = 0; test < 2; test++) {
		pos = base;
		/* Fill the whole erase block with the test pattern. */
		for (i = 0; i < mtd_pages; i++) {
			patt = mtdswap_test_patt(test + i);
			memset(d->page_buf, patt, mtd->writesize);
			memset(d->oob_buf, patt, mtd->ecclayout->oobavail);
			ret = mtd_write_oob(mtd, pos, &ops);
			if (ret)
				goto error;

			pos += mtd->writesize;
		}

		pos = base;
		/* Read back and verify data and OOB bytes. */
		for (i = 0; i < mtd_pages; i++) {
			ret = mtd_read_oob(mtd, pos, &ops);
			if (ret)
				goto error;

			patt = mtdswap_test_patt(test + i);
			for (j = 0; j < mtd->writesize/sizeof(int); j++)
				if (p1[j] != patt)
					goto error;

			for (j = 0; j < mtd->ecclayout->oobavail; j++)
				if (p2[j] != (unsigned char)patt)
					goto error;

			pos += mtd->writesize;
		}

		ret = mtdswap_erase_block(d, eb);
		if (ret)
			goto error;
	}

	/* The block survived both passes; forgive the earlier read error. */
	eb->flags &= ~EBLOCK_READERR;
	return 1;

error:
	mtdswap_handle_badblock(d, eb);
	return 0;
}
/*
 * One round of garbage collection: pick a block, migrate its live pages,
 * erase it and return it to CLEAN (or DIRTY on marker-write failure).
 * Returns 1 when no (further) collection is possible, 0 otherwise.
 */
static int mtdswap_gc(struct mtdswap_dev *d, unsigned int background)
{
	struct swap_eb *eb;
	int ret;

	if (d->spare_eblks < MIN_SPARE_EBLOCKS)
		return 1;

	eb = mtdswap_pick_gc_eblk(d, background);
	if (!eb)
		return 1;

	ret = mtdswap_gc_eblock(d, eb);
	if (ret == -ENOSPC)
		return 1;

	if (eb->flags & EBLOCK_FAILED) {
		/* A write to this block failed during migration: retire it. */
		mtdswap_handle_badblock(d, eb);
		return 0;
	}

	/* Erasing clears any previous bitflip condition. */
	eb->flags &= ~EBLOCK_BITFLIP;
	ret = mtdswap_erase_block(d, eb);
	/* A previously unreadable block must also pass the stress test. */
	if ((eb->flags & EBLOCK_READERR) &&
		(ret || !mtdswap_eblk_passes(d, eb)))
		return 0;

	if (ret == 0)
		ret = mtdswap_write_marker(d, eb, MTDSWAP_TYPE_CLEAN);

	if (ret == 0)
		mtdswap_rb_add(d, eb, MTDSWAP_CLEAN);
	else if (ret != -EIO && !mtd_is_eccerr(ret))
		mtdswap_rb_add(d, eb, MTDSWAP_DIRTY);

	return 0;
}
/*
 * Background worker: keep running GC rounds until either GC reports
 * there is nothing left to do or the block layer asks us to stop.
 */
static void mtdswap_background(struct mtd_blktrans_dev *dev)
{
	struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);

	for (;;) {
		if (mtdswap_gc(d, 1))
			return;
		if (mtd_blktrans_cease_background(dev))
			return;
	}
}
/* Free all memory allocated by mtdswap_init(). */
static void mtdswap_cleanup(struct mtdswap_dev *d)
{
	vfree(d->eb_data);
	vfree(d->revmap);
	vfree(d->page_data);
	kfree(d->oob_buf);
	kfree(d->page_buf);
}
/* blktrans flush hook: sync the underlying MTD device. */
static int mtdswap_flush(struct mtd_blktrans_dev *dev)
{
	struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);

	mtd_sync(d->mtd);
	return 0;
}
/*
 * Count the factory/runtime bad erase blocks within the first @size bytes
 * of @mtd. Devices that cannot have bad blocks report zero.
 */
static unsigned int mtdswap_badblocks(struct mtd_info *mtd, uint64_t size)
{
	unsigned int badcnt = 0;
	loff_t offset;

	if (!mtd_can_have_bb(mtd))
		return 0;

	for (offset = 0; offset < size; offset += mtd->erasesize)
		if (mtd_block_isbad(mtd, offset))
			badcnt++;

	return badcnt;
}
/*
 * blktrans write hook: map logical @page to a physical block and write
 * @buf there. An existing mapping for the page is released first.
 */
static int mtdswap_writesect(struct mtd_blktrans_dev *dev,
			unsigned long page, char *buf)
{
	struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
	unsigned int newblock, mapped;
	struct swap_eb *eb;
	int ret;

	d->sect_write_count++;

	if (d->spare_eblks < MIN_SPARE_EBLOCKS)
		return -ENOSPC;

	if (header) {
		/* Ignore writes to the header page */
		if (unlikely(page == 0))
			return 0;

		page--;
	}

	/* If the page is already mapped, release its old block first. */
	mapped = d->page_data[page];
	if (mapped <= BLOCK_MAX) {
		eb = d->eb_data + (mapped / d->pages_per_eblk);
		eb->active_count--;
		mtdswap_store_eb(d, eb);
		d->page_data[page] = BLOCK_UNDEF;
		d->revmap[mapped] = PAGE_UNDEF;
	}

	ret = mtdswap_write_block(d, buf, page, &newblock, 0);
	d->mtd_write_count++;

	if (ret < 0)
		return ret;

	/*
	 * Record the new mapping. (A dead store recomputing eb from
	 * newblock, never read afterwards, was removed here.)
	 */
	d->page_data[page] = newblock;

	return 0;
}
/* Provide a dummy swap header for the kernel */
static int mtdswap_auto_header(struct mtdswap_dev *d, char *buf)
{
	union swap_header *hd = (union swap_header *)(buf);

	/* Zero everything up to the trailing 10-byte magic. */
	memset(buf, 0, PAGE_SIZE - 10);

	hd->info.version = 1;
	hd->info.last_page = d->mbd_dev->size - 1;
	hd->info.nr_badpages = 0;

	/* Swap signature lives in the last 10 bytes of the page. */
	memcpy(buf + PAGE_SIZE - 10, "SWAPSPACE2", 10);

	return 0;
}
/*
 * blktrans read hook: translate logical @page to its physical block and
 * read it into @buf. Unmapped pages read back as zeroes; BLOCK_ERROR
 * pages return -EIO. Bitflips are served but the block is flagged for GC.
 */
static int mtdswap_readsect(struct mtd_blktrans_dev *dev,
			unsigned long page, char *buf)
{
	struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
	struct mtd_info *mtd = d->mtd;
	unsigned int realblock, retries;
	loff_t readpos;
	struct swap_eb *eb;
	size_t retlen;
	int ret;

	d->sect_read_count++;

	if (header) {
		/* Page 0 is synthesised, not stored on flash. */
		if (unlikely(page == 0))
			return mtdswap_auto_header(d, buf);

		page--;
	}

	realblock = d->page_data[page];
	if (realblock > BLOCK_MAX) {
		memset(buf, 0x0, PAGE_SIZE);
		if (realblock == BLOCK_UNDEF)
			return 0;
		else
			return -EIO;
	}

	eb = d->eb_data + (realblock / d->pages_per_eblk);
	BUG_ON(d->revmap[realblock] == PAGE_UNDEF);

	readpos = (loff_t)realblock << PAGE_SHIFT;
	retries = 0;

retry:
	ret = mtd_read(mtd, readpos, PAGE_SIZE, &retlen, buf);

	d->mtd_read_count++;
	if (mtd_is_bitflip(ret)) {
		/* Data is good but the block should be scrubbed by GC. */
		eb->flags |= EBLOCK_BITFLIP;
		mtdswap_rb_add(d, eb, MTDSWAP_BITFLIP);
		ret = 0;
	}

	if (ret < 0) {
		dev_err(d->dev, "Read error %d\n", ret);
		eb->flags |= EBLOCK_READERR;
		mtdswap_rb_add(d, eb, MTDSWAP_FAILING);
		retries++;
		if (retries < MTDSWAP_IO_RETRIES)
			goto retry;

		return ret;
	}

	if (retlen != PAGE_SIZE) {
		dev_err(d->dev, "Short read %zd\n", retlen);
		return -EIO;
	}

	return 0;
}
/*
 * blktrans discard hook: unmap @nr_pages logical pages starting at
 * @first, releasing their physical blocks. BLOCK_ERROR pages are simply
 * reset to unmapped.
 */
static int mtdswap_discard(struct mtd_blktrans_dev *dev, unsigned long first,
			unsigned nr_pages)
{
	struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
	unsigned long page;
	struct swap_eb *eb;
	unsigned int mapped;

	d->discard_count++;

	for (page = first; page < first + nr_pages; page++) {
		mapped = d->page_data[page];
		if (mapped <= BLOCK_MAX) {
			eb = d->eb_data + (mapped / d->pages_per_eblk);
			eb->active_count--;
			mtdswap_store_eb(d, eb);
			d->page_data[page] = BLOCK_UNDEF;
			d->revmap[mapped] = PAGE_UNDEF;
			d->discard_page_count++;
		} else if (mapped == BLOCK_ERROR) {
			d->page_data[page] = BLOCK_UNDEF;
			d->discard_page_count++;
		}
	}

	return 0;
}
/*
 * debugfs "stats" file: dump per-tree block counts and erase-count
 * ranges, current write-block state, I/O counters and page usage.
 */
static int mtdswap_show(struct seq_file *s, void *data)
{
	struct mtdswap_dev *d = (struct mtdswap_dev *) s->private;
	unsigned long sum;
	unsigned int count[MTDSWAP_TREE_CNT];
	unsigned int min[MTDSWAP_TREE_CNT];
	unsigned int max[MTDSWAP_TREE_CNT];
	unsigned int i, cw = 0, cwp = 0, cwecount = 0, bb_cnt, mapped, pages;
	uint64_t use_size;
	char *name[] = {"clean", "used", "low", "high", "dirty", "bitflip",
			"failing"};

	/* Snapshot all statistics under the device lock. */
	mutex_lock(&d->mbd_dev->lock);

	for (i = 0; i < MTDSWAP_TREE_CNT; i++) {
		struct rb_root *root = &d->trees[i].root;

		if (root->rb_node) {
			count[i] = d->trees[i].count;
			/* Trees are ordered by erase count: first = min. */
			min[i] = rb_entry(rb_first(root), struct swap_eb,
					rb)->erase_count;
			max[i] = rb_entry(rb_last(root), struct swap_eb,
					rb)->erase_count;
		} else
			count[i] = 0;
	}

	if (d->curr_write) {
		cw = 1;
		cwp = d->curr_write_pos;
		cwecount = d->curr_write->erase_count;
	}

	sum = 0;
	for (i = 0; i < d->eblks; i++)
		sum += d->eb_data[i].erase_count;

	use_size = (uint64_t)d->eblks * d->mtd->erasesize;
	bb_cnt = mtdswap_badblocks(d->mtd, use_size);

	mapped = 0;
	pages = d->mbd_dev->size;
	for (i = 0; i < pages; i++)
		if (d->page_data[i] != BLOCK_UNDEF)
			mapped++;

	mutex_unlock(&d->mbd_dev->lock);

	/* Emit the report outside the lock; min/max valid when count != 0. */
	for (i = 0; i < MTDSWAP_TREE_CNT; i++) {
		if (!count[i])
			continue;

		if (min[i] != max[i])
			seq_printf(s, "%s:\t%5d erase blocks, erased min %d, "
				"max %d times\n",
				name[i], count[i], min[i], max[i]);
		else
			seq_printf(s, "%s:\t%5d erase blocks, all erased %d "
				"times\n", name[i], count[i], min[i]);
	}

	if (bb_cnt)
		seq_printf(s, "bad:\t%5u erase blocks\n", bb_cnt);

	if (cw)
		seq_printf(s, "current erase block: %u pages used, %u free, "
			"erased %u times\n",
			cwp, d->pages_per_eblk - cwp, cwecount);

	seq_printf(s, "total erasures: %lu\n", sum);

	seq_puts(s, "\n");

	seq_printf(s, "mtdswap_readsect count: %llu\n", d->sect_read_count);
	seq_printf(s, "mtdswap_writesect count: %llu\n", d->sect_write_count);
	seq_printf(s, "mtdswap_discard count: %llu\n", d->discard_count);
	seq_printf(s, "mtd read count: %llu\n", d->mtd_read_count);
	seq_printf(s, "mtd write count: %llu\n", d->mtd_write_count);
	seq_printf(s, "discarded pages count: %llu\n", d->discard_page_count);

	seq_puts(s, "\n");
	seq_printf(s, "total pages: %u\n", pages);
	seq_printf(s, "pages mapped: %u\n", mapped);

	return 0;
}
/* debugfs open hook: bind the seq_file to mtdswap_show(). */
static int mtdswap_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtdswap_show, inode->i_private);
}
/* File operations for the debugfs "stats" entry. */
static const struct file_operations mtdswap_fops = {
	.open		= mtdswap_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/*
 * Create the per-disk debugfs directory and its "stats" file. Returns 0
 * on success (including when debugfs is compiled out), -1 on failure.
 */
static int mtdswap_add_debugfs(struct mtdswap_dev *d)
{
	struct gendisk *gd = d->mbd_dev->disk;
	struct device *dev = disk_to_dev(gd);

	struct dentry *root;
	struct dentry *dent;

	root = debugfs_create_dir(gd->disk_name, NULL);
	/* ERR_PTR means debugfs is unavailable; treat as non-fatal. */
	if (IS_ERR(root))
		return 0;

	if (!root) {
		dev_err(dev, "failed to initialize debugfs\n");
		return -1;
	}

	d->debugfs_root = root;

	dent = debugfs_create_file("stats", S_IRUSR, root, d,
				&mtdswap_fops);
	if (!dent) {
		dev_err(d->dev, "debugfs_create_file failed\n");
		debugfs_remove_recursive(root);
		d->debugfs_root = NULL;
		return -1;
	}

	return 0;
}
/*
 * Allocate all per-device state (page map, reverse map, per-block data
 * and I/O buffers), initialise the trees and scan the flash. Returns 0
 * on success or -ENOMEM, unwinding partial allocations on failure.
 */
static int mtdswap_init(struct mtdswap_dev *d, unsigned int eblocks,
			unsigned int spare_cnt)
{
	struct mtd_info *mtd = d->mbd_dev->mtd;
	unsigned int i, eblk_bytes, pages, blocks;
	int ret = -ENOMEM;

	d->mtd = mtd;
	d->eblks = eblocks;
	d->spare_eblks = spare_cnt;
	d->pages_per_eblk = mtd->erasesize >> PAGE_SHIFT;

	pages = d->mbd_dev->size;
	blocks = eblocks * d->pages_per_eblk;

	for (i = 0; i < MTDSWAP_TREE_CNT; i++)
		d->trees[i].root = RB_ROOT;

	/* Logical page -> physical block map. */
	d->page_data = vmalloc(sizeof(int)*pages);
	if (!d->page_data)
		goto page_data_fail;

	/* Physical block -> logical page map. */
	d->revmap = vmalloc(sizeof(int)*blocks);
	if (!d->revmap)
		goto revmap_fail;

	eblk_bytes = sizeof(struct swap_eb)*d->eblks;
	d->eb_data = vzalloc(eblk_bytes);
	if (!d->eb_data)
		goto eb_data_fail;

	/* Start with every page unmapped. */
	for (i = 0; i < pages; i++)
		d->page_data[i] = BLOCK_UNDEF;

	for (i = 0; i < blocks; i++)
		d->revmap[i] = PAGE_UNDEF;

	d->page_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!d->page_buf)
		goto page_buf_fail;

	d->oob_buf = kmalloc(2 * mtd->ecclayout->oobavail, GFP_KERNEL);
	if (!d->oob_buf)
		goto oob_buf_fail;

	mtdswap_scan_eblks(d);

	return 0;

oob_buf_fail:
	kfree(d->page_buf);
page_buf_fail:
	vfree(d->eb_data);
eb_data_fail:
	vfree(d->revmap);
revmap_fail:
	vfree(d->page_data);
page_data_fail:
	printk(KERN_ERR "%s: init failed (%d)\n", MTDSWAP_PREFIX, ret);
	return ret;
}
/*
 * blktrans add_mtd hook: validate @mtd against the "partitions" module
 * parameter and the driver's geometry requirements, size the swap area,
 * then allocate and register the block translation device.
 */
static void mtdswap_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
	struct mtdswap_dev *d;
	struct mtd_blktrans_dev *mbd_dev;
	char *parts;
	char *this_opt;
	unsigned long part;
	unsigned int eblocks, eavailable, bad_blocks, spare_cnt;
	uint64_t swap_size, use_size, size_limit;
	struct nand_ecclayout *oinfo;
	int ret;

	/* Only attach to MTD indices listed in the "partitions" option. */
	parts = &partitions[0];
	if (!*parts)
		return;

	while ((this_opt = strsep(&parts, ",")) != NULL) {
		if (kstrtoul(this_opt, 0, &part) < 0)
			return;

		if (mtd->index == part)
			break;
	}

	if (mtd->index != part)
		return;

	/* Erase blocks must hold a whole number of PAGE_SIZE pages. */
	if (mtd->erasesize < PAGE_SIZE || mtd->erasesize % PAGE_SIZE) {
		printk(KERN_ERR "%s: Erase size %u not multiple of PAGE_SIZE "
			"%lu\n", MTDSWAP_PREFIX, mtd->erasesize, PAGE_SIZE);
		return;
	}

	/* A page must consist of a whole number of flash write units. */
	if (PAGE_SIZE % mtd->writesize || mtd->writesize > PAGE_SIZE) {
		printk(KERN_ERR "%s: PAGE_SIZE %lu not multiple of write size"
			" %u\n", MTDSWAP_PREFIX, PAGE_SIZE, mtd->writesize);
		return;
	}

	/* The driver stores its markers in the OOB area. */
	oinfo = mtd->ecclayout;
	if (!oinfo) {
		printk(KERN_ERR "%s: mtd%d does not have OOB\n",
			MTDSWAP_PREFIX, mtd->index);
		return;
	}

	if (!mtd->oobsize || oinfo->oobavail < MTDSWAP_OOBSIZE) {
		printk(KERN_ERR "%s: Not enough free bytes in OOB, "
			"%d available, %zu needed.\n",
			MTDSWAP_PREFIX, oinfo->oobavail, MTDSWAP_OOBSIZE);
		return;
	}

	/* spare_eblocks is a percentage of available blocks. */
	if (spare_eblocks > 100)
		spare_eblocks = 100;

	use_size = mtd->size;
	size_limit = (uint64_t) BLOCK_MAX * PAGE_SIZE;

	if (mtd->size > size_limit) {
		printk(KERN_WARNING "%s: Device too large. Limiting size to "
			"%llu bytes\n", MTDSWAP_PREFIX, size_limit);
		use_size = size_limit;
	}

	eblocks = mtd_div_by_eb(use_size, mtd);
	use_size = (uint64_t)eblocks * mtd->erasesize;
	bad_blocks = mtdswap_badblocks(mtd, use_size);
	eavailable = eblocks - bad_blocks;

	if (eavailable < MIN_ERASE_BLOCKS) {
		printk(KERN_ERR "%s: Not enough erase blocks. %u available, "
			"%d needed\n", MTDSWAP_PREFIX, eavailable,
			MIN_ERASE_BLOCKS);
		return;
	}

	/* Clamp the spare pool to [MIN_SPARE_EBLOCKS, eavailable - 1]. */
	spare_cnt = div_u64((uint64_t)eavailable * spare_eblocks, 100);

	if (spare_cnt < MIN_SPARE_EBLOCKS)
		spare_cnt = MIN_SPARE_EBLOCKS;

	if (spare_cnt > eavailable - 1)
		spare_cnt = eavailable - 1;

	swap_size = (uint64_t)(eavailable - spare_cnt) * mtd->erasesize +
		(header ? PAGE_SIZE : 0);

	printk(KERN_INFO "%s: Enabling MTD swap on device %lu, size %llu KB, "
		"%u spare, %u bad blocks\n",
		MTDSWAP_PREFIX, part, swap_size / 1024, spare_cnt, bad_blocks);

	d = kzalloc(sizeof(struct mtdswap_dev), GFP_KERNEL);
	if (!d)
		return;

	mbd_dev = kzalloc(sizeof(struct mtd_blktrans_dev), GFP_KERNEL);
	if (!mbd_dev) {
		kfree(d);
		return;
	}

	d->mbd_dev = mbd_dev;
	mbd_dev->priv = d;

	mbd_dev->mtd = mtd;
	mbd_dev->devnum = mtd->index;
	mbd_dev->size = swap_size >> PAGE_SHIFT;
	mbd_dev->tr = tr;

	if (!(mtd->flags & MTD_WRITEABLE))
		mbd_dev->readonly = 1;

	if (mtdswap_init(d, eblocks, spare_cnt) < 0)
		goto init_failed;

	if (add_mtd_blktrans_dev(mbd_dev) < 0)
		goto cleanup;

	d->dev = disk_to_dev(mbd_dev->disk);

	ret = mtdswap_add_debugfs(d);
	if (ret < 0)
		goto debugfs_failed;

	return;

debugfs_failed:
	del_mtd_blktrans_dev(mbd_dev);

cleanup:
	mtdswap_cleanup(d);

init_failed:
	kfree(mbd_dev);
	kfree(d);
}
/* blktrans remove hook: tear down debugfs, the device, and all state. */
static void mtdswap_remove_dev(struct mtd_blktrans_dev *dev)
{
	struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);

	debugfs_remove_recursive(d->debugfs_root);
	del_mtd_blktrans_dev(dev);
	mtdswap_cleanup(d);
	kfree(d);
}
/* Block translation layer hooks; sector size equals PAGE_SIZE. */
static struct mtd_blktrans_ops mtdswap_ops = {
	.name		= "mtdswap",
	.major		= 0,
	.part_bits	= 0,
	.blksize	= PAGE_SIZE,
	.flush		= mtdswap_flush,
	.readsect	= mtdswap_readsect,
	.writesect	= mtdswap_writesect,
	.discard	= mtdswap_discard,
	.background	= mtdswap_background,
	.add_mtd	= mtdswap_add_mtd,
	.remove_dev	= mtdswap_remove_dev,
	.owner		= THIS_MODULE,
};
/* Module entry point: register the block translation driver. */
static int __init mtdswap_modinit(void)
{
	return register_mtd_blktrans(&mtdswap_ops);
}
/* Module exit point: unregister the block translation driver. */
static void __exit mtdswap_modexit(void)
{
	deregister_mtd_blktrans(&mtdswap_ops);
}
module_init(mtdswap_modinit);
module_exit(mtdswap_modexit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>");
MODULE_DESCRIPTION("Block device access to an MTD suitable for using as "
"swap space");
|
{
"pile_set_name": "Github"
}
|
'''
serializers for APP
'''
import os
import copy
import uuid
from six import text_type
from saml2 import BINDING_HTTP_POST, BINDING_HTTP_REDIRECT
from saml2.config import SPConfig
from saml2.metadata import entity_descriptor
from saml2.entity_category.edugain import COC
from saml2.saml import NAME_FORMAT_URI
from saml2.sigver import CertificateError
try:
from saml2.sigver import get_xmlsec_binary
except ImportError:
get_xmlsec_binary = None
from rest_framework import serializers
from rest_framework.exceptions import ValidationError, MethodNotAllowed
from common.django.drf.serializer import DynamicFieldsModelSerializer
from oneid_meta.models import (
APP,
OAuthAPP,
OIDCAPP,
SAMLAPP,
LDAPAPP,
HTTPAPP,
Dept,
User,
)
from siteapi.v1.views.utils import gen_uid
from siteapi.v1.serializers.perm import PermWithOwnerSerializer
if get_xmlsec_binary:
xmlsec_path = get_xmlsec_binary(["/opt/local/bin", "/usr/local/bin"]) # pylint: disable=invalid-name
else:
xmlsec_path = '/usr/local/bin/xmlsec1' # pylint: disable=invalid-name
BASEDIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
class OAuthAPPSerializer(DynamicFieldsModelSerializer):
    '''
    Serializer for OAuthAPP.

    The credentials (`client_id`, `client_secret`) and `more_detail` are
    read-only and cannot be set through this serializer.
    '''
    class Meta:    # pylint: disable=missing-docstring
        model = OAuthAPP
        fields = (
            'client_id',
            'client_secret',
            'redirect_uris',
            'client_type',
            'authorization_grant_type',
            'more_detail',
        )
        read_only_fields = (
            'client_id',
            'client_secret',
            'more_detail',
        )
class OIDCAPPSerializer(DynamicFieldsModelSerializer):
    '''Serializer for OIDCAPP.

    The credentials (`client_id`, `client_secret`) and `more_detail` are
    read-only and cannot be set through this serializer.
    '''
    class Meta:    # pylint: disable=missing-docstring
        model = OIDCAPP
        fields = (
            'client_id',
            'client_secret',
            'redirect_uris',
            'client_type',
            'response_type',
            'more_detail',
        )
        read_only_fields = (
            'client_id',
            'client_secret',
            'more_detail',
        )
class SAMLAPPSerializer(DynamicFieldsModelSerializer):
    '''Serializer for SAMLAPP.

    Besides plain (de)serialization, creating or updating a SAMLAPP also
    (re)generates the SP metadata XML file consumed by the IdP: either the
    raw `xmldata` is written out directly, or the metadata is generated
    from `entity_id`/`acs`/`sls`/`cert`.
    '''
    class Meta:    # pylint: disable=missing-docstring
        model = SAMLAPP
        fields = (
            'entity_id',
            'acs',
            'sls',
            'cert',
            'xmldata',
            'more_detail',
        )

    def gen_xml(self, filename, entity_id, acs, sls):    # pylint: disable=no-self-use
        '''Write the SAMLAPP SP configuration to the named metadata XML file.

        :param filename: basename (without extension) of the target file
        :param entity_id: SP entity id
        :param acs: assertion consumer service URL
        :param sls: single logout service (redirect binding) URL
        '''
        conf = SPConfig()
        endpointconfig = {
            "entityid": entity_id,
            'entity_category': [COC],
            "description": "extra SP setup",
            "service": {
                "sp": {
                    "want_response_signed": False,
                    "authn_requests_signed": True,
                    "logout_requests_signed": True,
                    "endpoints": {
                        "assertion_consumer_service": [(acs, BINDING_HTTP_POST)],
                        "single_logout_service": [
                            (sls, BINDING_HTTP_REDIRECT),
                            (sls.replace('redirect', 'post'), BINDING_HTTP_POST),
                        ],
                    }
                },
            },
            # Any private key works here; the SP's real private key is unknown.
            "key_file": BASEDIR + "/djangosaml2idp/certificates/mykey.pem",
            "cert_file": BASEDIR + '/djangosaml2idp/saml2_config/sp_cert/%s.pem' % filename,
            "xmlsec_binary": xmlsec_path,
            "metadata": {
                "local": [BASEDIR + '/djangosaml2idp/saml2_config/idp_metadata.xml']
            },
            "name_form": NAME_FORMAT_URI,
        }
        conf.load(copy.deepcopy(endpointconfig))
        meta_data = entity_descriptor(conf)
        content = text_type(meta_data).encode('utf-8')
        with open(BASEDIR + '/djangosaml2idp/saml2_config/%s.xml' % filename, 'wb+') as f:
            f.write(content)

    def create(self, validated_data):
        '''Create a SAMLAPP and materialise its SP metadata XML file.'''
        filename = uuid.uuid4()
        app = validated_data['app']
        xmldata = validated_data.get('xmldata', '')
        entity_id = validated_data.get('entity_id', '')
        cert = validated_data.get('cert', '')
        acs = validated_data.get('acs', '')
        sls = validated_data.get('sls', '')
        if xmldata not in ['', None]:
            # Caller supplied ready-made metadata; write it verbatim.
            with open(BASEDIR + '/djangosaml2idp/saml2_config/%s.xml' % filename, 'w+') as f:
                f.write(xmldata)
        else:
            # Generate metadata from the individual fields, using a
            # temporary copy of the SP certificate.
            self.dump_cert(filename, cert)
            try:
                self.gen_xml(filename=filename, entity_id=entity_id, acs=acs, sls=sls)
            except CertificateError:
                raise ValidationError({'msg': 'perm incorrect'})
            if os.path.exists(BASEDIR + '/djangosaml2idp/saml2_config/sp_cert/%s.pem' % filename):
                os.remove(BASEDIR + '/djangosaml2idp/saml2_config/sp_cert/%s.pem' % filename)
        saml_app = SAMLAPP.objects.create(app=app, xmldata=xmldata, entity_id=entity_id,\
            acs=acs, sls=sls, cert=cert)
        saml_app.save()
        saml_app.refresh_from_db()
        return saml_app

    def update(self, instance, validated_data):
        '''Update a SAMLAPP and regenerate its SP metadata XML file.

        Empty-string values are treated as "not provided" and leave the
        corresponding field untouched.
        '''
        saml_app = instance
        filename = uuid.uuid4()
        xmldata = validated_data.get('xmldata', '')
        cert = validated_data.get('cert', '')
        entity_id = validated_data.get('entity_id', '')
        # Fixed: previously read the misspelled key 'ace', silently
        # dropping every ACS update.
        acs = validated_data.get('acs', '')
        sls = validated_data.get('sls', '')
        kwargs = {}
        if entity_id != '':
            kwargs['entity_id'] = entity_id
        if acs != '':
            kwargs['acs'] = acs
        if sls != '':
            kwargs['sls'] = sls
        if cert != '':
            kwargs['cert'] = cert
        if xmldata != '':
            # Caller supplied ready-made metadata; write it verbatim.
            with open(BASEDIR + '/djangosaml2idp/saml2_config/%s.xml' % filename, 'w+') as f:
                f.write(xmldata)
        else:
            # Regenerate the metadata, then store its content on the model.
            self.dump_cert(filename, cert)
            self.gen_xml(filename=filename, entity_id=entity_id, acs=acs, sls=sls)
            with open(BASEDIR + '/djangosaml2idp/saml2_config/%s.xml' % filename, 'rb') as f:
                xmldata = f.read()
        kwargs['xmldata'] = xmldata
        saml_app.__dict__.update(**kwargs)
        saml_app.save()
        saml_app.refresh_from_db()
        return saml_app

    def dump_cert(self, filename, cert):    # pylint: disable=no-self-use
        '''Store the SP's public certificate for later metadata generation.'''
        with open(BASEDIR + '/djangosaml2idp/saml2_config/sp_cert/%s.pem' % filename, 'w+') as f:
            f.write(cert)
class LDAPAPPSerializer(DynamicFieldsModelSerializer):
    '''
    Serializer for LDAP APP.

    Exposes only the read-only `more_detail` field.
    '''
    class Meta:    # pylint: disable=missing-docstring
        model = LDAPAPP
        fields = ('more_detail', )
        read_only_fields = ('more_detail', )
class HTTPAPPSerializer(DynamicFieldsModelSerializer):
    '''
    Serializer for HTTP APP.

    Exposes only the read-only `more_detail` field.
    '''
    class Meta:    # pylint: disable=missing-docstring
        model = HTTPAPP
        fields = ('more_detail', )
        read_only_fields = ('more_detail', )
class APPPublicSerializer(DynamicFieldsModelSerializer):
    '''
    Public serializer for APP: basic metadata only, without the per-protocol
    sub-app details.
    '''

    # Read-only primary key exposed as `app_id`.
    app_id = serializers.IntegerField(source='id', read_only=True)
    uid = serializers.CharField(required=False, help_text='默认情况下根据`name`生成')

    class Meta:    # pylint: disable=missing-docstring
        model = APP
        fields = (
            "app_id",
            "uid",
            "name",
            "index",
            "logo",
            "remark",
            'auth_protocols',
        )
class APPSerializer(DynamicFieldsModelSerializer):
    '''
    Serializer for APP, including its optional per-protocol sub-apps
    (OAuth, OIDC, SAML, LDAP, HTTP).
    '''

    app_id = serializers.IntegerField(source='id', read_only=True)
    oauth_app = OAuthAPPSerializer(many=False, required=False, allow_null=True)
    http_app = HTTPAPPSerializer(many=False, required=False, allow_null=True)
    ldap_app = LDAPAPPSerializer(many=False, required=False, allow_null=True)
    oidc_app = OIDCAPPSerializer(many=False, required=False)
    saml_app = SAMLAPPSerializer(many=False, required=False, allow_null=True)
    uid = serializers.CharField(required=False, help_text='默认情况下根据`name`生成')

    class Meta:    # pylint: disable=missing-docstring
        model = APP
        fields = (
            "app_id",
            "uid",
            "name",
            "index",
            "logo",
            "remark",
            "oauth_app",
            "ldap_app",
            "http_app",
            "oidc_app",
            "saml_app",
            "allow_any_user",
            'auth_protocols',
        )

    def validate_uid(self, value):
        '''
        Ensure `uid` is unique among valid APPs (excluding self on update).
        '''
        exclude = {'pk': self.instance.pk} if self.instance else {}
        if self.Meta.model.valid_objects.filter(uid=value).exclude(**exclude).exists():
            raise ValidationError(['this value has be used'])
        return value

    def create(self, validated_data):
        '''
        Create the APP, then each provided sub-app (oauth/ldap/http/oidc/saml).
        `uid` defaults to a value generated from `name`.
        '''
        if 'uid' not in validated_data:
            validated_data['uid'] = gen_uid(validated_data['name'], cls=self.Meta.model)

        oauth_app_data = validated_data.pop('oauth_app', None)
        oidc_app_data = validated_data.pop('oidc_app', None)
        saml_app_data = validated_data.pop('saml_app', None)
        ldap_app_data = validated_data.pop('ldap_app', None)
        http_app_data = validated_data.pop('http_app', None)

        app = APP.objects.create(**validated_data)

        if oauth_app_data is not None:
            oauth_app_serializer = OAuthAPPSerializer(data=oauth_app_data)
            oauth_app_serializer.is_valid(raise_exception=True)
            oauth_app_serializer.save(app=app, name=app.name)

        if ldap_app_data is not None:
            serializer = LDAPAPPSerializer(data=ldap_app_data)
            serializer.is_valid(raise_exception=True)
            serializer.save(app=app)

        if http_app_data is not None:
            serializer = HTTPAPPSerializer(data=http_app_data)
            serializer.is_valid(raise_exception=True)
            serializer.save(app=app)

        if oidc_app_data is not None:
            oidc_app_serializer = OIDCAPPSerializer(data=oidc_app_data)
            oidc_app_serializer.is_valid(raise_exception=True)
            oidc_app_serializer.save(app=app, name=app.name)

        if saml_app_data is not None:
            # SAMLAPPSerializer.create reads the owning app from its data.
            saml_app_data['app'] = app
            serializer = SAMLAPPSerializer(data=saml_app_data)
            serializer.is_valid(raise_exception=True)
            serializer.save(app=app)

        return app

    @staticmethod
    def _sync_sub_app(app, attr, serializer_cls, data):
        '''
        Reconcile one sub-app relation on `app`:
        - `data is None` deletes the existing sub-app (if any);
        - otherwise the existing sub-app is partially updated, or a new
          one is created when none exists yet.
        '''
        if data is None:
            if hasattr(app, attr):
                getattr(app, attr).delete()
            return
        if hasattr(app, attr):
            serializer = serializer_cls(getattr(app, attr), data=data, partial=True)
            serializer.is_valid(raise_exception=True)
            serializer.save()
        else:
            serializer = serializer_cls(data=data)
            serializer.is_valid(raise_exception=True)
            serializer.save(app=app)

    # TODO: support update name of app
    def update(self, instance, validated_data):
        '''
        Update the APP and create/update/delete each provided sub-app.
        `uid` is immutable; protected APPs cannot be modified.
        '''
        app = instance
        if not app.editable:
            raise MethodNotAllowed('MODIFY protected APP')

        # TODO: updating oidc_app is not supported yet; data is ignored.
        validated_data.pop('oidc_app', None)

        uid = validated_data.pop('uid', '')
        if uid and uid != app.uid:
            raise ValidationError({'uid': ['this field is immutable']})

        sub_apps = (
            ('oauth_app', OAuthAPPSerializer),
            ('ldap_app', LDAPAPPSerializer),
            ('http_app', HTTPAPPSerializer),
            ('saml_app', SAMLAPPSerializer),
        )
        for attr, serializer_cls in sub_apps:
            if attr in validated_data:
                self._sync_sub_app(app, attr, serializer_cls, validated_data.pop(attr))

        app.__dict__.update(validated_data)
        app.save()
        app.refresh_from_db()
        return app
class APPWithAccessSerializer(APPSerializer):
    '''
    Serializer for APP, extended with the application's access permission.

    Adds the read-only ``access_perm`` field on top of ``APPSerializer`` so
    API consumers can see the perm record guarding access to the app.
    '''

    # Read-only: the access perm is managed elsewhere, not writable here.
    access_perm = PermWithOwnerSerializer(required=False, read_only=True)

    class Meta:    # pylint: disable=missing-docstring
        model = APP

        fields = (
            "app_id",
            "uid",
            "name",
            "logo",
            "remark",
            "oauth_app",
            "oidc_app",
            "saml_app",
            "ldap_app",
            "http_app",
            "index",
            "allow_any_user",
            'access_perm',
            'auth_protocols',
        )
class APPWithAccessOwnerSerializer(APPWithAccessSerializer):
    '''
    Serializer for APP with access perm, plus the per-request access check
    result for a node or user named in the query string.
    '''

    # Computed per request from the ?node_uid= / ?user_uid= query params.
    access_result = serializers.SerializerMethodField()

    class Meta:    # pylint: disable=missing-docstring
        model = APP

        fields = (
            "app_id",
            "uid",
            "name",
            "logo",
            "remark",
            "oauth_app",
            "oidc_app",
            "saml_app",
            "ldap_app",
            "http_app",
            "index",
            "allow_any_user",
            'access_perm',
            'auth_protocols',
            'access_result',
        )

    def get_access_result(self, instance):
        '''
        Whether a given node or person may access this application.
        (Translated from the original: 某个节点或人,有无访问该应用的权限)

        Reads ``node_uid`` and ``user_uid`` from the request query params.
        If both are supplied, the user silently wins because it is resolved
        last. Raises ValidationError when a supplied uid does not resolve.
        Returns ``{'node_uid', 'user_uid', 'value'}``, with ``value`` False
        when neither param was given.
        '''
        request = self.context['request']
        owner = None

        node_uid = request.query_params.get('node_uid', '')
        if node_uid:
            node, _ = Dept.retrieve_node(node_uid)
            if not node:
                raise ValidationError({'node_uid': ['not found']})
            owner = node

        user_uid = request.query_params.get('user_uid', '')
        if user_uid:
            user = User.valid_objects.filter(username=user_uid).first()
            if not user:
                raise ValidationError({'user_uid': ['not found']})
            owner = user

        return {
            'node_uid': node_uid,
            'user_uid': user_uid,
            # NOTE(review): .get() assumes a matching owner-perm row exists;
            # it would raise DoesNotExist otherwise — confirm upstream
            # guarantees the row is created for every owner/perm pair.
            'value': owner.owner_perm_cls.get(perm=instance.access_perm, owner=owner).value if owner else False,
        }
|
{
"pile_set_name": "Github"
}
|
// @flow weak
import webpack from 'webpack'
import baseConfig from './baseConfig'
// Production build configuration: extends the shared base config, switching
// babel's es2015 preset to `modules: false` (so webpack handles module
// syntax itself) and defining NODE_ENV as 'production'.
const productionRules = baseConfig.module.rules.map(rule => {
  if (rule.use !== 'babel-loader') {
    return rule
  }
  return {
    ...rule,
    options: {
      presets: [
        [
          'es2015',
          {
            modules: false,
          },
        ],
      ],
    },
  }
})

export default {
  ...baseConfig,
  module: {
    rules: [...productionRules],
  },
  plugins: [
    ...baseConfig.plugins,
    new webpack.DefinePlugin({
      'process.env': {
        NODE_ENV: JSON.stringify('production'),
      },
    }),
  ],
}
|
{
"pile_set_name": "Github"
}
|
<VisualStudioProject ProjectType="Visual C++" Version="8.00" Name="db_sql_shell" ProjectGUID="{A5DB89F0-06E5-11DF-8A39-0800200C9A66}">
<Platforms>
<Platform Name="Win32"/>
<Platform Name="x64"/>
</Platforms>
<Configurations>
<Configuration Name="Debug|Win32" OutputDirectory="..\..\build_windows\$(PlatformName)\Debug" IntermediateDirectory="./$(OutDir)/db_sql_shell" ConfigurationType="1" UseOfMFC="0" ATLMinimizesCRunTimeLibraryUsage="FALSE" CharacterSet="2">
<Tool Name="VCPreLinkEventTool"/>
<Tool Name="VCResourceCompilerTool"/>
<Tool Name="VCXMLDataGeneratorTool"/>
<Tool Name="VCManagedWrapperGeneratorTool"/>
<Tool Name="VCAuxiliaryManagedWrapperGeneratorTool"/>
<Tool Name="VCCLCompilerTool" Optimization="0" MinimalRebuild="TRUE" AdditionalIncludeDirectories="../../build_windows,../../src,../../src/dbinc,../../include,../../lang/sql/generated,../../lang/sql/adapter,../../lang/sql/sqlite/src,../../lang/sql/sqlite/ext/fts3,../../lang/sql/sqlite/ext/rtree" PreprocessorDefinitions="WIN32;_WINDOWS;_CRT_SECURE_NO_DEPRECATE;_CRT_NONSTDC_NO_DEPRECATE;_DEBUG;DIAGNOSTIC;WIN32;_CONSOLE;SQLITE_SERVER;SQLITE_PRIVATE;SQLITE_CORE;SQLITE_THREADSAFE=1;_CRT_NONSTDC_NO_DEPRECATE_UNICODE;UNICODE;SQLITE_DEBUG;SQLITE_MEMDEBUG;;_CONSOLE" StringPooling="TRUE" RuntimeLibrary="3" UsePrecompiledHeader="0" PrecompiledHeaderFile="./$(OutDir)/db_sql_shell.pch" AssemblerListingLocation="$(OutDir)/dbsql/" ObjectFile="$(OutDir)/dbsql/" WarningLevel="3" SuppressStartupBanner="TRUE" DebugInformationFormat="3" BasicRuntimeChecks="3" CompileAs="0"/>
<Tool Name="VCLinkerTool" AdditionalOptions="/machine:x86" AdditionalDependencies="libdb61d.lib libdb_sql61d.lib" AdditionalLibraryDirectories="$(OutDir);$(OutDir);../../lib" OutputFile="$(OutDir)/dbsql.exe" ProgramDatabaseFile="$(OutDir)/dbsql.pdb" LinkIncremental="1" GenerateDebugInformation="TRUE" SuppressStartupBanner="TRUE" OptimizeReferences="2" TargetMachine="0"/>
<Tool Name="VCPreBuildEventTool"/>
<Tool Name="VCPostBuildEventTool"/>
<Tool Name="VCCustomBuildTool"/></Configuration>
<Configuration Name="Release|Win32" OutputDirectory="..\..\build_windows\$(PlatformName)\Release" IntermediateDirectory="./$(OutDir)/db_sql_shell" ConfigurationType="1" UseOfMFC="0" ATLMinimizesCRunTimeLibraryUsage="FALSE" CharacterSet="2">
<Tool Name="VCPreLinkEventTool"/>
<Tool Name="VCResourceCompilerTool"/>
<Tool Name="VCXMLDataGeneratorTool"/>
<Tool Name="VCManagedWrapperGeneratorTool"/>
<Tool Name="VCAuxiliaryManagedWrapperGeneratorTool"/>
<Tool Name="VCCLCompilerTool" Optimization="2" MinimalRebuild="TRUE" InlineFunctionExpansion="1" AdditionalIncludeDirectories="../../build_windows,../../src,../../src/dbinc,../../include,../../lang/sql/generated,../../lang/sql/adapter,../../lang/sql/sqlite/src,../../lang/sql/sqlite/ext/fts3,../../lang/sql/sqlite/ext/rtree" PreprocessorDefinitions="WIN32;_WINDOWS;_CRT_SECURE_NO_DEPRECATE;_CRT_NONSTDC_NO_DEPRECATE;NDEBUG;WIN32;_CONSOLE;SQLITE_SERVER;SQLITE_PRIVATE;SQLITE_CORE;SQLITE_THREADSAFE=1;_CRT_NONSTDC_NO_DEPRECATE_UNICODE;UNICODE;_CONSOLE" StringPooling="TRUE" RuntimeLibrary="2" EnableFunctionLevelLinking="TRUE" UsePrecompiledHeader="0" PrecompiledHeaderFile="./$(OutDir)/db_sql_shell.pch" AssemblerListingLocation="$(OutDir)/dbsql/" ObjectFile="$(OutDir)/dbsql/" WarningLevel="3" SuppressStartupBanner="TRUE" DebugInformationFormat="3" CompileAs="0"/>
<Tool Name="VCLinkerTool" AdditionalOptions="/machine:x86" AdditionalDependencies="libdb61.lib libdb_sql61.lib" AdditionalLibraryDirectories="$(OutDir);$(OutDir);../../lib" OutputFile="$(OutDir)/dbsql.exe" ProgramDatabaseFile="$(OutDir)/dbsql.pdb" LinkIncremental="1" GenerateDebugInformation="TRUE" SuppressStartupBanner="TRUE" OptimizeReferences="2" TargetMachine="0"/>
<Tool Name="VCPreBuildEventTool"/>
<Tool Name="VCPostBuildEventTool"/>
<Tool Name="VCCustomBuildTool"/></Configuration>
<Configuration Name="Debug|x64" OutputDirectory="..\..\build_windows\$(PlatformName)\Debug" IntermediateDirectory="./$(OutDir)/db_sql_shell" ConfigurationType="1" UseOfMFC="0" ATLMinimizesCRunTimeLibraryUsage="FALSE" CharacterSet="2">
<Tool Name="VCPreLinkEventTool"/>
<Tool Name="VCResourceCompilerTool"/>
<Tool Name="VCXMLDataGeneratorTool"/>
<Tool Name="VCManagedWrapperGeneratorTool"/>
<Tool Name="VCAuxiliaryManagedWrapperGeneratorTool"/>
<Tool Name="VCCLCompilerTool" Optimization="0" MinimalRebuild="TRUE" AdditionalIncludeDirectories="../../build_windows,../../src,../../src/dbinc,../../include,../../lang/sql/generated,../../lang/sql/adapter,../../lang/sql/sqlite/src,../../lang/sql/sqlite/ext/fts3,../../lang/sql/sqlite/ext/rtree" PreprocessorDefinitions="WIN32;_WINDOWS;_CRT_SECURE_NO_DEPRECATE;_CRT_NONSTDC_NO_DEPRECATE;_DEBUG;DIAGNOSTIC;WIN32;_CONSOLE;SQLITE_SERVER;SQLITE_PRIVATE;SQLITE_CORE;SQLITE_THREADSAFE=1;_CRT_NONSTDC_NO_DEPRECATE_UNICODE;UNICODE;SQLITE_DEBUG;SQLITE_MEMDEBUG;;_CONSOLE" StringPooling="TRUE" RuntimeLibrary="3" UsePrecompiledHeader="0" PrecompiledHeaderFile="./$(OutDir)/db_sql_shell.pch" AssemblerListingLocation="$(OutDir)/dbsql/" ObjectFile="$(OutDir)/dbsql/" WarningLevel="3" SuppressStartupBanner="TRUE" DebugInformationFormat="3" BasicRuntimeChecks="3" CompileAs="0"/>
<Tool Name="VCLinkerTool" AdditionalOptions="/machine:x64" AdditionalDependencies="libdb61d.lib libdb_sql61d.lib" AdditionalLibraryDirectories="$(OutDir);$(OutDir);../../lib" OutputFile="$(OutDir)/dbsql.exe" ProgramDatabaseFile="$(OutDir)/dbsql.pdb" LinkIncremental="1" GenerateDebugInformation="TRUE" SuppressStartupBanner="TRUE" OptimizeReferences="2" TargetMachine="0"/>
<Tool Name="VCPreBuildEventTool"/>
<Tool Name="VCPostBuildEventTool"/>
<Tool Name="VCCustomBuildTool"/></Configuration>
<Configuration Name="Release|x64" OutputDirectory="..\..\build_windows\$(PlatformName)\Release" IntermediateDirectory="./$(OutDir)/db_sql_shell" ConfigurationType="1" UseOfMFC="0" ATLMinimizesCRunTimeLibraryUsage="FALSE" CharacterSet="2">
<Tool Name="VCPreLinkEventTool"/>
<Tool Name="VCResourceCompilerTool"/>
<Tool Name="VCXMLDataGeneratorTool"/>
<Tool Name="VCManagedWrapperGeneratorTool"/>
<Tool Name="VCAuxiliaryManagedWrapperGeneratorTool"/>
<Tool Name="VCCLCompilerTool" Optimization="2" MinimalRebuild="TRUE" InlineFunctionExpansion="1" AdditionalIncludeDirectories="../../build_windows,../../src,../../src/dbinc,../../include,../../lang/sql/generated,../../lang/sql/adapter,../../lang/sql/sqlite/src,../../lang/sql/sqlite/ext/fts3,../../lang/sql/sqlite/ext/rtree" PreprocessorDefinitions="WIN32;_WINDOWS;_CRT_SECURE_NO_DEPRECATE;_CRT_NONSTDC_NO_DEPRECATE;NDEBUG;WIN32;_CONSOLE;SQLITE_SERVER;SQLITE_PRIVATE;SQLITE_CORE;SQLITE_THREADSAFE=1;_CRT_NONSTDC_NO_DEPRECATE_UNICODE;UNICODE;_CONSOLE" StringPooling="TRUE" RuntimeLibrary="2" EnableFunctionLevelLinking="TRUE" UsePrecompiledHeader="0" PrecompiledHeaderFile="./$(OutDir)/db_sql_shell.pch" AssemblerListingLocation="$(OutDir)/dbsql/" ObjectFile="$(OutDir)/dbsql/" WarningLevel="3" SuppressStartupBanner="TRUE" DebugInformationFormat="3" CompileAs="0"/>
<Tool Name="VCLinkerTool" AdditionalOptions="/machine:x64" AdditionalDependencies="libdb61.lib libdb_sql61.lib" AdditionalLibraryDirectories="$(OutDir);$(OutDir);../../lib" OutputFile="$(OutDir)/dbsql.exe" ProgramDatabaseFile="$(OutDir)/dbsql.pdb" LinkIncremental="1" GenerateDebugInformation="TRUE" SuppressStartupBanner="TRUE" OptimizeReferences="2" TargetMachine="0"/>
<Tool Name="VCPreBuildEventTool"/>
<Tool Name="VCPostBuildEventTool"/>
<Tool Name="VCCustomBuildTool"/></Configuration>
</Configurations>
<References/>
<Files>
<File RelativePath="..\..\lang\sql\sqlite\src\shell.c"/>
</Files>
<Globals/>
</VisualStudioProject>
|
{
"pile_set_name": "Github"
}
|
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
//#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include "ionutils.h"
#include "ipcsocket.h"
/*
 * write_buffer() - fill 'buffer' with the pattern byte 0xfd and dump it.
 * @buffer: destination buffer; NULL is rejected with an error message.
 * @len:    number of bytes to fill and print.
 *
 * Each byte written is echoed to stdout in hex so the two test processes'
 * outputs can be compared.
 */
void write_buffer(void *buffer, unsigned long len)
{
	/* Index must match the width of 'len': the original 'int i' compared
	 * against an unsigned long invited overflow for len > INT_MAX. */
	unsigned long i;
	unsigned char *ptr = (unsigned char *)buffer;

	if (!ptr) {
		fprintf(stderr, "<%s>: Invalid buffer...\n", __func__);
		return;
	}

	printf("Fill buffer content:\n");
	memset(ptr, 0xfd, len);
	for (i = 0; i < len; i++)
		printf("0x%x ", ptr[i]);
	printf("\n");
}
/*
 * read_buffer() - dump 'len' bytes of 'buffer' to stdout in hex.
 * @buffer: source buffer; NULL is rejected with an error message.
 * @len:    number of bytes to print.
 *
 * Does not modify the buffer; used to verify the content shared by the
 * peer process.
 */
void read_buffer(void *buffer, unsigned long len)
{
	/* Index matches 'len' width; 'int i' would overflow for huge lengths. */
	unsigned long i;
	unsigned char *ptr = (unsigned char *)buffer;

	if (!ptr) {
		fprintf(stderr, "<%s>: Invalid buffer...\n", __func__);
		return;
	}

	printf("Read buffer content:\n");
	for (i = 0; i < len; i++)
		printf("0x%x ", ptr[i]);
	printf("\n");
}
/*
 * ion_export_buffer_fd() - allocate and map an ION buffer for export.
 * @ion_info: in/out descriptor; heap_type, heap_size and flag_type are
 *            read, and on success ionfd, buffd, buffer and buflen are set.
 *
 * Opens an ION client, finds the first heap matching ion_info->heap_type,
 * allocates heap_size bytes from it and mmaps the resulting buffer fd.
 *
 * Returns 0 on success, -1 on failure; fds opened before the failure are
 * closed on the error paths.
 */
int ion_export_buffer_fd(struct ion_buffer_info *ion_info)
{
	int i, ret, ionfd, buffer_fd;
	unsigned int heap_id;
	unsigned long maplen;
	unsigned char *map_buffer;
	struct ion_allocation_data alloc_data;
	struct ion_heap_query query;
	struct ion_heap_data heap_data[MAX_HEAP_COUNT];

	if (!ion_info) {
		fprintf(stderr, "<%s>: Invalid ion info\n", __func__);
		return -1;
	}

	/* Create an ION client */
	ionfd = open(ION_DEVICE, O_RDWR);
	if (ionfd < 0) {
		fprintf(stderr, "<%s>: Failed to open ion client: %s\n",
			__func__, strerror(errno));
		return -1;
	}

	memset(&query, 0, sizeof(query));
	query.cnt = MAX_HEAP_COUNT;
	query.heaps = (unsigned long int)&heap_data[0];
	/* Query ION heap_id_mask from ION heap */
	ret = ioctl(ionfd, ION_IOC_HEAP_QUERY, &query);
	if (ret < 0) {
		fprintf(stderr, "<%s>: Failed: ION_IOC_HEAP_QUERY: %s\n",
			__func__, strerror(errno));
		goto err_query;
	}

	/* Pick the first reported heap whose type matches the request. */
	heap_id = MAX_HEAP_COUNT + 1;
	for (i = 0; i < query.cnt; i++) {
		if (heap_data[i].type == ion_info->heap_type) {
			heap_id = heap_data[i].heap_id;
			break;
		}
	}

	if (heap_id > MAX_HEAP_COUNT) {
		fprintf(stderr, "<%s>: ERROR: heap type does not exists\n",
			__func__);
		goto err_heap;
	}

	alloc_data.len = ion_info->heap_size;
	alloc_data.heap_id_mask = 1 << heap_id;
	alloc_data.flags = ion_info->flag_type;

	/* Allocate memory for this ION client as per heap_type */
	ret = ioctl(ionfd, ION_IOC_ALLOC, &alloc_data);
	if (ret < 0) {
		fprintf(stderr, "<%s>: Failed: ION_IOC_ALLOC: %s\n",
			__func__, strerror(errno));
		goto err_alloc;
	}

	/* This will return a valid buffer fd */
	buffer_fd = alloc_data.fd;
	maplen = alloc_data.len;

	if (buffer_fd < 0 || maplen <= 0) {
		fprintf(stderr, "<%s>: Invalid map data, fd: %d, len: %ld\n",
			__func__, buffer_fd, maplen);
		goto err_fd_data;
	}

	/* Create memory mapped buffer for the buffer fd */
	map_buffer = (unsigned char *)mmap(NULL, maplen, PROT_READ|PROT_WRITE,
			MAP_SHARED, buffer_fd, 0);
	if (map_buffer == MAP_FAILED) {
		fprintf(stderr, "<%s>: Failed: mmap: %s\n",
			__func__, strerror(errno));
		goto err_mmap;
	}

	ion_info->ionfd = ionfd;
	ion_info->buffd = buffer_fd;
	ion_info->buffer = map_buffer;
	ion_info->buflen = maplen;

	return 0;

	/* An unreachable munmap() call used to sit here: the success path
	 * returns above and no goto targets a label before it, so it was
	 * dead code and has been removed. */
err_fd_data:
err_mmap:
	/* in case of error: close the buffer fd */
	if (buffer_fd)
		close(buffer_fd);
err_query:
err_heap:
err_alloc:
	/* In case of error: close the ion client fd */
	if (ionfd)
		close(ionfd);

	return -1;
}
/*
 * ion_import_buffer_fd() - map an ION buffer fd received from a peer.
 * @ion_info: in/out descriptor; buffd and buflen are read, and on success
 *            buffer and buflen are (re)written with the mapping.
 *
 * Returns 0 on success, -1 on failure (the fd is closed if mmap fails).
 */
int ion_import_buffer_fd(struct ion_buffer_info *ion_info)
{
	unsigned char *vaddr;
	unsigned long length;
	int dmabuf_fd;

	if (!ion_info) {
		fprintf(stderr, "<%s>: Invalid ion info\n", __func__);
		return -1;
	}

	length = ion_info->buflen;
	dmabuf_fd = ion_info->buffd;
	if (dmabuf_fd < 0 || length <= 0) {
		fprintf(stderr, "<%s>: Invalid map data, fd: %d, len: %ld\n",
			__func__, dmabuf_fd, length);
		return -1;
	}

	vaddr = (unsigned char *)mmap(NULL, length, PROT_READ|PROT_WRITE,
			MAP_SHARED, dmabuf_fd, 0);
	if (vaddr == MAP_FAILED) {
		printf("<%s>: Failed - mmap: %s\n",
			__func__, strerror(errno));
		if (dmabuf_fd)
			close(dmabuf_fd);
		return -1;
	}

	ion_info->buffer = vaddr;
	ion_info->buflen = length;

	return 0;
}
/*
 * ion_close_buffer_fd() - tear down everything ion_export/import set up:
 * the mapping, the buffer fd, then the ION client fd. NULL is a no-op.
 */
void ion_close_buffer_fd(struct ion_buffer_info *ion_info)
{
	if (!ion_info)
		return;

	/* unmap the buffer properly in the end */
	munmap(ion_info->buffer, ion_info->buflen);
	/* close the buffer fd */
	if (ion_info->buffd > 0)
		close(ion_info->buffd);
	/* Finally, close the client fd */
	if (ion_info->ionfd > 0)
		close(ion_info->ionfd);
}
int socket_send_fd(struct socket_info *info)
{
int status;
int fd, sockfd;
struct socketdata skdata;
if (!info) {
fprintf(stderr, "<%s>: Invalid socket info\n", __func__);
return -1;
}
sockfd = info->sockfd;
fd = info->datafd;
memset(&skdata, 0, sizeof(skdata));
skdata.data = fd;
skdata.len = sizeof(skdata.data);
status = sendtosocket(sockfd, &skdata);
if (status < 0) {
fprintf(stderr, "<%s>: Failed: sendtosocket\n", __func__);
return -1;
}
return 0;
}
int socket_receive_fd(struct socket_info *info)
{
int status;
int fd, sockfd;
struct socketdata skdata;
if (!info) {
fprintf(stderr, "<%s>: Invalid socket info\n", __func__);
return -1;
}
sockfd = info->sockfd;
memset(&skdata, 0, sizeof(skdata));
status = receivefromsocket(sockfd, &skdata);
if (status < 0) {
fprintf(stderr, "<%s>: Failed: receivefromsocket\n", __func__);
return -1;
}
fd = (int)skdata.data;
info->datafd = fd;
return status;
}
|
{
"pile_set_name": "Github"
}
|
Welcome and Introduction to HTCondor
====================================
:index:`user manual<single: user manual; HTCondor>` :index:`user manual`
HTCondor is developed by the Center for High Throughput Computing at the
University of Wisconsin-Madison (UW-Madison), and was first installed as
a production system in the UW-Madison Computer Sciences department in
the 1990s. HTCondor pools have since served as a major source of
computing cycles to thousands of campuses, labs, organizations and
commercial entities. For many, it has revolutionized the role computing
plays in their research. An increase of several orders of magnitude in
the computing throughput of an organization can have a profound impact on
its results.
HTCondor is a specialized batch system :index:`batch system` for managing
compute-intensive jobs. HTCondor provides a queuing mechanism, scheduling
policy, priority scheme, and resource classifications. Users submit
their compute jobs to HTCondor, HTCondor puts the jobs in a queue, runs
them, and then informs the user as to the result.
Batch systems normally operate only with dedicated machines. Often
termed worker nodes, these dedicated machines are typically owned by
one group and dedicated to the sole purpose of running compute
jobs. HTCondor can schedule jobs on dedicated machines. But unlike
traditional batch systems, HTCondor is also designed to run jobs
on machines shared and used by other systems or people. By being directed to run
jobs on machines which are currently idle, HTCondor can effectively harness
all machines throughout a campus. This is important
because often an organization has more latent, idle computers
than any single department or group otherwise has access to.
|
{
"pile_set_name": "Github"
}
|
/****************************************************************************************
Copyright (C) 2015 Autodesk, Inc.
All rights reserved.
Use of this software is subject to the terms of the Autodesk license agreement
provided at the time of installation or download, or which otherwise accompanies
this software in either electronic or hard copy form.
****************************************************************************************/
//! \file fbxcontrolset.h
#ifndef _FBXSDK_SCENE_CONSTRAINT_CONTROL_SET_H_
#define _FBXSDK_SCENE_CONSTRAINT_CONTROL_SET_H_
#include <fbxsdk/fbxsdk_def.h>
#include <fbxsdk/scene/constraint/fbxcharacter.h>
#include <fbxsdk/fbxsdk_nsbegin.h>
class FbxControlSetPlug;
/** \class FbxControlSetLink
*
* \brief This class represents a link between a given character's FK node and the associated node in the character hierarchy.
*
*/
class FBXSDK_DLL FbxControlSetLink
{
public:
    //! Default constructor.
    FbxControlSetLink();

    /** Copy constructor.
    * \param pControlSetLink Given object.
    */
    FbxControlSetLink(const FbxControlSetLink& pControlSetLink);

    /** Assignment operator.
    * \param pControlSetLink Another FbxControlSetLink object assigned to this one.
    */
    FbxControlSetLink& operator=(const FbxControlSetLink& pControlSetLink);

    /** Reset to default values.
    *
    * Member mNode is set to \c NULL and member mTemplateName is cleared.
    */
    void Reset();

    //! The character's node in a hierarchy linked to this control set link.
    //! NOTE(review): raw pointer — presumably owned by the scene, not this
    //! link; confirm before freeing.
    FbxNode* mNode;

    //! A template name is a naming convention that is used to automatically map
    //! the nodes of other skeletons that use the same naming convention for automatic characterization.
    FbxString mTemplateName;
};
/**
* An effector wraps a character node (FbxNode) used to animate its control rig (FbxControlSet) via inverse kinematics.
*/
class FBXSDK_DLL FbxEffector
{
public:
    //! Identifies which effector set an effector belongs to: the default
    //! set plus 14 auxiliary sets (see FbxControlSet::SetEffectorAux).
    enum ESetId
    {
        eDefaultSet,
        eAux1Set,
        eAux2Set,
        eAux3Set,
        eAux4Set,
        eAux5Set,
        eAux6Set,
        eAux7Set,
        eAux8Set,
        eAux9Set,
        eAux10Set,
        eAux11Set,
        eAux12Set,
        eAux13Set,
        eAux14Set,
        eSetIdCount     //!< Number of valid set IDs (not itself a set).
    };

    //! Identifies the body part an effector drives; used as an index into
    //! FbxControlSet's effector arrays.
    enum ENodeId
    {
        eHips,
        eLeftAnkle,
        eRightAnkle,
        eLeftWrist,
        eRightWrist,
        eLeftKnee,
        eRightKnee,
        eLeftElbow,
        eRightElbow,
        eChestOrigin,
        eChestEnd,
        eLeftFoot,
        eRightFoot,
        eLeftShoulder,
        eRightShoulder,
        eHead,
        eLeftHip,
        eRightHip,
        eLeftHand,
        eRightHand,
        eLeftHandThumb,
        eLeftHandIndex,
        eLeftHandMiddle,
        eLeftHandRing,
        eLeftHandPinky,
        eLeftHandExtraFinger,
        eRightHandThumb,
        eRightHandIndex,
        eRightHandMiddle,
        eRightHandRing,
        eRightHandPinky,
        eRightHandExtraFinger,
        eLeftFootThumb,
        eLeftFootIndex,
        eLeftFootMiddle,
        eLeftFootRing,
        eLeftFootPinky,
        eLeftFootExtraFinger,
        eRightFootThumb,
        eRightFootIndex,
        eRightFootMiddle,
        eRightFootRing,
        eRightFootPinky,
        eRightFootExtraFinger,
        eNodeIdCount,       //!< Number of valid node IDs.
        eNodeIdInvalid=-1   //!< Returned by lookups when no node matches.
    };

    //! Default constructor with uninitialized character node.
    FbxEffector();

    /** Assignment operator.
    * \param pEffector Another FbxEffector assigned to this one.
    */
    FbxEffector& operator=(const FbxEffector& pEffector);

    /** Reset to default values.
    *   - mNode is set to NULL.
    *   - mShow is set to true.
    */
    void Reset();

    //! The character's node in a hierarchy linked to this effector.
    FbxNode* mNode;

    //! \c true if the effector is visible, \c false if hidden
    bool mShow;

/*****************************************************************************************************************************
** WARNING! Anything beyond these lines is for internal use, may not be documented and is subject to change without notice! **
*****************************************************************************************************************************/
#ifndef DOXYGEN_SHOULD_SKIP_THIS
    //These members are for backward compatibility and should not be used.
    //These properties are now published through class FbxControlSetPlug.
    bool mTActive;
    bool mRActive;
    bool mCandidateTActive;
    bool mCandidateRActive;
#endif /* !DOXYGEN_SHOULD_SKIP_THIS *****************************************************************************************/
};
/** \class FbxControlSet
*
* This class contains all methods to either set-up an exported control rig or query information on an imported control rig.
* A Control rig is a character manipulation tool that lets you change the position and orientation
* of a character to create or alter animation.
*
* This class also contains some methods to manipulate the FbxEffector and FbxControlSetLink.
*
* The FbxControlSet class contains FK rig (Forward Kinematics) and IK rig (Inverse Kinematics) animation. The FK rig is represented
* by a list of nodes while the IK rig is represented by a list of effectors.
*
* You can access the FK rig with the FbxControlSetLink class, using the functions FbxControlSet::SetControlSetLink() and FbxControlSet::GetControlSetLink().
*
* You can access the IK rig with the FbxEffector class, using the functions FbxControlSet::SetEffector() and FbxControlSet::GetEffector().
*
* \see FbxEffector, FbxControlSetLink
*/
class FBXSDK_DLL FbxControlSet
{
public:
    /** Reset to default values.
    * Reset all effector and control set links.
    */
    void Reset();

    /** \enum EType Control rig type.
    * - \e eNone    No Control rig.
    * - \e eFkIk    Both an FK rig and IK rig.
    * - \e eIkOnly  Only an IK rig.
    */
    enum EType
    {
        eNone,
        eFkIk,
        eIkOnly
    };

    /** Set type as given.
    * \param pType The given type.
    */
    void SetType(EType pType);

    /** Get type.
    * \return The gotten type.
    */
    EType GetType() const;

    /** Set use axis flag as given.
    * \param pUseAxis The given use axis flag.
    */
    void SetUseAxis(bool pUseAxis);

    /** Get use axis flag.
    * \return The gotten use axis flag.
    */
    bool GetUseAxis() const;

    /** Set lock transform flag as given.
    * \param pLockTransform The given lock transform flag.
    */
    void SetLockTransform(bool pLockTransform);

    /** Get lock transform flag.
    * \return The gotten lock transform flag.
    */
    bool GetLockTransform()const;

    /** Set lock 3D pick flag as given.
    * \param pLock3DPick The given lock 3D pick flag.
    */
    void SetLock3DPick(bool pLock3DPick);

    /** Get lock 3D pick flag.
    * \return The gotten lock 3D pick flag.
    */
    bool GetLock3DPick() const;

    /** Set a control set link for a character node ID.
    * \param pCharacterNodeId Character node ID.
    * \param pControlSetLink Control set link to be associated with the Character node ID.
    * \return \c true if successful, \c false otherwise.
    * \remarks You should avoid setting a control set link for
    * eCharacterLeftFloor, eCharacterRightFloor, eCharacterLeftHandFloor, eCharacterRightHandFloor,
    * eCharacterProps0, eCharacterProps1, eCharacterProps2, eCharacterProps3 or eCharacterProps4.
    * None of these nodes are part of a control set.
    */
    bool SetControlSetLink(FbxCharacter::ENodeId pCharacterNodeId, const FbxControlSetLink& pControlSetLink);

    /** Get the control set link associated with a character node ID.
    * \param pCharacterNodeId Requested character node ID.
    * \param pControlSetLink Optional pointer that returns the control set link if the function succeeds.
    * \return \c true if successful, \c false otherwise.
    * \remarks You should avoid getting a control set link for
    * eCharacterLeftFloor, eCharacterRightFloor, eCharacterLeftHandFloor, eCharacterRightHandFloor,
    * eCharacterProps0, eCharacterProps1, eCharacterProps2, eCharacterProps3 or eCharacterProps4.
    * None of these nodes are part of a control set.
    */
    bool GetControlSetLink(FbxCharacter::ENodeId pCharacterNodeId, FbxControlSetLink* pControlSetLink = NULL) const;

    /** Set an effector node for an effector node ID.
    * \param pEffectorNodeId Effector node ID.
    * \param pEffector Effector to be associated with the effector node ID.
    * \return \c true if successful, \c false otherwise.
    */
    bool SetEffector(FbxEffector::ENodeId pEffectorNodeId, FbxEffector pEffector);

    /** Get the effector associated with an effector node ID.
    * \param pEffectorNodeId ID of requested effector node.
    * \param pEffector Optional pointer that returns the effector if the function succeeds.
    * \return \c true if successful, \c false otherwise.
    */
    bool GetEffector(FbxEffector::ENodeId pEffectorNodeId, FbxEffector* pEffector = NULL);

    /** Set an auxiliary effector node for an effector node ID.
    * \param pEffectorNodeId Effector node ID.
    * \param pNode Auxiliary effector node to be associated with the effector node ID.
    * \param pEffectorSetId Effector set ID. Set to FbxEffector::eAux1Set by default.
    * \return \c true if successful, \c false otherwise.
    */
    bool SetEffectorAux(FbxEffector::ENodeId pEffectorNodeId, FbxNode* pNode, FbxEffector::ESetId pEffectorSetId=FbxEffector::eAux1Set);

    /** Get the auxiliary effector associated with an effector node ID.
    * \param pEffectorNodeId ID of requested auxiliary effector node.
    * \param pNode Optional pointer that returns the auxiliary effector node if the function succeeds.
    * \param pEffectorSetId Effector set ID. Set to FbxEffector::eAux1Set by default.
    * \return \c true if successful, \c false otherwise.
    */
    bool GetEffectorAux(FbxEffector::ENodeId pEffectorNodeId, FbxNode** pNode=NULL, FbxEffector::ESetId pEffectorSetId=FbxEffector::eAux1Set) const;

    /** Get the name associated with an effector node ID.
    * \param pEffectorNodeId Effector node ID.
    * \return Name associated with the effector node ID.
    */
    static char* GetEffectorNodeName(FbxEffector::ENodeId pEffectorNodeId);

    /** Get ID associated with an effector node name.
    * \param pEffectorNodeName Effector node name.
    * \return Effector node ID associated with the given effector node name, or FbxEffector::eNodeIdInvalid (-1) if
    * no effector node with pEffectorNodeName exists.
    */
    static FbxEffector::ENodeId GetEffectorNodeId(char* pEffectorNodeName);

/*****************************************************************************************************************************
** WARNING! Anything beyond these lines is for internal use, may not be documented and is subject to change without notice! **
*****************************************************************************************************************************/
#ifndef DOXYGEN_SHOULD_SKIP_THIS
    void FromPlug(FbxControlSetPlug *pPlug);
    void ToPlug(FbxControlSetPlug *pPlug);

private:
    // Construction is private: instances are created only by friends
    // (FbxCharacter / FbxNode) via FBXSDK_FRIEND_NEW.
    FbxControlSet();
    ~FbxControlSet();

    FbxCharacter* mCharacter;
    EType mType;
    bool mUseAxis;
    bool mLockTransform;
    bool mLock3DPick;
    FbxControlSetLink mControlSetLink[FbxCharacter::eNodeIdCount]; // Except floor node IDs!
    FbxEffector mEffector[FbxEffector::eNodeIdCount];
    // Auxiliary effectors: one slot per node per aux set (eDefaultSet excluded,
    // hence eSetIdCount-1).
    FbxNode* mEffectorAux[FbxEffector::eNodeIdCount][FbxEffector::eSetIdCount-1];

    FBXSDK_FRIEND_NEW();
    friend class FbxCharacter;
    friend class FbxNode;
#endif /* !DOXYGEN_SHOULD_SKIP_THIS *****************************************************************************************/
};
/** Plug class for control set.
* \nosubgrouping
*/
class FBXSDK_DLL FbxControlSetPlug : public FbxObject
{
    FBXSDK_OBJECT_DECLARE(FbxControlSetPlug, FbxObject);

public:
    //! EType property of control set.
    FbxPropertyT<FbxControlSet::EType> ControlSetType;

    //! Use axis flag.
    FbxPropertyT<FbxBool> UseAxis;

    //! Reference character.
    FbxPropertyT<FbxReference> Character;

protected:
    // FbxObject overrides for construction and property initialization.
    virtual void Construct(const FbxObject* pFrom);
    virtual void ConstructProperties(bool pForceSet);
    virtual FbxStringList GetTypeFlags() const;

private:
    // Internal FK/IK property buffers, accessed by the friend classes below.
    FbxArray<FbxProperty> mFKBuf;
    FbxArray<FbxProperty> mIKBuf;

    friend class FbxScene;
    friend class FbxControlSet;
};
//! Type-trait overload: maps FbxControlSet::EType onto the eFbxEnum property type.
inline EFbxType FbxTypeOf(const FbxControlSet::EType&){ return eFbxEnum; }
#include <fbxsdk/fbxsdk_nsend.h>
#endif /* _FBXSDK_SCENE_CONSTRAINT_CONTROL_SET_H_ */
|
{
"pile_set_name": "Github"
}
|
Imports System.Text.RegularExpressions
Imports System.Text
Imports StaxRip.UI
' Abstract base class for audio track profiles: stores the selected source
' file/stream together with encode settings (delay, bit depth, gain, channels,
' bitrate) and defines the Encode/Edit contract implemented by concrete profiles.
<Serializable()>
Public MustInherit Class AudioProfile
Inherits Profile
Property Language As New Language
Property Delay As Integer
' Bit depth; Migrate() resets a persisted 0 back to the default of 24.
Property Depth As Integer = 24
Property StreamName As String = ""
Property Gain As Single
Property Streams As List(Of AudioStream) = New List(Of AudioStream)
Property [Default] As Boolean
Property Forced As Boolean
Property ExtractDTSCore As Boolean
Property Decoder As AudioDecoderMode
Property DecodingMode As AudioDecodingMode
Overridable Property Channels As Integer = 6
Overridable Property OutputFileType As String = "unknown"
Overridable Property Bitrate As Double
Overridable Property SupportedInput As String()
Overridable Property CommandLines As String
Sub New(name As String)
MyBase.New(name)
End Sub
Sub New(name As String,
bitrate As Integer,
input As String(),
fileType As String,
channels As Integer)
MyBase.New(name)
Me.Channels = channels
Me.Bitrate = bitrate
SupportedInput = input
OutputFileType = fileType
End Sub
Private FileValue As String = ""
' Path of the source audio file; changing it clears the selected stream and
' raises OnFileChanged.
Property File As String
Get
Return FileValue
End Get
Set(value As String)
If FileValue <> value Then
FileValue = value
Stream = Nothing
OnFileChanged()
End If
End Set
End Property
Private StreamValue As AudioStream
' Currently selected stream; assigning copies delay, language, forced/default
' flags and title into this profile, then raises OnStreamChanged.
Property Stream As AudioStream
Get
Return StreamValue
End Get
Set(value As AudioStream)
If Not value Is StreamValue Then
StreamValue = value
If Not Stream Is Nothing Then
' Delay is not copied when the source filter is DirectShowSource.
If Not p.Script.GetFilter("Source").Script.Contains("DirectShowSource") Then
Delay = Stream.Delay
End If
Language = Stream.Language
Forced = Stream.Forced
Me.Default = Stream.Default
If StreamName = "" AndAlso Stream.Title <> "" Then
StreamName = Stream.Title
End If
End If
OnStreamChanged()
End If
End Set
End Property
' Human-readable description for the UI; the setter is intentionally a no-op.
Property DisplayName As String
Get
Dim ret = ""
If Stream Is Nothing Then
Dim streams = MediaInfo.GetAudioStreams(File)
If streams.Count > 0 Then
ret = GetAudioText(streams(0), File)
Else
ret = File.FileName
End If
Else
ret = Stream.Name + " (" + File.Ext + ")"
End If
Return ret
End Get
Set(value As String)
End Set
End Property
Private SourceSamplingRateValue As Integer
' Source sampling rate, read lazily from the stream or via MediaInfo;
' falls back to 48000 when it cannot be determined.
ReadOnly Property SourceSamplingRate As Integer
Get
If SourceSamplingRateValue = 0 Then
If Stream Is Nothing Then
If File <> "" AndAlso IO.File.Exists(File) Then
SourceSamplingRateValue = MediaInfo.GetAudio(File, "SamplingRate").ToInt
End If
Else
SourceSamplingRateValue = Stream.SamplingRate
End If
End If
If SourceSamplingRateValue = 0 Then
SourceSamplingRateValue = 48000
End If
Return SourceSamplingRateValue
End Get
End Property
ReadOnly Property HasStream As Boolean
Get
Return Stream IsNot Nothing
End Get
End Property
' Upgrades values persisted by older versions of the application.
Overridable Sub Migrate()
If Depth = 0 Then
Depth = 24
End If
End Sub
' Intermediate decode extension, constrained to what the encoder accepts
' (falls back through flac, then w64, then wav).
ReadOnly Property ConvertExt As String
Get
Dim ret As String
Select Case DecodingMode
Case AudioDecodingMode.WAVE
ret = "wav"
Case AudioDecodingMode.W64
ret = "w64"
Case Else
ret = "flac"
End Select
If Not SupportedInput.Contains(ret) Then
ret = "flac"
End If
If Not SupportedInput.Contains(ret) Then
ret = "w64"
End If
If Not SupportedInput.Contains(ret) Then
ret = "wav"
End If
Return ret
End Get
End Property
Overridable Sub OnFileChanged()
End Sub
Overridable Sub OnStreamChanged()
End Sub
Function ContainsCommand(value As String) As Boolean
Return CommandLines.ContainsEx(value)
End Function
Function IsUsedAndContainsCommand(value As String) As Boolean
Return File <> "" AndAlso CommandLines.ContainsEx(value)
End Function
' Duration of the selected track; returns the implicit default
' (TimeSpan.Zero) when the file does not exist.
Function GetDuration() As TimeSpan
If IO.File.Exists(File) Then
If Stream Is Nothing Then
Return TimeSpan.FromMilliseconds(MediaInfo.GetAudio(File, "Duration").ToDouble)
Else
Using mi As New MediaInfo(File)
Return TimeSpan.FromMilliseconds(mi.GetAudio(Stream.Index, "Duration").ToDouble)
End Using
End If
End If
End Function
' Builds display text for a stream; as a side effect infers language and a
' " <n>ms" delay token from the file path and writes them onto the stream.
Function GetAudioText(stream As AudioStream, path As String) As String
For Each i In Language.Languages
If path.Contains(i.CultureInfo.EnglishName) Then
stream.Language = i
Exit For
End If
Next
Dim matchDelay = Regex.Match(path, " (-?\d+)ms")
If matchDelay.Success Then
stream.Delay = matchDelay.Groups(1).Value.ToInt
End If
Dim matchID = Regex.Match(path, " ID(\d+)")
Dim name As String
name = stream.Name.Substring(3)
If File.Base = p.SourceFile.Base Then
Return name + " (" + File.Ext + ")"
Else
Return name + " (" + File.FileName + ")"
End If
End Function
' Picks a stream with a matching language when File is the original source;
' otherwise tries to detect the language from the file name.
Sub SetStreamOrLanguage()
If File = "" Then
Exit Sub
End If
If File <> p.LastOriginalSourceFile Then
For Each i In Language.Languages
If File.Contains(i.CultureInfo.EnglishName) Then
Language = i
Exit Sub
End If
Next
Else
For Each i In Streams
If i.Language.Equals(Language) Then
Stream = i
Exit For
End If
Next
If Stream Is Nothing AndAlso Streams.Count > 0 Then
Stream = Streams(0)
End If
End If
End Sub
Function IsInputSupported() As Boolean
Return SupportedInput.NothingOrEmpty OrElse SupportedInput.Contains(File.Ext)
End Function
Function IsMuxProfile() As Boolean
Return TypeOf Me Is MuxAudioProfile
End Function
Overridable Sub Encode()
End Sub
Overridable Sub EditProject()
End Sub
' Whether the encoder applies the delay itself; the base falls through to
' the implicit default return value (False).
Overridable Function HandlesDelay() As Boolean
End Function
' 1-based mux track number: Audio0 -> 1, Audio1 -> 2, extra tracks -> 3+.
Function GetTrackID() As Integer
If Me Is p.Audio0 Then Return 1
If Me Is p.Audio1 Then Return 2
For x = 0 To p.AudioTracks.Count - 1
If Me Is p.AudioTracks(x) Then
Return x + 3
End If
Next
End Function
' Output path in the temp dir; strips or appends a " <n>ms" delay token
' depending on HandlesDelay(), and avoids colliding with the input path.
Function GetOutputFile() As String
Dim base As String
If p.TempDir.EndsWithEx("_temp\") AndAlso File.Base.StartsWithEx(p.SourceFile.Base) Then
base = File.Base.Substring(p.SourceFile.Base.Length)
Else
base = File.Base
End If
If Delay <> 0 Then
If HandlesDelay() Then
If base.Contains("ms") Then
Dim re As New Regex(" (-?\d+)ms")
If re.IsMatch(base) Then
base = re.Replace(base, "")
End If
End If
Else
If Not base.Contains("ms") Then
base += " " & Delay & "ms"
End If
End If
End If
Dim tracks = g.GetAudioProfiles.Where(Function(track) track.File <> "")
Dim trackID = If(tracks.Count > 1, "_a" & GetTrackID(), "")
Dim outfile = p.TempDir + base + trackID & "." + OutputFileType.ToLower
If File.IsEqualIgnoreCase(outfile) Then
Return p.TempDir + base + trackID & "_new." + OutputFileType.ToLower
End If
Return outfile
End Function
Function ExpandMacros(value As String) As String
Return ExpandMacros(value, True)
End Function
' Replaces audio-specific macros (%input%, %output%, %bitrate%, ...) and then
' applies the global macro expansion.
Function ExpandMacros(value As String, silent As Boolean) As String
If value = "" Then Return ""
If value.Contains("""%input%""") Then value = value.Replace("""%input%""", File.Escape)
If value.Contains("%input%") Then value = value.Replace("%input%", File.Escape)
If value.Contains("""%output%""") Then value = value.Replace("""%output%""", GetOutputFile.Escape)
If value.Contains("%output%") Then value = value.Replace("%output%", GetOutputFile.Escape)
If value.Contains("%bitrate%") Then value = value.Replace("%bitrate%", Bitrate.ToString)
If value.Contains("%channels%") Then value = value.Replace("%channels%", Channels.ToString)
If value.Contains("%language_native%") Then value = value.Replace("%language_native%", Language.CultureInfo.NativeName)
If value.Contains("%language_english%") Then value = value.Replace("%language_english%", Language.Name)
If value.Contains("%delay%") Then value = value.Replace("%delay%", Delay.ToString)
Return Macro.Expand(value)
End Function
' Factory for the built-in default audio profiles.
Shared Function GetDefaults() As List(Of AudioProfile)
Dim ret As New List(Of AudioProfile)
ret.Add(New GUIAudioProfile(AudioCodec.AAC, 50))
ret.Add(New GUIAudioProfile(AudioCodec.Opus, 1) With {.Bitrate = 250})
ret.Add(New GUIAudioProfile(AudioCodec.FLAC, 0.3))
ret.Add(New GUIAudioProfile(AudioCodec.Vorbis, 1))
ret.Add(New GUIAudioProfile(AudioCodec.MP3, 4))
ret.Add(New GUIAudioProfile(AudioCodec.AC3, 1.0) With {.Channels = 6, .Bitrate = 640})
ret.Add(New GUIAudioProfile(AudioCodec.EAC3, 1.0) With {.Channels = 6, .Bitrate = 640})
ret.Add(New BatchAudioProfile(640, {}, "ac3", 6, """%app:ffmpeg%"" -i ""%input%"" -b:a %bitrate%k -y -hide_banner ""%output%"""))
ret.Add(New MuxAudioProfile())
ret.Add(New NullAudioProfile())
Return ret
End Function
End Class
' Audio profile that encodes by running user-defined command lines through
' cmd.exe instead of a built-in encoder.
<Serializable()>
Public Class BatchAudioProfile
Inherits AudioProfile
Sub New(bitrate As Integer,
input As String(),
fileType As String,
channels As Integer,
commandLines As String)
MyBase.New("Command Line", bitrate, input, fileType, channels)
Me.CommandLines = commandLines
CanEditValue = True
End Sub
' Shows the profile editor; language and delay are managed elsewhere, so
' those controls are disabled here.
Overrides Function Edit() As DialogResult
Using form As New CommandLineAudioEncoderForm(Me)
form.mbLanguage.Enabled = False
form.lLanguage.Enabled = False
form.tbDelay.Enabled = False
form.lDelay.Enabled = False
Return form.ShowDialog()
End Using
End Function
' Returns the command lines with the audio macros expanded.
Function GetCode() As String
Return ExpandMacros(CommandLines).Trim
End Function
' Runs each command line via cmd.exe. On success adopts the output file and
' recalculates bitrates; on missing output falls back to converting the
' input to wav and retrying once.
Overrides Sub Encode()
If File <> "" Then
Dim bitrateBefore = p.VideoBitrate
Dim targetPath = GetOutputFile()
For Each line In Macro.Expand(GetCode).SplitLinesNoEmpty
Using proc As New Proc
proc.Header = "Audio Encoding: " + Name
proc.SkipStrings = Proc.GetSkipStrings(CommandLines)
proc.File = "cmd.exe"
proc.Arguments = "/S /C """ + line + """"
Try
proc.Start()
Catch ex As AbortException
' Bare Throw rethrows and preserves the original stack trace
' (Throw ex would reset it).
Throw
Catch ex As Exception
g.ShowException(ex)
Throw New AbortException
End Try
End Using
Next
If g.FileExists(targetPath) Then
File = targetPath
If Not p.BitrateIsFixed Then
Bitrate = Calc.GetBitrateFromFile(File, p.TargetSeconds)
p.VideoBitrate = CInt(Calc.GetVideoBitrate)
If Not p.VideoEncoder.QualityMode Then
Log.WriteLine("Video Bitrate: " + bitrateBefore.ToString() + " -> " & p.VideoBitrate & BR)
End If
End If
Log.WriteLine(MediaInfo.GetSummary(File))
Else
Log.Write("Error", "no output found")
' Retry once after decoding the input to wav, which more tools accept.
If Not File.Ext = "wav" Then
Audio.Convert(Me)
If File.Ext = "wav" Then
Encode()
End If
End If
End If
End If
End Sub
Overrides Sub EditProject()
Using f As New CommandLineAudioEncoderForm(Me)
f.ShowDialog()
End Using
End Sub
' The user's command line handles delay itself when it uses the %delay% macro.
Overrides Function HandlesDelay() As Boolean
Return CommandLines.Contains("%delay%")
End Function
End Class
' Placeholder profile used when no audio track is wanted; can optionally
' reserve a bitrate for the size calculation.
<Serializable()>
Public Class NullAudioProfile
Inherits AudioProfile
Sub New()
MyBase.New("No Audio", 0, {}, "ignore", 0)
End Sub
' No encoder is involved, so delay is never handled. Returns False
' explicitly instead of relying on the implicit default return value.
Overrides Function HandlesDelay() As Boolean
Return False
End Function
' Shows a small dialog that lets the user reserve bitrate for the track.
Overrides Sub EditProject()
Using form As New SimpleSettingsForm("Null Audio Profile Options")
form.ScaleClientSize(20, 10)
Dim ui = form.SimpleUI
ui.Store = Me
Dim n = ui.AddNum()
n.Text = "Reserved Bitrate:"
n.Config = {0, Integer.MaxValue, 8}
n.Property = NameOf(Bitrate)
If form.ShowDialog() = DialogResult.OK Then
ui.Save()
End If
End Using
End Sub
Public Overrides Property OutputFileType As String
Get
Return "ignore"
End Get
Set(value As String)
End Set
End Property
' No-op: nothing is encoded for a null profile.
Public Overrides Sub Encode()
End Sub
End Class
' Profile that copies/muxes an existing audio track into the output container
' without reencoding.
<Serializable()>
Public Class MuxAudioProfile
Inherits AudioProfile
Sub New()
MyBase.New("Copy/Mux", 0, Nothing, "ignore", 2)
CanEditValue = True
End Sub
' Output type follows the source because the track is passed through unchanged.
Public Overrides Property OutputFileType As String
Get
If Stream Is Nothing Then
Return File.Ext
Else
Return Stream.Ext
End If
End Get
Set(value As String)
End Set
End Property
' Any input is acceptable since nothing gets decoded or encoded.
Overrides Property SupportedInput As String()
Get
Return {}
End Get
Set(value As String())
End Set
End Property
Overrides Function Edit() As DialogResult
Return Edit(False)
End Function
Overrides Sub EditProject()
Edit(True)
End Sub
' No-op: the actual muxing happens when the output container is created.
Overrides Sub Encode()
End Sub
Overrides Sub OnFileChanged()
MyBase.OnFileChanged()
SetBitrate()
End Sub
Overrides Sub OnStreamChanged()
MyBase.OnStreamChanged()
SetBitrate()
End Sub
' Determines the bitrate from the selected stream, or from the file when no
' stream is selected.
Sub SetBitrate()
If Stream Is Nothing Then
Bitrate = Calc.GetBitrateFromFile(File, p.SourceSeconds)
Else
Bitrate = Stream.Bitrate + Stream.Bitrate2
End If
End Sub
' Shows the mux options dialog (track name, delay, language, flags) and
' saves the values when the user confirms with OK.
Private Overloads Function Edit(showProjectSettings As Boolean) As DialogResult
Using form As New SimpleSettingsForm("Audio Mux Options",
"The Audio Mux options allow adding an audio file without reencoding.")
form.ScaleClientSize(30, 15)
Dim ui = form.SimpleUI
Dim page = ui.CreateFlowPage("main page")
page.SuspendLayout()
Dim tbb = ui.AddTextButton(page)
tbb.Label.Text = "Track Name:"
tbb.Label.Help = "Track name used by the muxer. The track name may contain macros."
tbb.Edit.Expand = True
tbb.Edit.Text = StreamName
tbb.Edit.SaveAction = Sub(value) StreamName = value
tbb.Button.Text = "Macro Editor..."
tbb.Button.ClickAction = AddressOf tbb.Edit.EditMacro
Dim nb = ui.AddNum(page)
nb.Label.Text = "Delay:"
nb.Label.Help = "Delay used by the muxer."
nb.NumEdit.Config = {Integer.MinValue, Integer.MaxValue, 1}
nb.NumEdit.Value = Delay
nb.NumEdit.SaveAction = Sub(value) Delay = CInt(value)
Dim mbi = ui.AddMenu(Of Language)(page)
mbi.Label.Text = "Language:"
mbi.Label.Help = "Language of the audio track."
mbi.Button.Value = Language
mbi.Button.SaveAction = Sub(value) Language = value
For Each i In Language.Languages
If i.IsCommon Then
mbi.Button.Add(i.ToString, i)
Else
mbi.Button.Add("More | " + i.ToString.Substring(0, 1).ToUpper + " | " + i.ToString, i)
End If
Next
Dim cb = ui.AddBool(page)
cb.Text = "Default"
cb.Help = "Flagged as default in MKV."
cb.Checked = [Default]
cb.SaveAction = Sub(value) [Default] = value
cb = ui.AddBool(page)
cb.Text = "Forced"
cb.Help = "Flagged as forced in MKV."
cb.Checked = Forced
cb.SaveAction = Sub(value) Forced = value
cb = ui.AddBool(page)
cb.Text = "Extract DTS Core"
cb.Help = "Only include DTS core using mkvmerge."
cb.Checked = ExtractDTSCore
cb.SaveAction = Sub(value) ExtractDTSCore = value
page.ResumeLayout()
Dim ret = form.ShowDialog()
If ret = DialogResult.OK Then
ui.Save()
End If
Return ret
End Using
End Function
End Class
<Serializable()>
Public Class GUIAudioProfile
Inherits AudioProfile
Property Params As New Parameters
Sub New(codec As AudioCodec, quality As Single)
MyBase.New(Nothing)
Params.Codec = codec
Params.Quality = quality
Select Case codec
Case AudioCodec.DTS, AudioCodec.AC3, AudioCodec.EAC3
Params.RateMode = AudioRateMode.CBR
Case Else
Params.RateMode = AudioRateMode.VBR
End Select
Bitrate = GetBitrate()
End Sub
Public Overrides Property Channels As Integer
Get
Select Case Params.ChannelsMode
Case ChannelsMode.Original
If Not Stream Is Nothing Then
If Stream.Channels > Stream.Channels2 Then
Return Stream.Channels
Else
Return Stream.Channels2
End If
ElseIf File <> "" AndAlso IO.File.Exists(File) Then
Return MediaInfo.GetChannels(File)
Else
Return 6
End If
Case ChannelsMode._1
Return 1
Case ChannelsMode._2
Return 2
Case ChannelsMode._6
Return 6
Case ChannelsMode._7
Return 7
Case ChannelsMode._8
Return 8
Case Else
Throw New NotImplementedException
End Select
End Get
Set(value As Integer)
End Set
End Property
ReadOnly Property TargetSamplingRate As Integer
Get
If Params.SamplingRate <> 0 Then
Return Params.SamplingRate
Else
Return SourceSamplingRate
End If
End Get
End Property
Public Overrides Sub Migrate()
MyBase.Migrate()
Params.Migrate()
End Sub
Function GetBitrate() As Integer
If Params.RateMode = AudioRateMode.VBR Then
Select Case Params.Codec
Case AudioCodec.AAC
Select Case Params.Encoder
Case GuiAudioEncoder.qaac, GuiAudioEncoder.Automatic
Return Calc.GetYFromTwoPointForm(0, CInt(50 / 8 * Channels), 127, CInt(1000 / 8 * Channels), Params.Quality)
Case GuiAudioEncoder.eac3to
Return Calc.GetYFromTwoPointForm(0.01, CInt(50 / 8 * Channels), 1, CInt(1000 / 8 * Channels), Params.Quality)
Case Else
Return Calc.GetYFromTwoPointForm(1, CInt(50 / 8 * Channels), 5, CInt(900 / 8 * Channels), Params.Quality)
End Select
Case AudioCodec.MP3
Return Calc.GetYFromTwoPointForm(9, 65, 0, 245, Params.Quality)
Case AudioCodec.Vorbis
If Channels >= 6 Then
Return Calc.GetYFromTwoPointForm(0, 120, 10, 1440, Params.Quality)
Else
Return Calc.GetYFromTwoPointForm(0, 64, 10, 500, Params.Quality)
End If
Case AudioCodec.Opus
Return CInt(Bitrate)
End Select
End If
Select Case Params.Codec
Case AudioCodec.FLAC
Return CInt(((TargetSamplingRate * Depth * Channels) / 1000) * 0.55)
Case AudioCodec.W64, AudioCodec.WAV
Return CInt((TargetSamplingRate * Depth * Channels) / 1000)
End Select
Return CInt(Bitrate)
End Function
Public Overrides Sub Encode()
If File <> "" Then
Dim bitrateBefore = p.VideoBitrate
Dim targetPath = GetOutputFile()
Dim cl = GetCommandLine(True)
Using proc As New Proc
proc.Header = "Audio Encoding " & GetTrackID()
If cl.Contains("|") Then
proc.File = "cmd.exe"
proc.Arguments = "/S /C """ + cl + """"
Else
proc.CommandLine = cl
End If
If cl.Contains("qaac64") Then
proc.Package = Package.qaac
proc.SkipStrings = {", ETA ", "x)"}
ElseIf cl.Contains("fdkaac") Then
proc.Package = Package.fdkaac
proc.SkipStrings = {"%]", "x)"}
ElseIf cl.Contains("eac3to") Then
proc.Package = Package.eac3to
proc.SkipStrings = {"process: ", "analyze: "}
proc.TrimChars = {"-"c, " "c}
g.AddToPath(Package.NeroAAC.Directory)
ElseIf cl.Contains("ffmpeg") Then
If cl.Contains("libfdk_aac") Then
proc.Package = Package.ffmpeg_non_free
Else
proc.Package = Package.ffmpeg
End If
proc.SkipStrings = {"frame=", "size="}
proc.Encoding = Encoding.UTF8
proc.Duration = GetDuration()
End If
proc.Start()
End Using
If g.FileExists(targetPath) Then
File = targetPath
If Not p.BitrateIsFixed Then
Bitrate = Calc.GetBitrateFromFile(File, p.TargetSeconds)
p.VideoBitrate = CInt(Calc.GetVideoBitrate)
If Not p.VideoEncoder.QualityMode Then
Log.WriteLine("Video Bitrate: " + bitrateBefore.ToString() + " -> " & p.VideoBitrate & BR)
End If
End If
Log.WriteLine(MediaInfo.GetSummary(File))
Else
Throw New ErrorAbortException("Error audio encoding", "The output file is missing")
End If
End If
End Sub
Sub NormalizeFF()
If Not Params.Normalize OrElse ExtractCore OrElse
Not {ffmpegNormalizeMode.loudnorm, ffmpegNormalizeMode.volumedetect}.Contains(Params.ffmpegNormalizeMode) Then
Exit Sub
End If
Dim args = "-i " + File.LongPathPrefix.Escape
If Not Stream Is Nothing AndAlso Streams.Count > 1 Then
args += " -map 0:a:" & Stream.Index
End If
args += " -sn -vn -hide_banner"
If Params.ffmpegNormalizeMode = ffmpegNormalizeMode.volumedetect Then
args += " -af volumedetect"
ElseIf Params.ffmpegNormalizeMode = ffmpegNormalizeMode.loudnorm Then
args += " -af loudnorm=I=" & Params.ffmpegLoudnormIntegrated.ToInvariantString +
":TP=" & Params.ffmpegLoudnormTruePeak.ToInvariantString + ":LRA=" &
Params.ffmpegLoudnormLRA.ToInvariantString + ":print_format=summary"
End If
args += " -f null NUL"
Using proc As New Proc
proc.Header = "Find Gain " & GetTrackID()
proc.SkipStrings = {"frame=", "size="}
proc.Encoding = Encoding.UTF8
proc.Package = Package.ffmpeg
proc.Arguments = args
proc.Start()
Dim match = Regex.Match(proc.Log.ToString, "max_volume: -(\d+\.\d+) dB")
If match.Success Then Gain += match.Groups(1).Value.ToSingle()
match = Regex.Match(proc.Log.ToString, "Input Integrated:\s*([-\.0-9]+)")
If match.Success Then Params.ffmpegLoudnormIntegratedMeasured = match.Groups(1).Value.ToDouble
match = Regex.Match(proc.Log.ToString, "Input True Peak:\s*([-\.0-9]+)")
If match.Success Then Params.ffmpegLoudnormTruePeakMeasured = match.Groups(1).Value.ToDouble
match = Regex.Match(proc.Log.ToString, "Input LRA:\s*([-\.0-9]+)")
If match.Success Then Params.ffmpegLoudnormLraMeasured = match.Groups(1).Value.ToDouble
match = Regex.Match(proc.Log.ToString, "Input Threshold:\s*([-\.0-9]+)")
If match.Success Then Params.ffmpegLoudnormThresholdMeasured = match.Groups(1).Value.ToDouble
End Using
End Sub
Overrides Function Edit() As DialogResult
Using form As New AudioForm()
form.LoadProfile(Me)
form.mbLanguage.Enabled = False
form.numDelay.Enabled = False
form.numGain.Enabled = False
Return form.ShowDialog()
End Using
End Function
Overrides Sub EditProject()
Using form As New AudioForm()
form.LoadProfile(Me)
form.ShowDialog()
End Using
End Sub
Public Overrides Property OutputFileType As String
Get
Select Case Params.Codec
Case AudioCodec.AAC
Return "m4a"
Case AudioCodec.Vorbis
Return "ogg"
Case Else
Return Params.Codec.ToString.ToLower
End Select
End Get
Set(value As String)
End Set
End Property
Function GetEac3toCommandLine(includePaths As Boolean) As String
Dim id As String
Dim sb As New StringBuilder
If File.Ext.EqualsAny("ts", "m2ts", "mkv") AndAlso Not Stream Is Nothing Then
id = (Stream.StreamOrder + 1) & ": "
End If
If includePaths Then
sb.Append(Package.eac3to.Path.Escape + " " + id + File.LongPathPrefix.Escape +
" " + GetOutputFile.LongPathPrefix.Escape)
Else
sb.Append("eac3to")
End If
If Not (Params.Codec = AudioCodec.DTS AndAlso ExtractDTSCore) Then
Select Case Params.Codec
Case AudioCodec.AAC
sb.Append(" -quality=" & Params.Quality.ToInvariantString)
Case AudioCodec.AC3
sb.Append(" -" & Bitrate)
If Not {192, 224, 384, 448, 640}.Contains(CInt(Bitrate)) Then
Return "Invalid bitrate, select 192, 224, 384, 448 or 640"
End If
Case AudioCodec.DTS
sb.Append(" -" & Bitrate)
End Select
If Params.Normalize Then
sb.Append(" -normalize")
End If
If Depth = 16 Then
sb.Append(" -down16")
End If
If Params.SamplingRate <> 0 Then
sb.Append(" -resampleTo" & Params.SamplingRate)
End If
If Params.FrameRateMode = AudioFrameRateMode.Speedup Then
sb.Append(" -speedup")
End If
If Params.FrameRateMode = AudioFrameRateMode.Slowdown Then
sb.Append(" -slowdown")
End If
If Delay <> 0 Then
sb.Append(" " + If(Delay > 0, "+", "") & Delay & "ms")
End If
If Gain < 0 Then
sb.Append(" " & CInt(Gain) & "dB")
End If
If Gain > 0 Then
sb.Append(" +" & CInt(Gain) & "dB")
End If
Select Case Channels
Case 6
If Params.ChannelsMode <> ChannelsMode.Original Then
sb.Append(" -down6")
End If
Case 2
If Params.eac3toStereoDownmixMode = 0 Then
If Params.ChannelsMode <> ChannelsMode.Original Then
sb.Append(" -downStereo")
End If
Else
sb.Append(" -downDpl")
End If
End Select
If Params.CustomSwitches <> "" Then
sb.Append(" " + Params.CustomSwitches)
End If
ElseIf ExtractDTSCore Then
sb.Append(" -core")
End If
If includePaths Then
sb.Append(" -progressnumbers")
End If
Return sb.ToString
End Function
Function GetfdkaacCommandLine(includePaths As Boolean) As String
Dim sb As New StringBuilder
includePaths = includePaths And File <> ""
If DecodingMode = AudioDecodingMode.Pipe Then
sb.Append(GetPipeCommandLine(includePaths))
End If
If includePaths Then
sb.Append(Package.fdkaac.Path.Escape)
Else
sb.Clear()
sb.Append("fdkaac")
End If
If Params.fdkaacProfile <> 2 Then
sb.Append(" --profile " & Params.fdkaacProfile)
End If
If Params.SimpleRateMode = SimpleAudioRateMode.CBR Then
sb.Append(" --bitrate " & CInt(Bitrate))
Else
sb.Append(" --bitrate-mode " & Params.Quality)
End If
If Params.fdkaacGaplessMode <> 0 Then sb.Append(" --gapless-mode " & Params.fdkaacGaplessMode)
If Params.fdkaacBandwidth <> 0 Then sb.Append(" --bandwidth " & Params.fdkaacBandwidth)
If Not Params.fdkaacAfterburner Then sb.Append(" --afterburner 0")
If Params.fdkaacAdtsCrcCheck Then sb.Append(" --adts-crc-check")
If Params.fdkaacMoovBeforeMdat Then sb.Append(" --moov-before-mdat")
If Params.fdkaacIncludeSbrDelay Then sb.Append(" --include-sbr-delay")
If Params.fdkaacHeaderPeriod Then sb.Append(" --header-period")
If Params.fdkaacLowDelaySBR <> 0 Then sb.Append(" --lowdelay-sbr " & Params.fdkaacLowDelaySBR)
If Params.fdkaacSbrRatio <> 0 Then sb.Append(" --sbr-ratio " & Params.fdkaacSbrRatio)
If Params.fdkaacTransportFormat <> 0 Then sb.Append(" --transport-format " & Params.fdkaacTransportFormat)
If Params.CustomSwitches <> "" Then sb.Append(" " + Params.CustomSwitches)
Dim input = If(DecodingMode = AudioDecodingMode.Pipe, "-", File.LongPathPrefix.Escape)
If includePaths Then
sb.Append(" --ignorelength -o " + GetOutputFile.LongPathPrefix.Escape + " " + input)
End If
Return sb.ToString
End Function
Function GetQaacCommandLine(includePaths As Boolean) As String
Dim sb As New StringBuilder
includePaths = includePaths And File <> ""
If DecodingMode = AudioDecodingMode.Pipe Then
sb.Append(GetPipeCommandLine(includePaths))
End If
If includePaths Then
sb.Append(Package.qaac.Path.Escape)
Else
sb.Clear()
sb.Append("qaac")
End If
Select Case Params.qaacRateMode
Case 0
sb.Append(" --tvbr " & CInt(Params.Quality))
Case 1
sb.Append(" --cvbr " & CInt(Bitrate))
Case 2
sb.Append(" --abr " & CInt(Bitrate))
Case 3
sb.Append(" --cbr " & CInt(Bitrate))
End Select
If Params.qaacHE AndAlso {1, 2, 3}.Contains(Params.qaacRateMode) Then
sb.Append(" --he")
End If
If Delay <> 0 Then
sb.Append(" --delay " + (Delay / 1000).ToInvariantString)
End If
If Params.Normalize Then
sb.Append(" --normalize")
End If
If Params.qaacQuality <> 2 Then
sb.Append(" --quality " & Params.qaacQuality)
End If
If Params.SamplingRate <> 0 Then
sb.Append(" --rate " & Params.SamplingRate)
End If
If Params.qaacLowpass <> 0 Then
sb.Append(" --lowpass " & Params.qaacLowpass)
End If
If Params.qaacNoDither Then
sb.Append(" --no-dither")
End If
If Gain <> 0 Then
sb.Append(" --gain " & Gain.ToInvariantString)
End If
If Params.CustomSwitches <> "" Then
sb.Append(" " + Params.CustomSwitches)
End If
Dim input = If(DecodingMode = AudioDecodingMode.Pipe, "-", File.ToShortFilePath.Escape)
If includePaths Then
sb.Append(" " + input + " -o " + GetOutputFile.ToShortFilePath.Escape)
End If
Return sb.ToString
End Function
Function GetPipeCommandLine(includePaths As Boolean) As String
Dim sb As New StringBuilder
If includePaths AndAlso File <> "" Then
sb.Append(Package.ffmpeg.Path.Escape + " -i " + File.Escape)
Else
sb.Append("ffmpeg")
End If
If Not Stream Is Nothing AndAlso Streams.Count > 1 Then
sb.Append(" -map 0:a:" & Stream.Index)
End If
If Params.ChannelsMode <> ChannelsMode.Original Then
sb.Append(" -ac " & Channels)
End If
If Params.Normalize Then
If Params.ffmpegNormalizeMode = ffmpegNormalizeMode.dynaudnorm Then
sb.Append(" " + Audio.GetDynAudNormArgs(Params))
ElseIf Params.ffmpegNormalizeMode = ffmpegNormalizeMode.loudnorm Then
sb.Append(" " + Audio.GetLoudNormArgs(Params))
End If
End If
If includePaths AndAlso File <> "" Then
sb.Append(" -loglevel fatal -hide_banner -f wav - | ")
End If
Return sb.ToString
End Function
Function GetffmpegCommandLine(includePaths As Boolean) As String
Dim sb As New StringBuilder
Dim pack = If(Params.Codec = AudioCodec.AAC AndAlso Params.ffmpegLibFdkAAC,
Package.ffmpeg_non_free, Package.ffmpeg)
If includePaths AndAlso File <> "" Then
sb.Append(pack.Path.Escape + " -i " + File.LongPathPrefix.Escape)
Else
sb.Append("ffmpeg")
End If
If Not Stream Is Nothing AndAlso Streams.Count > 1 Then
sb.Append(" -map 0:a:" & Stream.Index)
End If
Select Case Params.Codec
Case AudioCodec.MP3
If Not Params.CustomSwitches.Contains("-c:a ") Then
sb.Append(" -c:a libmp3lame")
End If
Select Case Params.RateMode
Case AudioRateMode.ABR
sb.Append(" -b:a " & CInt(Bitrate) & "k -abr 1")
Case AudioRateMode.CBR
sb.Append(" -b:a " & CInt(Bitrate) & "k")
Case AudioRateMode.VBR
sb.Append(" -q:a " & CInt(Params.Quality))
End Select
Case AudioCodec.AC3
If Not Params.CustomSwitches.Contains("-c:a ") Then
sb.Append(" -c:a ac3")
End If
If Not {192, 224, 384, 448, 640}.Contains(CInt(Bitrate)) Then
Return "Invalid bitrate, select 192, 224, 384, 448 or 640"
End If
sb.Append(" -b:a " & CInt(Bitrate) & "k")
Case AudioCodec.EAC3
If Not Params.CustomSwitches.Contains("-c:a ") Then
sb.Append(" -c:a eac3")
End If
sb.Append(" -b:a " & CInt(Bitrate) & "k")
Case AudioCodec.DTS
If ExtractDTSCore Then
sb.Append(" -bsf:a dca_core -c:a copy")
Else
sb.Append(" -strict -2 -b:a " & CInt(Bitrate) & "k")
End If
Case AudioCodec.Vorbis
If Not Params.CustomSwitches.Contains("-c:a ") Then
sb.Append(" -c:a libvorbis")
End If
If Params.RateMode = AudioRateMode.VBR Then
sb.Append(" -q:a " & CInt(Params.Quality))
Else
sb.Append(" -b:a " & CInt(Bitrate) & "k")
End If
Case AudioCodec.Opus
If Not Params.CustomSwitches.Contains("-c:a ") Then
sb.Append(" -c:a libopus")
End If
Select Case Params.ffmpegOpusRateMode
Case OpusRateMode.CBR
sb.Append(" -vbr off")
Case OpusRateMode.CVBR
sb.Append(" -vbr constrained")
End Select
sb.Append(" -b:a " & CInt(Bitrate) & "k")
Select Case Params.ffmpegOpusApp
Case OpusApp.voip
sb.Append(" -application voip")
Case OpusApp.lowdelay
sb.Append(" -application lowdelay")
End Select
If Params.ffmpegOpusFrame <> 20 Then
sb.Append(" -frame_duration " & Params.ffmpegOpusFrame.ToInvariantString)
End If
If Params.ffmpegOpusMap <> -1 Then
sb.Append(" -mapping_family " & CInt(Params.ffmpegOpusMap))
End If
If Params.ffmpegOpusCompress <> 10 Then
sb.Append(" -compression_level " & CInt(Params.ffmpegOpusCompress))
End If
If Params.ffmpegOpusPacket <> 0 Then
sb.Append(" -packet_loss " & CInt(Params.ffmpegOpusPacket))
End If
Case AudioCodec.AAC
If Params.ffmpegLibFdkAAC Then
sb.Append(" -c:a libfdk_aac")
If Params.RateMode = SimpleAudioRateMode.CBR Then
sb.Append(" -b:a " & CInt(Bitrate) & "k")
Else
sb.Append(" -vbr " & CInt(Params.Quality))
End If
Else
sb.Append(" -c:a aac")
If Params.RateMode = SimpleAudioRateMode.CBR Then
sb.Append(" -b:a " & CInt(Bitrate) & "k")
Else
sb.Append(" -q:a " & CInt(Params.Quality))
End If
End If
Case AudioCodec.W64, AudioCodec.WAV
If Depth = 24 Then
sb.Append(" -c:a pcm_s24le")
Else
sb.Append(" -c:a pcm_s16le")
End If
End Select
If Gain <> 0 Then
sb.Append(" -af volume=" + Gain.ToInvariantString + "dB")
End If
If Params.Normalize Then
If Params.ffmpegNormalizeMode = ffmpegNormalizeMode.loudnorm Then
sb.Append(" " + Audio.GetLoudNormArgs(Params))
ElseIf Params.ffmpegNormalizeMode = ffmpegNormalizeMode.dynaudnorm Then
sb.Append(" " + Audio.GetDynAudNormArgs(Params))
End If
End If
If Params.ChannelsMode <> ChannelsMode.Original Then
sb.Append(" -ac " & Channels)
End If
If Params.SamplingRate <> 0 Then
sb.Append(" -ar " & Params.SamplingRate)
End If
If Params.CustomSwitches <> "" Then
sb.Append(" " + Params.CustomSwitches)
End If
If includePaths AndAlso File <> "" Then
sb.Append(" -y -hide_banner")
sb.Append(" " + GetOutputFile.LongPathPrefix.Escape)
End If
Return sb.ToString
End Function
Function SupportsNormalize() As Boolean
Return GetEncoder() = GuiAudioEncoder.eac3to OrElse GetEncoder() = GuiAudioEncoder.qaac
End Function
Public Overrides ReadOnly Property DefaultName As String
Get
If Params Is Nothing Then
Exit Property
End If
Dim ch As String
Select Case Params.ChannelsMode
Case ChannelsMode._8
ch += " 7.1"
Case ChannelsMode._7
ch += " 6.1"
Case ChannelsMode._6
ch += " 5.1"
Case ChannelsMode._2
ch += " 2.0"
Case ChannelsMode._1
ch += " Mono"
End Select
Dim circa = If(Params.RateMode = AudioRateMode.VBR OrElse Params.Codec = AudioCodec.FLAC, "~", "")
Dim bitrate = If(Params.RateMode = AudioRateMode.VBR, GetBitrate(), Me.Bitrate)
If ExtractCore Then
Return "Extract DTS Core"
Else
Return Params.Codec.ToString + ch & " " & circa & bitrate & " Kbps"
End If
End Get
End Property
ReadOnly Property ExtractCore As Boolean
Get
Dim enc = GetEncoder()
If Params.Codec = AudioCodec.DTS AndAlso ExtractDTSCore AndAlso
(enc = GuiAudioEncoder.eac3to OrElse enc = GuiAudioEncoder.ffmpeg) Then
Return True
End If
End Get
End Property
Overrides Property CommandLines() As String
Get
Return GetCommandLine(True)
End Get
Set(Value As String)
End Set
End Property
Overrides ReadOnly Property CanEdit() As Boolean
Get
Return True
End Get
End Property
Overrides Function HandlesDelay() As Boolean
If {GuiAudioEncoder.eac3to, GuiAudioEncoder.qaac}.Contains(GetEncoder()) Then
Return True
End If
End Function
Function GetEncoder() As GuiAudioEncoder
Select Case Params.Encoder
Case GuiAudioEncoder.eac3to
If {AudioCodec.AAC, AudioCodec.AC3, AudioCodec.FLAC, AudioCodec.DTS,
AudioCodec.W64, AudioCodec.WAV}.Contains(Params.Codec) Then
Return GuiAudioEncoder.eac3to
End If
Case GuiAudioEncoder.qaac
If Params.Codec = AudioCodec.AAC Then
Return GuiAudioEncoder.qaac
End If
Case GuiAudioEncoder.fdkaac
If Params.Codec = AudioCodec.AAC Then
Return GuiAudioEncoder.fdkaac
End If
Case GuiAudioEncoder.Automatic
If Params.Codec = AudioCodec.AAC Then
Return GuiAudioEncoder.qaac
End If
End Select
Return GuiAudioEncoder.ffmpeg
End Function
Function GetCommandLine(includePaths As Boolean) As String
Select Case GetEncoder()
Case GuiAudioEncoder.eac3to
Return GetEac3toCommandLine(includePaths)
Case GuiAudioEncoder.qaac
Return GetQaacCommandLine(includePaths)
Case GuiAudioEncoder.fdkaac
Return GetfdkaacCommandLine(includePaths)
Case Else
Return GetffmpegCommandLine(includePaths)
End Select
End Function
'File types the selected encoder accepts directly.
'NOTE(review): the empty array fall-through presumably means "no
'restriction" (input is decoded/piped first) - confirm against callers.
Overrides Property SupportedInput As String()
Get
Select Case GetEncoder()
Case GuiAudioEncoder.eac3to
Return FileTypes.eac3toInput
Case GuiAudioEncoder.qaac
If DecodingMode <> AudioDecodingMode.Pipe Then
If p.Ranges.Count > 0 Then
'flac is excluded when cut ranges are defined
Return {"wav", "w64"}
Else
Return {"wav", "flac", "w64"}
End If
End If
Case GuiAudioEncoder.fdkaac
If DecodingMode <> AudioDecodingMode.Pipe Then
Return {"wav"}
End If
End Select
Return {}
End Get
Set(value As String())
End Set
End Property
'Settings bag for the GUI audio profile, marked <Serializable()> for
'persistence; member initializers double as defaults for new profiles.
<Serializable()>
Public Class Parameters
'Common encoding settings
Property Codec As AudioCodec
Property CustomSwitches As String = ""
Property eac3toStereoDownmixMode As Integer
Property Encoder As GuiAudioEncoder
Property FrameRateMode As AudioFrameRateMode
Property Normalize As Boolean = True
Property Quality As Single = 0.3
Property RateMode As AudioRateMode
Property SamplingRate As Integer
Property ChannelsMode As ChannelsMode
'Migration markers consumed by Migrate() below
Property Migrate1 As Boolean = True
Property MigrateffNormalizeMode As Boolean = True
'qaac-specific settings
Property qaacHE As Boolean
Property qaacLowpass As Integer
Property qaacNoDither As Boolean
Property qaacQuality As Integer = 2
Property qaacRateMode As Integer
'ffmpeg Opus-specific settings
Property ffmpegOpusApp As OpusApp = OpusApp.audio
Property ffmpegOpusCompress As Integer = 10
Property ffmpegOpusFrame As Double = 20
Property ffmpegOpusMap As Integer = -1
Property ffmpegOpusPacket As Integer
Property ffmpegOpusRateMode As OpusRateMode = OpusRateMode.VBR
Property ffmpegOpusMigrate As Integer = 1
'fdkaac-specific settings
Property fdkaacProfile As Integer = 2
Property fdkaacBandwidth As Integer
Property fdkaacAfterburner As Boolean = True
Property fdkaacLowDelaySBR As Integer
Property fdkaacSbrRatio As Integer
Property fdkaacTransportFormat As Integer
Property fdkaacGaplessMode As Integer
Property fdkaacAdtsCrcCheck As Boolean
Property fdkaacHeaderPeriod As Boolean
Property fdkaacIncludeSbrDelay As Boolean
Property fdkaacMoovBeforeMdat As Boolean
'ffmpeg normalization settings (loudnorm/dynaudnorm style parameters)
Property ffNormalizeMode As ffNormalizeMode
Property ffmpegNormalizeMode As ffmpegNormalizeMode
Property ffmpegLibFdkAAC As Boolean
Property ffmpegLoudnormIntegrated As Double = -24
Property ffmpegLoudnormIntegratedMeasured As Double
Property ffmpegLoudnormLRA As Double = 7
Property ffmpegLoudnormLraMeasured As Double
Property ffmpegLoudnormThresholdMeasured As Double
Property ffmpegLoudnormTruePeak As Double = -2
Property ffmpegLoudnormTruePeakMeasured As Double
Property ffmpegDynaudnormF As Integer = 500
Property ffmpegDynaudnormG As Integer = 31
Property ffmpegDynaudnormP As Double = 0.95
Property ffmpegDynaudnormM As Double = 10
Property ffmpegDynaudnormR As Double
Property ffmpegDynaudnormN As Boolean = True
Property ffmpegDynaudnormC As Boolean
Property ffmpegDynaudnormB As Boolean
Property ffmpegDynaudnormS As Double
'Simplified CBR/VBR view over RateMode; ABR reads back as VBR.
Property SimpleRateMode As SimpleAudioRateMode
Get
If RateMode = AudioRateMode.CBR Then
Return SimpleAudioRateMode.CBR
Else
Return SimpleAudioRateMode.VBR
End If
End Get
Set(value As SimpleAudioRateMode)
If value = SimpleAudioRateMode.CBR Then
RateMode = AudioRateMode.CBR
Else
RateMode = AudioRateMode.VBR
End If
End Set
End Property
'legacy/obsolete
'Upgrades settings persisted by older releases; each block runs once,
'guarded by its migration flag or marker value.
Sub Migrate()
'2019
If fdkaacProfile = 0 Then
fdkaacProfile = 2
SimpleRateMode = SimpleAudioRateMode.VBR
fdkaacAfterburner = True
End If
'2019
If Not Migrate1 Then
Normalize = True
ffmpegLoudnormIntegrated = -24
ffmpegLoudnormLRA = 7
ffmpegLoudnormTruePeak = -2
ffmpegDynaudnormF = 500
ffmpegDynaudnormG = 31
ffmpegDynaudnormP = 0.95
ffmpegDynaudnormM = 10
ffmpegDynaudnormN = True
Migrate1 = True
End If
'2020
If Not MigrateffNormalizeMode Then
ffmpegNormalizeMode = CType(ffNormalizeMode, ffmpegNormalizeMode)
MigrateffNormalizeMode = True
End If
'2020
If ffmpegOpusMigrate <> 1 Then
ffmpegOpusMigrate = 1
ffmpegOpusApp = OpusApp.audio
ffmpegOpusCompress = 10
ffmpegOpusFrame = 20
ffmpegOpusMap = -1
ffmpegOpusRateMode = OpusRateMode.VBR
End If
End Sub
End Class
End Class
'Target audio codecs offered by the profile; order/values are part of
'the serialized settings, do not reorder.
Public Enum AudioCodec
AAC
AC3
DTS
FLAC
MP3
Opus
Vorbis
W64
WAV
EAC3
End Enum
'Bitrate control: constant, average, variable.
Public Enum AudioRateMode
CBR
ABR
VBR
End Enum
'Opus rate control: constant, variable, constrained variable.
Public Enum OpusRateMode
CBR
VBR
CVBR
End Enum
'NOTE(review): names match ffmpeg/libopus "application" values
'(voip/audio/lowdelay) - confirm the mapping in the command builder.
Public Enum OpusApp
voip
audio
lowdelay
End Enum
'Reduced CBR/VBR choice used by Parameters.SimpleRateMode.
Public Enum SimpleAudioRateMode
CBR
VBR
End Enum
'AAC profile selection; SBRPS is pinned to 300 so its serialized value
'stays stable.
Public Enum AudioAacProfile
Automatic
LC
SBR
<DispName("SBR+PS")> SBRPS = 300
End Enum
'Available encoder back ends; Automatic is resolved by GetEncoder().
Public Enum GuiAudioEncoder
Automatic
eac3to
ffmpeg
qaac
fdkaac
End Enum
'How to treat PAL speedup on the audio track.
Public Enum AudioFrameRateMode
Keep
<DispName("Apply PAL speedup")> Speedup
<DispName("Reverse PAL speedup")> Slowdown
End Enum
'Stereo downmix algorithm choices.
Public Enum AudioDownMixMode
<DispName("Simple")> Stereo
<DispName("Dolby Surround")> Surround
<DispName("Dolby Surround 2")> Surround2
End Enum
'Output channel layouts; the underscore names carry the channel count
'(e.g. _6 = 5.1 -> six channels).
Public Enum ChannelsMode
Original
<DispName("1 (Mono)")> _1
<DispName("2 (Stereo)")> _2
<DispName("5.1")> _6
<DispName("6.1")> _7
<DispName("7.1")> _8
End Enum
|
{
"pile_set_name": "Github"
}
|
reftype: Web Page
publication_journal: None
title: None
publication_year: None
conference: None
key: PAM
authors: []
source_url: http://en.wikipedia.org/wiki/Pluggable_Authentication_Modules
id: 5019
|
{
"pile_set_name": "Github"
}
|
<?php
/**
* This file is part of PHP Mess Detector.
*
* Copyright (c) Manuel Pichler <mapi@phpmd.org>.
* All rights reserved.
*
* Licensed under BSD License
* For full copyright and license information, please see the LICENSE file.
* Redistributions of files must retain the above copyright notice.
*
* @author Manuel Pichler <mapi@phpmd.org>
* @copyright Manuel Pichler. All rights reserved.
* @license https://opensource.org/licenses/bsd-license.php BSD License
* @link http://phpmd.org/
*/
// PHPMD test fixture: a property name starting with a capital letter
// must NOT be reported by the property-naming rule under test, so this
// class intentionally stays exactly this shape.
class testRuleDoesNotApplyForPropertyNameWithCapital
{
public $AlsoValidPropertyName;
}
|
{
"pile_set_name": "Github"
}
|
require 'abstract_unit'
# Exercises ActiveSupport's MissingSourceFile, raised when the target
# of require/load cannot be found on the load path.
class TestMissingSourceFile < Test::Unit::TestCase
  def test_with_require
    assert_raise(MissingSourceFile) do
      require "no_this_file_don't_exist"
    end
  end

  def test_with_load
    assert_raise(MissingSourceFile) do
      load 'nor_does_this_one'
    end
  end

  # The exception records the path it failed to load.
  def test_path
    load 'nor/this/one.rb'
  rescue MissingSourceFile => e
    assert_equal 'nor/this/one.rb', e.path
  end
end
|
{
"pile_set_name": "Github"
}
|
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#include "libavutil/attributes.h"
#include "libavutil/common.h"
#include "mpegvideodsp.h"
/* 1/16-pel bilinear global motion compensation for an 8-pixel-wide
 * block of h rows (MPEG-4 GMC with one motion point).  dst/src are
 * 8-bit pixel rows with the given stride; (x16, y16) is the fractional
 * sample position in sixteenths and rounder is added before the >>8
 * normalization of the weights (which always sum to 256). */
static void gmc1_c(uint8_t *dst, uint8_t *src, int stride, int h,
                   int x16, int y16, int rounder)
{
    /* Bilinear weights for the four neighboring samples. */
    const int w00 = (16 - x16) * (16 - y16);
    const int w01 = x16 * (16 - y16);
    const int w10 = (16 - x16) * y16;
    const int w11 = x16 * y16;
    int y, x;

    for (y = 0; y < h; y++) {
        for (x = 0; x < 8; x++)
            dst[x] = (w00 * src[x]          + w01 * src[x + 1] +
                      w10 * src[stride + x] + w11 * src[stride + x + 1] +
                      rounder) >> 8;
        dst += stride;
        src += stride;
    }
}
/* Generic global motion compensation: for each of 8 columns in h rows,
 * walks an affine motion field (dxx/dxy/dyx/dyy increments) and
 * bilinearly interpolates the source, clamping coordinates that fall
 * outside the picture to the border.
 * NOTE(review): vx/vy advance in what looks like 16.16 fixed point
 * (vx >> 16) while the sub-pel fraction is masked with s = 1 << shift;
 * the exact fixed-point contract comes from the MPEG-4 caller -
 * confirm against mpegvideo before changing. */
void ff_gmc_c(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
int dxx, int dxy, int dyx, int dyy, int shift, int r,
int width, int height)
{
int y, vx, vy;
const int s = 1 << shift;
/* width/height become the last valid coordinate for the unsigned
 * in-bounds tests below */
width--;
height--;
for (y = 0; y < h; y++) {
int x;
vx = ox;
vy = oy;
for (x = 0; x < 8; x++) { // FIXME: optimize
int index;
int src_x = vx >> 16;
int src_y = vy >> 16;
int frac_x = src_x & (s - 1);
int frac_y = src_y & (s - 1);
src_x >>= shift;
src_y >>= shift;
if ((unsigned) src_x < width) {
if ((unsigned) src_y < height) {
/* fully inside: full 2-D bilinear interpolation */
index = src_x + src_y * stride;
dst[y * stride + x] =
((src[index] * (s - frac_x) +
src[index + 1] * frac_x) * (s - frac_y) +
(src[index + stride] * (s - frac_x) +
src[index + stride + 1] * frac_x) * frac_y +
r) >> (shift * 2);
} else {
/* y clamped to the border: interpolate in x only */
index = src_x + av_clip(src_y, 0, height) * stride;
dst[y * stride + x] =
((src[index] * (s - frac_x) +
src[index + 1] * frac_x) * s +
r) >> (shift * 2);
}
} else {
if ((unsigned) src_y < height) {
/* x clamped to the border: interpolate in y only */
index = av_clip(src_x, 0, width) + src_y * stride;
dst[y * stride + x] =
((src[index] * (s - frac_y) +
src[index + stride] * frac_y) * s +
r) >> (shift * 2);
} else {
/* both clamped: nearest border pixel, no interpolation */
index = av_clip(src_x, 0, width) +
av_clip(src_y, 0, height) * stride;
dst[y * stride + x] = src[index];
}
}
vx += dxx;
vy += dyx;
}
ox += dxy;
oy += dyy;
}
}
/* Installs the C implementations of the GMC helpers, then lets the
 * platform-specific initializers (PPC/x86) override them with
 * optimized versions where available. */
av_cold void ff_mpegvideodsp_init(MpegVideoDSPContext *c)
{
c->gmc1 = gmc1_c;
c->gmc = ff_gmc_c;
if (ARCH_PPC)
ff_mpegvideodsp_init_ppc(c);
if (ARCH_X86)
ff_mpegvideodsp_init_x86(c);
}
|
{
"pile_set_name": "Github"
}
|
using System;
using System.Runtime.Serialization;
namespace KRPC.Client
{
/// <summary>
/// Thrown when an error occurs connecting to a server.
/// </summary>
[Serializable]
public class ConnectionException : Exception
{
/// <summary>
/// Construct a ConnectionException with no message.
/// </summary>
public ConnectionException ()
{
}
/// <summary>
/// Construct a ConnectionException with the given message.
/// </summary>
public ConnectionException (string message) : base (message)
{
}
/// <summary>
/// Construct a ConnectionException with the given message and inner exception.
/// </summary>
public ConnectionException (string message, Exception inner) : base (message, inner)
{
}
/// <summary>
/// Construct a ConnectionException with the given serialization info and streaming context.
/// </summary>
protected ConnectionException (SerializationInfo info, StreamingContext context) : base (info, context)
{
}
}
}
|
{
"pile_set_name": "Github"
}
|
package cn.edu.jxnu.awesome_campus.ui.education;
import java.util.ArrayList;
import cn.edu.jxnu.awesome_campus.event.EventModel;
import cn.edu.jxnu.awesome_campus.ui.base.TopNavigationFragment;
/**
 * Top-level "Education" tab fragment hosting the exam and course score
 * child fragments.
 *
 * Fix: removed the unused private static field {@code educationFragment};
 * {@link #newInstance()} always returns a fresh instance, so the field
 * was dead state.
 *
 * Created by MummyDing on 16-2-1.
 * GitHub: https://github.com/MummyDing
 * Blog: http://blog.csdn.net/mummyding
 */
public class EducationFragment extends TopNavigationFragment{

    /**
     * (Re)populates the child fragment list inherited from
     * TopNavigationFragment. NOTE(review): {@code fragments} appears to
     * be a static field shared across instances - confirm that is
     * intended before making this non-static.
     */
    protected static void addChildFragments() {
        if (fragments == null) {
            fragments = new ArrayList<>();
        } else if (fragments.size() > 0) {
            fragments.clear();
        }
        fragments.add(new ExamFragment());
        fragments.add(new CourseScoreFragment());
    }

    /** Factory: rebuilds the child fragment list and returns a new instance. */
    public static EducationFragment newInstance() {
        addChildFragments();
        return new EducationFragment();
    }

    @Override
    public void onEventComing(EventModel eventModel) {
        // This container fragment has no event handling of its own.
    }
}
|
{
"pile_set_name": "Github"
}
|
/*
* Globalize Culture zu
*
* http://github.com/jquery/globalize
*
* Copyright Software Freedom Conservancy, Inc.
* Dual licensed under the MIT or GPL Version 2 licenses.
* http://jquery.org/license
*
* This file was generated by the Globalize Culture Generator
* Translation: bugs found in this file need to be fixed in the generator
*/
(function( window, undefined ) {

// Resolve Globalize from CommonJS when available, otherwise from the
// global (browser) scope.
var Globalize;

if ( typeof require !== "undefined" &&
typeof exports !== "undefined" &&
typeof module !== "undefined" ) {
// Assume CommonJS
Globalize = require( "globalize" );
} else {
// Global variable
Globalize = window.Globalize;
}

// Register the isiZulu ("zu") culture: number/currency formats plus
// the standard calendar's day/month names and date patterns.
// Generated file - per the header, data bugs must be fixed in the
// generator, not edited here.
Globalize.addCultureInfo( "zu", "default", {
name: "zu",
englishName: "isiZulu",
nativeName: "isiZulu",
language: "zu",
numberFormat: {
percent: {
pattern: ["-%n","%n"]
},
currency: {
pattern: ["$-n","$ n"],
symbol: "R"
}
},
calendars: {
standard: {
days: {
names: ["iSonto","uMsombuluko","uLwesibili","uLwesithathu","uLwesine","uLwesihlanu","uMgqibelo"],
namesAbbr: ["Son.","Mso.","Bi.","Tha.","Ne.","Hla.","Mgq."]
},
months: {
names: ["uMasingana","uNhlolanja","uNdasa","uMbaso","uNhlaba","uNhlangulana","uNtulikazi","uNcwaba","uMandulo","uMfumfu","uLwezi","uZibandlela",""],
namesAbbr: ["Mas.","Nhlo.","Nda.","Mba.","Nhla.","Nhlang.","Ntu.","Ncwa.","Man.","Mfu.","Lwe.","Zib.",""]
},
patterns: {
d: "yyyy/MM/dd",
D: "dd MMMM yyyy",
t: "hh:mm tt",
T: "hh:mm:ss tt",
f: "dd MMMM yyyy hh:mm tt",
F: "dd MMMM yyyy hh:mm:ss tt",
M: "dd MMMM",
Y: "MMMM yyyy"
}
}
}
});

}( this ));
|
{
"pile_set_name": "Github"
}
|
# mashumaro (マシュマロ)
> **mashumaro** is a fast and well tested serialization framework on top of dataclasses.
[](https://travis-ci.org/Fatal1ty/mashumaro)
[](https://coveralls.io/github/Fatal1ty/mashumaro?branch=master)
[](https://pypi.python.org/pypi/mashumaro)
[](https://pypi.python.org/pypi/mashumaro)
[](https://opensource.org/licenses/Apache-2.0)
When using dataclasses, you often need to dump and load objects according to
the described schema.
This framework not only adds the ability to serialize objects in different
formats, but it also makes **serialization rapid**.
Table of contents
--------------------------------------------------------------------------------
* [Installation](#installation)
* [Supported serialization formats](#supported-serialization-formats)
* [Supported field types](#supported-field-types)
* [Usage example](#usage-example)
* [How does it work?](#how-does-it-work)
* [API](#api)
* [User defined classes](#user-defined-classes)
Installation
--------------------------------------------------------------------------------
Use pip to install:
```shell
$ pip install mashumaro
```
Supported serialization formats
--------------------------------------------------------------------------------
This framework adds methods for dumping to and loading from the
following formats:
* plain dict
* json
* yaml
* msgpack
Plain dict can be useful when you need to pass a dict object to a
third-party library, such as a client for MongoDB.
Supported field types
--------------------------------------------------------------------------------
There is support for generic types from the standard *typing* module:
* List
* Tuple
* Set
* FrozenSet
* Deque
* Dict
* Mapping
* MutableMapping
* ChainMap
* Sequence
for special primitives from the *typing* module:
* Optional
* Any
for enumerations based on classes from the standard *enum* module:
* Enum
* IntEnum
* Flag
* IntFlag
for common built-in types:
* int
* float
* bool
* str
* bytes
* bytearray
for built-in datetime oriented types:
* datetime
* date
* time
* timedelta
* timezone
for other less popular built-in types:
* uuid.UUID
* decimal.Decimal
* fractions.Fraction
* os.PathLike (loads to Path)
for specific types like *NoneType*, nested dataclasses itself and
even [user defined classes](#user-defined-classes).
Usage example
--------------------------------------------------------------------------------
```python
from enum import Enum
from typing import Set
from dataclasses import dataclass
from mashumaro import DataClassJSONMixin
class PetType(Enum):
CAT = 'CAT'
MOUSE = 'MOUSE'
@dataclass(unsafe_hash=True)
class Pet(DataClassJSONMixin):
name: str
age: int
pet_type: PetType
@dataclass
class Person(DataClassJSONMixin):
first_name: str
second_name: str
age: int
pets: Set[Pet]
tom = Pet(name='Tom', age=5, pet_type=PetType.CAT)
jerry = Pet(name='Jerry', age=3, pet_type=PetType.MOUSE)
john = Person(first_name='John', second_name='Smith', age=18, pets={tom, jerry})
dump = john.to_json()
person = Person.from_json(dump)
# person == john
Pet.from_json('{"name": "Tom", "age": 5, "pet_type": "CAT"}')
# Pet(name='Tom', age=5, pet_type=<PetType.CAT: 'CAT'>)
```
How does it work?
--------------------------------------------------------------------------------
This framework works by taking the schema of the data and generating a
specific parser and builder for exactly that schema.
This is much faster than inspection of field types on every call of parsing or
building at runtime.
API
--------------------------------------------------------------------------------
Mashumaro provides a couple of mixins for each format.
#### `DataClassDictMixin.to_dict(use_bytes: bool, use_enum: bool, use_datetime: bool)`
Make a dictionary from dataclass object based on the dataclass schema provided.
Options include:
```python
use_bytes: False # False - convert bytes/bytearray objects to base64 encoded string, True - keep untouched
use_enum: False # False - convert enum objects to enum values, True - keep untouched
use_datetime: False # False - convert datetime oriented objects to ISO 8601 formatted string, True - keep untouched
```
#### `DataClassDictMixin.from_dict(data: Mapping, use_bytes: bool, use_enum: bool, use_datetime: bool)`
Make a new object from dict object based on the dataclass schema provided.
Options include:
```python
use_bytes: False # False - load bytes/bytearray objects from base64 encoded string, True - keep untouched
use_enum: False # False - load enum objects from enum values, True - keep untouched
use_datetime: False # False - load datetime oriented objects from ISO 8601 formatted string, True - keep untouched
```
#### `DataClassJSONMixin.to_json(encoder: Optional[Encoder], dict_params: Optional[Mapping], **encoder_kwargs)`
Make a JSON formatted string from dataclass object based on the dataclass
schema provided. Options include:
```
encoder # function called for json encoding, defaults to json.dumps
dict_params # dictionary of parameter values passed underhood to `to_dict` function
encoder_kwargs # keyword arguments for encoder function
```
#### `DataClassJSONMixin.from_json(data: Union[str, bytes, bytearray], decoder: Optional[Decoder], dict_params: Optional[Mapping], **decoder_kwargs)`
Make a new object from JSON formatted string based on the dataclass schema
provided. Options include:
```
decoder # function called for json decoding, defaults to json.loads
dict_params # dictionary of parameter values passed underhood to `from_dict` function
decoder_kwargs # keyword arguments for decoder function
```
#### `DataClassMessagePackMixin.to_msgpack(encoder: Optional[Encoder], dict_params: Optional[Mapping], **encoder_kwargs)`
Make a MessagePack formatted bytes object from dataclass object based on the
dataclass schema provided. Options include:
```
encoder # function called for MessagePack encoding, defaults to msgpack.packb
dict_params # dictionary of parameter values passed underhood to `to_dict` function
encoder_kwargs # keyword arguments for encoder function
```
#### `DataClassMessagePackMixin.from_msgpack(data: Union[str, bytes, bytearray], decoder: Optional[Decoder], dict_params: Optional[Mapping], **decoder_kwargs)`
Make a new object from MessagePack formatted data based on the
dataclass schema provided. Options include:
```
decoder # function called for MessagePack decoding, defaults to msgpack.unpackb
dict_params # dictionary of parameter values passed underhood to `from_dict` function
decoder_kwargs # keyword arguments for decoder function
```
#### `DataClassYAMLMixin.to_yaml(encoder: Optional[Encoder], dict_params: Optional[Mapping], **encoder_kwargs)`
Make a YAML formatted bytes object from dataclass object based on the
dataclass schema provided. Options include:
```
encoder # function called for YAML encoding, defaults to yaml.dump
dict_params # dictionary of parameter values passed underhood to `to_dict` function
encoder_kwargs # keyword arguments for encoder function
```
#### `DataClassYAMLMixin.from_yaml(data: Union[str, bytes], decoder: Optional[Decoder], dict_params: Optional[Mapping], **decoder_kwargs)`
Make a new object from YAML formatted data based on the
dataclass schema provided. Options include:
```
decoder # function called for YAML decoding, defaults to yaml.safe_load
dict_params # dictionary of parameter values passed underhood to `from_dict` function
decoder_kwargs # keyword arguments for decoder function
```
User defined classes
--------------------------------------------------------------------------------
You can define and use custom classes with *mashumaro*. There are two options
for customization. The first one is useful when you already have the separate
custom class and you want to serialize instances of it with *mashumaro*.
All you need to do is implement the *SerializableType* interface:
```python
from typing import Dict
from datetime import datetime
from dataclasses import dataclass
from mashumaro import DataClassDictMixin
from mashumaro.types import SerializableType
class DateTime(datetime, SerializableType):
def _serialize(self) -> Dict[str, int]:
return {
"year": self.year,
"month": self.month,
"day": self.day,
"hour": self.hour,
"minute": self.minute,
"second": self.second,
}
@classmethod
def _deserialize(cls, value: Dict[str, int]) -> 'DateTime':
return DateTime(
year=value['year'],
month=value['month'],
day=value['day'],
hour=value['hour'],
minute=value['minute'],
second=value['second'],
)
@dataclass
class Holiday(DataClassDictMixin):
when: DateTime = DateTime.now()
new_year = Holiday(when=DateTime(2019, 1, 1, 12))
dictionary = new_year.to_dict()
# {'when': {'year': 2019, 'month': 1, 'day': 1, 'hour': 12, 'minute': 0, 'second': 0}}
assert Holiday.from_dict(dictionary) == new_year
```
The second option is useful when you want to change the serialization behaviour
for a class depending on some defined parameters. For this case you can create
the special class implementing *SerializationStrategy* interface:
```python
from datetime import datetime
from dataclasses import dataclass
from mashumaro import DataClassDictMixin
from mashumaro.types import SerializationStrategy
class FormattedDateTime(SerializationStrategy):
def __init__(self, fmt):
self.fmt = fmt
def _serialize(self, value: datetime) -> str:
return value.strftime(self.fmt)
def _deserialize(self, value: str) -> datetime:
return datetime.strptime(value, self.fmt)
@dataclass
class DateTimeFormats(DataClassDictMixin):
short: FormattedDateTime(fmt='%d%m%Y%H%M%S') = datetime.now()
verbose: FormattedDateTime(fmt='%A %B %d, %Y, %H:%M:%S') = datetime.now()
formats = DateTimeFormats(
short=datetime(2019, 1, 1, 12),
verbose=datetime(2019, 1, 1, 12),
)
dictionary = formats.to_dict()
# {'short': '01012019120000', 'verbose': 'Tuesday January 01, 2019, 12:00:00'}
assert DateTimeFormats.from_dict(dictionary) == formats
```
TODO
--------------------------------------------------------------------------------
* write benchmarks
* add optional validation
* add Union support (try to match types on each call)
* write custom useful types such as URL, Email etc
* write documentation
|
{
"pile_set_name": "Github"
}
|
/*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the Source EULA. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import { DeploymentOptions } from '../../../mssql/src/mssql';
/**
 * Settings for publishing a database project to a target database.
 */
export interface IPublishSettings {
/** Name of the target database. */
databaseName: string;
/** URI identifying the connection to the target server. */
connectionUri: string;
/** Whether to upgrade an existing database in place. */
upgradeExisting: boolean;
/** Optional SQLCMD variable name/value pairs for the deployment. */
sqlCmdVariables?: Record<string, string>;
/** Optional deployment options forwarded to the mssql extension. */
deploymentOptions?: DeploymentOptions;
}
/**
 * Settings for generating a deployment script (no upgradeExisting flag,
 * since nothing is deployed directly).
 */
export interface IGenerateScriptSettings {
/** Name of the target database the script is generated against. */
databaseName: string;
/** URI identifying the connection to the target server. */
connectionUri: string;
/** Optional SQLCMD variable name/value pairs for the script. */
sqlCmdVariables?: Record<string, string>;
/** Optional deployment options forwarded to the mssql extension. */
deploymentOptions?: DeploymentOptions;
}
|
{
"pile_set_name": "Github"
}
|
// main resolvers
exports.Query = {
me(rootQuery, args, context) {
return context.models.User.me()
}
}
// type resolvers
exports.User = {
fullName(user) {
return `${user.firstName} ${user.lastName}`
}
}
|
{
"pile_set_name": "Github"
}
|
#-- encoding: UTF-8
#-- copyright
# ChiliProject is a project management system.
#
# Copyright (C) 2010-2013 the ChiliProject Team
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# See doc/COPYRIGHT.rdoc for more details.
#++
require 'cgi'
module Redmine
module Scm
module Adapters
# Raised by AbstractAdapter.shellout when the SCM binary cannot be run.
class CommandFailed < StandardError #:nodoc:
end
# Base class for SCM (version control) adapters. Concrete adapters
# override the query methods (entries, revisions, diff, cat, ...);
# this base class supplies shell quoting, credential stripping and
# process spawning helpers.
class AbstractAdapter #:nodoc:
class << self
# Path/name of the scm client binary; empty here, overridden per scm.
def client_command
""
end
# Returns the version of the scm client
# Eg: [1, 5, 0] or [] if unknown
def client_version
[]
end
# Returns the version string of the scm client
# Eg: '1.5.0' or 'Unknown version' if unknown
def client_version_string
v = client_version || 'Unknown version'
v.is_a?(Array) ? v.join('.') : v.to_s
end
# Returns true if the current client version is above
# or equals the given one
# If option is :unknown is set to true, it will return
# true if the client version is unknown
def client_version_above?(v, options={})
((client_version <=> v) >= 0) || (client_version.empty? && options[:unknown])
end
def client_available
true
end
# Quotes str for the platform shell (cmd.exe vs POSIX sh).
def shell_quote(str)
if Redmine::Platform.mswin?
'"' + str.gsub(/"/, '\\"') + '"'
else
"'" + str.gsub(/'/, "'\"'\"'") + "'"
end
end
end
# root_url falls back to the root reported by info() when blank;
# password is only kept when a login is present.
def initialize(url, root_url=nil, login=nil, password=nil,
path_encoding=nil)
@url = url
@login = login if login && !login.empty?
@password = (password || "") if @login
@root_url = root_url.blank? ? retrieve_root_url : root_url
end
def adapter_name
'Abstract'
end
def supports_cat?
true
end
# Annotate support is detected by the presence of an #annotate method.
def supports_annotate?
respond_to?('annotate')
end
def root_url
@root_url
end
def url
@url
end
# get info about the svn repository
def info
return nil
end
# Returns the entry identified by path and revision identifier
# or nil if entry doesn't exist in the repository
def entry(path=nil, identifier=nil)
parts = path.to_s.split(%r{[\/\\]}).select {|n| !n.blank?}
search_path = parts[0..-2].join('/')
search_name = parts[-1]
if search_path.blank? && search_name.blank?
# Root entry
Entry.new(:path => '', :kind => 'dir')
else
# Search for the entry in the parent directory
es = entries(search_path, identifier)
es ? es.detect {|e| e.name == search_name} : nil
end
end
# Returns an Entries collection
# or nil if the given path doesn't exist in the repository
def entries(path=nil, identifier=nil)
return nil
end
# The remaining query methods return nil here; concrete adapters
# override the ones their scm supports.
def branches
return nil
end
def tags
return nil
end
def default_branch
return nil
end
def properties(path, identifier=nil)
return nil
end
def revisions(path=nil, identifier_from=nil, identifier_to=nil, options={})
return nil
end
def diff(path, identifier_from, identifier_to=nil)
return nil
end
def cat(path, identifier=nil)
return nil
end
def with_leading_slash(path)
path ||= ''
(path[0,1]!="/") ? "/#{path}" : path
end
def with_trailling_slash(path)
path ||= ''
(path[-1,1] == "/") ? path : "#{path}/"
end
def without_leading_slash(path)
path ||= ''
path.gsub(%r{^/+}, '')
end
def without_trailling_slash(path)
path ||= ''
(path[-1,1] == "/") ? path[0..-2] : path
end
def shell_quote(str)
self.class.shell_quote(str)
end
private
def retrieve_root_url
info = self.info
info ? info.root_url : nil
end
# Builds a shell-quoted target for the scm command line; absolute
# paths resolve against root_url, relative ones against url.
def target(path)
path ||= ''
base = path.match(/^\//) ? root_url : url
shell_quote("#{base}/#{path}".gsub(/[?<>\*]/, ''))
end
def logger
self.class.logger
end
def shellout(cmd, &block)
self.class.shellout(cmd, &block)
end
def self.logger
RAILS_DEFAULT_LOGGER
end
# Spawns cmd, yields its IO to the block, and raises CommandFailed
# when the binary cannot be found (Errno::ENOENT).
def self.shellout(cmd, &block)
logger.debug "Shelling out: #{strip_credential(cmd)}" if logger && logger.debug?
if Rails.env == 'development'
# Capture stderr when running in dev environment
cmd = "#{cmd} 2>>#{RAILS_ROOT}/log/scm.stderr.log"
end
begin
if RUBY_VERSION < '1.9'
mode = "r+"
else
mode = "r+:ASCII-8BIT"
end
IO.popen(cmd, mode) do |io|
io.close_write
block.call(io) if block_given?
end
rescue Errno::ENOENT => e
msg = strip_credential(e.message)
# The command failed, log it and re-raise
logger.error("SCM command failed, make sure that your SCM binary (eg. svn) is in PATH (#{ENV['PATH']}): #{strip_credential(cmd)}\n with: #{msg}")
raise CommandFailed.new(msg)
end
end
# Hides username/password in a given command
def self.strip_credential(cmd)
q = (Redmine::Platform.mswin? ? '"' : "'")
cmd.to_s.gsub(/(\-\-(password|username))\s+(#{q}[^#{q}]+#{q}|[^#{q}]\S+)/, '\\1 xxxx')
end
def strip_credential(cmd)
self.class.strip_credential(cmd)
end
# Converts str between encodings via Iconv; returns nil (and logs)
# on conversion failure, and passes str through when to == from.
def scm_iconv(to, from, str)
return nil if str.nil?
return str if to == from
begin
Iconv.conv(to, from, str)
rescue Iconv::Failure => err
logger.error("failed to convert from #{from} to #{to}. #{err}")
nil
end
end
end
# Collection of repository entries (files/directories).
class Entries < Array
  # Sorts by kind first ('dir' sorts before 'file'), then by name.
  def sort_by_name
    sort {|x,y|
      if x.kind == y.kind
        x.name.to_s <=> y.name.to_s
      else
        x.kind <=> y.kind
      end
    }
  end

  # Returns the last revisions of all entries, memoized.
  # Fix: the original wrote `revisions ||= ...`, which declares a fresh
  # *local* variable on every call, so the intended memoization never
  # took effect; use an instance variable instead.
  def revisions
    @revisions ||= Revisions.new(collect{|entry| entry.lastrev}.compact)
  end

  # Required since Ruby 1.9.3 as the core compact always returns an
  # instance of Array. This method follows the spec for Array#compact
  def compact
    ary = self.dup
    ary.compact!
    ary
  end
end
# Lightweight value object describing a repository: its root URL and
# the last known revision.
class Info
  attr_accessor :root_url, :lastrev

  def initialize(attributes={})
    @root_url = attributes[:root_url] if attributes[:root_url]
    @lastrev = attributes[:lastrev]
  end
end
# A single file or directory in a repository listing.
class Entry
  attr_accessor :name, :path, :kind, :size, :lastrev

  def initialize(attributes={})
    # Copy the plain attributes only when present.
    [:name, :path, :kind].each do |key|
      send("#{key}=", attributes[key]) if attributes[key]
    end
    @size = attributes[:size].to_i if attributes[:size]
    @lastrev = attributes[:lastrev]
  end

  def is_file?
    kind == 'file'
  end

  def is_dir?
    kind == 'dir'
  end

  # Text detection is delegated to the MIME type registry.
  def is_text?
    Redmine::MimeType.is_type?('text', name)
  end
end
# Collection of Revision objects.
class Revisions < Array
# Returns the most recent revision by time. Pairs where either time
# is nil compare as equal (0), so ordering among nil-timed revisions
# is unspecified; sort is not guaranteed stable here.
def latest
sort {|x,y|
unless x.time.nil? or y.time.nil?
x.time <=> y.time
else
0
end
}.last
end
end
# A single changeset/commit reported by an adapter.
class Revision
  attr_accessor :scmid, :name, :author, :time, :message, :paths, :revision, :branch
  attr_writer :identifier

  def initialize(attributes={})
    @identifier = attributes[:identifier]
    @scmid      = attributes[:scmid]
    # NOTE: name falls back to identifier as computed *before* @revision
    # is assigned below — this preserves the original assignment order.
    @name       = attributes[:name] || identifier
    @author     = attributes[:author]
    @time       = attributes[:time]
    @message    = attributes[:message] || ""
    @paths      = attributes[:paths]
    @revision   = attributes[:revision]
    @branch     = attributes[:branch]
  end

  # Returns the identifier of this revision; see also Changeset model
  def identifier
    (@identifier || revision).to_s
  end

  # Returns the readable identifier.
  def format_identifier
    identifier
  end
end
# Accumulates the result of an annotate/blame operation: parallel
# arrays of source lines and the revisions that last touched them.
class Annotate
  attr_reader :lines, :revisions

  def initialize
    @lines = []
    @revisions = []
  end

  # Records one annotated line together with its revision.
  def add_line(line, revision)
    @lines << line
    @revisions << revision
  end

  # Returns the full annotated text.
  # Fix: dropped the original's useless assignment to a local variable
  # named `content`; the join result is returned either way.
  def content
    lines.join("\n")
  end

  def empty?
    lines.empty?
  end
end
end
end
end
|
{
"pile_set_name": "Github"
}
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""PValue, PCollection: one node of a dataflow graph.
A node of a dataflow processing graph is a PValue. Currently, there is only
one type: PCollection (a potentially very large set of arbitrary values).
Once created, a PValue belongs to a pipeline and has an associated
transform (of type PTransform), which describes how the value will be
produced when the pipeline gets executed.
"""
# pytype: skip-file
from __future__ import absolute_import
import collections
import itertools
from builtins import hex
from builtins import object
from typing import TYPE_CHECKING
from typing import Any
from typing import Dict
from typing import Generic
from typing import Iterator
from typing import Optional
from typing import Sequence
from typing import TypeVar
from typing import Union
from past.builtins import unicode
from apache_beam import coders
from apache_beam import typehints
from apache_beam.internal import pickler
from apache_beam.portability import common_urns
from apache_beam.portability import python_urns
from apache_beam.portability.api import beam_runner_api_pb2
if TYPE_CHECKING:
from apache_beam.transforms import sideinputs
from apache_beam.transforms.core import ParDo
from apache_beam.transforms.core import Windowing
from apache_beam.pipeline import AppliedPTransform
from apache_beam.pipeline import Pipeline
from apache_beam.runners.pipeline_context import PipelineContext
__all__ = [
'PCollection',
'TaggedOutput',
'AsSingleton',
'AsIter',
'AsList',
'AsDict',
'EmptySideInput',
'Row',
]
T = TypeVar('T')
class PValue(object):
  """Base class for PCollection.
  Dataflow users should not construct PValue objects directly in their
  pipelines.
  A PValue has the following main characteristics:
    (1) Belongs to a pipeline. Added during object initialization.
    (2) Has a transform that can compute the value if executed.
    (3) Has a value which is meaningful if the transform was executed.
  """
  def __init__(self,
               pipeline,  # type: Pipeline
               tag=None,  # type: Optional[str]
               element_type=None,  # type: Optional[Union[type,typehints.TypeConstraint]]
               windowing=None,  # type: Optional[Windowing]
               is_bounded=True,
              ):
    """Initializes a PValue with all arguments hidden behind keyword arguments.
    Args:
      pipeline: Pipeline object for this PValue.
      tag: Tag of this PValue.
      element_type: The type of this PValue.
      windowing: Windowing strategy, if already known; otherwise derived
        later from the producing transform.
      is_bounded: Whether this value is bounded (batch) or unbounded.
    """
    self.pipeline = pipeline
    self.tag = tag
    self.element_type = element_type
    # Set once a transform is applied; identifies the producing
    # AppliedPTransform.
    self.producer = None  # type: Optional[AppliedPTransform]
    self.is_bounded = is_bounded
    # Only set the attribute when a strategy was supplied, so consumers can
    # detect "not yet computed" via hasattr(self, '_windowing').
    if windowing:
      self._windowing = windowing
  def __str__(self):
    return self._str_internal()
  def __repr__(self):
    return '<%s at %s>' % (self._str_internal(), hex(id(self)))
  def _str_internal(self):
    # Shown as e.g. "PCollection[SomeTransform/Part.None]".
    producer_label = self.producer.full_label if self.producer else None
    return "%s[%s.%s]" % (self.__class__.__name__, producer_label, self.tag)
  def apply(self, *args, **kwargs):
    """Applies a transform or callable to a PValue.
    Args:
      *args: positional arguments.
      **kwargs: keyword arguments.
    The method will insert the pvalue as the next argument following an
    optional first label and a transform/callable object. It will call the
    pipeline.apply() method with this modified argument list.
    """
    arguments = list(args)
    arguments.insert(1, self)
    return self.pipeline.apply(*arguments, **kwargs)
  def __or__(self, ptransform):
    # Enables the `pvalue | transform` pipeline syntax.
    return self.pipeline.apply(ptransform, self)
class PCollection(PValue, Generic[T]):
  """A multiple values (potentially huge) container.
  Dataflow users should not construct PCollection objects directly in their
  pipelines.
  """
  def __eq__(self, other):
    # PCollection identity is (tag, producer). Return NotImplemented for
    # other types so Python falls back to the reflected comparison /
    # identity semantics; the previous code implicitly returned None,
    # which is not a valid rich-comparison result.
    if isinstance(other, PCollection):
      return self.tag == other.tag and self.producer == other.producer
    return NotImplemented
  def __ne__(self, other):
    # TODO(BEAM-5949): Needed for Python 2 compatibility.
    return not self == other
  def __hash__(self):
    # Must stay consistent with __eq__ above.
    return hash((self.tag, self.producer))
  @property
  def windowing(self):
    # type: () -> Windowing
    """Windowing strategy; computed lazily from the producing transform."""
    if not hasattr(self, '_windowing'):
      assert self.producer is not None and self.producer.transform is not None
      self._windowing = self.producer.transform.get_windowing(
          self.producer.inputs)
    return self._windowing
  def __reduce_ex__(self, unused_version):
    # Pickling a PCollection is almost always the wrong thing to do, but we
    # can't prohibit it as it often gets implicitly picked up (e.g. as part
    # of a closure).
    return _InvalidUnpickledPCollection, ()
  @staticmethod
  def from_(pcoll):
    # type: (PValue) -> PCollection
    """Create a PCollection, using another PCollection as a starting point.
    Transfers relevant attributes.
    """
    return PCollection(pcoll.pipeline, is_bounded=pcoll.is_bounded)
  def to_runner_api(self, context):
    # type: (PipelineContext) -> beam_runner_api_pb2.PCollection
    """Encodes this PCollection as a runner API proto."""
    return beam_runner_api_pb2.PCollection(
        unique_name=self._unique_name(),
        coder_id=context.coder_id_from_element_type(self.element_type),
        is_bounded=beam_runner_api_pb2.IsBounded.BOUNDED
        if self.is_bounded else beam_runner_api_pb2.IsBounded.UNBOUNDED,
        windowing_strategy_id=context.windowing_strategies.get_id(
            self.windowing))
  def _unique_name(self):
    # type: () -> str
    # The label length prefix presumably disambiguates labels that are
    # prefixes of one another -- TODO(review): confirm.
    if self.producer:
      return '%d%s.%s' % (
          len(self.producer.full_label), self.producer.full_label, self.tag)
    else:
      return 'PCollection%s' % id(self)
  @staticmethod
  def from_runner_api(proto, context):
    # type: (beam_runner_api_pb2.PCollection, PipelineContext) -> PCollection
    """Decodes a runner API proto into a (partially initialized) PCollection."""
    # Producer and tag will be filled in later, the key point is that the same
    # object is returned for the same pcollection id.
    # We pass None for the PCollection's Pipeline to avoid a cycle during
    # deserialization. It will be populated soon after this call, in
    # Pipeline.from_runner_api(). This brief period is the only time that
    # PCollection.pipeline is allowed to be None.
    return PCollection(
        None,  # type: ignore[arg-type]
        element_type=context.element_type_from_coder_id(proto.coder_id),
        windowing=context.windowing_strategies.get_by_id(
            proto.windowing_strategy_id),
        is_bounded=proto.is_bounded == beam_runner_api_pb2.IsBounded.BOUNDED)
class _InvalidUnpickledPCollection(object):
  # Placeholder substituted for a PCollection when one is unpickled (see
  # PCollection.__reduce_ex__): a stale PCollection must not silently cross
  # process boundaries, so its pickled form reconstructs as this inert stub.
  pass
class PBegin(PValue):
  """A pipeline begin marker used as input to create/read transforms.
  The class is used internally to represent inputs to Create and Read
  transforms. This allows us to have transforms that uniformly take PValue(s)
  as inputs.
  """
  # No behavior beyond PValue; the type itself is the marker.
  pass
class PDone(PValue):
  """PDone is the output of a transform that has a trivial result such as Write.
  """
  # No behavior beyond PValue; the type itself is the marker.
  pass
class DoOutputsTuple(object):
  """An object grouping the multiple outputs of a ParDo or FlatMap transform."""
  def __init__(self,
               pipeline,  # type: Pipeline
               transform,  # type: ParDo
               tags,  # type: Sequence[str]
               main_tag  # type: Optional[str]
              ):
    self._pipeline = pipeline
    self._tags = tags
    self._main_tag = main_tag
    self._transform = transform
    # The ApplyPTransform instance for the application of the multi FlatMap
    # generating this value. The field gets initialized when a transform
    # gets applied.
    self.producer = None  # type: Optional[AppliedPTransform]
    # Dictionary of PCollections already associated with tags.
    self._pcolls = {}  # type: Dict[Optional[str], PCollection]
  def __str__(self):
    return '<%s>' % self._str_internal()
  def __repr__(self):
    return '<%s at %s>' % (self._str_internal(), hex(id(self)))
  def _str_internal(self):
    return '%s main_tag=%s tags=%s transform=%s' % (
        self.__class__.__name__, self._main_tag, self._tags, self._transform)
  def __iter__(self):
    # type: () -> Iterator[PCollection]
    """Iterates over tags returning for each call a (tag, pcollection) pair."""
    # The main output (if any) comes first, then the declared tags in order.
    if self._main_tag is not None:
      yield self[self._main_tag]
    for tag in self._tags:
      yield self[tag]
  def __getattr__(self, tag):
    # type: (str) -> PCollection
    # Attribute access is sugar for item access: results.my_tag == results['my_tag'].
    # Special methods which may be accessed before the object is
    # fully constructed (e.g. in unpickling).
    if tag[:2] == tag[-2:] == '__':
      return object.__getattr__(self, tag)  # type: ignore
    return self[tag]
  def __getitem__(self, tag):
    # type: (Union[int, str, None]) -> PCollection
    """Returns (lazily creating, if needed) the PCollection for `tag`."""
    # Accept int tags so that we can look at Partition tags with the
    # same ints that we used in the partition function.
    # TODO(gildea): Consider requiring string-based tags everywhere.
    # This will require a partition function that does not return ints.
    if isinstance(tag, int):
      tag = str(tag)
    # Internally the main output is keyed as None, whatever its public tag.
    if tag == self._main_tag:
      tag = None
    elif self._tags and tag not in self._tags:
      raise ValueError(
          "Tag '%s' is neither the main tag '%s' "
          "nor any of the tags %s" % (tag, self._main_tag, self._tags))
    # Check if we accessed this tag before.
    if tag in self._pcolls:
      return self._pcolls[tag]
    assert self.producer is not None
    if tag is not None:
      self._transform.output_tags.add(tag)
      pcoll = PCollection(self._pipeline, tag=tag, element_type=typehints.Any)
      # Transfer the producer from the DoOutputsTuple to the resulting
      # PCollection.
      pcoll.producer = self.producer.parts[0]
      # Add this as an output to both the inner ParDo and the outer _MultiParDo
      # PTransforms.
      if tag not in self.producer.parts[0].outputs:
        self.producer.parts[0].add_output(pcoll, tag)
      self.producer.add_output(pcoll, tag)
    else:
      # Main output is output of inner ParDo.
      pval = self.producer.parts[0].outputs[None]
      assert isinstance(pval,
                        PCollection), ("DoOutputsTuple should follow a ParDo.")
      pcoll = pval
    # Cache so the same PCollection object is returned for repeat lookups.
    self._pcolls[tag] = pcoll
    return pcoll
class TaggedOutput(object):
  """An object representing a tagged value.
  ParDo, Map, and FlatMap transforms can emit values on multiple outputs which
  are distinguished by string tags. The DoFn will return plain values
  if it wants to emit on the main output and TaggedOutput objects
  if it wants to emit a value on a specific tagged output.
  """
  def __init__(self, tag, value):
    # type: (str, Any) -> None
    # Tags name outputs and must be strings (unicode included for
    # Python 2 compatibility); reject anything else up front.
    if isinstance(tag, (str, unicode)):
      self.tag = tag
      self.value = value
    else:
      raise TypeError(
          'Attempting to create a TaggedOutput with non-string tag %s' %
          (tag, ))
class AsSideInput(object):
  """Marker specifying that a PCollection will be used as a side input.
  When a PCollection is supplied as a side input to a PTransform, it is
  necessary to indicate how the PCollection should be made available
  as a PTransform side argument (e.g. in the form of an iterable, mapping,
  or single value). This class is the superclass of all the various
  options, and should not be instantiated directly. (See instead AsSingleton,
  AsIter, etc.)
  """
  def __init__(self, pcoll):
    # type: (PCollection) -> None
    # Function-scope import, presumably to avoid a circular module
    # dependency at load time -- TODO(review): confirm.
    from apache_beam.transforms import sideinputs
    self.pvalue = pcoll
    # Maps a main-input window to the corresponding side-input window.
    self._window_mapping_fn = sideinputs.default_window_mapping_fn(
        pcoll.windowing.windowfn)
  def _view_options(self):
    """Internal options corresponding to specific view.
    Intended for internal use by runner implementations.
    Returns:
      Tuple of options for the given view.
    """
    return {
        'window_mapping_fn': self._window_mapping_fn,
        'coder': self._windowed_coder(),
    }
  @property
  def element_type(self):
    # Subclasses narrow this (e.g. AsIter reports an iterable of the
    # source element type); the base class makes no assumption.
    return typehints.Any
  def _windowed_coder(self):
    # Coder for windowed values of the source PCollection; falls back to
    # this view's element_type when the PCollection declares none.
    return coders.WindowedValueCoder(
        coders.registry.get_coder(
            self.pvalue.element_type or self.element_type),
        self.pvalue.windowing.windowfn.get_window_coder())
  # TODO(robertwb): Get rid of _from_runtime_iterable and _view_options
  # in favor of _side_input_data().
  def _side_input_data(self):
    # type: () -> SideInputData
    view_options = self._view_options()
    from_runtime_iterable = type(self)._from_runtime_iterable
    return SideInputData(
        common_urns.side_inputs.ITERABLE.urn,
        self._window_mapping_fn,
        lambda iterable: from_runtime_iterable(iterable, view_options))
  def to_runner_api(self, context):
    # type: (PipelineContext) -> beam_runner_api_pb2.SideInput
    """Serializes this side input to its runner API proto."""
    return self._side_input_data().to_runner_api(context)
  @staticmethod
  def from_runner_api(proto,  # type: beam_runner_api_pb2.SideInput
                      context  # type: PipelineContext
                     ):
    # type: (...) -> _UnpickledSideInput
    """Deserializes a runner API proto into an _UnpickledSideInput."""
    return _UnpickledSideInput(SideInputData.from_runner_api(proto, context))
  @staticmethod
  def _from_runtime_iterable(it, options):
    # Subclass hook: materialize the runtime iterable into this view's
    # Python value (list, dict, singleton, ...).
    raise NotImplementedError
  def requires_keyed_input(self):
    # Overridden to return True by views that need KV-structured input
    # (see AsMultiMap).
    return False
class _UnpickledSideInput(AsSideInput):
  """Side input reconstructed from a deserialized SideInputData.
  Unlike other AsSideInput subclasses it is not built from a live
  PCollection; it only carries the already-unpickled view and
  window-mapping functions.
  """
  def __init__(self, side_input_data):
    # type: (SideInputData) -> None
    # Deliberately does not call AsSideInput.__init__, which would need a
    # PCollection to derive the window mapping from.
    self._data = side_input_data
    self._window_mapping_fn = side_input_data.window_mapping_fn
  @staticmethod
  def _from_runtime_iterable(it, options):
    # The view_fn shipped inside the SideInputData does the materialization.
    return options['data'].view_fn(it)
  def _view_options(self):
    return {
        'data': self._data,
        # For non-fn-api runners.
        'window_mapping_fn': self._data.window_mapping_fn,
        'coder': self._windowed_coder(),
    }
  def _side_input_data(self):
    return self._data
class SideInputData(object):
  """All of the data about a side input except for the bound PCollection."""
  def __init__(self,
               access_pattern,  # type: str
               window_mapping_fn,  # type: sideinputs.WindowMappingFn
               view_fn
              ):
    # URN describing how the runner accesses the data (iterable/multimap).
    self.access_pattern = access_pattern
    # Maps a main-input window to the corresponding side-input window.
    self.window_mapping_fn = window_mapping_fn
    # Materializes the raw runtime iterable into the view's Python value.
    self.view_fn = view_fn
  def to_runner_api(self, context):
    # type: (PipelineContext) -> beam_runner_api_pb2.SideInput
    """Encodes this side input as a runner API proto.
    Both functions are shipped as pickled payloads.
    """
    return beam_runner_api_pb2.SideInput(
        access_pattern=beam_runner_api_pb2.FunctionSpec(
            urn=self.access_pattern),
        view_fn=beam_runner_api_pb2.FunctionSpec(
            urn=python_urns.PICKLED_VIEWFN,
            payload=pickler.dumps(self.view_fn)),
        window_mapping_fn=beam_runner_api_pb2.FunctionSpec(
            urn=python_urns.PICKLED_WINDOW_MAPPING_FN,
            payload=pickler.dumps(self.window_mapping_fn)))
  @staticmethod
  def from_runner_api(proto, unused_context):
    # type: (beam_runner_api_pb2.SideInput, PipelineContext) -> SideInputData
    """Decodes a proto produced by to_runner_api.
    Only pickled view/window-mapping payloads are supported.
    """
    assert proto.view_fn.urn == python_urns.PICKLED_VIEWFN
    assert (
        proto.window_mapping_fn.urn == python_urns.PICKLED_WINDOW_MAPPING_FN)
    return SideInputData(
        proto.access_pattern.urn,
        pickler.loads(proto.window_mapping_fn.payload),
        pickler.loads(proto.view_fn.payload))
class AsSingleton(AsSideInput):
  """Marker specifying that an entire PCollection is to be used as a side input.
  When a PCollection is supplied as a side input to a PTransform, it is
  necessary to indicate whether the entire PCollection should be made available
  as a PTransform side argument (in the form of an iterable), or whether just
  one value should be pulled from the PCollection and supplied as the side
  argument (as an ordinary value).
  Wrapping a PCollection side input argument to a PTransform in this container
  (e.g., data.apply('label', MyPTransform(), AsSingleton(my_side_input) )
  selects the latter behavior.
  The input PCollection must contain exactly one value per window, unless a
  default is given, in which case it may be empty.
  """
  # Sentinel distinguishing "no default supplied" from a default of None.
  _NO_DEFAULT = object()
  def __init__(self, pcoll, default_value=_NO_DEFAULT):
    # type: (PCollection, Any) -> None
    super(AsSingleton, self).__init__(pcoll)
    self.default_value = default_value
  def __repr__(self):
    return 'AsSingleton(%s)' % self.pvalue
  def _view_options(self):
    base = super(AsSingleton, self)._view_options()
    # Compare against the sentinel by identity, not equality: user-supplied
    # defaults may override __eq__/__ne__ (or, like numpy arrays, return
    # non-boolean results), which made the previous `!=` check unreliable.
    if self.default_value is not AsSingleton._NO_DEFAULT:
      return dict(base, default=self.default_value)
    return base
  @staticmethod
  def _from_runtime_iterable(it, options):
    # Pull at most two elements: enough to distinguish empty / singleton /
    # too-many without consuming a potentially large iterable.
    head = list(itertools.islice(it, 2))
    if not head:
      return options.get('default', EmptySideInput())
    elif len(head) == 1:
      return head[0]
    raise ValueError(
        'PCollection of size %d with more than one element accessed as a '
        'singleton view. First two elements encountered are "%s", "%s".' %
        (len(head), str(head[0]), str(head[1])))
  @property
  def element_type(self):
    # The singleton value has the source PCollection's element type.
    return self.pvalue.element_type
class AsIter(AsSideInput):
  """Marker specifying that an entire PCollection is to be used as a side input.
  When a PCollection is supplied as a side input to a PTransform, it is
  necessary to indicate whether the entire PCollection should be made available
  as a PTransform side argument (in the form of an iterable), or whether just
  one value should be pulled from the PCollection and supplied as the side
  argument (as an ordinary value).
  Wrapping a PCollection side input argument to a PTransform in this container
  (e.g., data.apply('label', MyPTransform(), AsIter(my_side_input) ) selects the
  former behavior.
  """
  def __repr__(self):
    return 'AsIter(%s)' % self.pvalue
  @staticmethod
  def _from_runtime_iterable(it, options):
    # The iterable is handed to the DoFn as-is (not materialized).
    return it
  def _side_input_data(self):
    # type: () -> SideInputData
    return SideInputData(
        common_urns.side_inputs.ITERABLE.urn,
        self._window_mapping_fn,
        lambda iterable: iterable)
  @property
  def element_type(self):
    # The DoFn sees an iterable over the source PCollection's elements.
    return typehints.Iterable[self.pvalue.element_type]
class AsList(AsSideInput):
  """Marker specifying that an entire PCollection is to be used as a side input.
  Intended for use in side-argument specification---the same places where
  AsSingleton and AsIter are used, but forces materialization of this
  PCollection as a list.
  Args:
    pcoll: Input pcollection.
  Returns:
    An AsList-wrapper around a PCollection whose one element is a list
    containing all elements in pcoll.
  """
  @staticmethod
  def _from_runtime_iterable(it, options):
    # Eagerly materializes the whole side input into memory.
    return list(it)
  def _side_input_data(self):
    # type: () -> SideInputData
    return SideInputData(
        common_urns.side_inputs.ITERABLE.urn, self._window_mapping_fn, list)
class AsDict(AsSideInput):
  """Marker specifying a PCollection to be used as an indexable side input.
  Intended for use in side-argument specification---the same places where
  AsSingleton and AsIter are used, but returns an interface that allows
  key lookup.
  Args:
    pcoll: Input pcollection. All elements should be key-value pairs (i.e.
       2-tuples) with unique keys.
  Returns:
    An AsDict-wrapper around a PCollection whose one element is a dict with
      entries for uniquely-keyed pairs in pcoll.
  """
  @staticmethod
  def _from_runtime_iterable(it, options):
    # dict() semantics: with duplicate keys the last pair seen wins.
    return dict(it)
  def _side_input_data(self):
    # type: () -> SideInputData
    return SideInputData(
        common_urns.side_inputs.ITERABLE.urn, self._window_mapping_fn, dict)
class AsMultiMap(AsSideInput):
  """Marker specifying a PCollection to be used as an indexable side input.
  Similar to AsDict, but multiple values may be associated per key, and
  the keys are fetched lazily rather than all having to fit in memory.
  Intended for use in side-argument specification---the same places where
  AsSingleton and AsIter are used, but returns an interface that allows
  key lookup.
  """
  @staticmethod
  def _from_runtime_iterable(it, options):
    # Legacy implementation.
    # Eagerly groups every value under its key (unlike the lazy
    # MULTIMAP access pattern used by _side_input_data below).
    result = collections.defaultdict(list)
    for k, v in it:
      result[k].append(v)
    return result
  def _side_input_data(self):
    # type: () -> SideInputData
    return SideInputData(
        common_urns.side_inputs.MULTIMAP.urn,
        self._window_mapping_fn,
        lambda x: x)
  def requires_keyed_input(self):
    # Multimap lookup requires the input elements to be (key, value) pairs.
    return True
class EmptySideInput(object):
  """Value indicating when a singleton side input was empty.
  If a PCollection was furnished as a singleton side input to a PTransform, and
  that PCollection was empty, then this value is supplied to the DoFn in the
  place where a value from a non-empty PCollection would have gone. This alerts
  the DoFn that the side input PCollection was empty. Users may want to check
  whether side input values are EmptySideInput, but they will very likely never
  want to create new instances of this class themselves.
  """
  # The type itself is the signal; instances carry no state.
  pass
class Row(object):
  """A dynamic schema'd row object.
  This objects attributes are initialized from the keywords passed into its
  constructor, e.g. Row(x=3, y=4) will create a Row with two attributes x and y.
  More importantly, when a Row object is returned from a `Map`, `FlatMap`, or
  `DoFn` type inference is able to deduce the schema of the resulting
  PCollection, e.g.
      pc | beam.Map(lambda x: Row(x=x, y=0.5 * x))
  when applied to a PCollection of ints will produce a PCollection with schema
  `(x=int, y=float)`.
  """
  def __init__(self, **kwargs):
    self.__dict__.update(kwargs)
  def as_dict(self):
    # Returns a copy; mutating it does not affect this Row.
    return dict(self.__dict__)
  def __iter__(self):
    # Yield values in attribute-name order for a deterministic field order.
    for _, value in sorted(self.__dict__.items()):
      yield value
  def __repr__(self):
    return 'Row(%s)' % ', '.join(
        '%s=%r' % kv for kv in sorted(self.__dict__.items()))
  def __hash__(self):
    # Hash the sorted (name, value) pairs themselves, consistent with
    # __eq__. The previous implementation hashed type(sorted(...)) -- i.e.
    # the `list` type object -- so every Row hashed to the same constant.
    return hash(tuple(sorted(self.__dict__.items())))
  def __eq__(self, other):
    return type(self) == type(other) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not self == other
  def __reduce__(self):
    # Pickle via _make_Row with sorted (name, value) pairs so the pickled
    # form is canonical regardless of construction order.
    return _make_Row, tuple(sorted(self.__dict__.items()))
def _make_Row(*items):
  # Reconstructor used by Row.__reduce__: rebuilds a Row from the
  # (name, value) pairs emitted at pickling time.
  return Row(**{name: value for name, value in items})
|
{
"pile_set_name": "Github"
}
|
# -*- coding: utf-8 -*-
import torch
from itertools import accumulate
from onmt.constants import SubwordMarker
def make_batch_align_matrix(index_tensor, size=None, normalize=False):
    """
    Convert a sparse index_tensor into a batch of alignment matrix,
    with row normalize to the sum of 1 if set normalize.

    Args:
        index_tensor (LongTensor): ``(N, 3)`` of [batch_id, tgt_id, src_id]
        size (List[int]): Size of the sparse tensor.
        normalize (bool): if normalize the 2nd dim of resulting tensor.
    """
    device = index_tensor.device
    n_fill = index_tensor.size(0)
    # One unit of mass per recorded (batch, tgt, src) alignment point.
    values = torch.ones([n_fill], dtype=torch.float)
    dense = torch.sparse_coo_tensor(
        index_tensor.t(), values, size=size, device=device).to_dense()
    if normalize:
        # Per-target-row totals over the src dimension.
        row_sum = dense.sum(-1, keepdim=True)
        # Clamp zero rows up to 1 so empty rows don't divide by zero.
        torch.nn.functional.threshold(row_sum, 1, 1, inplace=True)
        dense.div_(row_sum)
    return dense
def extract_alignment(align_matrix, tgt_mask, src_lens, n_best):
    """
    Extract a batched align_matrix into its src indice alignment lists,
    with tgt_mask to filter out invalid tgt position as EOS/PAD.
    BOS already excluded from tgt_mask in order to match prediction.

    Args:
        align_matrix (Tensor): ``(B, tgt_len, src_len)``,
            attention head normalized by Softmax(dim=-1)
        tgt_mask (BoolTensor): ``(B, tgt_len)``, True for EOS, PAD.
        src_lens (LongTensor): ``(B,)``, containing valid src length
        n_best (int): a value indicating number of parallel translation.
        * B: denote flattened batch as B = batch_size * n_best.

    Returns:
        alignments (List[List[FloatTensor|None]]): ``(batch_size, n_best,)``,
         containing valid alignment matrix (or None if blank prediction)
         for each translation.
    """
    batch_size_n_best = align_matrix.size(0)
    assert batch_size_n_best % n_best == 0
    alignments = [[] for _ in range(batch_size_n_best // n_best)]
    # treat alignment matrix one by one as each have different lengths
    for i, (am_b, tgt_mask_b, src_len) in enumerate(
            zip(align_matrix, tgt_mask, src_lens)):
        # Invert the mask: True now marks real target tokens.
        valid_tgt = ~tgt_mask_b
        valid_tgt_len = valid_tgt.sum()
        if valid_tgt_len == 0:
            # No alignment if not exist valid tgt token
            valid_alignment = None
        else:
            # get valid alignment (sub-matrix of the full padded alignment
            # matrix): select the rows of real target tokens, then truncate
            # the columns to the valid source length.
            am_valid_tgt = am_b.masked_select(valid_tgt.unsqueeze(-1)) \
                .view(valid_tgt_len, -1)
            valid_alignment = am_valid_tgt[:, :src_len]  # only keep valid src
        # The flat batch is grouped n_best-at-a-time per source sentence.
        alignments[i // n_best].append(valid_alignment)
    return alignments
def build_align_pharaoh(valid_alignment):
    """Convert a valid alignment matrix to 0-based "src-tgt" Pharaoh pairs.

    Returns an empty list when valid_alignment is None (blank prediction).
    """
    pairs = []
    if isinstance(valid_alignment, torch.Tensor):
        # For each target position, take the source position with the
        # highest attention weight.
        best_src = valid_alignment.argmax(dim=-1).tolist()
        pairs = ["{}-{}".format(src_id, tgt_id)
                 for tgt_id, src_id in enumerate(best_src)]
        # Two stable sorts: secondary key (tgt_id) first, then primary
        # key (src_id).
        pairs.sort(key=lambda p: int(p.split('-')[-1]))
        pairs.sort(key=lambda p: int(p.split('-')[0]))
    return pairs
def to_word_align(src, tgt, subword_align, m_src='joiner', m_tgt='joiner'):
    """Convert subword alignment to word alignment.

    Args:
        src (string): tokenized sentence in source language.
        tgt (string): tokenized sentence in target language.
        subword_align (string): align_pharaoh correspond to src-tgt.
        m_src (string): tokenization mode used in src,
            can be ["joiner", "spacer"].
        m_tgt (string): tokenization mode used in tgt,
            can be ["joiner", "spacer"].

    Returns:
        word_align (string): converted alignments correspond to
            detokenized src-tgt.
    """
    assert m_src in ["joiner", "spacer"], "Invalid value for argument m_src!"
    assert m_tgt in ["joiner", "spacer"], "Invalid value for argument m_tgt!"
    src, tgt = src.strip().split(), tgt.strip().split()
    subword_align = {(int(a), int(b)) for a, b in (x.split("-")
                     for x in subword_align.split())}
    src_map = (subword_map_by_spacer(src) if m_src == 'spacer'
               else subword_map_by_joiner(src))
    # BUG FIX: the target map must be built from tgt. Both branches
    # previously passed src, producing wrong word alignments whenever the
    # source and target tokenizations differ.
    tgt_map = (subword_map_by_spacer(tgt) if m_tgt == 'spacer'
               else subword_map_by_joiner(tgt))
    word_align = list({"{}-{}".format(src_map[a], tgt_map[b])
                       for a, b in subword_align})
    word_align.sort(key=lambda x: int(x.split('-')[-1]))  # sort by tgt_id
    word_align.sort(key=lambda x: int(x.split('-')[0]))  # sort by src_id
    return " ".join(word_align)
def subword_map_by_joiner(subwords, marker=SubwordMarker.JOINER):
    """Return word id for each subword token (annotate by joiner)."""
    # flags[i] == 1 means subwords[i] is glued to subwords[i + 1],
    # i.e. both belong to the same word.
    flags = [0] * len(subwords)
    for i, tok in enumerate(subwords):
        if tok.endswith(marker):
            flags[i] = 1
        if tok.startswith(marker):
            # A leading joiner glues this token to the previous one; that
            # previous token must exist and not already carry a join flag.
            assert i >= 1 and flags[i-1] != 1, \
                "Sentence `{}` not correct!".format(" ".join(subwords))
            flags[i-1] = 1
    # Running count of joins seen before each position; subtracting it
    # from the token index gives the word index.
    joins_before = list(accumulate([0] + flags[:-1]))
    return [i - n_joins for i, n_joins in enumerate(joins_before)]
def subword_map_by_spacer(subwords, marker=SubwordMarker.SPACER):
    """Return word id for each subword token (annotate by spacer)."""
    # Every token containing the spacer starts a new word; a running sum
    # therefore yields a word id per token.
    word_group = list(accumulate(int(marker in tok) for tok in subwords))
    if word_group[0] == 1:  # when dummy prefix is set
        # Shift ids back so the first word is 0.
        word_group = [wid - 1 for wid in word_group]
    return word_group
|
{
"pile_set_name": "Github"
}
|
# Project-wide Gradle settings.
# IDE (e.g. Android Studio) users:
# Gradle settings configured through the IDE *will override*
# any settings specified in this file.
# For more details on how to configure your build environment visit
# http://www.gradle.org/docs/current/userguide/build_environment.html
# Specifies the JVM arguments used for the daemon process.
# The setting is particularly useful for tweaking memory settings.
org.gradle.jvmargs=-Xmx1536m
# When configured, Gradle will run in incubating parallel mode.
# This option should only be used with decoupled projects. For more details, visit
# http://www.gradle.org/docs/current/userguide/multi_project_builds.html#sec:decoupled_projects
# org.gradle.parallel=true
|
{
"pile_set_name": "Github"
}
|
(function() {
// Stateline puts default collections of Landline maps together for you
// Requires jQuery and Raphael
var MapCanvas = Landline.Stateline = function(container, locality) {
this.paper = {};
this.events = {};
this.attrs = {};
this.lookup = {};
this.locality = locality;
this.container = $(container);
this.container.css("position", "relative");
this.container.height(this.container.width() * 0.70);
this.setupHtml();
var that = this;
$(window).resize(function() {
that.container.height(that.container.width() * 0.70);
that.setupHtml();
});
};
MapCanvas.CONTAINERS = {
"contiguous" : {el : "landline_contiguous"},
"alaska" : {el : "landline_alaska"},
"hawaii" : {el : "landline_hawaii"},
"dc" : {el : "landline_dc"}
};
MapCanvas.prototype.on = function(evt, cb) {
this.events[evt] = cb;
};
MapCanvas.prototype.style = function(fips, key, val) {
this.attrs[fips] = (this.attrs[fips] || {});
this.attrs[fips][key] = val;
};
MapCanvas.prototype.reLayout = function() {
for (container in MapCanvas.CONTAINERS) {
for (fips in this.attrs) {
var path = this.lookup[fips];
if (path) {
_(this.attrs[fips]).each(function(v, k) {
path.attr(k, v);
});
}
}
}
};
MapCanvas.prototype.setupHtml = function() {
var that = this;
var containers = MapCanvas.CONTAINERS;
containers["contiguous"] = _.extend(containers["contiguous"], {
width : this.container.width(),
height : this.container.height() * 0.85,
top : "0%",
left : 0.0
});
containers["alaska"] = _.extend(containers["alaska"], {
width : this.container.width() * 0.25,
height : this.container.height() * 0.27,
top : "63%",
left : 0.0
});
containers["hawaii"] = _.extend(containers["hawaii"], {
width : this.container.width() * 0.15,
height : this.container.height() * 0.21,
top : "70%",
left : 0.25
});
containers["dc"] = _.extend(containers["dc"], {
width : this.container.width() * 0.02,
height : this.container.height() * 0.08,
top : "34.5%",
left : 0.915
});
var setPositions = function(container) {
$("#" + containers[container].el)
.width(containers[container].width)
.height(containers[container].height)
.css("top", containers[container].top)
// calculate how many pixels left the % is,
// so Hawaii doesn't move around when the window is resized
.css("margin-left", that.container.width() * containers[container].left)
.css("position", "absolute");
};
for (container in containers) {
if (this.paper[container]) {
setPositions(container);
this.paper[container].setSize(containers[container].width, containers[container].height);
} else {
this.container.append("<div id='" + containers[container].el + "'></div>");
setPositions(container);
this.paper[container] = Raphael(containers[container].el)
this.paper[container].setViewBox(0, 0, containers[container].width, containers[container].height);
// draw the line for DC
if (container === "contiguous") {
var dcLineCoordPcts = [[0.88, 0.45], [0.91, 0.47]];
var dcLineCoordPixels = _(dcLineCoordPcts).map(function(pair) { return [containers[container].width * pair[0], containers[container].height * pair[1]] });
this.paper[container].path(["M", dcLineCoordPixels[0][0], dcLineCoordPixels[0][1], "L", dcLineCoordPixels[1][0], dcLineCoordPixels[1][1]] ).attr("stroke", "#cecece").attr("stroke-width", "0.5");
}
}
}
};
MapCanvas.prototype.createMap = function() {
var data;
var that = this;
var containers = MapCanvas.CONTAINERS;
if (this.locality === "states") data = window.StatelineStates;
if (this.locality === "counties") data = window.StatelineCounties;
for (container in containers) {
var localityMap = new Landline(data[container]).all();
localityMap.asSVG(containers[container].width, containers[container].height, function(svg, it) {
var path = that.paper[container].path(svg);
var fips = it.fips = it.get("c") ? it.get("s") + it.get("c") : it.get("s");
that.lookup[fips] = path;
path.attr("fill", "#cecece")
.attr('stroke-width', 0.5)
.attr('stroke', '#ffffff')
.attr('stroke-linejoin', 'bevel');
if (that.attrs[fips]) {
_(that.attrs[fips]).each(function(v, k) {
path.attr(k, v)
});
}
_(that.events).each(function(func, evt) {
path[evt](function(e) {
func(e, path, it);
});
});
});
}
};
}).call(this);
|
{
"pile_set_name": "Github"
}
|
# from http://www.johnvinyard.com/blog/?p=268
import numpy as np
from numpy.lib.stride_tricks import as_strided as ast
def norm_shape(shape):
    '''
    Normalize numpy array shapes so they're always expressed as a tuple,
    even for one-dimensional shapes.

    Parameters
        shape - an int, or a tuple of ints

    Returns
        a shape tuple
    '''
    try:
        i = int(shape)
        return (i,)
    except TypeError:
        # shape was not a number
        pass
    try:
        t = tuple(shape)
        return t
    except TypeError:
        # shape was not iterable
        pass
    raise TypeError('shape must be an int, or a tuple of ints')


def sliding_window(a, ws, ss=None, flatten=True):
    '''
    Return a sliding window over a in any number of dimensions

    Parameters:
        a  - an n-dimensional numpy array
        ws - an int (a is 1D) or tuple (a is 2D or greater) representing
             the size of each dimension of the window
        ss - an int (a is 1D) or tuple (a is 2D or greater) representing
             the amount to slide the window in each dimension. If not
             specified, it defaults to ws.
        flatten - if True, all slices are flattened, otherwise, there is an
             extra dimension for each dimension of the input.

    Returns
        an array containing each n-dimensional window from a
    '''
    if ss is None:
        # ss was not provided. the windows will not overlap in any direction.
        ss = ws
    ws = norm_shape(ws)
    ss = norm_shape(ss)
    # convert ws, ss, and a.shape to numpy arrays so that we can do math in
    # every dimension at once.
    ws = np.array(ws)
    ss = np.array(ss)
    shape = np.array(a.shape)
    # ensure that ws, ss, and a.shape all have the same number of dimensions
    ls = [len(shape), len(ws), len(ss)]
    if len(set(ls)) != 1:
        raise ValueError(
            'a.shape, ws and ss must all have the same length. They were %s'
            % str(ls))
    # ensure that ws is smaller than a in every dimension
    if np.any(ws > shape):
        raise ValueError(
            'ws cannot be larger than a in any dimension. '
            'a.shape was %s and ws was %s' % (str(a.shape), str(ws)))
    # how many slices will there be in each dimension?
    newshape = norm_shape(((shape - ws) // ss) + 1)
    # the shape of the strided array will be the number of slices in each
    # dimension plus the shape of the window (tuple addition)
    newshape += norm_shape(ws)
    # the strides tuple will be the array's strides multiplied by step size,
    # plus the array's strides (tuple addition)
    newstrides = norm_shape(np.array(a.strides) * ss) + a.strides
    strided = ast(a, shape=newshape, strides=newstrides)
    if not flatten:
        return strided
    # Collapse strided so that it has one more dimension than the window.
    # I.e., the new array is a flat list of slices.
    meat = len(ws) if ws.shape else 0
    # BUG FIX: np.product is deprecated (removed in NumPy 2.0); use np.prod.
    firstdim = (int(np.prod(newshape[:-meat])),) if ws.shape else ()
    dim = firstdim + (newshape[-meat:])
    # remove any dimensions with size 1
    # BUG FIX: on Python 3 filter() returns a lazy iterator, which
    # reshape() cannot consume; materialize the kept sizes as a tuple.
    dim = tuple(i for i in dim if i != 1)
    return strided.reshape(dim)
|
{
"pile_set_name": "Github"
}
|
//
// Reaktion - An audio reactive animation toolkit for Unity.
//
// Copyright (C) 2013, 2014 Keijiro Takahashi
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
using UnityEngine;
using UnityEditor;
using System.Collections;
namespace Reaktion {
// Custom property drawer for TransformElement.
[CustomPropertyDrawer(typeof(ConstantMotion.TransformElement))]
class ConstantMotionElementDrawer : PropertyDrawer
{
    // Labels and values for TransformMode.
    static GUIContent[] modeLabels = {
        new GUIContent("Off"),
        new GUIContent("X Axis"),
        new GUIContent("Y Axis"),
        new GUIContent("Z Axis"),
        new GUIContent("Arbitrary Vector"),
        new GUIContent("Random Vector")
    };
    static int[] modeValues = { 0, 1, 2, 3, 4, 5 };

    // How far the UI should be expanded for the current mode:
    // 0 = mode selector only, 1 = velocity + randomness rows, 2 = vector row as well.
    static int GetExpansionLevel(SerializedProperty property)
    {
        var mode = property.FindPropertyRelative("mode");
        // Fully expand if it has different values.
        if (mode.hasMultipleDifferentValues) return 2;
        // "Off"
        if (mode.enumValueIndex == 0) return 0;
        // Fully expand if it's in Arbitrary mode.
        if (mode.enumValueIndex == (int)ConstantMotion.TransformMode.Arbitrary) return 2;
        // Expand one level.
        return 1;
    }

    // Total height: one line per row plus spacing between rows.
    public override float GetPropertyHeight(SerializedProperty property, GUIContent label)
    {
        // Expansion level 0/1/2 maps to 1/3/4 rows.
        int rows = new int[]{1, 3, 4}[GetExpansionLevel(property)];
        return EditorGUIUtility.singleLineHeight * rows +
            EditorGUIUtility.standardVerticalSpacing * (rows - 1);
    }

    public override void OnGUI(Rect position, SerializedProperty property, GUIContent label)
    {
        EditorGUI.BeginProperty(position, label, property);
        position.height = EditorGUIUtility.singleLineHeight;
        var rowHeight = EditorGUIUtility.singleLineHeight + EditorGUIUtility.standardVerticalSpacing;
        // Transform mode selector drop-down.
        EditorGUI.IntPopup(position, property.FindPropertyRelative("mode"), modeLabels, modeValues, label);
        position.y += rowHeight;
        var expansion = GetExpansionLevel(property);
        if (expansion > 0)
        {
            // Insert an indent.
            position.x += 16;
            position.width -= 16;
            EditorGUIUtility.labelWidth -= 16;
            if (expansion == 2)
            {
                // Vector box.
                EditorGUI.PropertyField(position, property.FindPropertyRelative("arbitraryVector"), GUIContent.none);
                position.y += rowHeight;
            }
            // Velocity box.
            EditorGUI.PropertyField(position, property.FindPropertyRelative("velocity"), new GUIContent("Velocity"));
            // Reuse the precomputed row height (was recomputed inline here).
            position.y += rowHeight;
            // Randomness slider.
            EditorGUI.Slider(position, property.FindPropertyRelative("randomness"), 0, 1, new GUIContent("Randomness"));
        }
        EditorGUI.EndProperty();
    }
}
[CustomEditor(typeof(ConstantMotion)), CanEditMultipleObjects]
public class ConstantMotionEditor : Editor
{
    // Cached serialized properties, resolved once when the inspector is enabled.
    SerializedProperty _position;
    SerializedProperty _rotation;
    SerializedProperty _useLocalCoordinate;

    // Pre-built label so it isn't re-allocated on every repaint.
    GUIContent _localCoordinateLabel;

    void OnEnable()
    {
        _position = serializedObject.FindProperty("position");
        _rotation = serializedObject.FindProperty("rotation");
        _useLocalCoordinate = serializedObject.FindProperty("useLocalCoordinate");
        _localCoordinateLabel = new GUIContent("Local Coordinate");
    }

    public override void OnInspectorGUI()
    {
        serializedObject.Update();

        EditorGUILayout.PropertyField(_position);
        EditorGUILayout.PropertyField(_rotation);
        EditorGUILayout.PropertyField(_useLocalCoordinate, _localCoordinateLabel);

        serializedObject.ApplyModifiedProperties();
    }
}
} // namespace Reaktion
|
{
"pile_set_name": "Github"
}
|
/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
/*
* Copyright (c) 2005,2006,2007 INRIA
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: Mathieu Lacage <mathieu.lacage@sophia.inria.fr>
* Contributions: Timo Bingmann <timo.bingmann@student.kit.edu>
* Contributions: Tom Hewer <tomhewer@mac.com> for Two Ray Ground Model
* Pavel Boyko <boyko@iitp.ru> for matrix
*/
#include "propagation-loss-model.h"
#include "ns3/log.h"
#include "ns3/mobility-model.h"
#include "ns3/boolean.h"
#include "ns3/double.h"
#include "ns3/string.h"
#include "ns3/pointer.h"
#include <cmath>
namespace ns3 {
NS_LOG_COMPONENT_DEFINE ("PropagationLossModel");
// ------------------------------------------------------------------------- //
NS_OBJECT_ENSURE_REGISTERED (PropagationLossModel);
// Register the abstract base type; concrete models set their own attributes.
TypeId
PropagationLossModel::GetTypeId (void)
{
static TypeId tid = TypeId ("ns3::PropagationLossModel")
.SetParent<Object> ()
.SetGroupName ("Propagation")
;
return tid;
}
// Start with an empty chain (no next model).
PropagationLossModel::PropagationLossModel ()
: m_next (0)
{
}
PropagationLossModel::~PropagationLossModel ()
{
}
// Append/replace the next model in the loss-model chain.
void
PropagationLossModel::SetNext (Ptr<PropagationLossModel> next)
{
m_next = next;
}
// Returns the next model in the chain (null Ptr if this is the last one).
Ptr<PropagationLossModel>
PropagationLossModel::GetNext ()
{
return m_next;
}
// Apply this model's loss, then recursively apply every chained model.
// txPowerDbm is the transmit power in dBm; returns the resulting rx power in dBm.
double
PropagationLossModel::CalcRxPower (double txPowerDbm,
Ptr<MobilityModel> a,
Ptr<MobilityModel> b) const
{
double self = DoCalcRxPower (txPowerDbm, a, b);
if (m_next != 0)
{
self = m_next->CalcRxPower (self, a, b);
}
return self;
}
// Assign RNG streams to this model and every chained model; returns the
// total number of streams consumed by the whole chain.
int64_t
PropagationLossModel::AssignStreams (int64_t stream)
{
int64_t currentStream = stream;
currentStream += DoAssignStreams (stream);
if (m_next != 0)
{
currentStream += m_next->AssignStreams (currentStream);
}
return (currentStream - stream);
}
// ------------------------------------------------------------------------- //
NS_OBJECT_ENSURE_REGISTERED (RandomPropagationLossModel);
// Register the type and expose the random variable used to draw the loss.
TypeId
RandomPropagationLossModel::GetTypeId (void)
{
static TypeId tid = TypeId ("ns3::RandomPropagationLossModel")
.SetParent<PropagationLossModel> ()
.SetGroupName ("Propagation")
.AddConstructor<RandomPropagationLossModel> ()
.AddAttribute ("Variable", "The random variable used to pick a loss every time CalcRxPower is invoked.",
StringValue ("ns3::ConstantRandomVariable[Constant=1.0]"),
MakePointerAccessor (&RandomPropagationLossModel::m_variable),
MakePointerChecker<RandomVariableStream> ())
;
return tid;
}
RandomPropagationLossModel::RandomPropagationLossModel ()
: PropagationLossModel ()
{
}
RandomPropagationLossModel::~RandomPropagationLossModel ()
{
}
// Subtract a freshly drawn random attenuation (dB) from the tx power;
// node positions are ignored by this model.
double
RandomPropagationLossModel::DoCalcRxPower (double txPowerDbm,
Ptr<MobilityModel> a,
Ptr<MobilityModel> b) const
{
double rxc = -m_variable->GetValue ();
NS_LOG_DEBUG ("attenuation coefficient="<<rxc<<"Db");
return txPowerDbm + rxc;
}
// One stream: the attenuation random variable.
int64_t
RandomPropagationLossModel::DoAssignStreams (int64_t stream)
{
m_variable->SetStream (stream);
return 1;
}
// ------------------------------------------------------------------------- //
NS_OBJECT_ENSURE_REGISTERED (FriisPropagationLossModel);
// Register the type with its Frequency / SystemLoss / MinLoss attributes.
TypeId
FriisPropagationLossModel::GetTypeId (void)
{
static TypeId tid = TypeId ("ns3::FriisPropagationLossModel")
.SetParent<PropagationLossModel> ()
.SetGroupName ("Propagation")
.AddConstructor<FriisPropagationLossModel> ()
.AddAttribute ("Frequency",
"The carrier frequency (in Hz) at which propagation occurs (default is 5.15 GHz).",
DoubleValue (5.150e9),
MakeDoubleAccessor (&FriisPropagationLossModel::SetFrequency,
&FriisPropagationLossModel::GetFrequency),
MakeDoubleChecker<double> ())
.AddAttribute ("SystemLoss", "The system loss",
DoubleValue (1.0),
MakeDoubleAccessor (&FriisPropagationLossModel::m_systemLoss),
MakeDoubleChecker<double> ())
.AddAttribute ("MinLoss",
"The minimum value (dB) of the total loss, used at short ranges. Note: ",
DoubleValue (0.0),
MakeDoubleAccessor (&FriisPropagationLossModel::SetMinLoss,
&FriisPropagationLossModel::GetMinLoss),
MakeDoubleChecker<double> ())
;
return tid;
}
FriisPropagationLossModel::FriisPropagationLossModel ()
{
}
// System loss L is a unit-less divisor in the Friis equation (1.0 = lossless).
void
FriisPropagationLossModel::SetSystemLoss (double systemLoss)
{
m_systemLoss = systemLoss;
}
double
FriisPropagationLossModel::GetSystemLoss (void) const
{
return m_systemLoss;
}
// Lower bound (dB) on the total loss, applied at very short distances.
void
FriisPropagationLossModel::SetMinLoss (double minLoss)
{
m_minLoss = minLoss;
}
double
FriisPropagationLossModel::GetMinLoss (void) const
{
return m_minLoss;
}
// Store the carrier frequency and cache the corresponding wavelength.
void
FriisPropagationLossModel::SetFrequency (double frequency)
{
m_frequency = frequency;
static const double C = 299792458.0; // speed of light in vacuum
m_lambda = C / frequency;
}
double
FriisPropagationLossModel::GetFrequency (void) const
{
return m_frequency;
}
// Convert a power in dBm to Watts.
double
FriisPropagationLossModel::DbmToW (double dbm) const
{
double mw = std::pow (10.0,dbm/10.0);
return mw / 1000.0;
}
// Convert a power in Watts to dBm.
double
FriisPropagationLossModel::DbmFromW (double w) const
{
double dbm = std::log10 (w * 1000.0) * 10.0;
return dbm;
}
// Compute the received power (dBm) from the Friis free-space equation,
// clamped so the total loss is never below m_minLoss.
double
FriisPropagationLossModel::DoCalcRxPower (double txPowerDbm,
                                          Ptr<MobilityModel> a,
                                          Ptr<MobilityModel> b) const
{
  /*
   * Friis free space equation:
   * where Pt, Gt, Gr and P are in Watt units
   * L is in meter units.
   *
   *    P     Gt * Gr * (lambda^2)
   *   --- = ---------------------
   *    Pt     (4 * pi * d)^2 * L
   *
   * Gt: tx gain (unit-less)
   * Gr: rx gain (unit-less)
   * Pt: tx power (W)
   * d: distance (m)
   * L: system loss
   * lambda: wavelength (m)
   *
   * Here, we ignore tx and rx gain and the input and output values
   * are in dB or dBm:
   *
   *                           lambda^2
   * rx = tx + 10 log10 (-------------------)
   *                     (4 * pi * d)^2 * L
   *
   * rx: rx power (dB)
   * tx: tx power (dB)
   * d: distance (m)
   * L: system loss (unit-less)
   * lambda: wavelength (m)
   */
  double distance = a->GetDistanceFrom (b);
  // Friis is only valid in the far field; warn when it is being stretched.
  if (distance < 3*m_lambda)
    {
      NS_LOG_WARN ("distance not within the far field region => inaccurate propagation loss value");
    }
  // Co-located nodes: apply only the configured minimum loss.
  if (distance <= 0)
    {
      return txPowerDbm - m_minLoss;
    }
  double numerator = m_lambda * m_lambda;
  double denominator = 16 * M_PI * M_PI * distance * distance * m_systemLoss;
  // Use std::log10 for consistency with the rest of the file; only <cmath>
  // is included, so the unqualified C name is not guaranteed to be visible.
  double lossDb = -10 * std::log10 (numerator / denominator);
  NS_LOG_DEBUG ("distance=" << distance<< "m, loss=" << lossDb <<"dB");
  return txPowerDbm - std::max (lossDb, m_minLoss);
}
// Deterministic model: consumes no RNG streams.
int64_t
FriisPropagationLossModel::DoAssignStreams (int64_t stream)
{
return 0;
}
// ------------------------------------------------------------------------- //
// -- Two-Ray Ground Model ported from NS-2 -- tomhewer@mac.com -- Nov09 //
NS_OBJECT_ENSURE_REGISTERED (TwoRayGroundPropagationLossModel);
// Register the type with Frequency / SystemLoss / MinDistance / HeightAboveZ attributes.
TypeId
TwoRayGroundPropagationLossModel::GetTypeId (void)
{
static TypeId tid = TypeId ("ns3::TwoRayGroundPropagationLossModel")
.SetParent<PropagationLossModel> ()
.SetGroupName ("Propagation")
.AddConstructor<TwoRayGroundPropagationLossModel> ()
.AddAttribute ("Frequency",
"The carrier frequency (in Hz) at which propagation occurs (default is 5.15 GHz).",
DoubleValue (5.150e9),
MakeDoubleAccessor (&TwoRayGroundPropagationLossModel::SetFrequency,
&TwoRayGroundPropagationLossModel::GetFrequency),
MakeDoubleChecker<double> ())
.AddAttribute ("SystemLoss", "The system loss",
DoubleValue (1.0),
MakeDoubleAccessor (&TwoRayGroundPropagationLossModel::m_systemLoss),
MakeDoubleChecker<double> ())
.AddAttribute ("MinDistance",
"The distance under which the propagation model refuses to give results (m)",
DoubleValue (0.5),
MakeDoubleAccessor (&TwoRayGroundPropagationLossModel::SetMinDistance,
&TwoRayGroundPropagationLossModel::GetMinDistance),
MakeDoubleChecker<double> ())
.AddAttribute ("HeightAboveZ",
"The height of the antenna (m) above the node's Z coordinate",
DoubleValue (0),
MakeDoubleAccessor (&TwoRayGroundPropagationLossModel::m_heightAboveZ),
MakeDoubleChecker<double> ())
;
return tid;
}
TwoRayGroundPropagationLossModel::TwoRayGroundPropagationLossModel ()
{
}
// System loss L is a unit-less divisor in the path-loss equations.
void
TwoRayGroundPropagationLossModel::SetSystemLoss (double systemLoss)
{
m_systemLoss = systemLoss;
}
double
TwoRayGroundPropagationLossModel::GetSystemLoss (void) const
{
return m_systemLoss;
}
// Below this distance (m) the model returns the tx power unchanged.
void
TwoRayGroundPropagationLossModel::SetMinDistance (double minDistance)
{
m_minDistance = minDistance;
}
double
TwoRayGroundPropagationLossModel::GetMinDistance (void) const
{
return m_minDistance;
}
// Antenna height (m) added to each node's Z coordinate.
void
TwoRayGroundPropagationLossModel::SetHeightAboveZ (double heightAboveZ)
{
m_heightAboveZ = heightAboveZ;
}
// Store the carrier frequency and cache the corresponding wavelength.
void
TwoRayGroundPropagationLossModel::SetFrequency (double frequency)
{
m_frequency = frequency;
static const double C = 299792458.0; // speed of light in vacuum
m_lambda = C / frequency;
}
double
TwoRayGroundPropagationLossModel::GetFrequency (void) const
{
return m_frequency;
}
// Convert a power in dBm to Watts.
double
TwoRayGroundPropagationLossModel::DbmToW (double dbm) const
{
double mw = std::pow (10.0,dbm / 10.0);
return mw / 1000.0;
}
// Convert a power in Watts to dBm.
double
TwoRayGroundPropagationLossModel::DbmFromW (double w) const
{
double dbm = std::log10 (w * 1000.0) * 10.0;
return dbm;
}
// Two-ray ground reflection model (ported from ns-2). Uses Friis below the
// crossover distance and the two-ray path-loss equation beyond it.
double
TwoRayGroundPropagationLossModel::DoCalcRxPower (double txPowerDbm,
Ptr<MobilityModel> a,
Ptr<MobilityModel> b) const
{
/*
* Two-Ray Ground equation:
*
* where Pt, Gt and Gr are in dBm units
* L, Ht and Hr are in meter units.
*
* Pr Gt * Gr * (Ht^2 * Hr^2)
* -- = (-------------------------)
* Pt d^4 * L
*
* Gt: tx gain (unit-less)
* Gr: rx gain (unit-less)
* Pt: tx power (dBm)
* d: distance (m)
* L: system loss
* Ht: Tx antenna height (m)
* Hr: Rx antenna height (m)
* lambda: wavelength (m)
*
* As with the Friis model we ignore tx and rx gain and output values
* are in dB or dBm
*
* (Ht * Ht) * (Hr * Hr)
* rx = tx + 10 log10 (-----------------------)
* (d * d * d * d) * L
*/
double distance = a->GetDistanceFrom (b);
// Model is undefined below the minimum distance: pass the power through.
if (distance <= m_minDistance)
{
return txPowerDbm;
}
// Set the height of the Tx and Rx antennae
double txAntHeight = a->GetPosition ().z + m_heightAboveZ;
double rxAntHeight = b->GetPosition ().z + m_heightAboveZ;
// Calculate a crossover distance, under which we use Friis
/*
*
* dCross = (4 * pi * Ht * Hr) / lambda
*
*/
double dCross = (4 * M_PI * txAntHeight * rxAntHeight) / m_lambda;
double tmp = 0;
if (distance <= dCross)
{
// We use Friis
double numerator = m_lambda * m_lambda;
tmp = M_PI * distance;
double denominator = 16 * tmp * tmp * m_systemLoss;
double pr = 10 * std::log10 (numerator / denominator);
NS_LOG_DEBUG ("Receiver within crossover (" << dCross << "m) for Two_ray path; using Friis");
NS_LOG_DEBUG ("distance=" << distance << "m, attenuation coefficient=" << pr << "dB");
return txPowerDbm + pr;
}
else // Use Two-Ray Pathloss
{
tmp = txAntHeight * rxAntHeight;
double rayNumerator = tmp * tmp;
tmp = distance * distance;
double rayDenominator = tmp * tmp * m_systemLoss;
double rayPr = 10 * std::log10 (rayNumerator / rayDenominator);
NS_LOG_DEBUG ("distance=" << distance << "m, attenuation coefficient=" << rayPr << "dB");
return txPowerDbm + rayPr;
}
}
// Deterministic model: consumes no RNG streams.
int64_t
TwoRayGroundPropagationLossModel::DoAssignStreams (int64_t stream)
{
return 0;
}
// ------------------------------------------------------------------------- //
NS_OBJECT_ENSURE_REGISTERED (LogDistancePropagationLossModel);
// Register the type with Exponent / ReferenceDistance / ReferenceLoss attributes.
TypeId
LogDistancePropagationLossModel::GetTypeId (void)
{
static TypeId tid = TypeId ("ns3::LogDistancePropagationLossModel")
.SetParent<PropagationLossModel> ()
.SetGroupName ("Propagation")
.AddConstructor<LogDistancePropagationLossModel> ()
.AddAttribute ("Exponent",
"The exponent of the Path Loss propagation model",
DoubleValue (3.0),
MakeDoubleAccessor (&LogDistancePropagationLossModel::m_exponent),
MakeDoubleChecker<double> ())
.AddAttribute ("ReferenceDistance",
"The distance at which the reference loss is calculated (m)",
DoubleValue (1.0),
MakeDoubleAccessor (&LogDistancePropagationLossModel::m_referenceDistance),
MakeDoubleChecker<double> ())
.AddAttribute ("ReferenceLoss",
"The reference loss at reference distance (dB). (Default is Friis at 1m with 5.15 GHz)",
DoubleValue (46.6777),
MakeDoubleAccessor (&LogDistancePropagationLossModel::m_referenceLoss),
MakeDoubleChecker<double> ())
;
return tid;
}
LogDistancePropagationLossModel::LogDistancePropagationLossModel ()
{
}
// Path-loss exponent n in the log-distance formula.
void
LogDistancePropagationLossModel::SetPathLossExponent (double n)
{
m_exponent = n;
}
// Set the reference distance (m) and the loss (dB) measured at that distance.
void
LogDistancePropagationLossModel::SetReference (double referenceDistance, double referenceLoss)
{
m_referenceDistance = referenceDistance;
m_referenceLoss = referenceLoss;
}
double
LogDistancePropagationLossModel::GetPathLossExponent (void) const
{
return m_exponent;
}
// Log-distance path loss: reference loss plus 10*n*log10(d/d0).
double
LogDistancePropagationLossModel::DoCalcRxPower (double txPowerDbm,
Ptr<MobilityModel> a,
Ptr<MobilityModel> b) const
{
double distance = a->GetDistanceFrom (b);
// At or below the reference distance, only the reference loss applies.
if (distance <= m_referenceDistance)
{
return txPowerDbm - m_referenceLoss;
}
/**
* The formula is:
* rx = 10 * log (Pr0(tx)) - n * 10 * log (d/d0)
*
* Pr0: rx power at reference distance d0 (W)
* d0: reference distance: 1.0 (m)
* d: distance (m)
* tx: tx power (dB)
* rx: dB
*
* Which, in our case is:
*
* rx = rx0(tx) - 10 * n * log (d/d0)
*/
double pathLossDb = 10 * m_exponent * std::log10 (distance / m_referenceDistance);
double rxc = -m_referenceLoss - pathLossDb;
NS_LOG_DEBUG ("distance="<<distance<<"m, reference-attenuation="<< -m_referenceLoss<<"dB, "<<
"attenuation coefficient="<<rxc<<"db");
return txPowerDbm + rxc;
}
// Deterministic model: consumes no RNG streams.
int64_t
LogDistancePropagationLossModel::DoAssignStreams (int64_t stream)
{
return 0;
}
// ------------------------------------------------------------------------- //
NS_OBJECT_ENSURE_REGISTERED (ThreeLogDistancePropagationLossModel);
// Register the type: three distance fields, an exponent per field, and the
// reference loss at Distance0.
TypeId
ThreeLogDistancePropagationLossModel::GetTypeId (void)
{
static TypeId tid = TypeId ("ns3::ThreeLogDistancePropagationLossModel")
.SetParent<PropagationLossModel> ()
.SetGroupName ("Propagation")
.AddConstructor<ThreeLogDistancePropagationLossModel> ()
.AddAttribute ("Distance0",
"Beginning of the first (near) distance field",
DoubleValue (1.0),
MakeDoubleAccessor (&ThreeLogDistancePropagationLossModel::m_distance0),
MakeDoubleChecker<double> ())
.AddAttribute ("Distance1",
"Beginning of the second (middle) distance field.",
DoubleValue (200.0),
MakeDoubleAccessor (&ThreeLogDistancePropagationLossModel::m_distance1),
MakeDoubleChecker<double> ())
.AddAttribute ("Distance2",
"Beginning of the third (far) distance field.",
DoubleValue (500.0),
MakeDoubleAccessor (&ThreeLogDistancePropagationLossModel::m_distance2),
MakeDoubleChecker<double> ())
.AddAttribute ("Exponent0",
"The exponent for the first field.",
DoubleValue (1.9),
MakeDoubleAccessor (&ThreeLogDistancePropagationLossModel::m_exponent0),
MakeDoubleChecker<double> ())
.AddAttribute ("Exponent1",
"The exponent for the second field.",
DoubleValue (3.8),
MakeDoubleAccessor (&ThreeLogDistancePropagationLossModel::m_exponent1),
MakeDoubleChecker<double> ())
.AddAttribute ("Exponent2",
"The exponent for the third field.",
DoubleValue (3.8),
MakeDoubleAccessor (&ThreeLogDistancePropagationLossModel::m_exponent2),
MakeDoubleChecker<double> ())
.AddAttribute ("ReferenceLoss",
"The reference loss at distance d0 (dB). (Default is Friis at 1m with 5.15 GHz)",
DoubleValue (46.6777),
MakeDoubleAccessor (&ThreeLogDistancePropagationLossModel::m_referenceLoss),
MakeDoubleChecker<double> ())
;
return tid;
}
ThreeLogDistancePropagationLossModel::ThreeLogDistancePropagationLossModel ()
{
}
// Piecewise log-distance loss: each field beyond the first accumulates the
// full loss of the preceding fields plus its own log term.
double
ThreeLogDistancePropagationLossModel::DoCalcRxPower (double txPowerDbm,
Ptr<MobilityModel> a,
Ptr<MobilityModel> b) const
{
double distance = a->GetDistanceFrom (b);
NS_ASSERT (distance >= 0);
// See doxygen comments for the formula and explanation
double pathLossDb;
if (distance < m_distance0)
{
// Inside the near field boundary: no path loss at all.
pathLossDb = 0;
}
else if (distance < m_distance1)
{
pathLossDb = m_referenceLoss
+ 10 * m_exponent0 * std::log10 (distance / m_distance0);
}
else if (distance < m_distance2)
{
pathLossDb = m_referenceLoss
+ 10 * m_exponent0 * std::log10 (m_distance1 / m_distance0)
+ 10 * m_exponent1 * std::log10 (distance / m_distance1);
}
else
{
pathLossDb = m_referenceLoss
+ 10 * m_exponent0 * std::log10 (m_distance1 / m_distance0)
+ 10 * m_exponent1 * std::log10 (m_distance2 / m_distance1)
+ 10 * m_exponent2 * std::log10 (distance / m_distance2);
}
NS_LOG_DEBUG ("ThreeLogDistance distance=" << distance << "m, " <<
"attenuation=" << pathLossDb << "dB");
return txPowerDbm - pathLossDb;
}
// Deterministic model: consumes no RNG streams.
int64_t
ThreeLogDistancePropagationLossModel::DoAssignStreams (int64_t stream)
{
return 0;
}
// ------------------------------------------------------------------------- //
NS_OBJECT_ENSURE_REGISTERED (NakagamiPropagationLossModel);
// Register the type: two distance thresholds, an m parameter per field, and
// the Erlang/Gamma random variables used to draw the faded power.
TypeId
NakagamiPropagationLossModel::GetTypeId (void)
{
  static TypeId tid = TypeId ("ns3::NakagamiPropagationLossModel")
    .SetParent<PropagationLossModel> ()
    .SetGroupName ("Propagation")
    .AddConstructor<NakagamiPropagationLossModel> ()
    .AddAttribute ("Distance1",
                   "Beginning of the second distance field. Default is 80m.",
                   DoubleValue (80.0),
                   MakeDoubleAccessor (&NakagamiPropagationLossModel::m_distance1),
                   MakeDoubleChecker<double> ())
    .AddAttribute ("Distance2",
                   "Beginning of the third distance field. Default is 200m.",
                   DoubleValue (200.0),
                   MakeDoubleAccessor (&NakagamiPropagationLossModel::m_distance2),
                   MakeDoubleChecker<double> ())
    .AddAttribute ("m0",
                   "m0 for distances smaller than Distance1. Default is 1.5.",
                   DoubleValue (1.5),
                   MakeDoubleAccessor (&NakagamiPropagationLossModel::m_m0),
                   MakeDoubleChecker<double> ())
    .AddAttribute ("m1",
                   "m1 for distances smaller than Distance2. Default is 0.75.",
                   DoubleValue (0.75),
                   MakeDoubleAccessor (&NakagamiPropagationLossModel::m_m1),
                   MakeDoubleChecker<double> ())
    .AddAttribute ("m2",
                   "m2 for distances greater than Distance2. Default is 0.75.",
                   DoubleValue (0.75),
                   MakeDoubleAccessor (&NakagamiPropagationLossModel::m_m2),
                   MakeDoubleChecker<double> ())
    .AddAttribute ("ErlangRv",
                   "Access to the underlying ErlangRandomVariable",
                   StringValue ("ns3::ErlangRandomVariable"),
                   MakePointerAccessor (&NakagamiPropagationLossModel::m_erlangRandomVariable),
                   MakePointerChecker<ErlangRandomVariable> ())
    .AddAttribute ("GammaRv",
                   "Access to the underlying GammaRandomVariable",
                   StringValue ("ns3::GammaRandomVariable"),
                   MakePointerAccessor (&NakagamiPropagationLossModel::m_gammaRandomVariable),
                   MakePointerChecker<GammaRandomVariable> ())
  ; // single statement terminator (a stray duplicate ';' was removed here)
  return tid;
}
NakagamiPropagationLossModel::NakagamiPropagationLossModel ()
{
}
// Draw a Nakagami-m faded rx power. The distance only selects which m
// parameter is used; the power itself comes from an Erlang/Gamma draw.
double
NakagamiPropagationLossModel::DoCalcRxPower (double txPowerDbm,
Ptr<MobilityModel> a,
Ptr<MobilityModel> b) const
{
// select m parameter
double distance = a->GetDistanceFrom (b);
NS_ASSERT (distance >= 0);
double m;
if (distance < m_distance1)
{
m = m_m0;
}
else if (distance < m_distance2)
{
m = m_m1;
}
else
{
m = m_m2;
}
// the current power unit is dBm, but Watt is put into the Nakagami /
// Rayleigh distribution.
double powerW = std::pow (10, (txPowerDbm - 30) / 10);
double resultPowerW;
// switch between Erlang- and Gamma distributions: this is only for
// speed. (Gamma is equal to Erlang for any positive integer m.)
unsigned int int_m = static_cast<unsigned int>(std::floor (m));
if (int_m == m)
{
resultPowerW = m_erlangRandomVariable->GetValue (int_m, powerW / m);
}
else
{
resultPowerW = m_gammaRandomVariable->GetValue (m, powerW / m);
}
double resultPowerDbm = 10 * std::log10 (resultPowerW) + 30;
NS_LOG_DEBUG ("Nakagami distance=" << distance << "m, " <<
"power=" << powerW <<"W, " <<
"resultPower=" << resultPowerW << "W=" << resultPowerDbm << "dBm");
return resultPowerDbm;
}
// Two streams: one for the Erlang draw, one for the Gamma draw.
int64_t
NakagamiPropagationLossModel::DoAssignStreams (int64_t stream)
{
m_erlangRandomVariable->SetStream (stream);
m_gammaRandomVariable->SetStream (stream + 1);
return 2;
}
// ------------------------------------------------------------------------- //
NS_OBJECT_ENSURE_REGISTERED (FixedRssLossModel);
// Register the type with its fixed Rss attribute.
TypeId
FixedRssLossModel::GetTypeId (void)
{
static TypeId tid = TypeId ("ns3::FixedRssLossModel")
.SetParent<PropagationLossModel> ()
.SetGroupName ("Propagation")
.AddConstructor<FixedRssLossModel> ()
.AddAttribute ("Rss", "The fixed receiver Rss.",
DoubleValue (-150.0),
MakeDoubleAccessor (&FixedRssLossModel::m_rss),
MakeDoubleChecker<double> ())
;
return tid;
}
FixedRssLossModel::FixedRssLossModel ()
: PropagationLossModel ()
{
}
FixedRssLossModel::~FixedRssLossModel ()
{
}
// Fixed received signal strength (dBm) returned regardless of tx power.
void
FixedRssLossModel::SetRss (double rss)
{
m_rss = rss;
}
// Ignores tx power and positions entirely; always returns the fixed Rss.
double
FixedRssLossModel::DoCalcRxPower (double txPowerDbm,
Ptr<MobilityModel> a,
Ptr<MobilityModel> b) const
{
return m_rss;
}
// Deterministic model: consumes no RNG streams.
int64_t
FixedRssLossModel::DoAssignStreams (int64_t stream)
{
return 0;
}
// ------------------------------------------------------------------------- //
NS_OBJECT_ENSURE_REGISTERED (MatrixPropagationLossModel);
// Register the type; DefaultLoss applies to pairs with no explicit entry.
TypeId
MatrixPropagationLossModel::GetTypeId (void)
{
static TypeId tid = TypeId ("ns3::MatrixPropagationLossModel")
.SetParent<PropagationLossModel> ()
.SetGroupName ("Propagation")
.AddConstructor<MatrixPropagationLossModel> ()
.AddAttribute ("DefaultLoss", "The default value for propagation loss, dB.",
DoubleValue (std::numeric_limits<double>::max ()),
MakeDoubleAccessor (&MatrixPropagationLossModel::m_default),
MakeDoubleChecker<double> ())
;
return tid;
}
// Default loss of +inf (double max) means "no link" unless SetLoss is called.
MatrixPropagationLossModel::MatrixPropagationLossModel ()
: PropagationLossModel (), m_default (std::numeric_limits<double>::max ())
{
}
MatrixPropagationLossModel::~MatrixPropagationLossModel ()
{
}
// Loss (dB) used for any node pair without an explicit matrix entry.
void
MatrixPropagationLossModel::SetDefaultLoss (double loss)
{
m_default = loss;
}
// Insert or update the loss for the ordered pair (ma, mb); when symmetric,
// also set the reverse direction (via a non-symmetric recursive call).
void
MatrixPropagationLossModel::SetLoss (Ptr<MobilityModel> ma, Ptr<MobilityModel> mb, double loss, bool symmetric)
{
NS_ASSERT (ma != 0 && mb != 0);
MobilityPair p = std::make_pair (ma, mb);
std::map<MobilityPair, double>::iterator i = m_loss.find (p);
if (i == m_loss.end ())
{
m_loss.insert (std::make_pair (p, loss));
}
else
{
i->second = loss;
}
if (symmetric)
{
SetLoss (mb, ma, loss, false);
}
}
// Look up the (a, b) entry; fall back to the default loss when absent.
double
MatrixPropagationLossModel::DoCalcRxPower (double txPowerDbm,
Ptr<MobilityModel> a,
Ptr<MobilityModel> b) const
{
std::map<MobilityPair, double>::const_iterator i = m_loss.find (std::make_pair (a, b));
if (i != m_loss.end ())
{
return txPowerDbm - i->second;
}
else
{
return txPowerDbm - m_default;
}
}
// Deterministic model: consumes no RNG streams.
int64_t
MatrixPropagationLossModel::DoAssignStreams (int64_t stream)
{
return 0;
}
// ------------------------------------------------------------------------- //
NS_OBJECT_ENSURE_REGISTERED (RangePropagationLossModel);
// Register the type with its MaxRange attribute.
TypeId
RangePropagationLossModel::GetTypeId (void)
{
static TypeId tid = TypeId ("ns3::RangePropagationLossModel")
.SetParent<PropagationLossModel> ()
.SetGroupName ("Propagation")
.AddConstructor<RangePropagationLossModel> ()
.AddAttribute ("MaxRange",
"Maximum Transmission Range (meters)",
DoubleValue (250),
MakeDoubleAccessor (&RangePropagationLossModel::m_range),
MakeDoubleChecker<double> ())
;
return tid;
}
RangePropagationLossModel::RangePropagationLossModel ()
{
}
// All-or-nothing model: full power within range, -1000 dBm (effectively no
// signal) beyond it.
double
RangePropagationLossModel::DoCalcRxPower (double txPowerDbm,
Ptr<MobilityModel> a,
Ptr<MobilityModel> b) const
{
double distance = a->GetDistanceFrom (b);
if (distance <= m_range)
{
return txPowerDbm;
}
else
{
return -1000;
}
}
// Deterministic model: consumes no RNG streams.
int64_t
RangePropagationLossModel::DoAssignStreams (int64_t stream)
{
return 0;
}
// ------------------------------------------------------------------------- //
} // namespace ns3
|
{
"pile_set_name": "Github"
}
|
/**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 3 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2019 Live Networks, Inc. All rights reserved.
// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
// on demand, from a H264 Elementary Stream video file.
// C++ header
#ifndef _H264_VIDEO_FILE_SERVER_MEDIA_SUBSESSION_HH
#define _H264_VIDEO_FILE_SERVER_MEDIA_SUBSESSION_HH
#ifndef _FILE_SERVER_MEDIA_SUBSESSION_HH
#include "FileServerMediaSubsession.hh"
#endif
// Serves an H.264 elementary-stream video file as a unicast RTSP subsession,
// creating a new RTP sink on demand for each client.
class H264VideoFileServerMediaSubsession: public FileServerMediaSubsession {
public:
static H264VideoFileServerMediaSubsession*
createNew(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource);
// Used to implement "getAuxSDPLine()":
void checkForAuxSDPLine1();
void afterPlayingDummy1();
protected:
H264VideoFileServerMediaSubsession(UsageEnvironment& env,
char const* fileName, Boolean reuseFirstSource);
// called only by createNew();
virtual ~H264VideoFileServerMediaSubsession();
// Signals that the aux SDP line is ready (all bits set => "done").
void setDoneFlag() { fDoneFlag = ~0; }
protected: // redefined virtual functions
virtual char const* getAuxSDPLine(RTPSink* rtpSink,
FramedSource* inputSource);
virtual FramedSource* createNewStreamSource(unsigned clientSessionId,
unsigned& estBitrate);
virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock,
unsigned char rtpPayloadTypeIfDynamic,
FramedSource* inputSource);
private:
char* fAuxSDPLine;
char fDoneFlag; // used when setting up "fAuxSDPLine"
RTPSink* fDummyRTPSink; // ditto
};
#endif
|
{
"pile_set_name": "Github"
}
|
# SOAP::WSDL-generated data type class for the ONVIF AppearanceExtension
# complexType (an empty extension point in the ONVIF ver10 schema).
package ONVIF::Device::Types::AppearanceExtension;
use strict;
use warnings;
__PACKAGE__->_set_element_form_qualified(1);
# XML namespace this type belongs to.
sub get_xmlns { 'http://www.onvif.org/ver10/schema' };
# No attribute class: this complexType declares no XML attributes.
our $XML_ATTRIBUTE_CLASS;
undef $XML_ATTRIBUTE_CLASS;
sub __get_attr_class {
return $XML_ATTRIBUTE_CLASS;
}
use Class::Std::Fast::Storable constructor => 'none';
use base qw(SOAP::WSDL::XSD::Typelib::ComplexType);
Class::Std::initialize();
{ # BLOCK to scope variables
# Empty factory: the type has no child elements, so all four maps
# (element list, class map, name map, attribute map) are empty.
__PACKAGE__->_factory(
[ qw(
) ],
{
},
{
},
{
}
);
} # end BLOCK
1;
=pod
=head1 NAME
ONVIF::Device::Types::AppearanceExtension
=head1 DESCRIPTION
Perl data type class for the XML Schema defined complexType
AppearanceExtension from the namespace http://www.onvif.org/ver10/schema.
=head2 PROPERTIES
The following properties may be accessed using get_PROPERTY / set_PROPERTY
methods:
=over
=back
=head1 METHODS
=head2 new
Constructor. The following data structure may be passed to new():
{ # ONVIF::Device::Types::AppearanceExtension
},
=head1 AUTHOR
Generated by SOAP::WSDL
=cut
|
{
"pile_set_name": "Github"
}
|
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
Typedef
Foam::IOsystemCall
Description
Instance of the generic IOOutputFilter for systemCall.
\*---------------------------------------------------------------------------*/
#ifndef IOsystemCall_H
#define IOsystemCall_H

#include "systemCall.H"
#include "IOOutputFilter.H"

// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

namespace Foam
{
    // Convenience alias: the generic IOOutputFilter wrapper instantiated
    // for the systemCall function object (see Typedef note above).
    typedef IOOutputFilter<systemCall> IOsystemCall;
}

// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

#endif
// ************************************************************************* //
|
{
"pile_set_name": "Github"
}
|
#include "WavTapDevice.hpp"
#include "WavTapEngine.hpp"
#include <IOKit/audio/IOAudioControl.h>
#include <IOKit/audio/IOAudioLevelControl.h>
#include <IOKit/audio/IOAudioToggleControl.h>
#include <IOKit/audio/IOAudioDefines.h>
#include <IOKit/IOLib.h>
#define super IOAudioDevice
OSDefineMetaClassAndStructors(WavTapDevice, IOAudioDevice)
const SInt32 WavTapDevice::kVolumeMax = 99;
const SInt32 WavTapDevice::kGainMax = 99;
// IOAudioDevice hardware-init hook. Sets the device's user-visible names
// and creates the audio engine(s). Returns false to abort driver start.
bool WavTapDevice::initHardware(IOService *provider) {
    // Let the superclass perform its own initialization first.
    if (!super::initHardware(provider)) {
        return false;
    }
    setDeviceName("WavTap");
    setDeviceShortName("WavTap");
    setManufacturerName("WavTap");
    if (!createAudioEngines()){
        return false;
    }
    return true;
}
bool WavTapDevice::createAudioEngines() {
OSDictionary *audioEngineDict = OSDynamicCast(OSDictionary, getProperty(AUDIO_ENGINE_KEY));
WavTapEngine *audioEngine = new WavTapEngine;
audioEngine->init(audioEngineDict);
initControls(audioEngine);
activateAudioEngine(audioEngine);
audioEngine->release();
return true;
}
// Helper macro: validate a freshly created IOAudioControl, wire up its
// change handler, attach it to the engine, and drop the local reference.
// On a NULL control it logs and makes the *enclosing function* return false.
// NOTE(review): the body is not wrapped in do { ... } while (0), so using
// this macro as the sole statement of an if/else could bind the dangling
// statements unexpectedly; current call sites use it at statement level only.
#define addControl(control, handler) \
    if (!control) {\
        IOLog("WavTap failed to add control.\n"); \
        return false; \
    } \
    control->setValueChangeHandler(handler, this); \
    audioEngine->addDefaultAudioControl(control); \
    control->release();
// Create per-channel volume/gain controls plus global input/output mute
// controls, and register them with the engine. Returns false if any
// control fails to allocate (via the addControl macro).
bool WavTapDevice::initControls(WavTapEngine* audioEngine) {
    IOAudioControl *control = NULL;
    // Initialize the cached control state to max volume/gain, unmuted.
    for (UInt32 channel = 0; channel <= NUM_CHANS; channel++) {
        mGain[channel] = kVolumeMax;
        mVolume[channel] = kVolumeMax;
        mMuteIn[channel] = false;
        mMuteOut[channel] = false;
    }
    // NOTE(review): this 7-entry initializer assumes NUM_CHANS >= 6; with a
    // smaller NUM_CHANS the array size would be exceeded — confirm NUM_CHANS.
    const char *channelNameMap[NUM_CHANS+1] = { kIOAudioControlChannelNameAll, kIOAudioControlChannelNameLeft, kIOAudioControlChannelNameRight, kIOAudioControlChannelNameCenter, kIOAudioControlChannelNameLeftRear, kIOAudioControlChannelNameRightRear, kIOAudioControlChannelNameSub };
    // Channels beyond the seven named ones get a generic label.
    for (UInt32 channel = 7; channel <= NUM_CHANS; channel++) {
        channelNameMap[channel] = "Unknown Channel";
    }
    for (unsigned channel = 0; channel <= NUM_CHANS; channel++) {
        // The (-40 << 16) + 32768 / (40 << 16) + 32768 values appear to be
        // dB ranges in IOKit's 16.16 fixed-point format (-40.5 .. +40.5 dB)
        // — TODO confirm against IOAudioLevelControl documentation.
        control = IOAudioLevelControl::createVolumeControl(WavTapDevice::kVolumeMax, 0, WavTapDevice::kVolumeMax, (-40 << 16) + (32768), 0, channel, channelNameMap[channel], channel, kIOAudioControlUsageOutput);
        addControl(control, (IOAudioControl::IntValueChangeHandler)volumeChangeHandler);
        control = IOAudioLevelControl::createVolumeControl(WavTapDevice::kGainMax, 0, WavTapDevice::kGainMax, 0, (40 << 16) + (32768), channel, channelNameMap[channel], channel, kIOAudioControlUsageInput);
        addControl(control, (IOAudioControl::IntValueChangeHandler)gainChangeHandler);
    }
    // Single mute toggle for all output channels, and one for all inputs.
    control = IOAudioToggleControl::createMuteControl(false, kIOAudioControlChannelIDAll, kIOAudioControlChannelNameAll, 0, kIOAudioControlUsageOutput);
    addControl(control, (IOAudioControl::IntValueChangeHandler)outputMuteChangeHandler);
    control = IOAudioToggleControl::createMuteControl(false, kIOAudioControlChannelIDAll, kIOAudioControlChannelNameAll, 0, kIOAudioControlUsageInput);
    addControl(control, (IOAudioControl::IntValueChangeHandler)inputMuteChangeHandler);
    return true;
}
// Static trampoline registered with the control: forwards a volume change
// to the owning WavTapDevice instance.
IOReturn WavTapDevice::volumeChangeHandler(IOService *target, IOAudioControl *volumeControl, SInt32 oldValue, SInt32 newValue) {
    WavTapDevice *device = (WavTapDevice *)target;
    if (!device) {
        return kIOReturnBadArgument;
    }
    return device->volumeChanged(volumeControl, oldValue, newValue);
}
// Cache the new volume for the control's channel; always reports success.
IOReturn WavTapDevice::volumeChanged(IOAudioControl *volumeControl, SInt32 oldValue, SInt32 newValue) {
    if (!volumeControl) {
        return kIOReturnSuccess;
    }
    mVolume[volumeControl->getChannelID()] = newValue;
    return kIOReturnSuccess;
}
// Static trampoline: forwards an output-mute change to the owning device.
IOReturn WavTapDevice::outputMuteChangeHandler(IOService *target, IOAudioControl *muteControl, SInt32 oldValue, SInt32 newValue) {
    WavTapDevice *device = (WavTapDevice*)target;
    if (!device) {
        return kIOReturnBadArgument;
    }
    return device->outputMuteChanged(muteControl, oldValue, newValue);
}
// Cache the new output-mute state for the control's channel.
IOReturn WavTapDevice::outputMuteChanged(IOAudioControl *muteControl, SInt32 oldValue, SInt32 newValue) {
    if (!muteControl) {
        return kIOReturnSuccess;
    }
    mMuteOut[muteControl->getChannelID()] = newValue;
    return kIOReturnSuccess;
}
// Static trampoline: forwards a gain-control change to the owning device.
IOReturn WavTapDevice::gainChangeHandler(IOService *target, IOAudioControl *gainControl, SInt32 oldValue, SInt32 newValue) {
    WavTapDevice *device = (WavTapDevice *)target;
    if (!device) {
        return kIOReturnBadArgument;
    }
    return device->gainChanged(gainControl, oldValue, newValue);
}
// Cache the new gain for the control's channel; always reports success.
IOReturn WavTapDevice::gainChanged(IOAudioControl *gainControl, SInt32 oldValue, SInt32 newValue) {
    if (!gainControl) {
        return kIOReturnSuccess;
    }
    mGain[gainControl->getChannelID()] = newValue;
    return kIOReturnSuccess;
}
// Static trampoline: forwards an input-mute change to the owning device.
IOReturn WavTapDevice::inputMuteChangeHandler(IOService *target, IOAudioControl *muteControl, SInt32 oldValue, SInt32 newValue) {
    WavTapDevice *device = (WavTapDevice*)target;
    if (!device) {
        return kIOReturnBadArgument;
    }
    return device->inputMuteChanged(muteControl, oldValue, newValue);
}
// Cache the new input-mute state for the control's channel.
IOReturn WavTapDevice::inputMuteChanged(IOAudioControl *muteControl, SInt32 oldValue, SInt32 newValue) {
    if (!muteControl) {
        return kIOReturnSuccess;
    }
    mMuteIn[muteControl->getChannelID()] = newValue;
    return kIOReturnSuccess;
}
|
{
"pile_set_name": "Github"
}
|
#!/usr/bin/env sh
# Run the peridot PHP spec suite with code coverage and (on macOS) open the
# resulting HTML report. Requires either the Xdebug extension or phpdbg.
cd "$(dirname "$0")"
. "./.common.sh"
cd ../..
# Make the sandboxed composer binaries (peridot) visible on PATH.
PATH="$PWD/sandbox/composer/bin:$PATH"
if ! chkcmd 'peridot'; then
echo ' error: "peridot" command not found.'
echo ' Execute "./support/tools/init" first.'
exit 1
fi
coverage_index="$PWD/sandbox/code-coverage-report/index.html"
## The report directory is not created automatically, so create it up front.
mkdir -p "$(dirname "$coverage_index")"
# Default reporter; a single command-line argument overrides it.
reporter=html-code-coverage
if test $# -eq 1; then
reporter=$1
fi
cd sandbox
peridot_arguments="-c \"../support/peridot.php\" -r $reporter -g \"*.php\" \"../specs/\""
# Prefer Xdebug when the PHP module is loaded; fall back to phpdbg.
if php -m | grep -i 'xdebug' > /dev/null; then
echo ' info: using Xdebug.'
eval "peridot $peridot_arguments"
elif chkcmd 'phpdbg'; then
echo ' info: using phpdbg.'
echo
echo ' type: run'
echo ' quit'
echo
eval "phpdbg -e \"$(which peridot)\" $peridot_arguments"
else
echo ' error: no profiling tool found.'
exit 1
fi
# Open the generated report when it exists and an 'open' command is available.
if test -f "$coverage_index" && chkcmd 'open'; then
open "$coverage_index"
fi
|
{
"pile_set_name": "Github"
}
|
#!/usr/bin/python
"""
(C) Copyright 2020 Intel Corporation.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
GOVERNMENT LICENSE RIGHTS-OPEN SOURCE SOFTWARE
The Government's rights to use, modify, reproduce, release, perform, display,
or disclose this software are subject to the terms of the Apache License as
provided in Contract No. B609815.
Any reproduction of computer software, computer software documentation, or
portions thereof marked with this legend must also reproduce the markings.
"""
from __future__ import print_function
import os
import re
import random
from avocado import fail_on
from apricot import TestWithServers
from daos_racer_utils import DaosRacerCommand
from agent_utils import include_local_host
from command_utils import CommandFailure
from general_utils import check_file_exists, get_host_data, get_log_file
class ZeroConfigTest(TestWithServers):
    """Test class for zero-config tests.

    Test Class Description:
        Test to verify that client application to libdaos can access a running
        DAOS system with & without any special environment variable definitions.

    :avocado: recursive
    """

    def setUp(self):
        """Set up for zero-config test."""
        # Servers are configured and started manually inside each test method.
        self.setup_start_servers = False
        super(ZeroConfigTest, self).setUp()

    def get_port_cnt(self, hosts, dev, port_counter):
        """Get the port count info for device names specified.

        Args:
            hosts (list): list of hosts
            dev (str): device to get counter information for
            port_counter (str): port counter to get information from

        Returns:
            dict: a dictionary of data values for each NodeSet key

        """
        b_path = "/sys/class/infiniband/{}".format(dev)
        # Renamed from 'file' to avoid shadowing the py2 builtin.
        counter_file = os.path.join(b_path, "ports/1/counters", port_counter)

        # Check that the counter file exists on each host
        check_result = check_file_exists(hosts, counter_file)
        if not check_result[0]:
            self.fail("{}: {} not found".format(check_result[1], counter_file))

        cmd = "cat {}".format(counter_file)
        text = "port_counter"
        error = "Error obtaining {} info".format(port_counter)
        return get_host_data(hosts, cmd, text, error, 20)

    def get_log_info(self, hosts, dev, env_state, log_file):
        """Get information from daos.log file to verify device used.

        Args:
            hosts (list): list of hosts
            dev (str): device to get counter information for
            env_state (bool): set state for OFI_INTERFACE env variable
            log_file (str): log file to verify

        Returns:
            bool: status of whether correct device was used.

        """
        # The interface line is logged during client init, so the head of
        # the log is sufficient.
        cmd = "head -50 {}".format(log_file)
        err = "Error getting log data."
        pattern = r"Using\s+client\s+provided\s+OFI_INTERFACE:\s+{}".format(dev)

        detected = 0
        for output in get_host_data(hosts, cmd, log_file, err).values():
            detected = len(re.findall(pattern, output))
        self.log.info(
            "Found %s instances of client setting up OFI_INTERFACE=%s",
            detected, dev)

        # Verify: exactly one match is expected when OFI_INTERFACE was
        # exported, and no match when it was not.
        status = True
        if env_state and detected != 1:
            status = False
        elif not env_state and detected == 1:
            status = False
        return status

    @fail_on(CommandFailure)
    def verify_client_run(self, exp_iface, env):
        """Verify the interface assigned by running a libdaos client.

        Args:
            exp_iface (str): expected interface to check.
            env (bool): add OFI_INTERFACE variable to exported variables of
                client command.

        Returns:
            bool: returns status

        """
        hfi_map = {"ib0": "hfi1_0", "ib1": "hfi1_1"}

        # Get counter values for hfi devices before and after
        cnt_before = self.get_port_cnt(
            self.hostlist_clients, hfi_map[exp_iface], "port_rcv_data")

        # get the dmg config file for daos_racer
        dmg = self.get_dmg_command()

        # Let's run daos_racer as a client
        daos_racer = DaosRacerCommand(self.bin,
                                      self.hostlist_clients[0], dmg)
        daos_racer.get_params(self)

        # Update env_name list to add OFI_INTERFACE if needed.
        if env:
            daos_racer.update_env_names(["OFI_INTERFACE"])

        # Setup the environment and logfile
        logf = "daos_racer_{}_{}.log".format(exp_iface, env)

        # Add FI_LOG_LEVEL to get more info on device issues
        racer_env = daos_racer.get_environment(self.server_managers[0], logf)
        racer_env["FI_LOG_LEVEL"] = "info"
        daos_racer.set_environment(racer_env)

        # Run client
        daos_racer.run()

        # Verify output and port count to check what iface CaRT init with.
        cnt_after = self.get_port_cnt(
            self.hostlist_clients, hfi_map[exp_iface], "port_rcv_data")

        diff = 0
        for cnt_b, cnt_a in zip(cnt_before.values(), cnt_after.values()):
            diff = int(cnt_a) - int(cnt_b)
            self.log.info("Port [%s] count difference: %s", exp_iface, diff)

        # Read daos.log to verify device used and prevent false positives
        self.assertTrue(
            self.get_log_info(
                self.hostlist_clients, exp_iface, env, get_log_file(logf)))

        # If we don't see data going through the device, fail
        status = True
        if diff <= 0:
            self.log.info("No traffic seen through device: %s", exp_iface)
            status = False
        else:
            status = True
        return status

    def test_env_set_unset(self):
        """JIRA ID: DAOS-4880.

        Test Description:
            Test starting a daos_server process on 2 different numa
            nodes and verify that client can start when OFI_INTERFACE is set
            or unset. The test expects that the server will have two interfaces
            available: hfi_0 and hfi_1.

        :avocado: tags=all,pr,hw,small,zero_config,env_set
        """
        env_state = self.params.get("env_state", '/run/zero_config/*')
        dev_info = {"ib0": 0, "ib1": 1}
        # list() is required for Python 3, where dict.keys() returns a view
        # that random.choice() cannot index (py2 returned a list).
        exp_iface = random.choice(list(dev_info.keys()))

        # Configure the daos server
        config_file = self.get_config_file(self.server_group, "server")
        self.add_server_manager(config_file)
        self.configure_manager(
            "server",
            self.server_managers[0],
            self.hostlist_servers,
            self.hostfile_servers_slots,
            self.hostlist_servers)
        self.assertTrue(
            self.server_managers[0].set_config_value(
                "fabric_iface", exp_iface),
            "Error updating daos_server 'fabric_iface' config opt")
        self.assertTrue(
            self.server_managers[0].set_config_value(
                "pinned_numa_node", dev_info[exp_iface]),
            "Error updating daos_server 'pinned_numa_node' config opt")

        # Start the daos server
        self.start_server_managers()

        # Verify
        err = []
        if not self.verify_client_run(exp_iface, env_state):
            err.append("Failed run with expected dev: {}".format(exp_iface))
        self.assertEqual(len(err), 0, "{}".format("\n".join(err)))
|
{
"pile_set_name": "Github"
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.examples.ml;
// $example on$
import java.util.Arrays;
import org.apache.spark.ml.Pipeline;
import org.apache.spark.ml.PipelineModel;
import org.apache.spark.ml.PipelineStage;
import org.apache.spark.ml.classification.LogisticRegression;
import org.apache.spark.ml.feature.HashingTF;
import org.apache.spark.ml.feature.Tokenizer;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
// $example off$
import org.apache.spark.sql.SparkSession;
/**
* Java example for simple text document 'Pipeline'.
*/
/**
 * Java example for simple text document 'Pipeline'.
 */
public class JavaPipelineExample {
  public static void main(String[] args) {
    SparkSession spark = SparkSession
      .builder()
      .appName("JavaPipelineExample")
      .getOrCreate();

    // $example on$
    // Labeled training documents (label 1.0 for documents containing "spark").
    Dataset<Row> trainingDocs = spark.createDataFrame(Arrays.asList(
      new JavaLabeledDocument(0L, "a b c d e spark", 1.0),
      new JavaLabeledDocument(1L, "b d", 0.0),
      new JavaLabeledDocument(2L, "spark f g h", 1.0),
      new JavaLabeledDocument(3L, "hadoop mapreduce", 0.0)
    ), JavaLabeledDocument.class);

    // Three pipeline stages: split text into words, hash words into a
    // fixed-size feature vector, then fit a logistic regression model.
    Tokenizer wordTokenizer = new Tokenizer()
      .setInputCol("text")
      .setOutputCol("words");
    HashingTF termFrequency = new HashingTF()
      .setNumFeatures(1000)
      .setInputCol(wordTokenizer.getOutputCol())
      .setOutputCol("features");
    LogisticRegression logReg = new LogisticRegression()
      .setMaxIter(10)
      .setRegParam(0.001);
    PipelineStage[] stages = {wordTokenizer, termFrequency, logReg};
    Pipeline textPipeline = new Pipeline().setStages(stages);

    // Fit the whole pipeline on the training documents in one call.
    PipelineModel fittedModel = textPipeline.fit(trainingDocs);

    // Unlabeled documents to score.
    Dataset<Row> testDocs = spark.createDataFrame(Arrays.asList(
      new JavaDocument(4L, "spark i j k"),
      new JavaDocument(5L, "l m n"),
      new JavaDocument(6L, "spark hadoop spark"),
      new JavaDocument(7L, "apache hadoop")
    ), JavaDocument.class);

    // Score the test documents and print id/text with probability and label.
    Dataset<Row> predicted = fittedModel.transform(testDocs);
    for (Row row : predicted.select("id", "text", "probability", "prediction").collectAsList()) {
      System.out.println("(" + row.get(0) + ", " + row.get(1) + ") --> prob=" + row.get(2)
        + ", prediction=" + row.get(3));
    }
    // $example off$

    spark.stop();
  }
}
|
{
"pile_set_name": "Github"
}
|
gcr.io/ml-pipeline/ml-pipeline-dataproc-predict:be19cbc2591a48d2ef5ca715c34ecae8223cf454
|
{
"pile_set_name": "Github"
}
|
#!/bin/bash
# /etc/initcpio/install/sd-plymouth — mkinitcpio/systemd hook for plymouth
# mkinitcpio build hook: stage plymouth binaries, theme assets, fonts,
# renderers, udev rules and systemd units into the initramfs image.
# Fixes over the original: all path expansions are quoted (theme names and
# font paths containing whitespace previously broke word splitting), and
# the error message goes to fd 2 instead of the non-portable /dev/stderr.
build() {
    add_dir /dev/pts
    add_dir /usr/share/plymouth/themes
    add_dir /run/plymouth
    DATADIR="/usr/share/plymouth"
    PLYMOUTH_LOGO_FILE="${DATADIR}/arch-logo.png"
    PLYMOUTH_THEME_NAME="$(/usr/bin/plymouth-set-default-theme)"
    PLYMOUTH_THEME_DIR="${DATADIR}/themes/${PLYMOUTH_THEME_NAME}"
    # Extract ImageDir and ModuleName from the theme's .plymouth config.
    PLYMOUTH_IMAGE_DIR="$(grep "ImageDir *= *" "${PLYMOUTH_THEME_DIR}/${PLYMOUTH_THEME_NAME}.plymouth" | sed 's/ImageDir *= *//')"
    PLYMOUTH_PLUGIN_PATH="$(plymouth --get-splash-plugin-path)"
    PLYMOUTH_MODULE_NAME="$(grep "ModuleName *= *" "${PLYMOUTH_THEME_DIR}/${PLYMOUTH_THEME_NAME}.plymouth" | sed 's/ModuleName *= *//')"
    add_binary /usr/bin/plymouthd
    add_binary /usr/bin/plymouth
    # Fallback themes are always included.
    add_file "${DATADIR}/themes/text/text.plymouth"
    add_binary "${PLYMOUTH_PLUGIN_PATH}/text.so"
    add_file "${DATADIR}/themes/details/details.plymouth"
    add_binary "${PLYMOUTH_PLUGIN_PATH}/details.so"
    add_file "${PLYMOUTH_LOGO_FILE}"
    add_file /etc/os-release
    add_file /etc/plymouth/plymouthd.conf
    add_file "${DATADIR}/plymouthd.defaults"
    # label.so renders on-screen text; it needs fontconfig plus a font.
    if [ -f "/usr/share/fonts/TTF/DejaVuSans.ttf" ] || [ -f "/usr/share/fonts/cantarell/Cantarell-Thin.otf" ]; then
        add_binary "${PLYMOUTH_PLUGIN_PATH}/label.so"
        add_file "/etc/fonts/fonts.conf"
    fi
    if [ -f "/usr/share/fonts/TTF/DejaVuSans.ttf" ]; then
        add_file "/usr/share/fonts/TTF/DejaVuSans.ttf"
        add_file "/etc/fonts/conf.d/57-dejavu-sans.conf"
    fi
    if [ -f "/usr/share/fonts/cantarell/Cantarell-Thin.otf" ]; then
        add_file "/usr/share/fonts/cantarell/Cantarell-Thin.otf"
        add_file "/usr/share/fonts/cantarell/Cantarell-Regular.otf"
        add_file "/etc/fonts/conf.d/60-latin.conf"
    fi
    # The configured splash plugin is mandatory; abort the build otherwise.
    if [ ! -f "${PLYMOUTH_PLUGIN_PATH}/${PLYMOUTH_MODULE_NAME}.so" ]; then
        echo "The default plymouth plugin (${PLYMOUTH_MODULE_NAME}) doesn't exist" >&2
        exit 1
    fi
    add_binary "${PLYMOUTH_PLUGIN_PATH}/${PLYMOUTH_MODULE_NAME}.so"
    add_binary "${PLYMOUTH_PLUGIN_PATH}/renderers/drm.so"
    add_binary "${PLYMOUTH_PLUGIN_PATH}/renderers/frame-buffer.so"
    if [ -d "${PLYMOUTH_THEME_DIR}" ]; then
        add_full_dir "${PLYMOUTH_THEME_DIR}"
    fi
    # Some themes keep images outside the theme dir; copy those too.
    if [ "${PLYMOUTH_IMAGE_DIR}" != "${PLYMOUTH_THEME_DIR}" ] && [ -d "${PLYMOUTH_IMAGE_DIR}" ]; then
        add_full_dir "${PLYMOUTH_IMAGE_DIR}"
    fi
    add_udev_rule 70-uaccess.rules
    add_udev_rule 71-seat.rules
    map add_systemd_unit \
        systemd-ask-password-plymouth.path \
        systemd-ask-password-plymouth.service \
        plymouth-halt.service \
        plymouth-kexec.service \
        plymouth-poweroff.service \
        plymouth-quit-wait.service \
        plymouth-quit.service \
        plymouth-read-write.service \
        plymouth-reboot.service \
        plymouth-start.service \
        plymouth-switch-root.service
}
# mkinitcpio help hook: text shown by `mkinitcpio -H` for this hook.
help() {
    cat <<HELPEOF
This hook includes plymouth in a systemd-based initramfs image.
HELPEOF
}
|
{
"pile_set_name": "Github"
}
|
[
{
"type": "api-change",
"category": "EC2",
"description": "As part of this release we are introducing EC2 On-Demand Capacity Reservations. With On-Demand Capacity Reservations, customers can reserve the exact EC2 capacity they need, and can keep it only for as long as they need it."
}
]
|
{
"pile_set_name": "Github"
}
|
/*
* Copyright (C) 2012 Indragie Karunaratne <i@indragie.com>
* All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - Neither the name of Indragie Karunaratne nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
@class SNRSearchViewController;

// Window controller hosting the application's search UI.
@interface SNRSearchWindowController : NSWindowController

// YES when the window was opened via the global shortcut — presumably used
// to vary dismissal behavior; TODO confirm at the call sites.
@property (nonatomic, assign) BOOL openedViaShortcut;

// View controller that supplies the search view displayed in this window.
@property (nonatomic, retain) IBOutlet SNRSearchViewController *searchViewController;

// Shared accessor (singleton-style naming; creation semantics live in the .m).
+ (SNRSearchWindowController*)sharedWindowController;

// Hide the search window.
- (IBAction)hideWindow:(id)sender;

// Toggle the search window's visibility.
- (IBAction)toggleVisible:(id)sender;

@end
|
{
"pile_set_name": "Github"
}
|
<ScrollView sdkExampleTitle sdkToggleNavButton>
<StackLayout>
<GridLayout rows="auto, auto, auto, auto">
<Label class="h3 p-15" row="0" text="Normalized Paths" textWrap="true"></Label>
<Label class="p-l-15 p-r-15 p-b-15" row="1" [text]="'File in documents folder: ' + documents" textWrap="true"></Label>
<Label class="p-l-15 p-r-15 p-b-15" row="2" [text]="'File in current app folder: ' + currentApp" textWrap="true"></Label>
<Label class="p-l-15 p-r-15 p-b-15" row="3" [text]="'File in temp folder: ' + temp" textWrap="true"></Label>
</GridLayout>
<GridLayout rows="auto, auto, auto, auto" columns="*, *">
<Label class="h3 p-15" row="0" col="0" colSpan="2" text="Writing content to file" textWrap="true"></Label>
<TextField class="p-15" row="1" col="0" hint="Enter Sample Text" [(ngModel)]="textContentToBeSaved" editable="true"></TextField>
<Button class="btn btn-primary btn-active" row="1" col="1" text="Save" (tap)="onSaveContentToFile()"></Button>
<Label class="p-15" row="2" col="0" text="Is content saved!?" textWrap="true"></Label>
<Label class="p-15" row="2" col="1" [text]="isContentSaved" textWrap="true"></Label>
<Label class="p-15" row="3" col="0" colSpan="2" [text]="'Saved content: ' + savedContent" textWrap="true"></Label>
</GridLayout>
</StackLayout>
</ScrollView>
|
{
"pile_set_name": "Github"
}
|
## Unordered
Asterisks tight:
* asterisk 1
* asterisk 2
* asterisk 3
Asterisks loose:
* asterisk 1
* asterisk 2
* asterisk 3
* * *
Pluses tight:
+ Plus 1
+ Plus 2
+ Plus 3
Pluses loose:
+ Plus 1
+ Plus 2
+ Plus 3
* * *
Minuses tight:
- Minus 1
- Minus 2
- Minus 3
Minuses loose:
- Minus 1
- Minus 2
- Minus 3
## Ordered
Tight:
1. First
2. Second
3. Third
and:
1. One
2. Two
3. Three
Loose using tabs:
1. First
2. Second
3. Third
and using spaces:
1. One
2. Two
3. Three
Multiple paragraphs:
1. Item 1, graf one.
Item 2. graf two. The quick brown fox jumped over the lazy dog's
back.
2. Item 2.
3. Item 3.
## Nested
* Tab
* Tab
* Tab
Here's another:
1. First
2. Second:
* Fee
* Fie
* Foe
3. Third
Same thing but with paragraphs:
1. First
2. Second:
* Fee
* Fie
* Foe
3. Third
This was an error in Markdown 1.0.1:
* this
* sub
that
|
{
"pile_set_name": "Github"
}
|
package com.deliveredtechnologies.rulebook.model;
import com.deliveredtechnologies.rulebook.NameValueReferableMap;
import com.deliveredtechnologies.rulebook.NameValueReferableTypeConvertibleMap;
import com.deliveredtechnologies.rulebook.Result;
import com.deliveredtechnologies.rulebook.RuleState;
import com.deliveredtechnologies.rulebook.FactMap;
import com.deliveredtechnologies.rulebook.Fact;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.Mockito;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.function.Predicate;
/**
* Tests for {@link GoldenRule}.
*/
/**
 * Tests for {@link GoldenRule}.
 */
public class GoldenRuleTest {

  // addFacts() merges new facts into the rule's existing fact map.
  @Test
  public void addFactsShouldAddFactsToTheRule() {
    NameValueReferableMap<String> facts = new FactMap<>();
    Rule<String, Object> rule = new GoldenRule<>(String.class);
    facts.setValue("fact1", "Fact One");
    facts.setValue("fact2", "Fact Two");
    rule.addFacts(new Fact<>("hello", "world"));
    rule.addFacts(facts);
    Assert.assertEquals(3, rule.getFacts().size());
    Assert.assertEquals("Fact One", rule.getFacts().getValue("fact1"));
    Assert.assertEquals("Fact Two", rule.getFacts().getValue("fact2"));
    Assert.assertEquals("world", rule.getFacts().getValue("hello"));
  }

  // setFacts() replaces the rule's fact map entirely (same reference kept).
  @Test
  public void setFactsShouldOverwriteExistingFacts() {
    NameValueReferableMap<String> facts = new FactMap<>();
    Rule<String, Object> rule = new GoldenRule<>(String.class);
    facts.setValue("fact1", "Fact One");
    facts.setValue("fact2", "Fact Two");
    rule.addFacts(new Fact<>("hello", "world"));
    rule.setFacts(facts);
    Assert.assertEquals(2, rule.getFacts().size());
    Assert.assertEquals("Fact One", rule.getFacts().getValue("fact1"));
    Assert.assertEquals("Fact Two", rule.getFacts().getValue("fact2"));
    // Identity check: the very same map instance must be stored.
    Assert.assertTrue(facts == rule.getFacts());
  }

  // The condition object set on the rule is stored by reference.
  @Test
  public void setConditionShouldSetTheCondition() {
    Predicate<NameValueReferableTypeConvertibleMap<String>> condition = facts -> true;
    Rule<String, Object> rule = new GoldenRule<>(String.class);
    rule.setCondition(condition);
    Assert.assertTrue(condition == rule.getCondition());
  }

  // Rule state round-trips through setRuleState()/getRuleState().
  @Test
  @SuppressWarnings("unchecked")
  public void setRuleStateShouldSetTheRuleState() {
    Rule rule = new GoldenRule(Object.class);
    rule.setRuleState(RuleState.BREAK);
    Assert.assertEquals(RuleState.BREAK, rule.getRuleState());
    rule.setRuleState(RuleState.NEXT);
    Assert.assertEquals(RuleState.NEXT, rule.getRuleState());
  }

  // Both Consumer and BiConsumer actions land in the rule's action list.
  @Test
  public void addingActionsAddsActionsToTheActionList() {
    Consumer<NameValueReferableTypeConvertibleMap<String>> consumer = facts -> facts.setValue("fact1", "Fact1");
    BiConsumer<NameValueReferableTypeConvertibleMap<String>, Result<String>> biConsumer =
        (facts, result) -> result.setValue("result");
    Rule<String, String> rule = new GoldenRule<>(String.class);
    rule.addAction(consumer);
    rule.addAction(biConsumer);
    Assert.assertTrue(rule.getActions().contains(consumer));
    Assert.assertTrue(rule.getActions().contains(biConsumer));
    Assert.assertEquals(2, rule.getActions().size());
  }

  // A rule starts with an empty Optional result until one is set.
  @Test
  public void settingTheResultSetsTheResult() {
    Rule<String, String> rule = new GoldenRule<>(String.class);
    Assert.assertFalse(rule.getResult().isPresent());
    rule.setResult(new Result<>("My Result"));
    Assert.assertEquals("My Result", rule.getResult().get().getValue());
  }

  // Adding the same action instance repeatedly must not cause it to be
  // invoked more than once when the rule fires.
  @Test
  @SuppressWarnings("unchecked")
  public void addingDuplicateActionsFindsOnlyOneActionAdded() {
    Rule<String, String> rule = new GoldenRule<>(String.class);
    Result<String> result = new Result<>("result value");
    rule.setResult(result);
    rule.setCondition(whatever -> true);
    Consumer<NameValueReferableTypeConvertibleMap<String>> consumer = Mockito.mock(Consumer.class);
    rule.addAction(consumer);
    rule.addAction(consumer);
    BiConsumer<NameValueReferableTypeConvertibleMap<String>, Result<String>> biConsumer =
        Mockito.mock(BiConsumer.class);
    rule.addAction(biConsumer);
    rule.addAction(biConsumer);
    rule.addAction(consumer);
    rule.invoke(new FactMap<>());
    Mockito.verify(consumer, Mockito.times(1))
        .accept(Mockito.any(NameValueReferableTypeConvertibleMap.class));
    Mockito.verify(biConsumer, Mockito.times(1))
        .accept(Mockito.any(NameValueReferableTypeConvertibleMap.class), Mockito.any(Result.class));
  }

  // In ERROR_ON_FAILURE mode a condition that throws surfaces as RuleException.
  @Test(expected = RuleException.class)
  public void rulesSetToErrorOnFailureThrowExceptionsInWhen() {
    Rule<String, String> rule = new GoldenRule<>(String.class, RuleChainActionType.ERROR_ON_FAILURE);
    rule.setCondition(facts -> facts.getValue("some fact").equals("nothing"));
    rule.invoke(new FactMap<String>());
  }

  // In ERROR_ON_FAILURE mode an action that throws surfaces as RuleException.
  @Test(expected = RuleException.class)
  public void rulesToErrorOnFailureThrowExceptionsInActions() {
    Rule<String, String> rule = new GoldenRule<>(String.class, RuleChainActionType.ERROR_ON_FAILURE);
    rule.setCondition(facts -> true);
    rule.addAction(facts -> System.out.println(facts.getValue("some fact").toLowerCase()));
    rule.invoke(new FactMap<String>());
  }
}
|
{
"pile_set_name": "Github"
}
|
using System;
// Value type wrapping a single integer; stringifies to the integer's value.
public struct Struct {
    public int I;

    // Delegate to the standard numeric formatting of the wrapped value.
    public override string ToString () {
        return string.Format("{0}", I);
    }
}
// Demo entry point: fill an array of structs with their indices and print
// one per line (0 through 9).
public static class Program {
    public static void Main (string[] args) {
        var values = new Struct[10];
        var index = 0;
        while (index < values.Length) {
            values[index] = new Struct { I = index };
            index++;
        }
        foreach (var item in values)
            Console.WriteLine(item);
    }
}
|
{
"pile_set_name": "Github"
}
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.timeline.security;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.timeline.TimelineStore;
import org.apache.hadoop.yarn.server.timeline.security.TimelineACLsManager;
import org.junit.Assert;
import org.junit.Test;
// Unit tests for TimelineACLsManager access checks under different
// YARN ACL configurations.
public class TestTimelineACLsManager {

  // With ACLs disabled, any user may access any entity.
  @Test
  public void testYarnACLsNotEnabled() throws Exception {
    Configuration conf = new YarnConfiguration();
    conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, false);
    TimelineACLsManager timelineACLsManager =
        new TimelineACLsManager(conf);
    TimelineEntity entity = new TimelineEntity();
    entity.addPrimaryFilter(
        TimelineStore.SystemFilter.ENTITY_OWNER
            .toString(), "owner");
    Assert.assertTrue(
        "Always true when ACLs are not enabled",
        timelineACLsManager.checkAccess(
            UserGroupInformation.createRemoteUser("user"), entity));
  }

  // With ACLs enabled: the owner and admins may access, other users may not.
  @Test
  public void testYarnACLsEnabled() throws Exception {
    Configuration conf = new YarnConfiguration();
    conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
    conf.set(YarnConfiguration.YARN_ADMIN_ACL, "admin");
    TimelineACLsManager timelineACLsManager =
        new TimelineACLsManager(conf);
    TimelineEntity entity = new TimelineEntity();
    entity.addPrimaryFilter(
        TimelineStore.SystemFilter.ENTITY_OWNER
            .toString(), "owner");
    Assert.assertTrue(
        "Owner should be allowed to access",
        timelineACLsManager.checkAccess(
            UserGroupInformation.createRemoteUser("owner"), entity));
    Assert.assertFalse(
        "Other shouldn't be allowed to access",
        timelineACLsManager.checkAccess(
            UserGroupInformation.createRemoteUser("other"), entity));
    Assert.assertTrue(
        "Admin should be allowed to access",
        timelineACLsManager.checkAccess(
            UserGroupInformation.createRemoteUser("admin"), entity));
  }

  // An entity lacking owner info must raise a YarnException mentioning
  // corruption rather than silently allowing or denying access.
  @Test
  public void testCorruptedOwnerInfo() throws Exception {
    Configuration conf = new YarnConfiguration();
    conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
    conf.set(YarnConfiguration.YARN_ADMIN_ACL, "owner");
    TimelineACLsManager timelineACLsManager =
        new TimelineACLsManager(conf);
    // Note: no ENTITY_OWNER primary filter is added here on purpose.
    TimelineEntity entity = new TimelineEntity();
    try {
      timelineACLsManager.checkAccess(
          UserGroupInformation.createRemoteUser("owner"), entity);
      Assert.fail("Exception is expected");
    } catch (YarnException e) {
      Assert.assertTrue("It's not the exact expected exception", e.getMessage()
          .contains("is corrupted."));
    }
  }
}
|
{
"pile_set_name": "Github"
}
|
// (C) Copyright John Maddock 2001.
// Use, modification and distribution are subject to the
// Boost Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
// See http://www.boost.org/libs/config for most recent version.
// MACRO: BOOST_HAS_PTHREAD_MUTEXATTR_SETTYPE
// TITLE: pthread_mutexattr_settype
// DESCRIPTION: The platform supports POSIX API pthread_mutexattr_settype.
#include <pthread.h>
namespace boost_has_pthread_mutexattr_settype{

// Feature probe: references pthread_mutexattr_settype so that this
// translation unit only compiles when the platform provides the API.
void f()
{
    // this is never called, it just has to compile:
    pthread_mutexattr_t attr;
    pthread_mutexattr_init(&attr);
    int type = 0;
    pthread_mutexattr_settype(&attr, type);
}

// Boost.Config test entry point; returning zero signals success.
int test()
{
    return 0;
}

}
|
{
"pile_set_name": "Github"
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.