text stringlengths 1 1.05M |
|---|
import { IClosable } from '../ui';
/** Deposit target returned by the backend: the on-chain address plus an
 *  optional memo/tag (some chains require it — presumably XLM/XRP-style;
 *  TODO confirm with the deposit API). */
export interface IDepositAddressData {
  address: string;
  memo?: string;
}

/** Wrapper for a single currency code (e.g. "BTC"). */
export interface IHoldingCurrencyCode {
  currencyCode: string;
}

/** Keys of the balance-related modals managed in IBalanceModalsState. */
export type BalanceModalNames = 'depositCoins' | 'withdrawCoins' | 'simplex';

/** State slice holding the props of every balance modal, keyed by name. */
export interface IBalanceModalsState {
  depositCoins: IDepositCoinsModal;
  withdrawCoins: IWithdrawCoinsModal;
  simplex: ISimplexModal;
}

/** Props for the Simplex (fiat on-ramp) modal; null until data is loaded. */
export interface ISimplexModal extends IClosable {
  address: IDepositAddressData | null;
  currency: string | null;
}

/** Props for the deposit modal; null fields until the address is fetched. */
export interface IDepositCoinsModal extends IClosable {
  currencyCode: string | null;
  address: IDepositAddressData | null;
}

/** Props for the withdraw modal. */
export interface IWithdrawCoinsModal extends IClosable {
  currencyCode: string | null;
}

/** Action payload: patch a subset of one modal's props, selected by name.
 *  The generic ties `props` to the shape of the named modal. */
export interface ISetBalanceModalPropsPayload<T extends BalanceModalNames> {
  name: T;
  props: Partial<IBalanceModalsState[T]>;
}

/** Values collected by the withdraw form. */
export interface IWithdrawCoinsFormData {
  amount: number;
  address: string;
}

/** A single currency balance entry. */
export interface ICurrencyBalance {
  code: string;
  value: number;
}

/** Balances per user — presumably keyed by user id; TODO confirm at the call site. */
export type UsersBalance = Record<string, ICurrencyBalance[]>;

/** Fee configuration for withdrawals.
 *  NOTE(review): "Commision"/"Comission" look like misspellings of
 *  "Commission", but renaming would break existing consumers — confirm
 *  against the backend contract before fixing. */
export interface IWithdrawSettings {
  withdrawFeePercentage: number;
  blockchainCommisionPercentage: number;
  minimumComissionAmount: number;
}

/** Currency code -> numeric balance. */
export interface IBalanceDict {
  [code: string]: number;
}

/** Single saved withdrawal address: label -> address. */
export interface ISavedWithdrawalAddress {
  [label: string]: string;
}

/** Saved withdrawal addresses grouped by currency code. */
export interface ISavedWithdrawalAddresses {
  [code: string]: ISavedWithdrawalAddress[];
}
|
<filename>src/js/electron/menu/actions/searchActions.ts
import {
appendQueryCountBy,
appendQueryExclude,
appendQueryIn,
appendQueryInclude,
appendQueryNotIn,
appendQuerySortBy
} from "../../../flows/searchBar/actions"
import {downloadPcap} from "../../../flows/downloadPcap"
import {submitSearch} from "../../../flows/submitSearch/mod"
import {viewLogDetail} from "../../../flows/viewLogDetail"
import ErrorFactory from "../../../models/ErrorFactory"
import Layout from "../../../state/Layout/actions"
import Modal from "../../../state/Modal"
import Notice from "../../../state/Notice"
import SearchBar from "../../../state/SearchBar"
import action from "./action"
import brim from "../../../brim"
import open from "../../../lib/open"
import scrollToLog from "../../../flows/scrollToLog"
import tab from "../../../state/Tab"
import virusTotal from "../../../services/virusTotal"
import {zng} from "zealot"
import {createCell} from "../../../brim/cell"
/**
 * Builds the map of context-menu actions available from a search-results
 * cell (filter, sort, pivot, time-range and external lookups). Each action
 * receives serialized zng data from the Electron menu, re-hydrates it, and
 * dispatches the corresponding flow.
 */
function buildSearchActions() {
  return {
    // Append a "count by <field>" aggregation to the query and re-run it.
    countBy: action({
      name: "search-cell-menu-count-by",
      label: "Count by field",
      listener(dispatch, data: zng.SerializedField) {
        const f = zng.Field.deserialize(data)
        dispatch(appendQueryCountBy(f))
        dispatch(submitSearch())
      }
    }),
    // Open the record in the right-hand detail sidebar.
    detail: action({
      name: "search-cell-menu-detail",
      label: "Open details",
      listener(dispatch, data: zng.SerializedRecord) {
        const record = zng.Record.deserialize(data)
        dispatch(Layout.showRightSidebar())
        dispatch(viewLogDetail(record))
      }
    }),
    // Append a "field != value" filter and re-run the search.
    exclude: action({
      name: "search-cell-menu-exclude",
      label: "Filter != value",
      listener(dispatch, data: zng.SerializedField) {
        dispatch(appendQueryExclude(zng.Field.deserialize(data)))
        dispatch(submitSearch())
      }
    }),
    // Replace the whole search bar with just this cell's value.
    freshInclude: action({
      name: "search-cell-menu-fresh-include",
      label: "New search with this value",
      listener(dispatch, data: zng.SerializedField) {
        const cell = createCell(zng.Field.deserialize(data))
        dispatch(SearchBar.clearSearchBar())
        dispatch(SearchBar.changeSearchBarInput(cell.queryableValue()))
        dispatch(submitSearch())
      }
    }),
    // Use a time-typed cell as the span's start time. No-op for other types.
    fromTime: action({
      name: "search-cell-menu-from-time",
      label: 'Use as "start" time',
      listener(dispatch, data: zng.SerializedField) {
        const field = zng.Field.deserialize(data)
        // NOTE(review): this uses field.data.getType() while jumpToTime and
        // toTime read field.data.type — confirm both resolve to the same
        // value in zealot; one of the three is likely wrong.
        if (field.data.getType() === "time") {
          dispatch(
            tab.setFrom(
              brim.time((field.data as zng.Primitive).toDate()).toTs()
            )
          )
          dispatch(submitSearch())
        }
      }
    }),
    // Rewrite an aggregation query into a raw-log query for this row.
    groupByDrillDown: action({
      name: "search-cell-menu-pivot-to-logs",
      label: "Pivot to logs",
      listener(dispatch, program: string, data: zng.SerializedRecord) {
        const record = zng.Record.deserialize(data)
        const newProgram = brim
          .program(program)
          .drillDown(record)
          .string()
        // drillDown may produce an empty program; only submit when it didn't.
        if (newProgram) {
          dispatch(SearchBar.clearSearchBar())
          dispatch(SearchBar.changeSearchBarInput(newProgram))
          dispatch(submitSearch())
        }
      }
    }),
    // Append a "field = value" filter and re-run the search.
    include: action({
      name: "search-cell-menu-include",
      label: "Filter = value",
      listener(dispatch, data: zng.SerializedField) {
        dispatch(appendQueryInclude(zng.Field.deserialize(data)))
        dispatch(submitSearch())
      }
    }),
    // Append an "in field" filter for this cell and re-run the search.
    in: action({
      name: "search-cell-menu-in",
      label: "Filter in field",
      listener(dispatch, data: zng.SerializedField) {
        dispatch(appendQueryIn(createCell(zng.Field.deserialize(data))))
        dispatch(submitSearch())
      }
    }),
    // Re-center the view on a +/- 1 minute window around a time cell, then
    // scroll to the originating record once the search resolves.
    jumpToTime: action({
      name: "search-cell-menu-show-context",
      label: "View in full context",
      listener(
        dispatch,
        fieldData: zng.SerializedField,
        recordData: zng.SerializedRecord
      ) {
        const field = zng.Field.deserialize(fieldData)
        const record = zng.Record.deserialize(recordData)
        // NOTE(review): brimTime is computed (with a Primitive cast and
        // toDate()) before the "time" type check below — if a non-time
        // field ever reaches this listener the cast runs anyway. Consider
        // moving this line inside the if.
        const brimTime = brim.time((field.data as zng.Primitive).toDate())
        if (field.data.type === "time") {
          dispatch(tab.setFrom(brimTime.subtract(1, "minutes").toTs()))
          dispatch(tab.setTo(brimTime.add(1, "minutes").toTs()))
          dispatch(SearchBar.clearSearchBar())
          dispatch(submitSearch())
            .then(() => {
              dispatch(scrollToLog(record))
            })
            .catch((error) => {
              console.error(error)
              dispatch(Notice.set(ErrorFactory.create(error)))
            })
        }
      }
    }),
    // Append a "not in field" filter and re-run the search.
    notIn: action({
      name: "search-cell-menu-not-in",
      label: "Filter not in field",
      listener(dispatch, data: zng.SerializedField) {
        dispatch(appendQueryNotIn(createCell(zng.Field.deserialize(data))))
        dispatch(submitSearch())
      }
    }),
    // Debug helper: dump the serialized record and field to the console.
    logResult: action({
      name: "search-cell-menu-log-result",
      label: "Log result to console",
      listener(
        _dispatch,
        field: zng.SerializedField,
        log: zng.SerializedRecord
      ) {
        console.log(JSON.stringify(log))
        console.log(JSON.stringify(field))
      }
    }),
    // Download the packet capture associated with this record.
    pcaps: action({
      name: "search-cell-menu-pcaps",
      label: "Download PCAPS",
      listener(dispatch, data: zng.SerializedRecord) {
        dispatch(downloadPcap(zng.Record.deserialize(data)))
      }
    }),
    // Sort ascending by this field and re-run the search.
    sortAsc: action({
      name: "search-cell-menu-sort-asc",
      label: "Sort A...Z",
      listener(dispatch, data: zng.SerializedField) {
        const field = zng.Field.deserialize(data)
        dispatch(appendQuerySortBy(field.name, "asc"))
        dispatch(submitSearch())
      }
    }),
    // Sort descending by this field and re-run the search.
    sortDesc: action({
      name: "search-cell-menu-sort-desc",
      label: "Sort Z...A",
      listener(dispatch, data: zng.SerializedField) {
        const field = zng.Field.deserialize(data)
        dispatch(appendQuerySortBy(field.name, "desc"))
        dispatch(submitSearch())
      }
    }),
    // Use a time-typed cell as the span's end time (+1ms so the row itself
    // is included). No-op for other types.
    toTime: action({
      name: "search-cell-menu-to-time",
      label: 'Use as "end" time',
      listener(dispatch, data: zng.SerializedField) {
        const field = zng.Field.deserialize(data)
        if (field.data.type === "time") {
          dispatch(
            tab.setTo(
              brim
                .time((field.data as zng.Primitive).toDate())
                .add(1, "ms")
                .toTs()
            )
          )
          dispatch(submitSearch())
        }
      }
    }),
    // Open the VirusTotal lookup page for this value in the browser.
    virusTotalRightclick: action({
      name: "search-cell-menu-virus-total",
      label: "VirusTotal Lookup",
      listener(dispatch, data: zng.SerializedField) {
        const field = zng.Field.deserialize(data)
        // Only primitives with a set (non-null) value can be looked up.
        if (field.data instanceof zng.Primitive && field.data.isSet()) {
          open(virusTotal.url(field.data.getValue() as string))
        }
      }
    }),
    // Open the whois modal for this address.
    whoisRightclick: action({
      name: "search-cell-menu-who-is",
      label: "Whois Lookup",
      listener(dispatch, data: zng.SerializedField) {
        const field = zng.Field.deserialize(data)
        // NOTE(review): reads field.data.value directly while
        // virusTotalRightclick goes through getValue() — confirm both are
        // valid accessors on zealot's data types.
        dispatch(Modal.show("whois", {addr: field.data.value}))
      }
    })
  }
}

export default buildSearchActions()
|
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/micro/memory_helpers.h"
#include <cstddef>
#include <cstdint>
#include "flatbuffers/flatbuffers.h" // from @flatbuffers
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/core/api/flatbuffer_conversions.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
// Rounds `data` up to the nearest multiple of `alignment` (a no-op when the
// pointer is already aligned). Works for any positive alignment, not just
// powers of two, because it uses divide/multiply rather than bit masking.
uint8_t* AlignPointerUp(uint8_t* data, size_t alignment) {
  const std::uintptr_t value = reinterpret_cast<std::uintptr_t>(data);
  const std::uintptr_t rounded = ((value + alignment - 1) / alignment) * alignment;
  return reinterpret_cast<uint8_t*>(rounded);
}
// Rounds `data` down to the nearest multiple of `alignment` (a no-op when the
// pointer is already aligned). Divide/multiply keeps this correct for
// non-power-of-two alignments as well.
uint8_t* AlignPointerDown(uint8_t* data, size_t alignment) {
  const std::uintptr_t value = reinterpret_cast<std::uintptr_t>(data);
  return reinterpret_cast<uint8_t*>((value / alignment) * alignment);
}
// Returns the smallest multiple of `alignment` that is >= `size`.
size_t AlignSizeUp(size_t size, size_t alignment) {
  return ((size + alignment - 1) / alignment) * alignment;
}
// Writes the per-element size in bytes of `type` into `*size`.
// Returns kTfLiteError for any type not handled below (e.g. string or
// variant types, which have no fixed element size here).
TfLiteStatus TfLiteTypeSizeOf(TfLiteType type, size_t* size) {
  switch (type) {
    case kTfLiteFloat32:
      *size = sizeof(float);
      break;
    case kTfLiteInt16:
      *size = sizeof(int16_t);
      break;
    case kTfLiteInt32:
      *size = sizeof(int32_t);
      break;
    case kTfLiteUInt8:
      *size = sizeof(uint8_t);
      break;
    case kTfLiteInt8:
      *size = sizeof(int8_t);
      break;
    case kTfLiteInt64:
      *size = sizeof(int64_t);
      break;
    case kTfLiteBool:
      *size = sizeof(bool);
      break;
    // Complex types are stored as two scalars (real, imaginary).
    case kTfLiteComplex64:
      *size = sizeof(float) * 2;
      break;
    case kTfLiteComplex128:
      *size = sizeof(double) * 2;
      break;
    default:
      return kTfLiteError;
  }
  return kTfLiteOk;
}
// Computes the byte size required to hold the data of a serialized tensor:
// *type_size receives the per-element size, *bytes the total payload size.
// Propagates failures from type conversion / unsupported element types.
TfLiteStatus BytesRequiredForTensor(const tflite::Tensor& flatbuffer_tensor,
                                    size_t* bytes, size_t* type_size,
                                    ErrorReporter* error_reporter) {
  // NOTE(review): element_count is a signed int; very large shapes could
  // overflow it before the multiply into size_t — confirm upstream bounds.
  int element_count = 1;
  // If flatbuffer_tensor.shape == nullptr, then flatbuffer_tensor is a scalar
  // so has 1 element.
  if (flatbuffer_tensor.shape() != nullptr) {
    for (size_t n = 0; n < flatbuffer_tensor.shape()->Length(); ++n) {
      element_count *= flatbuffer_tensor.shape()->Get(n);
    }
  }
  TfLiteType tf_lite_type;
  // Map the flatbuffer type enum onto the runtime TfLiteType.
  TF_LITE_ENSURE_STATUS(ConvertTensorType(flatbuffer_tensor.type(),
                                          &tf_lite_type, error_reporter));
  TF_LITE_ENSURE_STATUS(TfLiteTypeSizeOf(tf_lite_type, type_size));
  *bytes = element_count * (*type_size);
  return kTfLiteOk;
}
// Sizes `output` to match the higher-rank of the two inputs: allocates a
// persistent TfLiteIntArray for output->dims, copies that input's dims into
// it, and sets output->bytes to the total payload size.
// Preconditions enforced: both inputs have dims, output->dims is an empty
// (size 0) array on entry, and output->type matches the chosen input's type.
TfLiteStatus AllocateOutputDimensionsFromInput(TfLiteContext* context,
                                               const TfLiteTensor* input1,
                                               const TfLiteTensor* input2,
                                               TfLiteTensor* output) {
  const TfLiteTensor* input = nullptr;
  TF_LITE_ENSURE(context, input1->dims != nullptr);
  TF_LITE_ENSURE(context, input2->dims != nullptr);
  TF_LITE_ENSURE(context, output->dims->size == 0);

  // Use the input with the larger rank as the shape template.
  input = input1->dims->size > input2->dims->size ? input1 : input2;
  TF_LITE_ENSURE(context, output->type == input->type);

  size_t size;
  // Bug fix: the status of TfLiteTypeSizeOf was previously ignored, so an
  // unsupported element type would have left `size` uninitialized.
  TF_LITE_ENSURE_STATUS(TfLiteTypeSizeOf(input->type, &size));
  const int dimensions_count = tflite::GetTensorShape(input).DimensionsCount();
  for (int i = 0; i < dimensions_count; i++) {
    size *= input->dims->data[i];
  }
  output->bytes = size;

  // Bug fix: TfLiteIntArrayGetSizeInBytes takes the number of int entries in
  // the array, not the tensor's payload size; passing `size` over-allocated
  // (or could fail allocation for) the dims array.
  TF_LITE_ENSURE_STATUS(context->AllocatePersistentBuffer(
      context, TfLiteIntArrayGetSizeInBytes(input->dims->size),
      reinterpret_cast<void**>(&output->dims)));
  output->dims->size = input->dims->size;
  for (int i = 0; i < dimensions_count; i++) {
    output->dims->data[i] = input->dims->data[i];
  }
  return kTfLiteOk;
}
} // namespace tflite
|
class Expression:
    """Abstract base class for expression-tree nodes.

    Concrete subclasses must define an ``opkind`` attribute and implement
    :meth:`to_expr_string`.
    """

    def to_expr_string(self) -> str:
        """Render this node (and its children) as an infix expression string."""
        raise NotImplementedError("Subclasses must implement to_expr_string method")

    def __str__(self) -> str:
        # Relies on the subclass providing `opkind`; the base class itself
        # defines no such attribute.
        return str(self.opkind)


class BinaryExpression(Expression):
    """A binary operation (``opkind``) applied to two sub-expressions."""

    def __init__(self, opkind: str, left: Expression, right: Expression):
        self.opkind = opkind
        self.left = left
        self.right = right

    def to_expr_string(self) -> str:
        """Return the parenthesized infix rendering, e.g. ``(a + b)``."""
        lhs = self.left.to_expr_string()
        rhs = self.right.to_expr_string()
        return f"({lhs} {self.opkind} {rhs})"
<filename>Sources/SWExtensions.h
//
// SWExtensions.h
// SWExtensions
//
// Created by <NAME> on 2019/6/18.
// Copyright © 2019 Seven. All rights reserved.
//
#import <UIKit/UIKit.h>
//! Project version number for SWExtensions.
FOUNDATION_EXPORT double SWExtensionsVersionNumber;
//! Project version string for SWExtensions.
FOUNDATION_EXPORT const unsigned char SWExtensionsVersionString[];
// In this header, you should import all the public headers of your framework using statements like #import <SWExtensions/PublicHeader.h>
|
#!/usr/bin/env bash
# Copyright 2010-2012 Microsoft Corporation Johns Hopkins University (Author: Daniel Povey)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABLITY OR NON-INFRINGEMENT.
# See the Apache 2 License for the specific language governing permissions and
# limitations under the License.
# Call this script from one level above, e.g. from the s3/ directory. It puts
# its output in data/local/.
# The parts of the output of this that will be needed are
# [in data/local/dict/ ]
# lexicon.txt
# extra_questions.txt
# nonsilence_phones.txt
# optional_silence.txt
# silence_phones.txt
# run this from ../
# All outputs land in data/local/dict.
dir=data/local/dict
mkdir -p $dir

# (1) Get the CMU dictionary
svn co https://svn.code.sf.net/p/cmusphinx/code/trunk/cmudict \
  $dir/cmudict || exit 1;

# can add -r 10966 for strict compatibility.

#(2) Dictionary preparation:

# Make phones symbol-table (adding in silence and verbal and non-verbal noises at this point).
# We are adding suffixes _B, _E, _S for beginning, ending, and singleton phones.

# silence phones, one per line.
# SIL = silence, SPN = spoken noise, NSN = non-spoken noise.
(echo SIL; echo SPN; echo NSN) > $dir/silence_phones.txt
echo SIL > $dir/optional_silence.txt

# nonsilence phones; on each line is a list of phones that correspond
# really to the same base phone.
# The first perl strips Windows carriage returns; the second groups each
# stressed variant (e.g. AA0 AA1 AA2) onto one line under its base phone.
cat $dir/cmudict/cmudict.0.7a.symbols | perl -ane 's:\r::; print;' | \
  perl -e 'while(<>){
  chop; m:^([^\d]+)(\d*)$: || die "Bad phone $_";
  $phones_of{$1} .= "$_ "; }
  foreach $list (values %phones_of) {print $list . "\n"; } ' \
  > $dir/nonsilence_phones.txt || exit 1;

# A few extra questions that will be added to those obtained by automatically clustering
# the "real" phones. These ask about stress; there's also one for silence.
cat $dir/silence_phones.txt| awk '{printf("%s ", $1);} END{printf "\n";}' > $dir/extra_questions.txt || exit 1;
# Group phones by stress marker (the digit suffix) — one question per stress level.
cat $dir/nonsilence_phones.txt | perl -e 'while(<>){ foreach $p (split(" ", $_)) {
  $p =~ m:^([^\d]+)(\d*)$: || die "Bad phone $_"; $q{$2} .= "$p "; } } foreach $l (values %q) {print "$l\n";}' \
  >> $dir/extra_questions.txt || exit 1;

# Strip the comment lines and the "(2)"-style alternate-pronunciation markers
# from the raw CMU dictionary.
grep -v ';;;' $dir/cmudict/cmudict.0.7a | \
  perl -ane 'if(!m:^;;;:){ s:(\S+)\(\d+\) :$1 :; print; }' \
  > $dir/lexicon1_raw_nosil.txt || exit 1;

# Add to cmudict the silences, noises etc.
(echo '!SIL SIL'; echo '<SPOKEN_NOISE> SPN'; echo '<UNK> SPN'; echo '<NOISE> NSN'; ) | \
  cat - $dir/lexicon1_raw_nosil.txt | uniq > $dir/lexicon2_raw.txt || exit 1;

# lexicon.txt is without the _B, _E, _S, _I markers.
# This is the input to wsj_format_data.sh
cp $dir/lexicon2_raw.txt $dir/lexicon.txt

echo "Dictionary preparation succeeded"
|
#!/bin/sh
# Install PHP 7.4 (php-fpm) on CentOS/RHEL 7 from the EPEL + IUS archive
# repositories, then start and enable the php-fpm service.
#
# Bug fix: the original script had no error handling at all — a failed
# download or rpm install was silently ignored and the cleanup at the end
# ran regardless. `set -e` aborts on the first failing command.
set -e

wget https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
wget https://repo.ius.io/ius-release-el7.rpm
rpm -ivh epel-release-latest-7.noarch.rpm
rpm -ivh ius-release-el7.rpm
yum --enablerepo=ius-archive install -y php74*
systemctl start php-fpm
systemctl enable php-fpm

# Remove the downloaded release RPMs now that the repos are installed.
rm -f epel-release-latest-7.noarch.rpm
rm -f ius-release-el7.rpm
|
#!/usr/bin/bash
# Copyright (c) 2021. Huawei Technologies Co.,Ltd.ALL rights reserved.
# This program is licensed under Mulan PSL v2.
# You can use it according to the terms and conditions of the Mulan PSL v2.
# http://license.coscl.org.cn/MulanPSL2
# THIS PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
# #############################################
# @Author : wangshan
# @Contact : wangshan@163.com
# @Date : 2020-10-15
# @License : Mulan PSL v2
# @Desc : pg_dump
# ############################################
source ../common/lib.sh
# Set up: install and initialize PostgreSQL (helper from common/lib.sh).
function pre_test() {
    LOG_INFO "Start to prepare the test environment."
    postgresql_install
    LOG_INFO "End to prepare the test environment."
}

# Exercise pg_dump options: compression, formats, inserts, row security,
# table exclusion and connection flags. CHECK_RESULT compares the exit
# status (second argument = expected non-zero status, default expects 0).
function run_test() {
    LOG_INFO "Start to run test."
    # -Ft and -Fd are conflicting format flags — presumably this case is
    # expected to behave per the second CHECK_RESULT below; TODO confirm
    # the intended exit codes against common/lib.sh.
    su - postgres -c "pg_dump -Z 1 -Ft -Fd testdb -f tmpdir >testfile"
    CHECK_RESULT $?
    grep "grant" /var/lib/pgsql/testfile && rm -rf /var/lib/pgsql/tmpdir
    CHECK_RESULT $? 1
    # Plain-text dump (no compression) should end with the completion marker.
    su - postgres -c "pg_dump testdb -Z 0" | grep "database dump complete"
    CHECK_RESULT $?
    pg_dump -? | grep "Usage:"
    CHECK_RESULT $?
    # --column-inserts emits INSERT statements with explicit column lists.
    su - postgres -c "pg_dump testdb --column-inserts >testfile"
    CHECK_RESULT $?
    grep "INSERT INTO public.test (id, val) VALUES (10000" /var/lib/pgsql/testfile
    CHECK_RESULT $?
    su - postgres -c "pg_dump testdb --enable-row-security >testfile"
    CHECK_RESULT $?
    grep "SET row_security = on" /var/lib/pgsql/testfile
    CHECK_RESULT $?
    # Excluding tab_big's data must still keep test's data section.
    su - postgres -c "pg_dump testdb --exclude-table-data=tab_big >testfile"
    CHECK_RESULT $?
    grep "Data for Name: test; Type: TABLE DATA; Schema: " /var/lib/pgsql/testfile
    CHECK_RESULT $?
    su - postgres -c "pg_dump testdb --inserts >testfile"
    CHECK_RESULT $?
    grep "INSERT INTO public.test VALUES (1000" /var/lib/pgsql/testfile
    CHECK_RESULT $?
    # Connection options: host/user/port with no password prompt (-w).
    su - postgres -c "pg_dump testdb -h 127.0.0.1 -U postgres -w -p 5432"
    CHECK_RESULT $?
    LOG_INFO "End to run test."
}

# Tear down: stop the service, remove packages and data files.
function post_test() {
    LOG_INFO "Start to restore the test environment."
    systemctl stop postgresql
    DNF_REMOVE
    rm -rf /var/lib/pgsql/*
    LOG_INFO "End to restore the test environment."
}
main "$@"
|
<gh_stars>1-10
#include <Common.hpp>
#include "../API-Headers/ExampleCTypes.hpp"
CECS_MODULE("ExampleCTypes")
using namespace vkp;
static vkpBuildVersioner BV1(1, VERSION_NUMBER);
extern "C" {
// --- User defined C-API functions ---
// Demonstrates passing a struct across the C boundary: validates the
// pointers, logs every field of the Python-allocated ExampleCTypes_InitData,
// then mutates integer_/float_ in place so the caller can observe the change.
// Returns 0 on success; _ERRI returns an error code on null inputs.
int ExampleCTypes_StructInOut(void* initDataPtr) { // Allocated in python
  _ERRI(initDataPtr == nullptr,"initDataPtr pointer was nullptr")
  ExampleCTypes_InitData* initData = (ExampleCTypes_InitData*)initDataPtr;
  _ERRI(nullptr == initData->string_, "string_ is nullptr")
  _ERRI(nullptr == initData->vectorFloat_, "vectorFloat_ is nullptr")
  dbg_(63,"ExampleCTypes_StructInOut(): === START ===")
  dbg_(63,"string_: "<<string(initData->string_))
  dbg_(63,"integer_: "<<initData->integer_)
  dbg_(63,"float_: "<<initData->float_)
  dbg_(63,"boolean_: "<<initData->boolean_)
  dbg_(63,"vectorFloat_: ")
  for (int i=0; i < initData->vectorFloatSize_; i++) {
    dbg_(63," - ["<<initData->vectorFloat_[i]<<"]")
  }
  // Update some of the values (observable from the Python side).
  initData->integer_ = -1;
  initData->float_ = -1.0;
  dbg_(63,"ExampleCTypes_StructInOut(): === END ===")
  return 0;
}
// Demonstrates passing scalars by value from Python: just logs them.
// Always returns 0.
int ExampleCTypes_ScalarsIn(int integer_, float float_, char byte_) {
  dbg_(63,"integer_: "<<integer_)
  dbg_(63,"float_: "<<float_)
  dbg_(63,"byte_: "<<byte_)
  return 0;
}
// Demonstrates in/out arrays allocated by the caller: logs the first two
// elements of each array, then overwrites them with sentinel values
// (-1 / -1.0 / 'z'). The fixed size 2 is a contract with the Python side.
int ExampleCTypes_VectorsInOut(int* integer_, float* float_, char* bytes_) { // Allocated in python, known size = 2 for all
  _ERRI(nullptr==integer_,"integer_ is nullptr")
  _ERRI(nullptr==float_,"float_ is nullptr")
  _ERRI(nullptr==bytes_,"bytes_ is nullptr")
  for (int i=0; i < 2; i++) {
    dbg_(63,"integer_: "<<integer_[i]<<", float_: "<<float_[i]<<", bytes: "<<bytes_[i])
    integer_[i] = -1;
    float_[i] = -1.0;
    bytes_[i] = 'z';
  }
  return 0;
}
// Demonstrates out-arrays owned by the C++ side: fills three static vectors
// with fixed demo values and hands their storage back through the out
// pointers. The fixed size 2 is a contract with the Python side.
// Returns 0 on success, -1 on a null out-pointer.
int ExampleCTypes_VectorsOut(int** integer_, float** float_, char** bytes_) { // Allocated in C/C++, known size = 2 for all
  // Null guard, matching the style of ExampleCTypes_error below.
  if (integer_ == nullptr || float_ == nullptr || bytes_ == nullptr) return -1;
  static std::vector<int> vInt;
  static std::vector<float> vFloat;
  static std::vector<char> vByte;
  // Bug fix: the original push_back'ed on every call, so repeated calls grew
  // the static vectors unboundedly and could reallocate, invalidating
  // pointers handed out earlier. Assigning keeps the contents fixed at the
  // documented two elements.
  vInt = {10, 20};
  vFloat = {30.0f, 40.0f};
  vByte = {'a', 'b'};
  *integer_ = vInt.data();
  *float_ = vFloat.data();
  *bytes_ = vByte.data();
  return 0;
}
// --- Standard C-API functions ---
// Returns the module's build-version string through versionPtr.
// The pointed-to storage is owned by the static vkpBuildVersioner.
int ExampleCTypes_version(void** versionPtr){ // Allocated in C, char**
  _ERRI(nullptr == versionPtr,"NULL pointer provided!")
  *versionPtr = const_cast<char*>(BV1.version.c_str());
  return 0;
}
// Returns the current CECS error string through errorPtr.
// Returns -1 on a null argument or when errors are pending, 0 otherwise.
int ExampleCTypes_error(void** errorPtr){ // Allocated in C, char**
  if (errorPtr == NULL) return -1;
  *errorPtr = const_cast<char*>(__ECSOBJ__.str());
  if (_NERR_ != 0) return -1;
  return 0;
}
// Clears any accumulated CECS errors.
void ExampleCTypes_clearErrors() {
  _ECSCLS_
}
// Exposes the underlying CECS error-collector object to the caller.
void* ExampleCTypes_cecs() {
  return __ECSOBJ__.cecs();
}
// Connects this module's error collector to an external one, so errors
// from several modules can be aggregated by the host application.
int ExampleCTypes_setcecs(void* errorPtr) {
  __ECSOBJ__.ConnectTo(errorPtr);
  return 0;
}
}; // extern C
|
export const filePathPrefix = '/assets/files';
export const CVPath = filePathPrefix + '/jobs/cv_Xieyang_Liu.pdf'; |
'use strict';
(function () {
    // Register the controller.
    angular.module("umbraco").controller('VWA.SpiderController', ['$scope', '$http', '$timeout', '$routeParams', '$location', 'appState', 'navigationService', 'notificationsService', function SpiderController($scope, $http, $timeout, $routeParams, $location, appState, navigationService, notificationsService) {
        // Set up scope vars.
        $scope.page = {};
        $scope.page.loading = false;
        $scope.page.nameLocked = false;
        $scope.page.menu = {};
        $scope.page.menu.currentSection = appState.getSectionState("currentSection");
        $scope.page.menu.currentNode = null;
        // Set a property on the scope equal to the current route id.
        $scope.id = $routeParams.id;
        //$scope.model = $scope.model || {};
        // Data property: spider classes loaded from the server.
        $scope.Data = {};
        $scope.Tabs = [{ id: 0, label: 'Algemeen', alias: 'Algemeen' }, { id: 1, label: 'Vaste mapping', alias: 'Vaste mapping' }
            , { id: 2, label: 'Property mapping', alias: 'Property mapping' }
            , { id: 3, label: 'Uitsluit mapping', alias: 'Uitsluit mapping' }
            , { id: 4, label: 'Geolocatie', alias: 'Geolocatie' }];
        $scope.actionInProgress = false;
        $scope.bulkStatus = '';
        $scope.Data.Name = '';
        $scope.Data.Description = '';
        $scope.Data.SoldSpecID = "0";
        $scope.Data.DetailLinkRuleSpec = [];
        $scope.Data.RootNodeProperty = null;
        $scope.SpiderSites = [];
        // Load spider data from the server: first the property specs, then the
        // site itself, then normalize with CompleteData.
        $scope.GetSpider = function (spiderID) {
            // Only when a site has been chosen.
            if ($scope.SpiderSiteID <= 0) {
                return;
            }
            $http.get('backoffice/ETCConnector/Spider/GetProperties/' + spiderID, {
                cache: false
            }).then(function (response) {
                // todo: jsonspecs used to arrive with the route.
                $scope.Specs = response.data;
                $http.get('backoffice/ETCConnector/Spider/GetSite/' + spiderID, {
                    cache: false
                }).then(function (response) {
                    // todo: jsonspecs used to arrive with the route.
                    // $scope.Specs = jsonspecs;
                    //for (var a in response.data.SpiderSpecs) {
                    //    response.data.SpiderSpecs[a].Rule.SpecID = response.data.SpiderSpecs[a].Rule.SpecID.toString();
                    //}
                    $scope.Data = response.data;
                    CompleteData($scope);
                    // $scope.Data.RootNodeProperty =
                    //{
                    //    "label": "Type kas", "description": "Wat wordt het model van de kas",
                    //    "view": "contentpicker",
                    //    "config":
                    //    {
                    //        "multiPicker": "1",
                    //        "showOpenButton": "0",
                    //        "showEditButton": "0",
                    //        "showPathOnHover": "0", "idType": "int",
                    //        "startNode": {
                    //            "type": "content", "id": 1925
                    //        }, "filter": null,
                    //        "minNumber": null,
                    //        "maxNumber": null
                    //    }, "hideLabel": false,
                    //    "validation": {
                    //        "mandatory": false,
                    //        "pattern": null
                    //    },
                    //    "id": 8232,
                    //    "value": "1926,1927,1928",
                    //    "alias": "modelKas",
                    //    "editor": "Umbraco.MultiNodeTreePicker"
                    //};
                }, function (error) {
                    notificationsService.error("Fout bij het laden", error);
                });
            }, function (error) {
                notificationsService.error("Fout bij het laden", error);
            });
        }
        // Start a spider run on the server; ignoreProperties selects the
        // "skip properties" endpoint.
        // NOTE(review): this reads $scope.Data.SpiderSiteID while GetSpider
        // guards on $scope.SpiderSiteID — confirm both are kept in sync.
        $scope.DoSpider = function (ignoreProperties) {
            var url = ignoreProperties ? "backoffice/ETCConnector/Spider/GetStartSpiderSkipProperties/" : "backoffice/ETCConnector/Spider/GetStartSpider/";
            $scope.actionInProgress = true;
            $scope.bulkStatus = 'Bezig met spideren';
            $http.get(url + $scope.Data.SpiderSiteID, {
                cache: false
            }).then(function (response) {
                $scope.actionInProgress = false;
                notificationsService.add({ 'headline': 'Success', 'message': 'Document succesvol processed', type: 'success', sticky: true });
                //notificationsService.success("Document gespiderd", response.data);
                // todo: jsonspecs used to arrive with the route.
                // $scope.Specs = jsonspecs;
                //for (var a in response.data.SpiderSpecs) {
                //    response.data.SpiderSpecs[a].Rule.SpecID = response.data.SpiderSpecs[a].Rule.SpecID.toString();
                //}
            }, function (error) {
                $scope.actionInProgress = false;
                notificationsService.add({ 'headline': 'Failure', 'message': 'Error processing ' + error.data, type: 'error', sticky: true });
            });
        }
        // Sync the Umbraco tree with the current node shortly after load.
        $timeout(function () {
            navigationService.syncTree({ tree: "ETCConnector", path: $scope.id }).then(function (syncArgs) {
                $scope.page.menu.currentNode = syncArgs.node;
            });
        }, 100);
        // Normalize server data into the *RuleSpec shapes the view binds to.
        function CompleteData($scope) {
            $scope.Data.IDRuleSpec = { "Fixed": true, "Rule": $scope.Data.IDRule };
            $scope.Data.DetailLinkRuleSpec = [];
            for (var ruleID in $scope.Data.DetailLinkRule) {
                $scope.Data.DetailLinkRuleSpec.push({ "Fixed": true, "Rule": $scope.Data.DetailLinkRule[ruleID] });
            }
            $scope.Data.PagerRuleSpec = { "Fixed": true, "Rule": $scope.Data.PagerRule };
            $scope.Data.CategoryRuleSpec = { "Fixed": true, "Rule": $scope.Data.CategoryRule };
            $scope.Data.SoldSpecID = $scope.Data.SoldSpecID.toString();
        }
        // Fetch the spider sites.
        //$scope.GetSpiderSites = function () {
        //    $http.get('../Content/sys/update/spiderdata.ashx?cmd=getsites').then(function (response) {
        //        $scope.SpiderSites = response.data;
        //        if ($scope.SpiderSiteID == 0 && $scope.SpiderSites.length) {
        //            $scope.SpiderSiteID = $scope.SpiderSites[0].Key;
        //        }
        //    }, function (error) {
        //        alert(error);
        //    });
        //}
        // Save the rules to the server. Validates required fields first; on a
        // first save (SpiderSiteID == 0) it redirects to the edit URL of the
        // newly created site.
        $scope.Commit = function (a, b) {
            if (!$scope.Data.Name || !$scope.Data.Name.length) {
                notificationsService.error("Niet opgeslagen", 'naam is een verplicht veld');
                return;
            }
            if (!$scope.Data.SpiderDatabase && (!$scope.Data.ListSelector || !$scope.Data.ListSelector.length)) {
                notificationsService.error("Niet opgeslagen", 'Object selector is een verplicht veld');
                return;
            }
            // Coerce SpecIDs back to numbers before posting.
            for (var specID in $scope.Data.SpiderSpecs) {
                $scope.Data.SpiderSpecs[specID].Rule.SpecID *= 1;
            }
            $http.post('backoffice/ETCConnector/Spider/Save/', $scope.Data).then(function (res) {
                if (!res.data.ok) {
                    notificationsService.error("Niet opgeslagen", res.data.message);
                    return;
                }
                $scope.Data = res.data.data;
                $scope.contentForm.$dirty = false;
                CompleteData($scope);
                if ($scope.SpiderSiteID == 0) {
                    $location.path("vwa-spider/ETCConnector/edit/" + $scope.Data.SpiderSiteID.toString());
                }
                $scope.SpiderSiteID = $scope.Data.SpiderSiteID.toString();
                notificationsService.success("Document Published", "Gelukt, de wijzigingen zijn succesvol opgelagen");
            }, function (err) {
                notificationsService.error("Niet opgeslagen", err);
            });
            return false;
        }
        // Set the Deleted flag so the rule is removed server-side on the next commit.
        $scope.Delete = function (spec, index) {
            spec.Rule.Deleted = true;
        }
        // Filter: only show rules that are not deleted and are not exclude rules.
        $scope.ruleFilter = function (spec) {
            return !spec.Rule.Deleted && !spec.Rule.Exclude;
        }
        // Filter: only show rules that are not deleted and ARE exclude rules.
        $scope.ruleExcludeFilter = function (spec) {
            return !spec.Rule.Deleted && spec.Rule.Exclude;
        }
        // Watch SpiderSiteID; on change, reload the data.
        //$scope.$watch('SpiderSiteID', function (newValue, oldValue) {
        //    if (newValue != oldValue)
        //        $scope.GetSpider(newValue);
        //});
        // Add a rule: fetch a blank rule from the server and append it to the
        // collection; Angular's data binding does the rest.
        $scope.Add = function (exclude) {
            $http.get('backoffice/ETCConnector/Spider/GetBlankRule/' + $scope.SpiderSiteID).then(function (response) {
                response.data.Rule.Exclude = !!(exclude);
                $scope.Data.SpiderSpecs.push(response.data);
            }, function (error) {
                notificationsService.error("Fout bij het laden regel", error);
            });
        }
        // Add a blank detail-link rule and mirror it into DetailLinkRuleSpec.
        $scope.AddDetailLink = function () {
            $http.get('backoffice/ETCConnector/Spider/GetBlankDetailRule/' + $scope.SpiderSiteID).then(function (response) {
                $scope.Data.DetailLinkRule.push(response.data);
                $scope.Data.DetailLinkRuleSpec.push({ "Fixed": true, "Rule": $scope.Data.DetailLinkRule[$scope.Data.DetailLinkRule.length - 1].Rule });
            }, function (error) {
                notificationsService.error("Fout bij het laden regel", error);
            });
        }
        // Add an empty mapping entry; Angular's data binding does the rest.
        // NOTE(review): "Seconde" looks like a typo for "Second", but it is a
        // data key the server presumably expects — confirm before renaming.
        $scope.addMapping = function (spiderSpec) {
            spiderSpec.Rule.Mapping.push({ "First": "", "Seconde": "" });
        }
        // Check a rule: the server spiders 3 sites with the given rule.
        $scope.check = function (spiderSpec) {
            notificationsService.error("TOOD ", "Not implemented yet");
            return false;
            //top.HourGlassShow("Bezig met ophalen gegevens, moment aub");
            //$http.get('../Content/sys/update/spiderdata.ashx?spidersite=' + $scope.SpiderSiteID + "&cmd=checkrule&ruleID=" + spiderSpec.Rule.RuleID).then(function (response) {
            //    spiderSpec.Examples = response.data;
            //}, function (error) {
            //    notificationsService.error("Fout bij het laden", error);
            //});
        }
        // Set SpiderSiteID from the route and load the data.
        $scope.SpiderSiteID = $scope.id;
        $scope.GetSpider($scope.SpiderSiteID);
    }]).directive('spiderRule', function () {
        return {
            templateUrl: '/app_plugins/ETCConnector/Directives/spider-rule.html',
            restrict: 'E',
            transclude: true,
            scope: {
                Specs: '=specs',
                SpiderSpec: '=spiderspec'
            }, link: function (scope, element, attr) {
                var a = scope;
            }, controller: ['$scope', function ($scope) {
                // var SpiderSpec = $scope.SpiderSpec;
                //$scope.$watch('SpiderSpec.SpecID', function (newValue, oldValue) {
                //    if (newValue != oldValue)
                //        SpiderSpec.SpecID = SpiderSpec.SpecID * 1;
                //});
                // Flag the rule as deleted; removal happens on commit.
                $scope.Delete = function (spec, index) {
                    spec.Rule.Deleted = true;
                }
            }]
        }
    }).directive('databaseRule', function () {
        return {
            templateUrl: '/app_plugins/ETCConnector/Directives/database-rule.html',
            restrict: 'E',
            transclude: true,
            scope: {
                Specs: '=specs',
                SpiderSpec: '=spiderspec'
            }, link: function (scope, element, attr) {
                var a = scope;
            }, controller: ['$scope', function ($scope) {
                // var SpiderSpec = $scope.SpiderSpec;
                //$scope.$watch('SpiderSpec.SpecID', function (newValue, oldValue) {
                //    if (newValue != oldValue)
                //        SpiderSpec.SpecID = SpiderSpec.SpecID * 1;
                //});
                // Flag the rule as deleted; removal happens on commit.
                $scope.Delete = function (spec, index) {
                    spec.Rule.Deleted = true;
                }
            }]
        }
    });
    // Adds the resource to the umbraco.resources module:
    angular.module('umbraco.resources').factory('spiderSitesResource',
        function ($q, $http) {
            // The factory object returned.
            return {
                // This calls the Api Controller we set up earlier.
                getAll: function () {
                    return $http.get("backoffice/Spider/Spider/getall");
                },
                doSpider: function () {
                    return $http.get("backoffice/Spider/Spider/Spider");
                }
            };
        }
    );
})();
|
/**
 * A rectangle defined by its two side lengths.
 */
public class Rectangle extends GeometricShape {
    /** One pair of parallel sides. */
    private final double length;
    /** The other pair of parallel sides. */
    private final double width;

    /**
     * Creates a rectangle with the given dimensions.
     *
     * @param length the rectangle's length
     * @param width  the rectangle's width
     */
    public Rectangle(double length, double width) {
        this.length = length;
        this.width = width;
    }

    /** Returns the area: length times width. */
    @Override
    public double calculateArea() {
        return width * length;
    }

    /** Returns the perimeter: twice the sum of the sides. */
    @Override
    public double calculatePerimeter() {
        return (length + width) * 2;
    }
}
public class Circle extends GeometricShape {
private double radius;
public Circle(double radius) {
this.radius = radius;
}
@Override
public double calculateArea() {
return Math.PI * radius * radius;
}
@Override
public double calculatePerimeter() {
return 2 * Math.PI * radius;
}
} |
# Publish the built ngrid packages to npm (run after building into dist/).
npm publish dist/@pebula/ngrid
npm publish dist/@pebula/ngrid-material
|
use std::env;
/// Entry point: reads an infix expression from the first CLI argument,
/// converts it to postfix notation, and prints the result. Panics with a
/// message when no argument is supplied.
fn main() {
    let input = env::args().nth(1).expect("No expression was given.");
    let output = optimize_expression(input);
    println!("{}", output);
}
/// Converts a whitespace-separated infix expression into postfix (reverse
/// Polish) notation via a shunting-yard pass. Operands pass straight
/// through; operators are popped while the stack top has equal or higher
/// precedence (left-associative behavior).
fn optimize_expression(expr: String) -> String {
    let mut operators: Vec<&str> = Vec::new();
    let mut postfix: Vec<&str> = Vec::new();
    for token in expr.split_whitespace() {
        match token {
            "+" | "-" | "*" | "/" => {
                // Drain operators that bind at least as tightly as `token`,
                // then park `token` on the stack.
                while let Some(top) = operators.pop() {
                    if precedence(token) <= precedence(top) {
                        postfix.push(top);
                    } else {
                        operators.push(top);
                        break;
                    }
                }
                operators.push(token);
            }
            operand => postfix.push(operand),
        }
    }
    // Flush whatever operators remain, innermost first.
    while let Some(top) = operators.pop() {
        postfix.push(top);
    }
    postfix.join(" ")
}
/// Binding power of a binary operator: 0 for additive, 1 for
/// multiplicative. Panics on anything else.
fn precedence(op: &str) -> u32 {
    if op == "+" || op == "-" {
        return 0;
    }
    if op == "*" || op == "/" {
        return 1;
    }
    panic!("Invalid operator: {}", op);
}
import re


def validate_email(email):
    """Return True if *email* matches a basic address pattern, else False.

    The pattern requires word characters, dots or hyphens on both sides of
    a single '@' and a 2-4 character TLD. It is a quick sanity check, not a
    full RFC 5322 validator.
    """
    email_regex = r'^[\w\.-]+@[\w\.-]+\.\w{2,4}$'
    # 'is not None' instead of '!= None' (PEP 8); result is an explicit bool.
    return re.match(email_regex, email) is not None
#!/bin/bash
# Publishes the generated Javadoc to the gh-pages branch, but only for
# non-PR Travis builds of the canonical repo's master branch on JDK 8.
if [ "$TRAVIS_REPO_SLUG" == "Azbesciak/BuildingInfo" ] && [ "$TRAVIS_JDK_VERSION" == "oraclejdk8" ] && [ "$TRAVIS_PULL_REQUEST" == "false" ] && [ "$TRAVIS_BRANCH" == "master" ]; then
    echo -e "Publishing javadoc...\n"
    # Stage the freshly built docs outside the repo before switching branches.
    cp -R spring-boot-server/build/javadoc $HOME/javadoc-latest
    cd $HOME
    git config --global user.email "travis@travis-ci.org"
    git config --global user.name "travis-ci"
    # GH_TOKEN is a repo secret; output is discarded so it never leaks into logs.
    git clone --quiet --branch=gh-pages https://${GH_TOKEN}@github.com/Azbesciak/BuildingInfo gh-pages > /dev/null
    cd gh-pages
    # Replace the previous javadoc wholesale rather than merging.
    git rm -rf ./javadoc
    cp -Rf $HOME/javadoc-latest ./javadoc
    git add -f .
    git commit -m "Latest javadoc on successful travis build $TRAVIS_BUILD_NUMBER auto-pushed to gh-pages"
    git push -fq origin gh-pages > /dev/null
    echo -e "Published Javadoc to gh-pages.\n"
fi
|
<filename>flybirds/core/driver/ui_driver.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
"""
ui driver proxy
"""
from flybirds.core.global_context import GlobalContext
def air_bdd_screen_size(dr_instance):
    # Delegates to the configured UI driver; dr_instance is presumably the
    # active driver handle — confirm against GlobalContext.ui_driver.
    return GlobalContext.ui_driver.air_bdd_screen_size(dr_instance)
def init_driver():
    # Proxy: create/initialize the UI driver selected in GlobalContext.
    return GlobalContext.ui_driver.init_driver()
def close_driver():
    # Proxy: tear down the UI driver selected in GlobalContext.
    return GlobalContext.ui_driver.close_driver()
|
<filename>redis-persistence/src/main/java/com/netflix/conductor/dao/index/ElasticSearchDAO.java
/**
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
*
*/
package com.netflix.conductor.dao.index;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.netflix.conductor.annotations.Trace;
import com.netflix.conductor.common.metadata.events.EventExecution;
import com.netflix.conductor.common.metadata.tasks.Task;
import com.netflix.conductor.common.metadata.tasks.TaskExecLog;
import com.netflix.conductor.common.run.SearchResult;
import com.netflix.conductor.common.run.TaskSummary;
import com.netflix.conductor.common.run.Workflow;
import com.netflix.conductor.common.run.WorkflowSummary;
import com.netflix.conductor.core.config.Configuration;
import com.netflix.conductor.core.events.queue.Message;
import com.netflix.conductor.core.execution.ApplicationException;
import com.netflix.conductor.core.execution.ApplicationException.Code;
import com.netflix.conductor.core.utils.RetryUtil;
import com.netflix.conductor.dao.IndexDAO;
import com.netflix.conductor.dao.index.query.parser.Expression;
import com.netflix.conductor.dao.index.query.parser.ParserException;
import com.netflix.conductor.metrics.Monitors;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang.StringUtils;
import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.get.GetField;
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.query.QueryStringQueryBuilder;
import org.elasticsearch.indices.IndexAlreadyExistsException;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.sort.SortBuilders;
import org.elasticsearch.search.sort.SortOrder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.inject.Inject;
import javax.inject.Singleton;
import java.io.InputStream;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.TimeZone;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
/**
* @author Viren
*
*/
@Trace
@Singleton
public class ElasticSearchDAO implements IndexDAO {

    private static final Logger logger = LoggerFactory.getLogger(ElasticSearchDAO.class);

    // Document types stored in the workflow index / task-log index.
    private static final String WORKFLOW_DOC_TYPE = "workflow";

    private static final String TASK_DOC_TYPE = "task";

    // NOTE(review): task execution logs share the "task" doc type with task
    // summaries; they live in the separate task_log index, so this appears
    // intentional — confirm before changing.
    private static final String LOG_DOC_TYPE = "task";

    private static final String EVENT_DOC_TYPE = "event";

    private static final String MSG_DOC_TYPE = "message";

    private static final String className = ElasticSearchDAO.class.getSimpleName();

    private String indexName;

    private String logIndexName;

    private String logIndexPrefix;

    private ObjectMapper objectMapper;

    private Client elasticSearchClient;

    private static final TimeZone GMT = TimeZone.getTimeZone("GMT");

    // NOTE(review): SimpleDateFormat is not thread-safe; it is written in the
    // static block and afterwards only formatted from the constructor and the
    // single scheduler thread below — confirm if more callers are added.
    private static final SimpleDateFormat SIMPLE_DATE_FORMAT = new SimpleDateFormat("yyyyMMWW");

    /** Pool used for the async* variants of the indexing operations. */
    private final ExecutorService executorService;

    static {
        SIMPLE_DATE_FORMAT.setTimeZone(GMT);
    }

    @Inject
    public ElasticSearchDAO(Client elasticSearchClient, Configuration config, ObjectMapper objectMapper) {
        this.objectMapper = objectMapper;
        this.elasticSearchClient = elasticSearchClient;
        this.indexName = config.getProperty("workflow.elasticsearch.index.name", null);

        try {
            initIndex();
            updateIndexName(config);
            // Roll the task-log index name hourly (it embeds the current week).
            Executors.newScheduledThreadPool(1).scheduleAtFixedRate(() -> updateIndexName(config), 0, 1, TimeUnit.HOURS);
        } catch (Exception e) {
            logger.error(e.getMessage(), e);
        }

        int corePoolSize = 6;
        int maximumPoolSize = 12;
        long keepAliveTime = 1L;
        this.executorService = new ThreadPoolExecutor(corePoolSize,
                maximumPoolSize,
                keepAliveTime,
                TimeUnit.MINUTES,
                new LinkedBlockingQueue<>());
    }

    /** Recomputes the dated task-log index name and creates the index if absent. */
    private void updateIndexName(Configuration config) {
        this.logIndexPrefix = config.getProperty("workflow.elasticsearch.tasklog.index.name", "task_log");
        this.logIndexName = this.logIndexPrefix + "_" + SIMPLE_DATE_FORMAT.format(new Date());

        try {
            elasticSearchClient.admin().indices().prepareGetIndex().addIndices(logIndexName).execute().actionGet();
        } catch (IndexNotFoundException infe) {
            try {
                elasticSearchClient.admin().indices().prepareCreate(logIndexName).execute().actionGet();
            } catch (IndexAlreadyExistsException ignored) {
                // Another node created it concurrently; nothing to do.
            } catch (Exception e) {
                logger.error(e.getMessage(), e);
            }
        }
    }

    /**
     * Initializes the index with required templates and mappings.
     */
    private void initIndex() throws Exception {

        //0. Add the index template
        GetIndexTemplatesResponse result = elasticSearchClient.admin().indices().prepareGetTemplates("wfe_template").execute().actionGet();
        if(result.getIndexTemplates().isEmpty()) {
            logger.info("Creating the index template 'wfe_template'");
            InputStream stream = ElasticSearchDAO.class.getResourceAsStream("/template.json");
            byte[] templateSource = IOUtils.toByteArray(stream);

            try {
                elasticSearchClient.admin().indices().preparePutTemplate("wfe_template").setSource(templateSource).execute().actionGet();
            }catch(Exception e) {
                logger.error(e.getMessage(), e);
            }
        }

        //1. Create the required index
        try {
            elasticSearchClient.admin().indices().prepareGetIndex().addIndices(indexName).execute().actionGet();
        }catch(IndexNotFoundException infe) {
            try {
                elasticSearchClient.admin().indices().prepareCreate(indexName).execute().actionGet();
            }catch(IndexAlreadyExistsException ignored) {}
        }

        //2. Mapping for the workflow document type
        GetMappingsResponse response = elasticSearchClient.admin().indices().prepareGetMappings(indexName).addTypes(WORKFLOW_DOC_TYPE).execute().actionGet();
        if(response.mappings().isEmpty()) {
            logger.info("Adding the workflow type mappings");
            InputStream stream = ElasticSearchDAO.class.getResourceAsStream("/wfe_type.json");
            byte[] bytes = IOUtils.toByteArray(stream);
            String source = new String(bytes);
            try {
                elasticSearchClient.admin().indices().preparePutMapping(indexName).setType(WORKFLOW_DOC_TYPE).setSource(source).execute().actionGet();
            }catch(Exception e) {
                logger.error(e.getMessage(), e);
            }
        }
    }

    @Override
    public void indexWorkflow(Workflow workflow) {
        try {
            String id = workflow.getWorkflowId();
            WorkflowSummary summary = new WorkflowSummary(workflow);
            byte[] doc = objectMapper.writeValueAsBytes(summary);

            // Upsert so the same document can be re-indexed as the workflow evolves.
            UpdateRequest req = new UpdateRequest(indexName, WORKFLOW_DOC_TYPE, id);
            req.doc(doc);
            req.upsert(doc);
            req.retryOnConflict(5);
            updateWithRetry(req,"Index workflow into doc_type workflow");
        } catch (Throwable e) {
            logger.error("Indexing failed {}", e.getMessage(), e);
        }
    }

    @Override
    public CompletableFuture<Void> asyncIndexWorkflow(Workflow workflow) {
        return CompletableFuture.runAsync(() -> indexWorkflow(workflow), executorService);
    }

    @Override
    public void indexTask(Task task) {
        try {
            String id = task.getTaskId();
            TaskSummary summary = new TaskSummary(task);
            byte[] doc = objectMapper.writeValueAsBytes(summary);

            UpdateRequest req = new UpdateRequest(indexName, TASK_DOC_TYPE, id);
            req.doc(doc);
            req.upsert(doc);
            updateWithRetry(req, "Index task into doc_type of task");
        } catch (Throwable e) {
            logger.error("Indexing failed {}", e.getMessage(), e);
        }
    }

    @Override
    public CompletableFuture<Void> asyncIndexTask(Task task) {
        return CompletableFuture.runAsync(() -> indexTask(task), executorService);
    }

    @Override
    public void addTaskExecutionLogs(List<TaskExecLog> taskExecLogs) {
        if (taskExecLogs.isEmpty()) {
            return;
        }
        try {
            // Bulk-index all logs in a single request; retried as a unit on failure.
            BulkRequestBuilder bulkRequestBuilder = elasticSearchClient.prepareBulk();
            for (TaskExecLog taskExecLog : taskExecLogs) {
                IndexRequest request = new IndexRequest(logIndexName, LOG_DOC_TYPE);
                request.source(objectMapper.writeValueAsBytes(taskExecLog));
                bulkRequestBuilder.add(request);
            }
            new RetryUtil<BulkResponse>().retryOnException(() -> bulkRequestBuilder.execute().actionGet(),
                    null, BulkResponse::hasFailures, "Indexing all execution logs into doc_type task", "addTaskExecutionLogs");
        } catch (Throwable e) {
            logger.error("Indexing failed {}", e.getMessage(), e);
        }
    }

    @Override
    public CompletableFuture<Void> asyncAddTaskExecutionLogs(List<TaskExecLog> logs) {
        return CompletableFuture.runAsync(() -> addTaskExecutionLogs(logs), executorService);
    }

    /**
     * Fetches all execution logs for a task, sorted by creation time.
     * NOTE(review): returns null (not an empty list) when the search fails;
     * callers appear to depend on this, so it is left unchanged.
     */
    public List<TaskExecLog> getTaskExecutionLogs(String taskId) {

        try {
            QueryBuilder qf;
            Expression expression = Expression.fromString("taskId='" + taskId + "'");
            qf = expression.getFilterBuilder();

            BoolQueryBuilder filterQuery = QueryBuilders.boolQuery().must(qf);
            QueryStringQueryBuilder stringQuery = QueryBuilders.queryStringQuery("*");
            BoolQueryBuilder fq = QueryBuilders.boolQuery().must(stringQuery).must(filterQuery);

            // Search across all dated log indices for this prefix.
            final SearchRequestBuilder srb = elasticSearchClient.prepareSearch(logIndexPrefix + "*").setQuery(fq).setTypes(TASK_DOC_TYPE).addSort(SortBuilders.fieldSort("createdTime").order(SortOrder.ASC).unmappedType("long"));
            SearchResponse response = srb.execute().actionGet();
            SearchHit[] hits = response.getHits().getHits();
            List<TaskExecLog> logs = new ArrayList<>(hits.length);
            for(SearchHit hit : hits) {
                String source = hit.getSourceAsString();
                TaskExecLog tel = objectMapper.readValue(source, TaskExecLog.class);
                logs.add(tel);
            }

            return logs;

        }catch(Exception e) {
            logger.error(e.getMessage(), e);
        }

        return null;
    }

    @Override
    public void addMessage(String queue, Message msg) {
        // Run all indexing other than workflow indexing in a separate threadpool
        Map<String, Object> doc = new HashMap<>();
        doc.put("messageId", msg.getId());
        doc.put("payload", msg.getPayload());
        doc.put("queue", queue);
        doc.put("created", System.currentTimeMillis());
        IndexRequest request = new IndexRequest(logIndexName, MSG_DOC_TYPE);
        request.source(doc);
        new RetryUtil<>().retryOnException(() -> elasticSearchClient.index(request).actionGet(), null,
                null, "Indexing document in for docType: message", "addMessage");
    }

    @Override
    public void addEventExecution(EventExecution eventExecution) {
        try {
            byte[] doc = objectMapper.writeValueAsBytes(eventExecution);
            // Composite id keeps one document per (handler, event, message, execution).
            String id = eventExecution.getName() + "." + eventExecution.getEvent() + "." + eventExecution.getMessageId() + "." + eventExecution.getId();
            UpdateRequest req = new UpdateRequest(logIndexName, EVENT_DOC_TYPE, id);
            req.doc(doc);
            req.upsert(doc);
            req.retryOnConflict(5);
            updateWithRetry(req,"Update Event execution for doc_type event");
        } catch (Throwable e) {
            logger.error("Indexing failed {}", e.getMessage(), e);
        }
    }

    @Override
    public CompletableFuture<Void> asyncAddEventExecution(EventExecution eventExecution) {
        return CompletableFuture.runAsync(() -> addEventExecution(eventExecution), executorService);
    }

    /** Executes an update request with retries; errors are logged and counted, not thrown. */
    private void updateWithRetry(UpdateRequest request, String operationDescription) {
        try {
            new RetryUtil<UpdateResponse>().retryOnException(() -> elasticSearchClient.update(request).actionGet(), null,
                    null, operationDescription, "updateWithRetry");
        } catch (Exception e) {
            Monitors.error(className, "index");
            // FIX: the previous call passed e.getMessage() as a third argument with
            // only two placeholders, silently dropping it; pass the throwable last
            // so SLF4J logs the message and stack trace.
            logger.error("Indexing failed for {}, {}", request.index(), request.type(), e);
        }
    }

    @Override
    public SearchResult<String> searchWorkflows(String query, String freeText, int start, int count, List<String> sort) {
        try {
            return search(query, start, count, sort, freeText, WORKFLOW_DOC_TYPE);
        } catch (ParserException e) {
            throw new ApplicationException(Code.BACKEND_ERROR, e.getMessage(), e);
        }
    }

    @Override
    public SearchResult<String> searchTasks(String query, String freeText, int start, int count, List<String> sort) {
        try {
            return search(query, start, count, sort, freeText, TASK_DOC_TYPE);
        } catch (ParserException e) {
            throw new ApplicationException(Code.BACKEND_ERROR, e.getMessage(), e);
        }
    }

    @Override
    public void removeWorkflow(String workflowId) {
        try {
            DeleteRequest req = new DeleteRequest(indexName, WORKFLOW_DOC_TYPE, workflowId);
            DeleteResponse response = elasticSearchClient.delete(req).actionGet();
            if (!response.isFound()) {
                logger.error("Index removal failed - document not found by id " + workflowId);
            }
        } catch (Throwable e) {
            // FIX: "failed failed" typo in the log message.
            logger.error("Index removal failed {}", e.getMessage(), e);
            Monitors.error(className, "remove");
        }
    }

    @Override
    public CompletableFuture<Void> asyncRemoveWorkflow(String workflowId) {
        return CompletableFuture.runAsync(() -> removeWorkflow(workflowId), executorService);
    }

    @Override
    public void updateWorkflow(String workflowInstanceId, String[] keys, Object[] values) {
        if (keys.length != values.length) {
            throw new IllegalArgumentException("Number of keys and values should be same.");
        }

        UpdateRequest request = new UpdateRequest(indexName, WORKFLOW_DOC_TYPE, workflowInstanceId);
        Map<String, Object> source = IntStream.range(0, keys.length).boxed()
                .collect(Collectors.toMap(i -> keys[i], i -> values[i]));
        request.doc(source);
        logger.debug("Updating workflow {} with {}", workflowInstanceId, source);
        new RetryUtil<>().retryOnException(() -> elasticSearchClient.update(request).actionGet(), null, null,
                "Updating index for doc_type workflow", "updateWorkflow");
    }

    @Override
    public CompletableFuture<Void> asyncUpdateWorkflow(String workflowInstanceId, String[] keys, Object[] values) {
        return CompletableFuture.runAsync(() -> updateWorkflow(workflowInstanceId, keys, values), executorService);
    }

    /** Returns the string form of a single stored field from a workflow document, or null. */
    @Override
    public String get(String workflowInstanceId, String fieldToGet) {
        Object value = null;
        GetRequest request = new GetRequest(indexName, WORKFLOW_DOC_TYPE, workflowInstanceId).fields(fieldToGet);
        GetResponse response = elasticSearchClient.get(request).actionGet();
        Map<String, GetField> fields = response.getFields();
        if(fields == null) {
            return null;
        }
        GetField field = fields.get(fieldToGet);
        if(field != null) value = field.getValue();
        if(value != null) {
            return value.toString();
        }
        return null;
    }

    /**
     * Common search: combines the structured query (DSL expression) with a
     * free-text query and returns matching document ids plus the total count.
     * Sort options take the form "field" or "field:ASC|DESC".
     */
    private SearchResult<String> search(String structuredQuery, int start, int size, List<String> sortOptions, String freeTextQuery, String docType) throws ParserException {
        QueryBuilder qf = QueryBuilders.matchAllQuery();

        if(StringUtils.isNotEmpty(structuredQuery)) {
            Expression expression = Expression.fromString(structuredQuery);
            qf = expression.getFilterBuilder();
        }

        BoolQueryBuilder filterQuery = QueryBuilders.boolQuery().must(qf);
        QueryStringQueryBuilder stringQuery = QueryBuilders.queryStringQuery(freeTextQuery);
        BoolQueryBuilder fq = QueryBuilders.boolQuery().must(stringQuery).must(filterQuery);
        final SearchRequestBuilder srb = elasticSearchClient.prepareSearch(indexName).setQuery(fq).setTypes(docType).setNoFields().setFrom(start).setSize(size);

        if(sortOptions != null){
            sortOptions.forEach(sortOption -> {
                SortOrder order = SortOrder.ASC;
                String field = sortOption;
                int indx = sortOption.indexOf(':');
                if(indx > 0){	//Can't be 0, need the field name at-least
                    field = sortOption.substring(0, indx);
                    order = SortOrder.valueOf(sortOption.substring(indx+1));
                }
                srb.addSort(field, order);
            });
        }
        List<String> result = new LinkedList<>();
        SearchResponse response = srb.execute().actionGet();
        response.getHits().forEach(hit -> result.add(hit.getId()));
        long count = response.getHits().getTotalHits();
        return new SearchResult<>(count, result);
    }
}
|
/* Return the product of the two operands. */
int multiplyTwoNumbers(int firstNumber, int secondNumber) {
    int product = firstNumber * secondNumber;
    return product;
}
/* FIX: statements are not legal at file scope in C; wrap the demo calls in
 * main. (printf requires <stdio.h>.) */
int main(void) {
    int result = multiplyTwoNumbers(3, 4);
    printf("Result: %d", result);
    return 0;
}
<reponame>hotspacode/neeza
package io.github.hotspacode.neeza.base.log;
import java.util.logging.Handler;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
 * Static logging facade for the NEEZA subsystem, writing to neeza.log
 * via the handler produced by BaseLog.makeLogger.
 */
public class NeezaLog extends BaseLog {

    /** Shared JUL logger for the "NEEZA" namespace. */
    private static final Logger logger = Logger.getLogger("NEEZA");

    /** Log file name handed to the BaseLog factory. */
    private static final String FILE_NAME = "neeza.log";

    // Retained reference to the file handler installed on the logger.
    // NOTE(review): makeLogger and log are defined in BaseLog (not visible here).
    private static Handler logHandler = null;

    static {
        logHandler = makeLogger(FILE_NAME, logger);
    }

    /** Log at INFO with printf-style parameters. */
    public static void info(String detail, Object... params) {
        log(logger, Level.INFO, detail, params);
    }

    /** Log at INFO with an attached throwable. */
    public static void info(String detail, Throwable e) {
        log(logger, Level.INFO, detail, e);
    }

    /** Log at WARNING with printf-style parameters. */
    public static void warn(String detail, Object... params) {
        log(logger, Level.WARNING, detail, params);
    }

    /** Log at WARNING with an attached throwable. */
    public static void warn(String detail, Throwable e) {
        log(logger, Level.WARNING, detail, e);
    }
}
|
#!/bin/bash
# Stops and undeploys the Trade application from a local Geronimo server.
# Requires GERONIMO_HOME to point at the Geronimo installation.
#export GERONIMO_HOME=/home/hogstrom/geronimo/geronimo/modules/assembly/target/geronimo-1.1-SNAPSHOT
java -jar ${GERONIMO_HOME}/bin/deployer.jar --user system --password manager stop Trade
#java -jar ${GERONIMO_HOME}/bin/deployer.jar --user system --password manager stop TradeDataSource
#java -jar ${GERONIMO_HOME}/bin/deployer.jar --user system --password manager stop TradeJMS
java -jar ${GERONIMO_HOME}/bin/deployer.jar --user system --password manager undeploy Trade
#java -jar ${GERONIMO_HOME}/bin/deployer.jar --user system --password manager undeploy TradeDataSource
#java -jar ${GERONIMO_HOME}/bin/deployer.jar --user system --password manager undeploy TradeJMS
|
package me.minidigger.minicraft.model;
import com.mojang.brigadier.CommandDispatcher;
import com.mojang.brigadier.tree.CommandNode;
import java.util.Collection;
/**
 * An entity that can issue commands and receive textual feedback
 * (e.g. a player or the console).
 */
public interface CommandSource {

    /** @return the display name of this source. */
    String getName();

    /** Send a single line of text to this source. */
    void sendMessage(String message);

    /**
     * Sends Brigadier's "smart" usage for the given node, one line per
     * variant, each prefixed with "* ".
     */
    default void sendSmartUsage(CommandDispatcher<CommandSource> dispatcher, CommandNode<CommandSource> node) {
        Collection<String> usage = dispatcher.getSmartUsage(node, this).values();
        for (String line : usage) {
            sendMessage("* " + line);
        }
    }

    /**
     * Sends every usage string for the given node (non-restricted),
     * each prefixed with "* ".
     */
    default void sendAllUsage(CommandDispatcher<CommandSource> dispatcher, CommandNode<CommandSource> node) {
        String[] usage = dispatcher.getAllUsage(node, this, false);
        for (String line : usage) {
            sendMessage("* " + line);
        }
    }
}
|
class DocToHtml:
    """Converts raw document text to an HTML string.

    ``converter_type`` selects a conversion backend; currently every type
    produces the same minimal wrapper.
    """

    def __init__(self, converter_type: str):
        self.converter_type = converter_type

    def convert_to_html(self, doc: str) -> str:
        """Return *doc* wrapped in a bare HTML skeleton."""
        # Both branches are placeholders and emit identical markup today;
        # the "ROBOT" branch exists so type-specific logic can be added later.
        if self.converter_type == "ROBOT":
            return f"<html><body>{doc}</body></html>"
        return f"<html><body>{doc}</body></html>"
class DocumentConverter:
    """Holds a raw document string and derives simple views of it."""

    def __init__(self, doc: str):
        self.doc = doc

    def first_line(self) -> str:
        """Return the document's first line, or '' for an empty document."""
        if not self.doc:
            return ""
        return self.doc.splitlines()[0]

    @property
    def html_doc(self) -> str:
        """HTML rendering of the document via the 'ROBOT' converter."""
        if not self.doc:
            return ""
        return DocToHtml("ROBOT").convert_to_html(self.doc)
<filename>h2o-test-support/src/main/java/water/test/WebsocketClient.java
package water.test;
import com.google.gson.Gson;
import com.google.gson.reflect.TypeToken;
import org.glassfish.tyrus.client.ClientManager;
import water.H2O;
import javax.websocket.*;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Map;
import java.util.Optional;
import static org.junit.Assert.fail;
/**
 * Test helper: a Tyrus websocket client that connects to a local H2O node's
 * Steam endpoint and buffers the most recent JSON message for assertions.
 */
public class WebsocketClient extends Endpoint {

    private final Session sess;
    private final Gson gson = new Gson();
    // Last decoded message, consumed by waitToReceiveMessage; guarded by 'this'.
    private Map<String, String> receivedMessage;
    // Raw message that arrived before the previous one was consumed (test bug signal).
    private String overflowMessage;

    /** Opens the websocket connection to the running H2O instance. */
    public WebsocketClient() throws URISyntaxException, IOException, DeploymentException {
        String destUri = "ws://" + H2O.getIpPortString() + "/3/Steam.websocket";
        ClientManager client = ClientManager.createClient();
        ClientEndpointConfig cec = ClientEndpointConfig.Builder.create().build();
        sess = client.connectToServer(this, cec, new URI(destUri));
    }

    /** Closes the session with a normal-closure code. */
    public void close() throws IOException {
        sess.close(new CloseReason(CloseReason.CloseCodes.NORMAL_CLOSURE, "Test Done"));
    }

    @OnOpen
    public void onOpen(Session session, EndpointConfig config) {
        session.addMessageHandler(String.class, message -> {
            //LOG.info("Received message from H2O: " + message);
            synchronized (this) {
                if (receivedMessage != null) {
                    // Previous message not yet consumed: stash raw text and flag overflow.
                    //LOG.info("Received message not stored as last message was not picked up yet.");
                    overflowMessage = message;
                } else {
                    receivedMessage = gson.fromJson(message, new TypeToken<Map<String, String>>() {}.getType());
                }
                this.notifyAll();
            }
        });
    }

    /** Serializes msg to JSON and sends it synchronously. */
    public void sendMessage(Object msg) throws IOException {
        final String msgStr = gson.toJson(msg);
        //LOG.info("Sending message to H2O: " + msgStr);
        sess.getBasicRemote().sendText(msgStr);
    }

    /** Waits up to 10s for a message; fails the test if none arrives. */
    public Map<String, String> waitToReceiveMessage(String message) {
        return waitToReceiveMessage(message, 10_000);
    }

    public Map<String, String> waitToReceiveMessage(String message, int timeoutMillis) {
        return waitToReceiveMessage(message, timeoutMillis, true).get();
    }

    /**
     * Waits for the next message. 'message' is only used in the failure text.
     * NOTE(review): a single wait() with no re-check loop — a spurious wakeup
     * or an earlier notify could return before timeout; acceptable for tests
     * but worth confirming.
     */
    public synchronized Optional<Map<String, String>> waitToReceiveMessage(String message, int timeoutMillis, boolean failOnNone) {
        if (overflowMessage != null) {
            fail("Message received but not handled: " + overflowMessage);
        }
        try {
            this.wait(timeoutMillis);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
        if (receivedMessage != null) {
            Map<String, String> res = receivedMessage;
            receivedMessage = null;
            return Optional.of(res);
        } else if (failOnNone) {
            fail("Expected " + message + ", but no message received from H2O.");
        }
        return Optional.empty();
    }
}
|
import hash from './hash';
import equals from './equals';
/**
 * Structural equality for object literals that already share a hash code.
 * Returns false immediately when hashes differ, which also keeps mutually
 * recursive structures from overflowing the stack.
 */
export default function computeObjectEquals(objA, objB) {
    if (hash(objA) !== hash(objB)) {
        return false;
    }
    // Equal hashes: compare property by property. Regular 'class' instances
    // hash differently, so only plain literals reach this loop; objA descends
    // directly from Object, hence no hasOwnProperty filtering is needed.
    for (var key in objA) {
        var value = objA[key];
        // Methods are excluded from the equality contract.
        if (typeof value === 'function') {
            continue;
        }
        if (!equals(value, objB[key])) {
            return false;
        }
    }
    // objB's own properties are not walked: literals sharing a hash code are
    // overwhelmingly likely to share a shape; in the rare mismatch the
    // objects are deliberately treated as equal.
    return true;
}
|
'use strict';
// MODULES //
var continued_fraction = require( 'math-continued-fraction' );
// FUNCTIONS //
var upper_incomplete_gamma_fract = require( './upper_incomplete_gamma_fract' );
// UPPER GAMMA FRACTION //

/**
* FUNCTION: upper_gamma_fraction( a, z, eps )
*	Evaluates the upper incomplete gamma function via a continued fraction expansion.
*	(The previous doc block described lower_incomplete_gamma_series by mistake.)
*
* @param {Number} a - function parameter
* @param {Number} z - function parameter
* @param {Number} eps - tolerance for continued fraction
* @returns {Number} function value
*/
function upper_gamma_fraction( a, z, eps ) {
	var f = upper_incomplete_gamma_fract( a, z );
	return 1 / ( z - a + 1 + continued_fraction( f, { 'tolerance': eps } ) );
} // end FUNCTION upper_gamma_fraction()
// EXPORTS //
module.exports = upper_gamma_fraction;
|
#!/bin/bash
#
# Copyright IBM Corp, Scoir, Inc. All Rights Reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
echo "Running $0"
# Drops files that are exempt from license headers (binary assets, generated
# code, vendored deps, docs, etc.) from the global CHECK list, then dedupes.
function filterExcludedFiles {
  CHECK=`echo "$CHECK" | grep -v .png$ | grep -v .rst$ | grep -v ^.git/ \
  | grep -v .pem$ | grep -v .block$ | grep -v .tx$ | grep -v ^LICENSE$ | grep -v _sk$ \
  | grep -v .key$ | grep -v .crt$ | grep -v \\.gen.go$ | grep -v \\.json$ | grep -v Gopkg.lock$ \
  | grep -v .md$ | grep -v ^vendor/ | grep -v ^build/ | grep -v .pb.go$ | grep -v ci.properties$ \
  | grep -v go.sum$ | grep -v gomocks | sort -u`
}
# Collect candidate files: working-directory changes first.
CHECK=$(git diff --name-only --diff-filter=ACMRTUXB HEAD)
# Non-zero when HEAD carries a remote ref, i.e. working copy is even with remote.
REMOTE_REF=$(git log -1 --pretty=format:"%d" | grep '[(].*\/' | wc -l)

# If CHECK is empty then there is no working directory changes: fallback to last two commits.
# Else if REMOTE_REF=0 then working copy commits are even with remote: only use the working copy changes.
# Otherwise assume that the change is amending the previous commit: use both last two commit and working copy changes.
if [[ -z "${CHECK}" ]] || [[ "${REMOTE_REF}" -eq 0 ]]; then
    if [[ ! -z "${CHECK}" ]]; then
        echo "Examining last commit and working directory changes"
        CHECK+=$'\n'
    else
        echo "Examining last commit changes"
    fi

    LAST_COMMITS=($(git log -2 --pretty=format:"%h"))
    CHECK+=$(git diff-tree --no-commit-id --name-only --diff-filter=ACMRTUXB -r ${LAST_COMMITS[1]} ${LAST_COMMITS[0]})
else
    echo "Examining working directory changes"
fi

filterExcludedFiles

if [[ -z "$CHECK" ]]; then
    echo "All files are excluded from having license headers"
    exit 0
fi

# Pass 1: files lacking an SPDX identifier anywhere.
missing=`echo "$CHECK" | xargs ls -d 2>/dev/null | xargs grep -L "SPDX-License-Identifier"`
if [[ -z "$missing" ]]; then
    echo "All files have SPDX-License-Identifier headers"
    exit 0
fi
echo "The following files are missing SPDX-License-Identifier headers:"
echo "$missing"
echo
echo "Please replace the Apache license header comment text with:"
echo "SPDX-License-Identifier: Apache-2.0"
echo
echo "Checking committed files for traditional Apache License headers ..."

# Pass 2: of those, accept a traditional Apache 2.0 header as a fallback.
missing=`echo "$missing" | xargs ls -d 2>/dev/null | xargs grep -L "http://www.apache.org/licenses/LICENSE-2.0"`
if [[ -z "$missing" ]]; then
    echo "All remaining files have Apache 2.0 headers"
    exit 0
fi
echo "The following files are missing traditional Apache 2.0 headers:"
echo "$missing"
echo "Fatal Error - All files must have a license header"
exit 1
"use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.ic_no_transfer = void 0;
var ic_no_transfer = {
"viewBox": "0 0 24 24",
"children": [{
"name": "rect",
"attribs": {
"fill": "none",
"height": "24",
"width": "24"
},
"children": []
}, {
"name": "path",
"attribs": {
"d": "M21.19,21.19L2.81,2.81L1.39,4.22L4,6.83V16c0,0.88,0.39,1.67,1,2.22V20c0,0.55,0.45,1,1,1h1c0.55,0,1-0.45,1-1v-1h8v1 c0,0.55,0.45,1,1,1h1c0.05,0,0.09-0.02,0.14-0.03l1.64,1.64L21.19,21.19z M7.5,17C6.67,17,6,16.33,6,15.5C6,14.67,6.67,14,7.5,14 S9,14.67,9,15.5C9,16.33,8.33,17,7.5,17z M6,11V8.83L8.17,11H6z M8.83,6L5.78,2.95C7.24,2.16,9.48,2,12,2c4.42,0,8,0.5,8,4v10 c0,0.35-0.08,0.67-0.19,0.98L13.83,11H18V6H8.83z"
},
"children": []
}]
};
exports.ic_no_transfer = ic_no_transfer; |
def inorder_traversal(root, visit=print):
    """In-order (left, node, right) traversal of a binary tree.

    Args:
        root: A tree node exposing ``.left``, ``.right`` and ``.val``
            attributes, or None/falsy for an empty subtree.
        visit: Callable applied to each node's value. Defaults to ``print``,
            preserving the original behavior while allowing callers to
            collect values instead.
    """
    if root:
        inorder_traversal(root.left, visit)
        visit(root.val)
        inorder_traversal(root.right, visit)
#!/bin/bash -e
# Stops the Clearwater vellum node: unmonitor the services in monit first so
# it does not restart them, then stop the vellum service itself.
ctx logger info "Stopping vellum node"
sudo monit unmonitor -g vellum
sudo service vellum stop
sudo monit unmonitor clearwater_cluster_manager
sudo monit unmonitor clearwater_config_manager
sudo monit unmonitor -g etcd
|
package com.winricklabs.mouse.websocket.message;
/**
 * Immutable websocket payload carrying motor power values.
 */
public class Message {

    /** Commanded power for the left motor. */
    private final double leftMotor;
    /** Commanded power for the right motor. */
    private final double rightMotor;

    public Message(double leftMotor, double rightMotor) {
        this.leftMotor = leftMotor;
        this.rightMotor = rightMotor;
    }

    /**
     * @return the left motor value supplied at construction.
     * (Added: the fields were previously write-only, unreadable even by
     * reflection-free serializers.)
     */
    public double getLeftMotor() {
        return leftMotor;
    }

    /** @return the right motor value supplied at construction. */
    public double getRightMotor() {
        return rightMotor;
    }
}
|
# Run the debug build under valgrind: once with no configuration file,
# then once per sample configuration, in the original order.
VALGRIND="valgrind --leak-check=full --show-reachable=no"
BINARY=./accord_dub_debug.out

$VALGRIND "$BINARY";

for config in \
    accord_config_sample_a_priori_absorption.txt \
    accord_config_sample_all_shapes_hybrid.txt \
    accord_config_sample_all_shapes_meso.txt \
    accord_config_sample_all_shapes_micro.txt \
    accord_config_sample_communication.txt \
    accord_config_sample_communication_chemical.txt \
    accord_config_sample_crowding.txt \
    accord_config_sample_flow.txt \
    accord_config_sample_flow_closed_hybrid.txt \
    accord_config_sample_hybrid.txt \
    accord_config_sample_pipe_reaction_diffusion.txt \
    accord_config_sample_pipe_reaction_diffusion_microscopic.txt \
    accord_config_sample_point_diffusion.txt \
    accord_config_sample_reactor.txt \
    accord_config_sample_reactor_2nd_order.txt \
    accord_config_sample_reactor_microscopic.txt \
    accord_config_sample_surface.txt
do
    $VALGRIND "$BINARY" "$config";
done
<reponame>Exaphis/Synchronous<filename>frontend/src/components/WorkspaceNicknameChangeDialog.js
import * as React from "react";
import { GenericFieldChangeDialog } from "./GenericFieldChangeDialog";
/**
 * Thin wrapper configuring the generic field-change dialog for editing a
 * workspace nickname. All state handling lives in GenericFieldChangeDialog;
 * onNicknameUpdateAsync is invoked with the new value when the user commits.
 */
export function WorkspaceNicknameChangeDialog(props) {
  const { isOpen, onRequestClose, onNicknameUpdateAsync } = props;

  return (
    <GenericFieldChangeDialog
      isOpen={isOpen}
      onRequestClose={onRequestClose}
      onChangeField={onNicknameUpdateAsync}
      fieldName={"workspace nickname"}
    />
  );
}
|
def Levenshtein_distance(str1, str2):
    '''
    Compute the Levenshtein (edit) distance between two strings.

    Uses the classic dynamic-programming table: dist[i][j] is the minimum
    number of single-character insertions, deletions and substitutions
    needed to turn the first i characters of str1 into the first j
    characters of str2.
    '''
    rows = len(str1)
    cols = len(str2)

    # Full (rows+1) x (cols+1) table, zero-initialised.
    dist = [[0] * (cols + 1) for _ in range(rows + 1)]

    # Transforming a prefix to/from the empty string costs one edit per
    # character, which seeds the first column and first row.
    for i in range(1, rows + 1):
        dist[i][0] = i
    for j in range(1, cols + 1):
        dist[0][j] = j

    # Each cell takes the cheapest of insertion, deletion or substitution;
    # substitution is free when the two characters already match.
    for i in range(1, rows + 1):
        for j in range(1, cols + 1):
            insertion = dist[i][j - 1] + 1
            deletion = dist[i - 1][j] + 1
            substitution = dist[i - 1][j - 1] + (str1[i - 1] != str2[j - 1])
            dist[i][j] = min(insertion, deletion, substitution)

    return dist[rows][cols]
# Quick demonstration on the classic example pair (expected distance: 3).
str1 = 'kitten'
str2 = 'sitting'
distance = Levenshtein_distance(str1, str2)
print("Levenshtein Distance between {} and {} is {}".format(str1, str2, distance))
<reponame>JenKinY/MallVipManage<gh_stars>1-10
package com.yingnuo.service;
import com.yingnuo.dao.UserDao;
import com.yingnuo.domain.Admin;
import com.yingnuo.domain.User;
import com.yingnuo.util.DataSourceUtils;
import org.apache.commons.dbutils.QueryRunner;
import org.apache.commons.dbutils.handlers.BeanHandler;
import org.apache.commons.dbutils.handlers.ScalarHandler;
import javax.security.auth.login.LoginException;
import java.sql.SQLException;
import java.util.List;
/**
 * Service layer for user management: login, CRUD operations and simple
 * aggregate queries. Every method delegates to {@link UserDao} and rewraps
 * any {@link SQLException} in a {@link LoginException} carrying a
 * user-facing message (messages are intentionally in Chinese, matching the
 * application's UI language).
 */
public class UserService {
    // Shared DAO instance used by all service methods.
    UserDao dao = new UserDao();

    // Log in: return the user matching the phone/password pair; throw when
    // the credentials are wrong or the database query fails.
    public User login(String phone, String password) throws LoginException {
        try {
            User user = dao.findUserByPhoneAndPassword(phone, password);
            if (user != null) {
                return user;
            }
            throw new LoginException("手机号或密码有误");
        } catch (SQLException e) {
            e.printStackTrace();
            throw new LoginException("登录失败");
        }
    }

    // Update a user located by phone number.
    // NOTE(review): unlike the other methods this declares the broad
    // Exception type -- consider narrowing for consistency with siblings.
    public Boolean upgradeUserByPhone(User user) throws Exception {
        try {
            return dao.upgradeUserByPhone(user);
        } catch (SQLException e) {
            e.printStackTrace();
            throw new Exception("更新失败");
        }
    }

    // Fetch all users; a null result from the DAO is treated as an error.
    public List<User> findAllUser() throws LoginException {
        try {
            List<User> userList = dao.findAllUser();
            if (userList != null) {
                return userList;
            }
            throw new LoginException("获取所有用户出错");
        } catch (SQLException e) {
            e.printStackTrace();
            throw new LoginException("获取所有用户失败");
        }
    }

    // Delete a user by user_id.
    public Boolean deleteUserByUserId(String user_id) throws LoginException {
        try {
            return dao.deleteUserByUserId(user_id);
        } catch (SQLException e) {
            e.printStackTrace();
            throw new LoginException("删除用户失败");
        }
    }

    // Modify a user by user_id.
    public Boolean updateUserByUserId(User user) throws LoginException {
        try {
            return dao.updateUserByUserId(user);
        } catch (SQLException e) {
            e.printStackTrace();
            throw new LoginException("修改用户失败");
        }
    }

    // Update a user's points by phone number.
    // NOTE(review): the original comment mentioned username/gender/address,
    // while the method name and failure message refer to points -- confirm
    // which fields dao.upgradeUserPointByPhone actually updates.
    public Boolean upgradeUserPointByPhone(User user) throws LoginException{
        try {
            return dao.upgradeUserPointByPhone(user);
        } catch (SQLException e) {
            e.printStackTrace();
            throw new LoginException("修改积分失败");
        }
    }

    // Add a new user.
    public Boolean addUser(User user) throws LoginException {
        try {
            return dao.addUser(user);
        } catch (SQLException e) {
            e.printStackTrace();
            throw new LoginException("添加用户失败");
        }
    }

    // Count of registered users.
    public long countUser() throws LoginException {
        try {
            return dao.countUser();
        } catch (SQLException e) {
            e.printStackTrace();
            throw new LoginException("获取用户量失败");
        }
    }

    // Sum of all users' points.
    public Double allPoint() throws LoginException {
        try {
            return dao.allPoint();
        } catch (SQLException e) {
            e.printStackTrace();
            throw new LoginException("获取所有积分失败");
        }
    }

    // Find a single user by phone number.
    public User findUserByPhone(String phone) throws LoginException {
        try {
            return dao.findUserByPhone(phone);
        } catch (SQLException e) {
            e.printStackTrace();
            throw new LoginException("查找失败");
        }
    }
}
|
<reponame>m00n-als/redux-regexp-router
/**
 * Created by sidchik on 29.03.17.
 */
// Re-export the router building blocks as named exports. Uses the standard
// ES re-export form instead of the non-standard "export Foo from '...'"
// syntax, which only compiles with the export-default-from Babel proposal
// plugin. Semantics are identical: each module's default export is exposed
// here under its component name.
export { default as Route } from './Route';
export { default as Switch } from './Switch';
export { default as Link } from './Link';
<filename>libpandabase/mem/code_allocator.cpp
/*
* Copyright (c) 2021 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "code_allocator.h"
#include "os/mem.h"
#include "trace/trace.h"
#include "mem/base_mem_stats.h"
#include "trace/trace.h"
#include <securec.h>
#include <cstring>
namespace panda {
// Code allocations are page-aligned so each one can have its page
// protection changed independently (see ProtectCode).
const Alignment CodeAllocator::PAGE_LOG_ALIGN = GetLogAlignment(os::mem::GetPageSize());
// Constructs the allocator on top of an arena in the code space and records
// allocation sizes into |mem_stats|. The tracked code range starts empty.
CodeAllocator::CodeAllocator(BaseMemStats *mem_stats)
    : arenaAllocator_([&]() {
          trace::ScopedTrace scoped_trace(__PRETTY_FUNCTION__);
          // Do not set up mem_stats in internal arena allocator, because we will manage memstats here.
          return ArenaAllocator(SpaceType::SPACE_TYPE_CODE, nullptr);
      }()),
      memStats_(mem_stats),
      codeRangeStart_(nullptr),
      codeRangeEnd_(nullptr)
{
    // The page alignment must lie within the allocator's supported range.
    ASSERT(PAGE_LOG_ALIGN >= LOG_ALIGN_MIN);
    ASSERT(PAGE_LOG_ALIGN <= LOG_ALIGN_MAX);
}
CodeAllocator::~CodeAllocator()
{
    // Drop the cached executable-range bounds; the arena allocator member
    // owns and releases the underlying memory itself.
    codeRangeEnd_ = nullptr;
    codeRangeStart_ = nullptr;
}
/// Allocates |size| bytes in the code space, copies |code_buff| into the new
/// buffer and remaps its pages read+exec. Returns nullptr when either the
/// allocation or the copy fails.
void *CodeAllocator::AllocateCode(size_t size, const void *code_buff)
{
    trace::ScopedTrace scoped_trace("Allocate Code");
    // Page alignment guarantees the protection change below only affects
    // this allocation's pages.
    void *code_ptr = arenaAllocator_.Alloc(size, PAGE_LOG_ALIGN);
    if (UNLIKELY(code_ptr == nullptr || memcpy_s(code_ptr, size, code_buff, size) != EOK)) {
        return nullptr;
    }
    ProtectCode(os::mem::MapRange<std::byte>(static_cast<std::byte *>(code_ptr), size));
    memStats_->RecordAllocateRaw(size, SpaceType::SPACE_TYPE_CODE);
    CodeRangeUpdate(code_ptr, size);
    return code_ptr;
}
/// Allocates |size| bytes in the code space without copying data or changing
/// page protections (unlike AllocateCode, the pages stay writable).
/// Returns an empty range on allocation failure.
os::mem::MapRange<std::byte> CodeAllocator::AllocateCodeUnprotected(size_t size)
{
    trace::ScopedTrace scoped_trace("Allocate Code");
    void *code_ptr = arenaAllocator_.Alloc(size, PAGE_LOG_ALIGN);
    if (UNLIKELY(code_ptr == nullptr)) {
        return os::mem::MapRange<std::byte>(nullptr, 0);
    }
    memStats_->RecordAllocateRaw(size, SpaceType::SPACE_TYPE_CODE);
    CodeRangeUpdate(code_ptr, size);
    return os::mem::MapRange<std::byte>(static_cast<std::byte *>(code_ptr), size);
}
/* static */
// Remaps the given range read+exec (removes write permission), making the
// buffer executable.
void CodeAllocator::ProtectCode(os::mem::MapRange<std::byte> mem_range)
{
    mem_range.MakeReadExec();
}
// Returns true when |pc| lies within the span covered by code allocations.
// NOTE(review): the upper bound is inclusive -- pc == codeRangeEnd_ (one
// past the last allocated byte, see CodeRangeUpdate) is accepted. Confirm
// this is intentional, e.g. for return addresses pointing just past a call.
bool CodeAllocator::InAllocatedCodeRange(const void *pc)
{
    os::memory::ReadLockHolder rlock(code_range_lock_);
    return (pc >= codeRangeStart_) && (pc <= codeRangeEnd_);
}
/**
 * Widens the tracked [codeRangeStart_, codeRangeEnd_) span to cover the
 * freshly allocated buffer [ptr, ptr + size). Takes the range lock for
 * writing.
 */
void CodeAllocator::CodeRangeUpdate(void *ptr, size_t size)
{
    os::memory::WriteLockHolder rwlock(code_range_lock_);
    // Test the empty-range sentinel first: a relational comparison against
    // a null pointer has unspecified results, so it must not be evaluated
    // while the bound is still nullptr.
    if (codeRangeStart_ == nullptr || ptr < codeRangeStart_) {
        codeRangeStart_ = ptr;
    }
    void *buffer_end = ToVoidPtr(ToUintPtr(ptr) + size);
    if (codeRangeEnd_ == nullptr || buffer_end > codeRangeEnd_) {
        codeRangeEnd_ = buffer_end;
    }
}
} // namespace panda
|
#ifndef LODTALK_VMCONTEXT_HPP_
#define LODTALK_VMCONTEXT_HPP_
#include <string>
#include <stdio.h>
#include <functional>
#include <unordered_map>
#include "Lodtalk/Definitions.h"
#include "Lodtalk/ObjectModel.hpp"
#ifdef _MSC_VER
# pragma warning( push )
# pragma warning( disable: 4251 )
#endif
namespace Lodtalk
{
// Subsystems wired together by VMContext (defined elsewhere).
class InterpreterProxy;
class MemoryManager;
class GarbageCollector;
class SpecialRuntimeObjects;
class AbstractClassFactory;
class SystemDictionary;

// Entry point of a primitive, invoked with the interpreter proxy.
typedef int (*PrimitiveFunction) (InterpreterProxy *proxy);
// Callback run by VMContext::withInterpreter with an active interpreter.
typedef std::function<void (InterpreterProxy *)> WithInterpreterBlock;
/**
 * VM Context
 *
 * Owns the memory manager, the special runtime objects and the global
 * dictionary of a single Lodtalk VM instance, and exposes the embedding
 * API: script execution, GC control, object creation and accessors, class
 * table management, global variables and primitive registration.
 */
class LODTALK_VM_EXPORT VMContext
{
public:
    VMContext();
    ~VMContext();

    MemoryManager *getMemoryManager();
    SpecialRuntimeObjects *getSpecialRuntimeObjects();

    // Script execution entry points.
    void executeDoIt(const std::string &code);
    void executeScript(const std::string &code, const std::string &name = "unnamed", const std::string &basePath = ".");
    void executeScriptFromFile(FILE *file, const std::string &name = "unnamed", const std::string &basePath = ".");
    void executeScriptFromFileNamed(const std::string &fileName);

    // Runs |block| with an interpreter for this context.
    void withInterpreter(const WithInterpreterBlock &block);

    // Garbage collection interface
    void disableGC();
    void enableGC();

    void registerGCRoot(Oop *gcroot, size_t size);
    void unregisterGCRoot(Oop *gcroot);
    void registerThreadForGC();
    void unregisterThreadForGC();
    bool garbageCollectionSafePoint();
    void registerNativeObject(Oop object);

    // Object memory
    uint8_t *allocateObjectMemory(size_t objectSize, bool bigObject);
    ObjectHeader *newObject(size_t fixedSlotCount, size_t indexableSize, ObjectFormat format, int classIndex, int identityHash = -1);
    ObjectHeader *basicNativeNewFromClassIndex(size_t classIndex);
    ObjectHeader *basicNativeNewFromFactory(AbstractClassFactory *factory);

    // Some object creation / accessing
    int64_t readIntegerObject(const Ref<Oop> &ref);
    double readDoubleObject(const Ref<Oop> &ref);
    Oop positiveInt32ObjectFor(uint32_t value);
    Oop positiveInt64ObjectFor(uint64_t value);
    Oop signedInt32ObjectFor(int32_t value);
    Oop signedInt64ObjectFor(int64_t value);
    uint32_t positiveInt32ValueOf(Oop object);
    uint64_t positiveInt64ValueOf(Oop object);
    Oop floatObjectFor(double value);
    double floatValueOf(Oop object);

    // Class table
    Oop getClassFromIndex(int classIndex);
    Oop getClassFromOop(Oop oop);
    void registerClassInTable(Oop clazz);
    size_t getFixedSlotCountOfClass(Oop clazzOop);

    // Class testing
    bool isClassOrMetaclass(Oop oop);
    bool isMetaclass(Oop oop);
    bool isClass(Oop oop);

    // Global variables
    Oop setGlobalVariable(const char *name, Oop value);
    Oop setGlobalVariable(Oop name, Oop value);
    Oop getGlobalFromName(const char *name);
    Oop getGlobalFromSymbol(Oop symbol);
    Oop getGlobalValueFromName(const char *name);
    Oop getGlobalValueFromSymbol(Oop symbol);

    // Object creation
    Oop makeByteString(const std::string &content);
    Oop makeByteSymbol(const std::string &content);
    Oop makeSelector(const std::string &content);

    // Global context.
    Oop getGlobalContext();

    // Object reading
    std::string getClassNameOfObject(Oop object);
    std::string getByteSymbolData(Oop object);
    std::string getByteStringData(Oop object);

    // Other special objects.
    Oop getBlockActivationSelector(size_t argumentCount);
    Oop getSpecialMessageSelector(SpecialMessageSelector selectorIndex);
    Oop getCompilerOptimizedSelector(CompilerOptimizedSelector selectorIndex);
    CompilerOptimizedSelector getCompilerOptimizedSelectorId(Oop selector);

    unsigned int instanceClassFactory(AbstractClassFactory *factory);

    // Primitives
    PrimitiveFunction findPrimitive(int primitiveIndex);
    void registerPrimitive(int primitiveIndex, PrimitiveFunction primitive);
    void registerNamedPrimitive(Oop name, Oop module, PrimitiveFunction primitive);

private:
    void initialize();
    void createGlobalDictionary();
    void instanceClassFactories();

    MemoryManager *memoryManager;
    SpecialRuntimeObjects *specialRuntimeObjects;
    SystemDictionary *globalDictionary;
    // Maps each factory to the class index it was instantiated under.
    std::unordered_map<AbstractClassFactory*, unsigned int> instancedClassFactories;
    std::unordered_map<int, PrimitiveFunction> numberedPrimitives;
};
// Factory for a fresh VM context, plus get/set of the currently active one.
LODTALK_VM_EXPORT VMContext *createVMContext();
LODTALK_VM_EXPORT VMContext *getCurrentContext();
LODTALK_VM_EXPORT void setCurrentContext(VMContext *context);
// Garbage collection interface
class LODTALK_VM_EXPORT WithoutGC
{
public:
WithoutGC(VMContext *context)
: context(context)
{
context->disableGC();
}
~WithoutGC()
{
context->enableGC();
}
private:
VMContext *context;
};
#ifdef _MSC_VER
# pragma warning( pop )
#endif
} // End of namespace Lodtalk
#endif //LODTALK_VMCONTEXT_HPP_
|
#!/bin/bash
# QCs the soft clips results for a run
#
# $1 = case EBN
# $2 = data directory
source qclib.sh
CASE=$1
DIR=$2
CaseDir="$DIR/$CASE"

starttestcase gInfo Using dir $DIR

# Do tests

starttest DirExists
if [ -d "$CaseDir" ]; then passtest; else aborttestcase; fi

# Quote all path expansions: EBNs or data dirs containing spaces or glob
# characters would otherwise be word-split by the shell.
cd "$CaseDir"

starttest AnyGinfoFilesExist
if ls "$CaseDir/$CASE.gene_info.txt" >&2; then passtest; else aborttestcase; fi

starttest GinfoSize
cutoff=200
# NOTE(review): the input list is piped through sed '$d', which drops the
# last listed file from the size check -- confirm this is intentional.
while read file
do
    # Flag any gene_info file smaller than the cutoff (in bytes).
    if [ "$(filesize "$file")" -lt "$cutoff" ]
    then
        ls -l "$file" >&2
        failtestifactive Found at least one gene_info.txt file that was too small
    fi
done< <(ls "$CaseDir/$CASE.gene_info.txt" | sed '$d' )
passtestbydefault

summarize
|
import numpy as np
class ParticleRegularizeL2(object):
    """
    L2 regularizer for charges.

    The active cost is coeff_lambda times the sum over layers of the mean
    squared weight produced from the particle positions.

    NOTE(review): the gradient contribution is currently disabled -- all of
    ``cost_gradient`` is commented out and the incoming gradients are
    returned unchanged, so this regularizer affects the reported cost but
    not the parameter updates. Confirm this is intentional.
    """
    def __init__(self, coeff_lambda=0.0, zeta=8.0):
        # coeff_lambda: overall strength of the regularization term.
        # zeta: decay rate used by the (currently disabled) inter-particle
        #       repulsion variants kept in the comments below.
        self.coeff_lambda = coeff_lambda
        self.zeta = zeta
        self.n = 1

    def cost(self, particle_input, layers):
        """Return coeff_lambda * sum over layers of mean(w**2)."""
        c = 0.0
        # c = np.sum(particle_input.q * particle_input.q)
        # # c = np.sum(particle_input.rx * particle_input.rx + particle_input.ry * particle_input.ry + particle_input.rz * particle_input.rz)
        # for layer in layers:
        #     # c += np.sum(layer.q * layer.q) + np.sum(layer.b * layer.b)
        #     # c += np.sum(layer.q * layer.q)
        #     # c += np.sum(layer.rx * layer.rx + layer.ry * layer.ry + layer.rz * layer.rz)
        #
        #     # Layer inter-particle repulsion
        #     for i in range(layer.output_size):
        #         rx_i = layer.rx[i]
        #         ry_i = layer.ry[i]
        #         rz_i = layer.rz[i]
        #         for j in range(i+1, layer.output_size):
        #             dx = layer.rx[j] - rx_i
        #             dy = layer.ry[j] - ry_i
        #             dz = layer.rz[j] - rz_i
        #             d2 = dx*dx + dy*dy + dz*dz
        #             c += np.exp(-self.zeta * d2)
        #
        #     n = layer.output_size
        #     c /= (n * (n-1)) / 2
        # # Input layer inter-particle repulsion
        # for i in range(particle_input.output_size):
        #     rx_i = particle_input.rx[i]
        #     ry_i = particle_input.ry[i]
        #     rz_i = particle_input.rz[i]
        #     for j in range(i+1, particle_input.output_size):
        #         dx = particle_input.rx[j] - rx_i
        #         dy = particle_input.ry[j] - ry_i
        #         dz = particle_input.rz[j] - rz_i
        #         d2 = dx*dx + dy*dy + dz*dz
        #         c += np.exp(-self.zeta * d2)
        # c /= n

        # Compute the matrices: accumulate the mean squared weight of each
        # layer, feeding each layer's positions into the next.
        r = particle_input.get_rxyz()
        for i, layer in enumerate(layers):
            w = layer.compute_w(r)
            # c += np.sum(w * w)
            c += np.mean(w * w)
            r = layer.get_rxyz()
        return self.coeff_lambda * c

    def cost_gradient(self, particle_input, layers, dc_dq, dc_db, dc_dr):
        """Pass-through: returns the incoming gradients unchanged (the
        analytic gradient below is fully commented out)."""
        # dc_dr_x = dc_dr[0]
        # dc_dr_y = dc_dr[1]
        # dc_dr_z = dc_dr[2]
        #
        # two_lambda = 2.0 * self.coeff_lambda
        #
        # # # dc_dq[0] += two_lambda * particle_input.q
        # # # dc_dr_x[0] += two_lambda * particle_input.rx
        # # # dc_dr_y[0] += two_lambda * particle_input.ry
        # # # dc_dr_z[0] += two_lambda * particle_input.rz
        # for l, layer in enumerate(layers):
        #     # dc_dq[l] += two_lambda * layer.q
        #     # dc_db[l] += two_lambda * layer.b
        #     # dc_dr_x[l+1] += two_lambda * layer.rx
        #     # dc_dr_y[l+1] += two_lambda * layer.ry
        #     # dc_dr_z[l+1] += two_lambda * layer.rz
        #
        #     n = layer.output_size
        #     n = (n * (n-1)) / 2
        #     for i in range(layer.output_size):
        #         rx_i = layer.rx[i]
        #         ry_i = layer.ry[i]
        #         rz_i = layer.rz[i]
        #         for j in range(i+1, layer.output_size):
        #             dx = layer.rx[j] - rx_i
        #             dy = layer.ry[j] - ry_i
        #             dz = layer.rz[j] - rz_i
        #             d2 = dx*dx + dy*dy + dz*dz
        #             # tmp = two_lambda * self.zeta * np.exp(-self.zeta * d2)
        #             tmp = two_lambda * self.zeta * np.exp(-self.zeta * d2) / n
        #             tx = tmp * dx
        #             ty = tmp * dy
        #             tz = tmp * dz
        #
        #             dc_dr_x[l+1][i] += tx
        #             dc_dr_y[l+1][i] += ty
        #             dc_dr_z[l+1][i] += tz
        #             dc_dr_x[l+1][j] -= tx
        #             dc_dr_y[l+1][j] -= ty
        #             dc_dr_z[l+1][j] -= tz
        # #
        # # # for i in range(particle_input.output_size):
        # # #     rx_i = particle_input.rx[i]
        # # #     ry_i = particle_input.ry[i]
        # # #     rz_i = particle_input.rz[i]
        # # #     for j in range(i+1, particle_input.output_size):
        # # #         dx = particle_input.rx[j] - rx_i
        # # #         dy = particle_input.ry[j] - ry_i
        # # #         dz = particle_input.rz[j] - rz_i
        # # #         d2 = dx*dx + dy*dy + dz*dz
        # # #         tmp = two_lambda * self.zeta * np.exp(-self.zeta * d2)
        # # #         tx = tmp * dx
        # # #         ty = tmp * dy
        # # #         tz = tmp * dz
        # # #
        # # #         dc_dr_x[0][i] += tx
        # # #         dc_dr_y[0][i] += ty
        # # #         dc_dr_z[0][i] += tz
        # # #         dc_dr_x[0][j] -= tx
        # # #         dc_dr_y[0][j] -= ty
        # # #         dc_dr_z[0][j] -= tz
        #
        # dc_dr = (dc_dr_x, dc_dr_y, dc_dr_z)
        return dc_dq, dc_db, dc_dr
<reponame>kiwiroy/libtickit<filename>src/termdriver.h
#include "tickit.h"
#include "tickit-termdrv.h"
/* Arguments handed to a driver probe: the terminal type string and an
 * optional terminfo hook. */
typedef struct {
  const char *termtype;
  const struct TickitTerminfoHook *ti_hook;
} TickitTermProbeArgs;

/* A driver probe: constructs a driver for the probed terminal.
 * Presumably returns NULL when this driver cannot handle the terminal --
 * confirm against the probe implementations.
 * NOTE(review): the member name 'new' is a C++ keyword, so this header is
 * usable from C only. */
typedef struct {
  TickitTermDriver *(*new)(const TickitTermProbeArgs *args);
} TickitTermDriverProbe;

/* The built-in probes: the xterm-family driver and the terminfo driver. */
extern TickitTermDriverProbe tickit_termdrv_probe_xterm;
extern TickitTermDriverProbe tickit_termdrv_probe_ti;
|
<filename>src/test/java/jooq/generated/entities/static_/tables/UpdateHistory.java
/*
* This file is generated by jOOQ.
*/
package jooq.generated.entities.static_.tables;
import java.sql.Timestamp;
import java.util.Arrays;
import java.util.List;
import javax.annotation.Generated;
import jooq.generated.entities.static_.Keys;
import jooq.generated.entities.static_.Public;
import jooq.generated.entities.static_.tables.records.UpdateHistoryRecord;
import org.jooq.Field;
import org.jooq.Schema;
import org.jooq.Table;
import org.jooq.TableField;
import org.jooq.UniqueKey;
import org.jooq.impl.TableImpl;
/**
 * This class is generated by jOOQ.
 *
 * <p>NOTE: do not edit by hand -- regenerate with the jOOQ code generator
 * instead, or manual changes will be lost on the next generation run.
 */
@Generated(
    value = {
        "http://www.jooq.org",
        "jOOQ version:3.9.2"
    },
    comments = "This class is generated by jOOQ"
)
@SuppressWarnings({ "all", "unchecked", "rawtypes" })
public class UpdateHistory extends TableImpl<UpdateHistoryRecord> {

    private static final long serialVersionUID = -1614584778;

    /**
     * The reference instance of <code>public.update_history</code>
     */
    public static final UpdateHistory UPDATE_HISTORY = new UpdateHistory();

    /**
     * The class holding records for this type
     */
    @Override
    public Class<UpdateHistoryRecord> getRecordType() {
        return UpdateHistoryRecord.class;
    }

    /**
     * The column <code>public.update_history.time</code> (not nullable).
     */
    public final TableField<UpdateHistoryRecord, Timestamp> TIME = createField("time", org.jooq.impl.SQLDataType.TIMESTAMP.nullable(false), this, "");

    /**
     * The column <code>public.update_history.status</code>.
     */
    public final TableField<UpdateHistoryRecord, String> STATUS = createField("status", org.jooq.impl.SQLDataType.CLOB, this, "");

    /**
     * Create a <code>public.update_history</code> table reference
     */
    public UpdateHistory() {
        this("update_history", null);
    }

    /**
     * Create an aliased <code>public.update_history</code> table reference
     */
    public UpdateHistory(String alias) {
        this(alias, UPDATE_HISTORY);
    }

    private UpdateHistory(String alias, Table<UpdateHistoryRecord> aliased) {
        this(alias, aliased, null);
    }

    private UpdateHistory(String alias, Table<UpdateHistoryRecord> aliased, Field<?>[] parameters) {
        super(alias, null, aliased, parameters, "");
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Schema getSchema() {
        return Public.PUBLIC;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public UniqueKey<UpdateHistoryRecord> getPrimaryKey() {
        return Keys.UPDATE_HISTORY_PKEY;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public List<UniqueKey<UpdateHistoryRecord>> getKeys() {
        return Arrays.<UniqueKey<UpdateHistoryRecord>>asList(Keys.UPDATE_HISTORY_PKEY);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public UpdateHistory as(String alias) {
        return new UpdateHistory(alias, this);
    }

    /**
     * Rename this table
     */
    @Override
    public UpdateHistory rename(String name) {
        return new UpdateHistory(name, null);
    }
}
|
#!/usr/bin/env bash
source cross_compile_env.sh
source native_compile_env.sh
# Builds a static libcurl for platform $1 (Android|iOS|win32|Darwin|Linux)
# and architecture $2.
# Consumes: CURL_SOURCE_DIR, BUILD_TOOLS_DIR, CWD, BUILD, SSL_USE_NATIVE,
# and the optional OPENSSL/ARES/LIBRTMP install dirs.
# Side effects: mutates CFLAGS/LDFLAGS/CC for the duration of the build and
# sets CURL_INSTALL_DIR for callers on completion.
function build_curl(){
    # Generate ./configure on first use; for MTL builds also refresh the
    # automake platform-detection helpers.
    if [ ! -f ${CURL_SOURCE_DIR}/configure ]
    then
        cd ${CURL_SOURCE_DIR}
        ./buildconf
        if [ -n "$MTL" ]
        then
            cp ${BUILD_TOOLS_DIR}/automake_config/config.guess ./
            cp ${BUILD_TOOLS_DIR}/automake_config/config.sub ./
        fi
        cd -
    fi
    LIBSDEPEND=""
    local ssl_opt=
    export CFLAGS="${HARDENED_CFLAG}"
    # Per-platform toolchain and flags setup.
    if [ "$1" == "Android" ]
    then
        cross_compile_set_platform_Android $2
    elif [ "$1" == "iOS" ]
    then
        LIBSDEPEND="LIBS=-lresolv"
        cross_compile_set_platform_iOS $2
        # Simulator architectures use the simulator SDK, devices the OS SDK.
        if [ "$2" == "x86_64" ] || [ "$2" == "i386" ]; then
            SYSROOT=${IPHONESIMULATOR_SDK}
        else
            SYSROOT=${IPHONEOS_SDK}
        fi
        export CFLAGS="${CFLAGS} -arch $2 -fembed-bitcode --sysroot=$SYSROOT -isysroot $SYSROOT -miphoneos-version-min=$DEPLOYMENT_TARGET"
        export LDFLAGS="-arch $2 --sysroot=$SYSROOT"
        export CC=clang
        # Apple's native TLS instead of OpenSSL when requested.
        if [[ "${SSL_USE_NATIVE}" == "TRUE" ]];then
            ssl_opt="--with-darwinssl"
        fi
    elif [[ "$1" == "win32" ]];then
        cross_compile_set_platform_win32 $2
    elif [[ "$1" == "Darwin" ]];then
        LIBSDEPEND="LIBS=-lresolv"
        if [[ "${SSL_USE_NATIVE}" == "TRUE" ]];then
            ssl_opt="--with-darwinssl"
        fi
        print_warning "native build for $1"
        native_compile_set_platform_macOS
        export CFLAGS="${CFLAGS} $CPU_FLAGS"
    elif [[ "$1" == "Linux" ]];then
        LIBSDEPEND="LIBS=-lresolv"
        print_warning "native build for $1"
    else
        echo "Unsupported platform"
        exit 1;
    fi
    # Static library, HTTP(S)+proxy only: every other protocol and optional
    # third-party backend is disabled.
    local config="--enable-shared=no \
                  --disable-symbol-hiding \
                  --enable-proxy \
                  --disable-debug \
                  --enable-optimize \
                  --disable-ftp \
                  --disable-gopher \
                  --disable-file \
                  --disable-imap \
                  --disable-ldap \
                  --disable-ldaps \
                  --disable-pop3 \
                  --disable-rtsp \
                  --disable-smtp \
                  --disable-telnet \
                  --disable-tftp \
                  --disable-smb \
                  --disable-smbs \
                  --disable-dict \
                  --without-gnutls \
                  --without-libidn2 \
                  --without-librtmp \
                  --without-brotli \
                  --without-libidn \
                  --without-nghttp2"
    local build_dir="${CWD}/build/curl/$1/$2"
    local install_dir="${CWD}/install/curl/$1/$2"
    mkdir -p ${build_dir}/
    # BUILD=False skips configure+make (reuses a previous install dir).
    if [ "${BUILD}" != "False" ];then
        cd ${build_dir}
        # Prefer OpenSSL when installed, unless the platform branch already
        # selected darwinssl above.
        if [ -z "${ssl_opt}" ] && [ -d "${OPENSSL_INSTALL_DIR}" ];then
            local ssl_opt="--with-ssl=${OPENSSL_INSTALL_DIR}"
        fi
        # c-ares resolver when available, otherwise curl's threaded resolver.
        if [ -d "${ARES_INSTALL_DIR}" ];then
            local resolver_opt="--enable-ares=${ARES_INSTALL_DIR}"
        else
            local resolver_opt="--enable-threaded-resolver"
        fi
        if [ -d "${LIBRTMP_INSTALL_DIR}" ];then
            local rtmp_opt="--with-librtmp=${LIBRTMP_INSTALL_DIR}"
        fi
        ${CURL_SOURCE_DIR}/configure -host=${CROSS_COMPILE} CC="${CC}" ${ssl_opt} ${resolver_opt} ${config} ${rtmp_opt} --prefix=${install_dir} ${LIBSDEPEND} || exit 1
        make -j8 install V=1 || exit 1
        cd -
    fi
    CURL_INSTALL_DIR=${install_dir}
    # Reset CFLAGS so later builds are not polluted by this platform's flags.
    export CFLAGS=""
}
|
#!/usr/bin/env bash
#
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Exit on any error
set -e

echo "Which AAR version do you want to download (ex: '0.9.5')?"
# -r keeps backslashes in the typed version literal.
read -r aarVersion

echo "Download $aarVersion aar from Jenkins"
# Quote all expansions so an unexpected version string cannot be word-split.
wget "https://matrix.org/jenkins/view/MatrixView/job/MatrixAndroidSDK/lastSuccessfulBuild/artifact/matrix-sdk/build/outputs/aar/matrix-sdk-release-${aarVersion}.aar"

echo "Copy downloaded AAR to the Riot project"
cp "matrix-sdk-release-${aarVersion}.aar" ~/workspaces/riot-android/vector/libs/matrix-sdk.aar

targetDir="SDK_$aarVersion"
echo "Create directory $targetDir/"
# -p: without it, re-running for the same version aborts here via 'set -e'
# because the directory already exists.
mkdir -p "${targetDir}"

echo "Move AAR to $targetDir/"
# ./ prefix guards against AAR names being parsed as options by mv.
mv ./*.aar "${targetDir}"

echo "Success!"
|
import random
# Draw all ten integers in [1, 10] in random order: sampling 10 items
# without replacement from a 10-element population yields a permutation.
rand_list = random.sample(range(1, 11), k=10)
print(rand_list)
<reponame>tian2992/belvo-python
import pytest
from belvo import __version__
from belvo.exceptions import RequestError
from belvo.http import APISession
# Tests for APISession. The 'responses' fixture mocks the HTTP layer;
# 'fake_url' and 'api_session' are presumably supplied by conftest.py --
# confirm against the test suite's conftest.

# login() must report failure (False) for any non-2xx status.
@pytest.mark.parametrize("wrong_http_code", [400, 401, 403, 500])
def test_login_returns_false_when_bad_response(wrong_http_code, responses, fake_url):
    responses.add(responses.GET, "{}/api/".format(fake_url), json={}, status=wrong_http_code)
    session = APISession(fake_url)
    result = session.login(secret_key_id="monty", secret_key_password="<PASSWORD>")
    assert not result


# delete() must report failure (False) for any non-2xx status.
@pytest.mark.parametrize("wrong_http_code", [400, 401, 403, 500])
def test_delete_returns_false_when_bad_response(wrong_http_code, responses, fake_url, api_session):
    responses.add(
        responses.DELETE, "{}/api/resource/666/".format(fake_url), json={}, status=wrong_http_code
    )
    result = api_session.delete("/api/resource/", 666)
    assert not result


# list() must transparently follow the "next" link and yield every page's
# results in order.
def test_get_yields_all_results_when_response_contains_next_page(responses, fake_url, api_session):
    resource_url = "{}/api/resources/".format(fake_url)
    data = {
        "next": "{}?page=2".format(resource_url),
        "count": 10,
        "results": ["one", "two", "three", "four", "five"],
    }
    resource_url_page_2 = "{}/api/resources/?page=2".format(fake_url)
    data_page_2 = {"next": None, "count": 10, "results": ["six", "seven", "eight", "nine", "ten"]}
    responses.add(responses.GET, resource_url, json=data, status=200, match_querystring=True)
    responses.add(
        responses.GET, resource_url_page_2, json=data_page_2, status=200, match_querystring=True
    )
    results = list(api_session.list("/api/resources/"))
    assert len(results) == 10
    assert results == [
        "one",
        "two",
        "three",
        "four",
        "five",
        "six",
        "seven",
        "eight",
        "nine",
        "ten",
    ]


# login() must advertise the client library and its version in User-Agent.
def test_login_sets_correct_user_agent(responses, fake_url):
    responses.add(responses.GET, "{}/api/".format(fake_url), json={}, status=200)
    session = APISession(fake_url)
    session.login(secret_key_id="monty", secret_key_password="<PASSWORD>")
    assert session.headers["User-Agent"] == f"belvo-python ({__version__})"


# login() must remember the secret key id on the session.
def test_login_sets_key_id(responses, fake_url):
    responses.add(responses.GET, "{}/api/".format(fake_url), json={}, status=200)
    session = APISession(fake_url)
    session.login(secret_key_id="monty", secret_key_password="<PASSWORD>")
    assert session.key_id == "monty"


# post(raise_exception=True) must raise RequestError carrying the status
# code and the decoded error payload.
def test_post_raises_exception_on_error_if_raises_exception_is_true(responses, fake_url):
    responses.add(
        responses.POST,
        "{}/fake-resource/".format(fake_url),
        json=[{"code": "unsupported", "message": "Wait, that's illegal!"}],
        status=400,
    )
    session = APISession(fake_url)
    with pytest.raises(RequestError) as exc:
        session.post("/fake-resource/", {}, raise_exception=True)
    assert exc.value.status_code == 400
    assert exc.value.detail == [{"code": "unsupported", "message": "Wait, that's illegal!"}]
    assert responses.calls[0].request.headers["Content-Type"] == "application/json"


# By default post() swallows the HTTP error and returns the error payload.
def test_post_doesnt_raise_exception_on_error_by_default(responses, fake_url):
    responses.add(
        responses.POST,
        "{}/fake-resource/".format(fake_url),
        json=[{"code": "unsupported", "message": "Wait, that's illegal!"}],
        status=400,
    )
    session = APISession(fake_url)
    result = session.post("/fake-resource/", {})
    assert result == [{"code": "unsupported", "message": "Wait, that's illegal!"}]
    assert responses.calls[0].request.headers["Content-Type"] == "application/json"


# Same contract for put(): raise_exception=True surfaces a RequestError.
def test_put_raises_exception_on_error_if_raises_exception_is_true(responses, fake_url):
    responses.add(
        responses.PUT,
        "{}/fake-resource/some-id/".format(fake_url),
        json=[{"code": "unsupported", "message": "Wait, that's illegal!"}],
        status=400,
    )
    session = APISession(fake_url)
    with pytest.raises(RequestError) as exc:
        session.put("/fake-resource/", "some-id", {}, raise_exception=True)
    assert exc.value.status_code == 400
    assert exc.value.detail == [{"code": "unsupported", "message": "Wait, that's illegal!"}]
    assert responses.calls[0].request.headers["Content-Type"] == "application/json"


# By default put() returns the error payload instead of raising.
def test_put_doesnt_raise_exception_on_error_by_default(responses, fake_url):
    responses.add(
        responses.PUT,
        "{}/fake-resource/some-id/".format(fake_url),
        json=[{"code": "unsupported", "message": "Wait, that's illegal!"}],
        status=400,
    )
    session = APISession(fake_url)
    result = session.put("/fake-resource/", "some-id", {})
    assert result == [{"code": "unsupported", "message": "Wait, that's illegal!"}]
    assert responses.calls[0].request.headers["Content-Type"] == "application/json"


# Same contract for patch(): raise_exception=True surfaces a RequestError.
def test_patch_raises_exception_on_error_if_raises_exception_is_true(responses, fake_url):
    responses.add(
        responses.PATCH,
        "{}/fake-resource/".format(fake_url),
        json=[{"code": "unsupported", "message": "Wait, that's illegal!"}],
        status=400,
    )
    session = APISession(fake_url)
    with pytest.raises(RequestError) as exc:
        session.patch("/fake-resource/", {}, raise_exception=True)
    assert exc.value.status_code == 400
    assert exc.value.detail == [{"code": "unsupported", "message": "Wait, that's illegal!"}]
    assert responses.calls[0].request.headers["Content-Type"] == "application/json"


# By default patch() returns the error payload instead of raising.
def test_patch_doesnt_raise_exception_on_error_by_default(responses, fake_url):
    responses.add(
        responses.PATCH,
        "{}/fake-resource/".format(fake_url),
        json=[{"code": "unsupported", "message": "Wait, that's illegal!"}],
        status=400,
    )
    session = APISession(fake_url)
    result = session.patch("/fake-resource/", {})
    assert result == [{"code": "unsupported", "message": "Wait, that's illegal!"}]
    assert responses.calls[0].request.headers["Content-Type"] == "application/json"
|
<filename>config/webpack/app-base.js<gh_stars>0
/**
* Base Webpack configuration for ReactJS applications. It is further extended
* for development and production use in the "app-development" and
* "app-production" configs.
*/
const _ = require('lodash');
const autoprefixer = require('autoprefixer');
const MiniCssExtractPlugin = require('mini-css-extract-plugin');
const forge = require('node-forge');
const fs = require('fs');
const moment = require('moment');
const path = require('path');
const { StatsWriterPlugin } = require('webpack-stats-plugin');
const webpack = require('webpack');
const WorkboxPlugin = require('workbox-webpack-plugin');
/**
* Creates a new Webpack config object, and performs some auxiliary operations
* on the way.
*
* @param {Object} ops Configuration params. This allows to modify some
* frequently changed options in a convenient way, without a need to manipulate
* directly with the created config object.
*
* The following options are accepted:
*
* @param {String} ops.babelEnv BABEL_ENV to use for Babel during the build.
*
* @param {String} ops.context Base URL for resolution of relative
* config paths.
*
* @param {String} ops.cssLocalIdent Optional. The template for CSS classnames
* generation by css-loader (it will be passed into the "localIdentName" param
* of the loader). It should match the corresponding setting in the Babel
* config. Defaults to: [hash:base64:6].
*
* @param {Object|String|String[]} ops.entry Entry points. If an object is
* passed in, the "polyfills" entry point is extended or appended to
* include some polyfills we consider obligatory. If a string or an array is
* passed in, it is assigned to the "main" entry point, and the "polyfills"
* entry point will be added to it.
*
* @param {Boolean|Object} ops.workbox Adds InjectManifest plugin from Workbox,
* with given options, if the argument is Object, or default ones, if it is any
* other truly value.
*
* @param {Boolean} ops.keepBuildInfo Optional. If `true` and a build info file
* from a previous build is found, the factory will use that rather than
* re-generating it. This provide the way to re-create webpack config at the
* server startup, without re-writing the build info generated previously
* during the bundling. Defaults to `false`.
*
* @param {String} ops.publicPath Base URL for the output of the build assets.
*/
module.exports = function configFactory(ops) {
  // Fill in defaults for any options the caller did not provide.
  const o = _.defaults(_.clone(ops), {
    cssLocalIdent: '[hash:base64:6]',
    publicPath: '',
  });
  // Single timestamp reused below both for the build info and for
  // cache-busting suffixes on emitted JS/CSS filenames.
  const now = moment();
  let buildInfo;
  const buildInfoUrl = path.resolve(o.context, '.build-info');
  /* If build-info file is found, we reuse those data. */
  if (fs.existsSync(buildInfoUrl) && o.keepBuildInfo) {
    buildInfo = JSON.parse(fs.readFileSync(buildInfoUrl));
  } else {
    /* Stores misc build info into the local ".build-info" file in the context
     * directory. */
    buildInfo = {
      /* A random 32-byte key, that can be used for encryption. */
      key: forge.random.getBytesSync(32),
      /* Public path used during build. */
      publicPath: o.publicPath,
      /* Build timestamp. */
      timestamp: now.utc().toISOString(),
      /* `true` if client-side code should setup a service worker. */
      useServiceWorker: Boolean(o.workbox),
    };
    fs.writeFileSync(buildInfoUrl, JSON.stringify(buildInfo));
  }
  /* Entry points normalization: accept a string/array (becomes "main"),
   * and always force the obligatory polyfills into the "polyfills" entry. */
  const entry = _.isPlainObject(o.entry)
    ? _.cloneDeep(o.entry) : { main: o.entry };
  if (!entry.polyfills) entry.polyfills = [];
  else if (!_.isArray(entry.polyfills)) {
    entry.polyfills = [entry.polyfills];
  }
  entry.polyfills = _.union(entry.polyfills, [
    '@babel/polyfill',
    'nodelist-foreach-polyfill',
  ]);
  const plugins = [
    new MiniCssExtractPlugin({
      chunkFilename: `[name]-${now.valueOf()}.css`,
      filename: `[name]-${now.valueOf()}.css`,
    }),
    // Exposes the build info to client-side code as the BUILD_INFO global.
    new webpack.DefinePlugin({
      BUILD_INFO: JSON.stringify(buildInfo),
    }),
    new StatsWriterPlugin({
      filename: '__stats__.json',
    }),
  ];
  /* Adds InjectManifest plugin from WorkBox, if opted to. */
  if (o.workbox) {
    if (!_.isObject(o.workbox)) o.workbox = {};
    // Note: swDest is intentionally placed after the spread so callers
    // cannot override the service worker output name.
    plugins.push(new WorkboxPlugin.InjectManifest({
      importWorkboxFrom: 'local',
      swSrc: path.resolve(__dirname, '../workbox/default.js'),
      ...o.workbox,
      swDest: '__service-worker.js',
    }));
  }
  return {
    context: o.context,
    entry,
    node: {
      __dirname: true,
      fs: 'empty',
    },
    mode: o.mode,
    output: {
      chunkFilename: `[name]-${now.valueOf()}.js`,
      filename: `[name]-${now.valueOf()}.js`,
      path: path.resolve(__dirname, o.context, 'build'),
      publicPath: `${o.publicPath}/`,
    },
    plugins,
    resolve: {
      alias: {
        /* Aliases to JS and JSX files are handled by Babel. */
        assets: path.resolve(o.context, 'src/assets'),
        components: path.resolve(o.context, 'src/shared/components'),
        fonts: path.resolve(o.context, 'src/assets/fonts'),
        styles: path.resolve(o.context, 'src/styles'),
      },
      extensions: ['.js', '.json', '.jsx', '.scss'],
      symlinks: false,
    },
    module: {
      rules: [{
        /* Loads font resources from "src/assets/fonts" folder. */
        test: /\.(eot|otf|svg|ttf|woff2?)$/,
        include: [
          /node_modules/,
          /src[/\\]assets[/\\]fonts/,
        ],
        loader: 'file-loader',
        options: {
          outputPath: 'fonts/',
          publicPath: `${o.publicPath}/fonts`,
        },
      }, {
        /* Loads JS and JSX modules, and inlines SVG assets. */
        test: /\.(jsx?|svg)$/,
        exclude: [/node_modules/],
        loader: 'babel-loader',
        options: {
          babelrc: false,
          configFile: false,
          envName: o.babelEnv,
          presets: ['topcoder-react-utils/config/babel/webpack'],
        },
      }, {
        /* Loads image assets. */
        test: /\.(gif|jpe?g|png)$/,
        loader: 'file-loader',
        options: {
          outputPath: 'images/',
          publicPath: `${o.publicPath}/images`,
        },
      }, {
        /* Loads SCSS stylesheets. */
        test: /\.scss/,
        use: [
          MiniCssExtractPlugin.loader, {
            loader: 'css-loader',
            options: {
              localIdentName: o.cssLocalIdent,
              modules: true,
            },
          }, {
            loader: 'postcss-loader',
            options: {
              plugins: [autoprefixer],
            },
          }, 'resolve-url-loader', {
            loader: 'sass-loader',
            options: {
              sourceMap: true,
            },
          },
        ],
      }, {
        /* Loads CSS stylesheets. It is assumed that CSS stylesheets come only
         * from dependencies, as we use SCSS inside our own code. */
        test: /\.css$/,
        use: [
          MiniCssExtractPlugin.loader,
          'css-loader',
        ],
      }],
    },
    optimization: {
      /* TODO: Dynamic chunk splitting does not play along with server-side
       * rendering of split chunks. Probably there is a way to achieve that,
       * but it is not a priority now. */
      splitChunks: false,
    },
  };
};
|
import { Inject, Injectable } from '@angular/core';
import { AppearanceAnimation, DialogLayoutDisplay, DisappearanceAnimation } from '../../../core/enums';
import { DataControl } from '../../../core/global-classes';
import { ConfirmBoxCustomStyles, ConfirmBoxSettings } from './classes';
import { IConfirmBoxUserConfig } from './interfaces';
@Injectable({
  providedIn: 'root'
})
/**
 * Merges the library author's defaults with any user-supplied confirm-box
 * configuration (injected under the 'confirmBoxConfig' token) into
 * `productionConfig`, which is the effective runtime configuration.
 * Precedence (last write wins): author defaults < user config.
 */
export class ConfirmBoxConfigService {
  // Library defaults, populated in the constructor.
  authorConfig: IConfirmBoxUserConfig = new ConfirmBoxSettings();
  // Effective config = author defaults overridden by user values.
  productionConfig: IConfirmBoxUserConfig = new ConfirmBoxSettings();
  constructor(
    @Inject('confirmBoxConfig')
    private userConfig: IConfirmBoxUserConfig = {}
  ) {
    // region *** confirmBox userConfig (user input app-module) ***
    const userConfigBase = new ConfirmBoxSettings();
    const dataControl = new DataControl();
    dataControl.copyValuesFrom(userConfig.confirmBoxCoreConfig, userConfigBase.confirmBoxCoreConfig); // this will make sure that object has right properties
    userConfig.confirmBoxCoreConfig = userConfigBase.confirmBoxCoreConfig;
    // endregion
    // region *** author default config values (if there is no user input) ***
    this.authorConfig.confirmBoxCoreConfig.width = 'auto';
    this.authorConfig.confirmBoxCoreConfig.height = 'auto';
    this.authorConfig.confirmBoxCoreConfig.buttonPosition = 'center';
    this.authorConfig.confirmBoxCoreConfig.confirmLabel = 'Confirm';
    this.authorConfig.confirmBoxCoreConfig.declineLabel = 'Decline';
    this.authorConfig.confirmBoxCoreConfig.disableIcon = false;
    this.authorConfig.confirmBoxCoreConfig.allowHtmlMessage = false;
    this.authorConfig.confirmBoxCoreConfig.layoutType = DialogLayoutDisplay.NONE;
    this.authorConfig.confirmBoxCoreConfig.animationIn = AppearanceAnimation.ZOOM_IN;
    this.authorConfig.confirmBoxCoreConfig.animationOut = DisappearanceAnimation.ZOOM_OUT;
    this.authorConfig.confirmBoxCoreConfig.customStyles = new ConfirmBoxCustomStyles();
    this.authorConfig.confirmBoxCoreConfig.iconStyleClass = null;
    // endregion
    // region *** Production setup ***
    // Order matters: defaults are copied first, then user values overwrite.
    dataControl.copyValuesFrom(this.authorConfig.confirmBoxCoreConfig, this.productionConfig.confirmBoxCoreConfig);
    dataControl.copyValuesFrom(userConfig.confirmBoxCoreConfig, this.productionConfig.confirmBoxCoreConfig);
    // endregion
  }
}
|
package clientAPI;
import javax.smartcardio.CardException;
/**
 * Contract for the electronic-purse (wallet) applet on the smart card.
 */
public interface Wallet {
    /**
     * Credits the wallet with the given amount.
     *
     * @param amountInCent amount of money, in cents
     * @throws CardException on card communication errors
     */
    void addMoney(int amountInCent) throws CardException;

    /**
     * Debits the given amount from the wallet.
     *
     * @param amountInCent amount of money, in cents
     * @throws CardException on card communication errors
     */
    void removeMoney(int amountInCent) throws CardException;

    /**
     * Returns the current balance of the wallet.
     *
     * @return the balance, in cents
     * @throws CardException on card communication errors
     */
    int checkBalance() throws CardException;
}
|
# Evaluates a language-model checkpoint on the WikiText-103 validation set,
# using a custom augmentation (keep only nouns and verbs, fill the first two
# thirds) and a last-sixth evaluation function; results are written under
# eval-outputs/. NOTE(review): --augmented/--augmentation_function/
# --eval_function are not upstream transformers flags — this assumes a
# locally modified run_language_modeling.py; confirm.
python transformers/examples/language-modeling/run_language_modeling.py --model_name_or_path train-outputs/1024+0+512-pad/13-model --tokenizer_name model-configs/1536-config --eval_data_file ../data/wikitext-103-raw/wiki.valid.raw --output_dir eval-outputs/1024+0+512-pad/13-1024+0+512-N-VB-fill-256 --do_eval --per_device_eval_batch_size 1 --dataloader_drop_last --augmented --augmentation_function remove_all_but_nouns_and_verbs_fill_first_two_thirds_sixth --eval_function last_sixth_eval |
# Prints the path of, then executes, every regular file under ./extractor.
# NOTE(review): '-exec {} \;' runs each found file itself as a command; this
# requires every file to be executable — confirm this is intended and not a
# missing command name before '{}'.
find extractor -type f -print -exec {} \;
|
/** Shape of the navigation store's state. */
interface NavState {
  organization: string;
  dependencies: string[];
}

/** Builds the default navigation state: no organization, no dependencies. */
function createInitialState(): NavState {
  const initial: NavState = { organization: '', dependencies: [] };
  return initial;
}
/** Akita store holding navigation state (current organization + dependency list). */
@Injectable({ providedIn: 'root' })
@StoreConfig({
  name: 'nav',
  resettable: true
})
export class NavStore extends Store<NavState> {
  constructor() {
    super(createInitialState());
  }
  // Clears organization and dependencies back to their empty defaults;
  // the spread preserves any other state fields.
  clearOrgAndDeps() {
    this.update(state => ({
      ...state,
      organization: '',
      dependencies: []
    }));
  }
}
/**
* @fileoverview This file is generated by the Angular 2 template compiler.
* Do not edit.
* @suppress {suspiciousCode,uselessCode,missingProperties}
*/
/* tslint:disable */
import * as import0 from '../../../../app/setup/wizardsMenu/wizards.component';
import * as import1 from '@angular/core/src/linker/view';
import * as import2 from '@angular/core/src/render/api';
import * as import3 from '@angular/core/src/linker/view_utils';
import * as import4 from '@angular/core/src/metadata/view';
import * as import5 from '@angular/core/src/linker/view_type';
import * as import6 from '@angular/core/src/change_detection/constants';
import * as import7 from '@angular/core/src/linker/component_factory';
import * as import8 from '@angular/router/src/router';
import * as import9 from '@angular/router/src/router_state';
import * as import10 from '../../../../app/service/navigation.service';
import * as import11 from '@ng-bootstrap/ng-bootstrap/modal/modal';
import * as import12 from '@angular/core/src/linker/view_container';
import * as import13 from '../../../node_modules/@angular/router/src/directives/router_outlet.ngfactory';
import * as import14 from '@angular/core/src/linker/template_ref';
import * as import15 from '@angular/router/src/router_outlet_map';
import * as import16 from '@angular/core/src/linker/component_factory_resolver';
import * as import17 from '@angular/router/src/directives/router_outlet';
// Compiler-generated wrapper: instantiates WizardsComponent with its four
// injected dependencies and adapts Angular's change-detection lifecycle
// hooks. This file is emitted by the Angular 2 template compiler — do not
// hand-edit; regenerate instead.
export class Wrapper_WizardsComponent {
  /*private*/ _eventHandler:Function;
  context:import0.WizardsComponent;
  /*private*/ _changed:boolean;
  constructor(p0:any,p1:any,p2:any,p3:any) {
    this._changed = false;
    this.context = new import0.WizardsComponent(p0,p1,p2,p3);
  }
  ngOnDetach(view:import1.AppView<any>,componentView:import1.AppView<any>,el:any):void {
  }
  ngOnDestroy():void {
    this.context.ngOnDestroy();
  }
  // ngOnInit is invoked on the very first check only (numberOfChecks === 0).
  ngDoCheck(view:import1.AppView<any>,el:any,throwOnChange:boolean):boolean {
    var changed:any = this._changed;
    this._changed = false;
    if (!throwOnChange) { if ((view.numberOfChecks === 0)) { this.context.ngOnInit(); } }
    return changed;
  }
  checkHost(view:import1.AppView<any>,componentView:import1.AppView<any>,el:any,throwOnChange:boolean):void {
  }
  handleEvent(eventName:string,$event:any):boolean {
    var result:boolean = true;
    return result;
  }
  subscribe(view:import1.AppView<any>,_eventHandler:any):void {
    this._eventHandler = _eventHandler;
  }
}
var renderType_WizardsComponent_Host:import2.RenderComponentType = import3.createRenderComponentType('',0,import4.ViewEncapsulation.None,([] as any[]),{});
// Compiler-generated host view: creates the <menus> host element, the
// component view and the wrapper, and wires dependency injection for the
// component's constructor arguments. Do not hand-edit.
class View_WizardsComponent_Host0 extends import1.AppView<any> {
  _el_0:any;
  compView_0:import1.AppView<import0.WizardsComponent>;
  _WizardsComponent_0_3:Wrapper_WizardsComponent;
  constructor(viewUtils:import3.ViewUtils,parentView:import1.AppView<any>,parentIndex:number,parentElement:any) {
    super(View_WizardsComponent_Host0,renderType_WizardsComponent_Host,import5.ViewType.HOST,viewUtils,parentView,parentIndex,parentElement,import6.ChangeDetectorStatus.CheckAlways);
  }
  createInternal(rootSelector:string):import7.ComponentRef<any> {
    this._el_0 = import3.selectOrCreateRenderHostElement(this.renderer,'menus',import3.EMPTY_INLINE_ARRAY,rootSelector,(null as any));
    this.compView_0 = new View_WizardsComponent0(this.viewUtils,this,0,this._el_0);
    this._WizardsComponent_0_3 = new Wrapper_WizardsComponent(this.injectorGet(import8.Router,this.parentIndex),this.injectorGet(import9.ActivatedRoute,this.parentIndex),this.injectorGet(import10.NavService,this.parentIndex),this.injectorGet(import11.NgbModal,this.parentIndex));
    this.compView_0.create(this._WizardsComponent_0_3.context);
    this.init(this._el_0,((<any>this.renderer).directRenderer? (null as any): [this._el_0]),(null as any));
    return new import7.ComponentRef_<any>(0,this,this._el_0,this._WizardsComponent_0_3.context);
  }
  injectorGetInternal(token:any,requestNodeIndex:number,notFoundResult:any):any {
    if (((token === import0.WizardsComponent) && (0 === requestNodeIndex))) { return this._WizardsComponent_0_3.context; }
    return notFoundResult;
  }
  detectChangesInternal(throwOnChange:boolean):void {
    this._WizardsComponent_0_3.ngDoCheck(this,this._el_0,throwOnChange);
    this.compView_0.internalDetectChanges(throwOnChange);
  }
  destroyInternal():void {
    this.compView_0.destroy();
    this._WizardsComponent_0_3.ngOnDestroy();
  }
  visitRootNodesInternal(cb:any,ctx:any):void {
    cb(this._el_0,ctx);
  }
}
// Public factory used by the router/bootstrap to create WizardsComponent.
export const WizardsComponentNgFactory:import7.ComponentFactory<import0.WizardsComponent> = new import7.ComponentFactory<import0.WizardsComponent>('menus',View_WizardsComponent_Host0,import0.WizardsComponent);
const styles_WizardsComponent:any[] = ([] as any[]);
var renderType_WizardsComponent:import2.RenderComponentType = import3.createRenderComponentType('',0,import4.ViewEncapsulation.None,styles_WizardsComponent,{});
// Compiler-generated component view for WizardsComponent's template: builds
// the wizards box DOM (close button, router-outlet, and an embedded template
// anchored at node 1), and dispatches the close button's click to
// context.open(...). Do not hand-edit.
export class View_WizardsComponent0 extends import1.AppView<import0.WizardsComponent> {
  _text_0:any;
  _anchor_1:any;
  /*private*/ _vc_1:import12.ViewContainer;
  _TemplateRef_1_4:any;
  _text_2:any;
  _el_3:any;
  _text_4:any;
  _el_5:any;
  _text_6:any;
  _el_7:any;
  _text_8:any;
  _el_9:any;
  _text_10:any;
  _text_11:any;
  _el_12:any;
  _text_13:any;
  _el_14:any;
  /*private*/ _vc_14:import12.ViewContainer;
  _RouterOutlet_14_5:import13.Wrapper_RouterOutlet;
  _text_15:any;
  _text_16:any;
  _text_17:any;
  constructor(viewUtils:import3.ViewUtils,parentView:import1.AppView<any>,parentIndex:number,parentElement:any) {
    super(View_WizardsComponent0,renderType_WizardsComponent,import5.ViewType.COMPONENT,viewUtils,parentView,parentIndex,parentElement,import6.ChangeDetectorStatus.CheckAlways);
  }
  createInternal(rootSelector:string):import7.ComponentRef<any> {
    const parentRenderNode:any = this.renderer.createViewRoot(this.parentElement);
    this._text_0 = this.renderer.createText(parentRenderNode,'\n  ',(null as any));
    this._anchor_1 = this.renderer.createTemplateAnchor(parentRenderNode,(null as any));
    this._vc_1 = new import12.ViewContainer(1,(null as any),this,this._anchor_1);
    this._TemplateRef_1_4 = new import14.TemplateRef_(this,1,this._anchor_1);
    this._text_2 = this.renderer.createText(parentRenderNode,'\n  ',(null as any));
    this._el_3 = import3.createRenderElement(this.renderer,parentRenderNode,'div',new import3.InlineArray2(2,'class','wizards_box'),(null as any));
    this._text_4 = this.renderer.createText(this._el_3,'\n    ',(null as any));
    this._el_5 = import3.createRenderElement(this.renderer,this._el_3,'div',new import3.InlineArray2(2,'class','wizards_top'),(null as any));
    this._text_6 = this.renderer.createText(this._el_3,'\n    ',(null as any));
    this._el_7 = import3.createRenderElement(this.renderer,this._el_3,'button',new import3.InlineArray2(2,'class','btn_close'),(null as any));
    this._text_8 = this.renderer.createText(this._el_7,'\n      ',(null as any));
    this._el_9 = import3.createRenderElement(this.renderer,this._el_7,'div',new import3.InlineArray2(2,'class','btn_close_in'),(null as any));
    this._text_10 = this.renderer.createText(this._el_7,'\n    ',(null as any));
    this._text_11 = this.renderer.createText(this._el_3,'\n    ',(null as any));
    this._el_12 = import3.createRenderElement(this.renderer,this._el_3,'div',new import3.InlineArray2(2,'class','wizards_main'),(null as any));
    this._text_13 = this.renderer.createText(this._el_12,'\n      ',(null as any));
    this._el_14 = import3.createRenderElement(this.renderer,this._el_12,'router-outlet',import3.EMPTY_INLINE_ARRAY,(null as any));
    this._vc_14 = new import12.ViewContainer(14,12,this,this._el_14);
    this._RouterOutlet_14_5 = new import13.Wrapper_RouterOutlet(this.parentView.injectorGet(import15.RouterOutletMap,this.parentIndex),this._vc_14.vcRef,this.parentView.injectorGet(import16.ComponentFactoryResolver,this.parentIndex),(null as any));
    this._text_15 = this.renderer.createText(this._el_12,'\n    ',(null as any));
    this._text_16 = this.renderer.createText(this._el_3,'\n  ',(null as any));
    this._text_17 = this.renderer.createText(parentRenderNode,'\n  ',(null as any));
    var disposable_0:Function = import3.subscribeToRenderElement(this,this._el_7,new import3.InlineArray2(2,'click',(null as any)),this.eventHandler(this.handleEvent_7));
    this.init((null as any),((<any>this.renderer).directRenderer? (null as any): [
      this._text_0,
      this._anchor_1,
      this._text_2,
      this._el_3,
      this._text_4,
      this._el_5,
      this._text_6,
      this._el_7,
      this._text_8,
      this._el_9,
      this._text_10,
      this._text_11,
      this._el_12,
      this._text_13,
      this._el_14,
      this._text_15,
      this._text_16,
      this._text_17
    ]
    ),[disposable_0]);
    return (null as any);
  }
  injectorGetInternal(token:any,requestNodeIndex:number,notFoundResult:any):any {
    if (((token === import14.TemplateRef) && (1 === requestNodeIndex))) { return this._TemplateRef_1_4; }
    if (((token === import17.RouterOutlet) && (14 === requestNodeIndex))) { return this._RouterOutlet_14_5.context; }
    return notFoundResult;
  }
  detectChangesInternal(throwOnChange:boolean):void {
    this._RouterOutlet_14_5.ngDoCheck(this,this._el_14,throwOnChange);
    this._vc_1.detectChangesInNestedViews(throwOnChange);
    this._vc_14.detectChangesInNestedViews(throwOnChange);
  }
  destroyInternal():void {
    this._vc_1.destroyNestedViews();
    this._vc_14.destroyNestedViews();
    this._RouterOutlet_14_5.ngOnDestroy();
  }
  createEmbeddedViewInternal(nodeIndex:number):import1.AppView<any> {
    if ((nodeIndex == 1)) { return new View_WizardsComponent1(this.viewUtils,this,1,this._anchor_1,this._vc_1); }
    return (null as any);
  }
  // Click handler for the close button: delegates to the component's
  // open(...) with the embedded template ref.
  handleEvent_7(eventName:string,$event:any):boolean {
    this.markPathToRootAsCheckOnce();
    var result:boolean = true;
    if ((eventName == 'click')) {
      const pd_sub_0:any = ((<any>this.context.open(this._TemplateRef_1_4)) !== false);
      result = (pd_sub_0 && result);
    }
    return result;
  }
}
// Compiler-generated embedded view (template node 1 of WizardsComponent):
// a "login_alert" box with "Disconnect from printer" and "Lock screen"
// buttons; the latter's click delegates to context.close(...). Do not
// hand-edit.
class View_WizardsComponent1 extends import1.AppView<any> {
  _text_0:any;
  _el_1:any;
  _text_2:any;
  _el_3:any;
  _text_4:any;
  _text_5:any;
  _el_6:any;
  _text_7:any;
  _text_8:any;
  _text_9:any;
  constructor(viewUtils:import3.ViewUtils,parentView:import1.AppView<any>,parentIndex:number,parentElement:any,declaredViewContainer:import12.ViewContainer) {
    super(View_WizardsComponent1,renderType_WizardsComponent,import5.ViewType.EMBEDDED,viewUtils,parentView,parentIndex,parentElement,import6.ChangeDetectorStatus.CheckAlways,declaredViewContainer);
  }
  createInternal(rootSelector:string):import7.ComponentRef<any> {
    this._text_0 = this.renderer.createText((null as any),'\n    ',(null as any));
    this._el_1 = import3.createRenderElement(this.renderer,(null as any),'div',new import3.InlineArray2(2,'class','login_alert'),(null as any));
    this._text_2 = this.renderer.createText(this._el_1,'\n      ',(null as any));
    this._el_3 = import3.createRenderElement(this.renderer,this._el_1,'button',new import3.InlineArray4(4,'class','btn_depth_style','style','margin-bottom:7px;'),(null as any));
    this._text_4 = this.renderer.createText(this._el_3,'\n        Disconnect from printer\n      ',(null as any));
    this._text_5 = this.renderer.createText(this._el_1,'\n      ',(null as any));
    this._el_6 = import3.createRenderElement(this.renderer,this._el_1,'button',new import3.InlineArray2(2,'class','btn_depth_style'),(null as any));
    this._text_7 = this.renderer.createText(this._el_6,'\n        Lock screen\n      ',(null as any));
    this._text_8 = this.renderer.createText(this._el_1,'\n    ',(null as any));
    this._text_9 = this.renderer.createText((null as any),'\n  ',(null as any));
    var disposable_0:Function = import3.subscribeToRenderElement(this,this._el_6,new import3.InlineArray2(2,'click',(null as any)),this.eventHandler(this.handleEvent_6));
    this.init(this._text_9,((<any>this.renderer).directRenderer? (null as any): [
      this._text_0,
      this._el_1,
      this._text_2,
      this._el_3,
      this._text_4,
      this._text_5,
      this._el_6,
      this._text_7,
      this._text_8,
      this._text_9
    ]
    ),[disposable_0]);
    return (null as any);
  }
  visitRootNodesInternal(cb:any,ctx:any):void {
    cb(this._text_0,ctx);
    cb(this._el_1,ctx);
    cb(this._text_9,ctx);
  }
  // Click handler for the "Lock screen" button.
  handleEvent_6(eventName:string,$event:any):boolean {
    this.markPathToRootAsCheckOnce();
    var result:boolean = true;
    if ((eventName == 'click')) {
      const pd_sub_0:any = ((<any>this.context.close('Close click')) !== false);
      result = (pd_sub_0 && result);
    }
    return result;
  }
}
<reponame>jamacanbacn/splits-io
# Drops the url column from google_users. The column type (:string) is
# supplied so the migration stays reversible (rollback re-adds the column).
class RemoveUrlFromGoogleUsers < ActiveRecord::Migration[5.2]
  def change
    remove_column :google_users, :url, :string
  end
end
|
#!/usr/bin/env bash
#
# This script builds the application from source for multiple platforms.
# Aborts on the first failing command.
set -e
# Get the parent directory of where this script is.
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
DIR="$( cd -P "$( dirname "$SOURCE" )/../" && pwd )"
# Change into that directory
cd "$DIR"
# Get the git commit
# NOTE(review): $GOPATH is unquoted here; this breaks if GOPATH contains
# spaces or is a multi-entry path — confirm build environments never do that.
if [ -f $GOPATH/src/github.com/openebs/zfs-localpv/GITCOMMIT ];
then
    GIT_COMMIT="$(cat $GOPATH/src/github.com/openebs/zfs-localpv/GITCOMMIT)"
else
    GIT_COMMIT="$(git rev-parse HEAD)"
fi
# Set BUILDMETA based on travis tag
if [[ -n "$TRAVIS_TAG" ]] && [[ $TRAVIS_TAG != *"RC"* ]]; then
    echo "released" > BUILDMETA
fi
CURRENT_BRANCH=""
if [ -z ${TRAVIS_BRANCH} ];
then
    CURRENT_BRANCH=$(git branch | grep \* | cut -d ' ' -f2)
else
    CURRENT_BRANCH=${TRAVIS_BRANCH}
fi
# Get the version details
# Tagged builds use the latest tag; others use branch-commit:date.
if [ -n "$TRAVIS_TAG" ]; then
    VERSION="$(git describe --tags `git rev-list --tags --max-count=1`)"
else
    BUILDDATE=`date +%m-%d-%Y`
    SHORT_COMMIT="$(git rev-parse --short HEAD)"
    VERSION="$CURRENT_BRANCH-$SHORT_COMMIT:$BUILDDATE"
fi
echo -e "\nbuilding the ZFS Driver version :- $VERSION\n"
# NOTE(review): reads BUILDMETA from the fixed GOPATH path, not the BUILDMETA
# written above into the current directory — confirm these are the same file.
VERSION_META="$(cat $GOPATH/src/github.com/openebs/zfs-localpv/BUILDMETA)"
# Determine the arch/os combos we're building for
UNAME=$(uname)
ARCH=$(uname -m)
if [ "$UNAME" != "Linux" -a "$UNAME" != "Darwin" ] ; then
    echo "Sorry, this OS is not supported yet."
    exit 1
fi
if [ "$UNAME" = "Darwin" ] ; then
    XC_OS="darwin"
elif [ "$UNAME" = "Linux" ] ; then
    XC_OS="linux"
fi
if [ "${ARCH}" = "i686" ] ; then
    XC_ARCH='386'
elif [ "${ARCH}" = "x86_64" ] ; then
    XC_ARCH='amd64'
else
    echo "Unusable architecture: ${ARCH}"
    exit 1
fi
# PNAME (project name) and CTLNAME (binary name) must be provided by the
# caller's environment (e.g. the Makefile).
if [ -z "${PNAME}" ];
then
    echo "Project name not defined"
    exit 1
fi
if [ -z "${CTLNAME}" ];
then
    echo "CTLNAME not defined"
    exit 1
fi
# Delete the old dir
echo "==> Removing old directory..."
rm -rf bin/${PNAME}/*
mkdir -p bin/${PNAME}/
# If its dev mode, only build for ourself
if [[ "${DEV}" ]]; then
    XC_OS=$(go env GOOS)
    XC_ARCH=$(go env GOARCH)
fi
# Build!
echo "==> Building ${CTLNAME} using $(go version)... "
GOOS="${XC_OS}"
GOARCH="${XC_ARCH}"
output_name="bin/${PNAME}/"$GOOS"_"$GOARCH"/"$CTLNAME
if [ $GOOS = "windows" ]; then
    output_name+='.exe'
fi
# Version metadata is injected via -ldflags -X into the version package.
env GOOS=$GOOS GOARCH=$GOARCH go build -ldflags \
    "-X github.com/openebs/zfs-localpv/pkg/version.GitCommit=${GIT_COMMIT} \
    -X main.CtlName='${CTLNAME}' \
    -X github.com/openebs/zfs-localpv/pkg/version.Version=${VERSION} \
    -X github.com/openebs/zfs-localpv/pkg/version.VersionMeta=${VERSION_META}"\
    -o $output_name\
    ./cmd
echo ""
# Move all the compiled things to the $GOPATH/bin
GOPATH=${GOPATH:-$(go env GOPATH)}
case $(uname) in
    CYGWIN*)
        GOPATH="$(cygpath $GOPATH)"
        ;;
esac
OLDIFS=$IFS
# Take the first GOPATH entry when GOPATH is a colon-separated list.
IFS=: MAIN_GOPATH=($GOPATH)
IFS=$OLDIFS
# Create the gopath bin if not already available
mkdir -p ${MAIN_GOPATH}/bin/
# Copy our OS/Arch to the bin/ directory
DEV_PLATFORM="./bin/${PNAME}/$(go env GOOS)_$(go env GOARCH)"
for F in $(find ${DEV_PLATFORM} -mindepth 1 -maxdepth 1 -type f); do
    cp ${F} bin/${PNAME}/
    cp ${F} ${MAIN_GOPATH}/bin/
done
if [[ "x${DEV}" == "x" ]]; then
    # Zip and copy to the dist dir
    echo "==> Packaging..."
    for PLATFORM in $(find ./bin/${PNAME} -mindepth 1 -maxdepth 1 -type d); do
        OSARCH=$(basename ${PLATFORM})
        echo "--> ${OSARCH}"
        pushd "$PLATFORM" >/dev/null 2>&1
        zip ../${PNAME}-${OSARCH}.zip ./*
        popd >/dev/null 2>&1
    done
fi
# Done!
echo
echo "==> Results:"
ls -hl bin/${PNAME}/
|
# Split a sample sentence on whitespace and show the resulting words.
str_list = "This is a sample string".split()
print(str_list)
<gh_stars>1-10
/*
* Copyright (c) 2018 Ahome' Innovation Technologies. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ait.lienzo.ks.server;
import java.io.BufferedReader;
import java.io.InputStreamReader;
import com.ait.lienzo.ks.client.GetSourceService;
import com.google.gwt.user.server.rpc.RemoteServiceServlet;
@SuppressWarnings("serial")
/**
 * GWT RPC endpoint that serves the text of source files from the classpath,
 * restricted to the kitchen-sink package.
 */
public class GetSourceServiceImpl extends RemoteServiceServlet implements GetSourceService
{
    public GetSourceServiceImpl()
    {
    }

    /**
     * Reads the classpath resource at {@code url} and returns its text,
     * with every line terminated by {@code "\n"}.
     *
     * @param url classpath-relative resource path; must start with
     *            {@code com/ait/lienzo/ks}
     * @return the resource's text, or {@code ""} if reading fails
     * @throws IllegalArgumentException if the path is outside the allowed package
     */
    @Override
    public String getSource(final String url) throws IllegalArgumentException
    {
        // Only resources under the kitchen-sink package may be served; this
        // prevents arbitrary classpath disclosure.
        if (!url.startsWith("com/ait/lienzo/ks"))
        {
            throw new IllegalArgumentException("bad source " + url);
        }
        String result = "";
        // try-with-resources closes the reader even when an exception occurs
        // mid-read (the original only closed it on the success path).
        try (BufferedReader reader = new BufferedReader(new InputStreamReader(getClass().getClassLoader().getResourceAsStream(url))))
        {
            final StringBuilder buffer = new StringBuilder();
            String line;
            while ((line = reader.readLine()) != null)
            {
                buffer.append(line);
                buffer.append("\n");
            }
            result = buffer.toString();
        }
        catch (final Exception e)
        {
            e.printStackTrace();
        }
        return result;
    }
}
|
public static void main(String[] args) {
for (int i = 0; i < 5; i++) {
System.out.println("Hello World!");
}
} |
<gh_stars>0
// Needed to resolve the SNAPSHOT build of Scala Native declared below.
resolvers += Resolver.sonatypeRepo("snapshots")
// Current release 0.4.0-M2
addSbtPlugin("org.scala-native" % "sbt-scala-native" % "0.4.0-SNAPSHOT")
// Automates Sonatype releases from CI.
addSbtPlugin("com.geirsson" % "sbt-ci-release" % "1.5.3")
|
#!/bin/sh
# Generates a FASTA file for a decoy structure, runs the auxiliary sequence
# calculations (BLAST-based accessibility + secondary-structure predictions),
# and links the validated results under the output directory.
# Requires $APP_PATH and $APP_SCRIPTS to be set by the calling environment.
#
# Fixes vs. original: "Uasage" typo in the usage message, error paths now
# exit non-zero instead of 0, and expansions are quoted so paths containing
# spaces do not word-split.
if [ $# -ne 3 ]; then
    echo "Usage: [App Name] [Target name] [Decoy Name] [Path]"
    exit 1
fi
target_name=$1
decoyPath=$2
outputPath=$3
# NOTE(review): Pdb2Fasta is invoked without arguments, yet $decoyPath.fasta
# is expected to exist afterwards — confirm the program reads its input from
# the environment or a fixed location.
echo "java -cp $APP_PATH/meshi.jar programs.Pdb2Fasta"
java -cp "$APP_PATH/meshi.jar" programs.Pdb2Fasta
mv "$decoyPath.fasta" "$outputPath/$target_name.fasta"
echo "$APP_SCRIPTS/sequenceAuxCalc.sh $outputPath/$target_name.fasta"
"$APP_SCRIPTS/sequenceAuxCalc.sh" "$outputPath/$target_name.fasta" # calculate blast files
if [ -e "$outputPath/$target_name.fasta.acc.out" ]; then
    # Sanity check: the predicted accessibility string must be exactly as
    # long as the sequence it was predicted for.
    acc_seq=$(cat "$outputPath/$target_name.fasta.acc.out" | head -2 | tail -1 | wc -m)
    acc_pred=$(cat "$outputPath/$target_name.fasta.acc" | wc -m)
    if [ "$acc_seq" -eq "$acc_pred" ]; then
        ln -s "$outputPath/$target_name.fasta.acc.out" "$outputPath/sasaPrediction"
    else
        echo "Error - solvation accessability file $target_name.fasta.acc.out sequence prediction wasn't calculated."
        exit 1
    fi
else
    echo "Error - solvation accessability file $target_name.fasta.acc.out doesn't exist."
    exit 1
fi
if [ -e "$outputPath/$target_name.fasta.ss2" ]; then
    ln -s "$outputPath/$target_name.fasta.ss2" "$outputPath/secondaryStructurePrediction"
else
    echo "Error - secondary structure prediction file $outputPath/$target_name.fasta.ss2 doesn't exist."
    exit 1
fi
|
// A binary-tree node: an integer key plus left/right child pointers.
struct Node
{
    int key;
    Node* left, *right;
};

// Allocates a leaf node holding `key`; both children start out NULL.
// Caller owns the returned node.
Node* newNode(int key)
{
    Node* fresh = new Node;
    fresh->left = NULL;
    fresh->right = NULL;
    fresh->key = key;
    return fresh;
}
// Prints the keys of the tree rooted at `root` in inorder
// (left subtree, node, right subtree), each followed by a space.
void inorder(Node *root)
{
    if (root == NULL)
        return;
    inorder(root->left);
    cout << root->key << " ";
    inorder(root->right);
}
// Depth-first (preorder: node, left, right) traversal that prints each
// visited key followed by a space.
void depthFirstSearch(struct Node* node)
{
    if (node == NULL)
        return;
    cout << node->key << " ";
    depthFirstSearch(node->left);
    depthFirstSearch(node->right);
}
// Driver Program to test above functions
int main()
{
/*
struct node *root = newNode(1);
root->left = newNode(2);
root->right = newNode(3);
root->left->left = newNode(4);
root->left->right = newNode(5);
*/
depthFirstSearch(root);
return 0;
} |
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package org.apache.tapestry5.internal.bindings;
import org.apache.tapestry5.ioc.Location;
/**
 * Binding type for literal, immutable values. Literal bindings are {@linkplain org.apache.tapestry5.Binding#isInvariant()
 * invariant}; any value provided by a LiteralBinding, even if {@linkplain org.apache.tapestry5.ioc.services.TypeCoercer#coerce(Object,
 * Class) coerced}, will be cached aggressively by the Tapestry component.
 *
 * LiteralBindings are often used for literal string values supplied in-line in the component template, but are used
 * for many other things as well: any kind of fixed, read-only value.
 */
public class LiteralBinding extends AbstractBinding
{
    // Human-readable description, used only by toString().
    private final String description;

    // The fixed value returned by every call to get().
    private final Object value;

    public LiteralBinding(Location location, String description, Object value)
    {
        super(location);
        this.description = description;
        this.value = value;
    }

    public Object get()
    {
        return value;
    }

    @Override
    public String toString()
    {
        return String.format("LiteralBinding[%s: %s]", description, value);
    }
}
|
import React, { Component } from 'react';
import { connect } from 'react-redux';
import Navbar from '../Components/navbar';
import GroceryItems from '../Components/grocery/groceryItems';
import IngredientDetails from '../Components/grocery/ingredientDetails';
import NewGroceryItemDialog from '../Components/grocery/newGroceryItemDialog';
import './Grocery.css';
// Page listing grocery items, with a modal dialog for creating new items
// and a details pane for the selected ingredient.
class GroceryPage extends Component {
  constructor(props) {
    super(props)
    this.state = {
      // Controls visibility of the "create new grocery item" modal.
      showNewGroceryItemDialog: false
    }
  }
  render() {
    return (
      <div className="grocery-container">
        <Navbar />
        <div className="grocery-header">
          <h1>Grocery Items</h1>
        </div>
        <div className="grocery-body">
          <button className="grocery-new-button" onClick={() => this.setState({ showNewGroceryItemDialog: true })}>Create New Grocery Item</button>
          <NewGroceryItemDialog cancel={() => this.setState({ showNewGroceryItemDialog: false })} visible={this.state.showNewGroceryItemDialog}/>
          <GroceryItems />
          <IngredientDetails />
        </div>
      </div>
    );
  }
}
// NOTE(review): `ingredients` is mapped but never read in this component —
// presumably kept so connect() re-renders on ingredient changes; confirm
// before removing.
const mapStateToProps = state => ({
  ingredients: state.grocery.ingredients
})
export default connect(mapStateToProps, null)(GroceryPage);
|
-- Catalog of movies with release year, rating and director.
CREATE TABLE movie (
  id INT(6) UNSIGNED AUTO_INCREMENT PRIMARY KEY,
  title VARCHAR(100) NOT NULL,
  year INT(4) NOT NULL,
  rating DECIMAL(3, 1) NOT NULL, -- one decimal place, e.g. 7.5
  director VARCHAR(100) NOT NULL
);
<filename>acmicpc.net/source/10179.cpp
// 10179. 쿠폰
// 2021.03.19
// 수학
#include <iostream>
using namespace std;
// Reads a count followed by that many prices; prints each price with a
// 20% coupon discount applied, as dollars with two decimal places.
int main()
{
    int count;
    double price;
    cin >> count;
    while (count-- > 0)
    {
        cin >> price;
        printf("$%.2f\n", price * 0.8);
    }
    return 0;
}
|
package backups
import "time"
// Shared timestamp used by every fixture below; timeVal is its parsed form
// for assertions against decoded response structs.
var (
	timestamp = "2015-11-12T14:22:42Z"
	timeVal, _ = time.Parse(time.RFC3339, timestamp)
)

// Canned JSON body for a single-backup GET response.
var getResp = `
{
  "backup": {
    "created": "` + timestamp + `",
    "description": "My Backup",
    "id": "61f12fef-edb1-4561-8122-e7c00ef26a82",
    "instance_id": "d4603f69-ec7e-4e9b-803f-600b9205576f",
    "locationRef": null,
    "name": "snapshot",
    "parent_id": null,
    "size": 100,
    "status": "NEW",
    "datastore": {
      "version": "5.1",
      "type": "MySQL",
      "version_id": "20000000-0000-0000-0000-000000000002"
    },
    "updated": "` + timestamp + `"
  }
}
`

// Canned JSON body for a backup-creation request.
var createReq = `
{
  "backup": {
    "description": "My Backup",
    "instance": "d4603f69-ec7e-4e9b-803f-600b9205576f",
    "name": "snapshot"
  }
}
`

// The create response has the same shape/content as the GET response.
var createResp = getResp

// Canned JSON body for a backup list response.
var listResp = `
{
  "backups": [
    {
      "status": "COMPLETED",
      "updated": "` + timestamp + `",
      "description": "Backup from Restored Instance",
      "datastore": {
        "version": "5.1",
        "type": "MySQL",
        "version_id": "20000000-0000-0000-0000-000000000002"
      },
      "id": "87972694-4be2-40f5-83f8-501656e0032a",
      "size": 0.141026,
      "name": "restored_backup",
      "created": "` + timestamp + `",
      "instance_id": "29af2cd9-0674-48ab-b87a-b160f00208e6",
      "parent_id": null,
      "locationRef": "http://localhost/path/to/backup"
    }
  ]
}
`
|
<gh_stars>1-10
import {IbcTxDataUpdateTaskService} from "./ibc_tx_data_update_task.service";
import {Test} from "@nestjs/testing";
import {AppModule} from "../app.module";
// Integration-style spec: boots the full AppModule and runs the IBC tx
// data-update task once against real dependencies (hence the very large
// timeout). NOTE(review): the describe name says "LatestMigrate" but the
// service under test is IbcTxDataUpdateTaskService — possibly a stale label.
describe('IbcTxLatestMigrateTaskService', () => {
  let ibcTxDataUpdateTaskService: IbcTxDataUpdateTaskService;
  beforeEach(async () => {
    const module = await Test.createTestingModule({
      imports: [
        AppModule
      ]
    }).compile();
    ibcTxDataUpdateTaskService = module.get<IbcTxDataUpdateTaskService>(IbcTxDataUpdateTaskService);
  })
  describe('handleUpdateIbcTx', () => {
    it('handleUpdateIbcTx', async () => {
      jest.setTimeout(100000000)
      await ibcTxDataUpdateTaskService.handleUpdateIbcTx()
      // console.log(ibcTxTaskService,'----')
    });
  });
})
def is_anagram(word1, word2):
    """Return True if the two words are anagrams of each other, ignoring case."""
    normalized = [sorted(word.lower()) for word in (word1, word2)]
    return normalized[0] == normalized[1]
<reponame>wayveai/concourse
// Code generated by counterfeiter. DO NOT EDIT.
package workerfakes
import (
"sync"
"code.cloudfoundry.org/lager"
"github.com/concourse/concourse/atc/db"
"github.com/concourse/concourse/atc/worker"
)
// FakeVolumeClient is a counterfeiter-generated test double for
// worker.VolumeClient (file header says "DO NOT EDIT"). For each interface
// method X it keeps: an optional caller-supplied XStub, a mutex guarding the
// method's state, the arguments recorded per call, default return values,
// and per-call-index return values.
type FakeVolumeClient struct {
CreateVolumeStub func(lager.Logger, worker.VolumeSpec, int, string, db.VolumeType) (worker.Volume, error)
createVolumeMutex sync.RWMutex
createVolumeArgsForCall []struct {
arg1 lager.Logger
arg2 worker.VolumeSpec
arg3 int
arg4 string
arg5 db.VolumeType
}
createVolumeReturns struct {
result1 worker.Volume
result2 error
}
createVolumeReturnsOnCall map[int]struct {
result1 worker.Volume
result2 error
}
CreateVolumeForTaskCacheStub func(lager.Logger, worker.VolumeSpec, int, int, string, string) (worker.Volume, error)
createVolumeForTaskCacheMutex sync.RWMutex
createVolumeForTaskCacheArgsForCall []struct {
arg1 lager.Logger
arg2 worker.VolumeSpec
arg3 int
arg4 int
arg5 string
arg6 string
}
createVolumeForTaskCacheReturns struct {
result1 worker.Volume
result2 error
}
createVolumeForTaskCacheReturnsOnCall map[int]struct {
result1 worker.Volume
result2 error
}
FindOrCreateCOWVolumeForContainerStub func(lager.Logger, worker.VolumeSpec, db.CreatingContainer, worker.Volume, int, string) (worker.Volume, error)
findOrCreateCOWVolumeForContainerMutex sync.RWMutex
findOrCreateCOWVolumeForContainerArgsForCall []struct {
arg1 lager.Logger
arg2 worker.VolumeSpec
arg3 db.CreatingContainer
arg4 worker.Volume
arg5 int
arg6 string
}
findOrCreateCOWVolumeForContainerReturns struct {
result1 worker.Volume
result2 error
}
findOrCreateCOWVolumeForContainerReturnsOnCall map[int]struct {
result1 worker.Volume
result2 error
}
FindOrCreateVolumeForBaseResourceTypeStub func(lager.Logger, worker.VolumeSpec, int, string) (worker.Volume, error)
findOrCreateVolumeForBaseResourceTypeMutex sync.RWMutex
findOrCreateVolumeForBaseResourceTypeArgsForCall []struct {
arg1 lager.Logger
arg2 worker.VolumeSpec
arg3 int
arg4 string
}
findOrCreateVolumeForBaseResourceTypeReturns struct {
result1 worker.Volume
result2 error
}
findOrCreateVolumeForBaseResourceTypeReturnsOnCall map[int]struct {
result1 worker.Volume
result2 error
}
FindOrCreateVolumeForContainerStub func(lager.Logger, worker.VolumeSpec, db.CreatingContainer, int, string) (worker.Volume, error)
findOrCreateVolumeForContainerMutex sync.RWMutex
findOrCreateVolumeForContainerArgsForCall []struct {
arg1 lager.Logger
arg2 worker.VolumeSpec
arg3 db.CreatingContainer
arg4 int
arg5 string
}
findOrCreateVolumeForContainerReturns struct {
result1 worker.Volume
result2 error
}
findOrCreateVolumeForContainerReturnsOnCall map[int]struct {
result1 worker.Volume
result2 error
}
FindOrCreateVolumeForResourceCertsStub func(lager.Logger) (worker.Volume, bool, error)
findOrCreateVolumeForResourceCertsMutex sync.RWMutex
findOrCreateVolumeForResourceCertsArgsForCall []struct {
arg1 lager.Logger
}
findOrCreateVolumeForResourceCertsReturns struct {
result1 worker.Volume
result2 bool
result3 error
}
findOrCreateVolumeForResourceCertsReturnsOnCall map[int]struct {
result1 worker.Volume
result2 bool
result3 error
}
FindVolumeForResourceCacheStub func(lager.Logger, db.ResourceCache) (worker.Volume, bool, error)
findVolumeForResourceCacheMutex sync.RWMutex
findVolumeForResourceCacheArgsForCall []struct {
arg1 lager.Logger
arg2 db.ResourceCache
}
findVolumeForResourceCacheReturns struct {
result1 worker.Volume
result2 bool
result3 error
}
findVolumeForResourceCacheReturnsOnCall map[int]struct {
result1 worker.Volume
result2 bool
result3 error
}
FindVolumeForTaskCacheStub func(lager.Logger, int, int, string, string) (worker.Volume, bool, error)
findVolumeForTaskCacheMutex sync.RWMutex
findVolumeForTaskCacheArgsForCall []struct {
arg1 lager.Logger
arg2 int
arg3 int
arg4 string
arg5 string
}
findVolumeForTaskCacheReturns struct {
result1 worker.Volume
result2 bool
result3 error
}
findVolumeForTaskCacheReturnsOnCall map[int]struct {
result1 worker.Volume
result2 bool
result3 error
}
LookupVolumeStub func(lager.Logger, string) (worker.Volume, bool, error)
lookupVolumeMutex sync.RWMutex
lookupVolumeArgsForCall []struct {
arg1 lager.Logger
arg2 string
}
lookupVolumeReturns struct {
result1 worker.Volume
result2 bool
result3 error
}
lookupVolumeReturnsOnCall map[int]struct {
result1 worker.Volume
result2 bool
result3 error
}
// invocations records every call as method name -> list of argument slices;
// guarded by invocationsMutex (see recordInvocation / Invocations).
invocations map[string][][]interface{}
invocationsMutex sync.RWMutex
}
// CreateVolume records the arguments and the invocation, then answers from
// (in priority order) the installed stub, the per-call-index return values,
// or the default return values. The stub runs outside the mutex.
func (fake *FakeVolumeClient) CreateVolume(arg1 lager.Logger, arg2 worker.VolumeSpec, arg3 int, arg4 string, arg5 db.VolumeType) (worker.Volume, error) {
fake.createVolumeMutex.Lock()
ret, specificReturn := fake.createVolumeReturnsOnCall[len(fake.createVolumeArgsForCall)]
fake.createVolumeArgsForCall = append(fake.createVolumeArgsForCall, struct {
arg1 lager.Logger
arg2 worker.VolumeSpec
arg3 int
arg4 string
arg5 db.VolumeType
}{arg1, arg2, arg3, arg4, arg5})
stub := fake.CreateVolumeStub
fakeReturns := fake.createVolumeReturns
fake.recordInvocation("CreateVolume", []interface{}{arg1, arg2, arg3, arg4, arg5})
fake.createVolumeMutex.Unlock()
if stub != nil {
return stub(arg1, arg2, arg3, arg4, arg5)
}
if specificReturn {
return ret.result1, ret.result2
}
return fakeReturns.result1, fakeReturns.result2
}
// CreateVolumeCallCount reports how many times CreateVolume was invoked.
func (fake *FakeVolumeClient) CreateVolumeCallCount() int {
fake.createVolumeMutex.RLock()
defer fake.createVolumeMutex.RUnlock()
return len(fake.createVolumeArgsForCall)
}
// CreateVolumeCalls installs a stub used for all subsequent calls.
func (fake *FakeVolumeClient) CreateVolumeCalls(stub func(lager.Logger, worker.VolumeSpec, int, string, db.VolumeType) (worker.Volume, error)) {
fake.createVolumeMutex.Lock()
defer fake.createVolumeMutex.Unlock()
fake.CreateVolumeStub = stub
}
// CreateVolumeArgsForCall returns the arguments recorded for the i-th call.
func (fake *FakeVolumeClient) CreateVolumeArgsForCall(i int) (lager.Logger, worker.VolumeSpec, int, string, db.VolumeType) {
fake.createVolumeMutex.RLock()
defer fake.createVolumeMutex.RUnlock()
argsForCall := fake.createVolumeArgsForCall[i]
return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4, argsForCall.arg5
}
// CreateVolumeReturns sets the default return values and clears any stub.
func (fake *FakeVolumeClient) CreateVolumeReturns(result1 worker.Volume, result2 error) {
fake.createVolumeMutex.Lock()
defer fake.createVolumeMutex.Unlock()
fake.CreateVolumeStub = nil
fake.createVolumeReturns = struct {
result1 worker.Volume
result2 error
}{result1, result2}
}
// CreateVolumeReturnsOnCall sets the return values for the i-th call only.
func (fake *FakeVolumeClient) CreateVolumeReturnsOnCall(i int, result1 worker.Volume, result2 error) {
fake.createVolumeMutex.Lock()
defer fake.createVolumeMutex.Unlock()
fake.CreateVolumeStub = nil
if fake.createVolumeReturnsOnCall == nil {
fake.createVolumeReturnsOnCall = make(map[int]struct {
result1 worker.Volume
result2 error
})
}
fake.createVolumeReturnsOnCall[i] = struct {
result1 worker.Volume
result2 error
}{result1, result2}
}
// CreateVolumeForTaskCache records the arguments and invocation, then answers
// from the stub, the per-call-index return values, or the defaults.
func (fake *FakeVolumeClient) CreateVolumeForTaskCache(arg1 lager.Logger, arg2 worker.VolumeSpec, arg3 int, arg4 int, arg5 string, arg6 string) (worker.Volume, error) {
fake.createVolumeForTaskCacheMutex.Lock()
ret, specificReturn := fake.createVolumeForTaskCacheReturnsOnCall[len(fake.createVolumeForTaskCacheArgsForCall)]
fake.createVolumeForTaskCacheArgsForCall = append(fake.createVolumeForTaskCacheArgsForCall, struct {
arg1 lager.Logger
arg2 worker.VolumeSpec
arg3 int
arg4 int
arg5 string
arg6 string
}{arg1, arg2, arg3, arg4, arg5, arg6})
stub := fake.CreateVolumeForTaskCacheStub
fakeReturns := fake.createVolumeForTaskCacheReturns
fake.recordInvocation("CreateVolumeForTaskCache", []interface{}{arg1, arg2, arg3, arg4, arg5, arg6})
fake.createVolumeForTaskCacheMutex.Unlock()
if stub != nil {
return stub(arg1, arg2, arg3, arg4, arg5, arg6)
}
if specificReturn {
return ret.result1, ret.result2
}
return fakeReturns.result1, fakeReturns.result2
}
// CreateVolumeForTaskCacheCallCount reports the number of invocations.
func (fake *FakeVolumeClient) CreateVolumeForTaskCacheCallCount() int {
fake.createVolumeForTaskCacheMutex.RLock()
defer fake.createVolumeForTaskCacheMutex.RUnlock()
return len(fake.createVolumeForTaskCacheArgsForCall)
}
// CreateVolumeForTaskCacheCalls installs a stub for all subsequent calls.
func (fake *FakeVolumeClient) CreateVolumeForTaskCacheCalls(stub func(lager.Logger, worker.VolumeSpec, int, int, string, string) (worker.Volume, error)) {
fake.createVolumeForTaskCacheMutex.Lock()
defer fake.createVolumeForTaskCacheMutex.Unlock()
fake.CreateVolumeForTaskCacheStub = stub
}
// CreateVolumeForTaskCacheArgsForCall returns the i-th call's arguments.
func (fake *FakeVolumeClient) CreateVolumeForTaskCacheArgsForCall(i int) (lager.Logger, worker.VolumeSpec, int, int, string, string) {
fake.createVolumeForTaskCacheMutex.RLock()
defer fake.createVolumeForTaskCacheMutex.RUnlock()
argsForCall := fake.createVolumeForTaskCacheArgsForCall[i]
return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4, argsForCall.arg5, argsForCall.arg6
}
// CreateVolumeForTaskCacheReturns sets the defaults and clears any stub.
func (fake *FakeVolumeClient) CreateVolumeForTaskCacheReturns(result1 worker.Volume, result2 error) {
fake.createVolumeForTaskCacheMutex.Lock()
defer fake.createVolumeForTaskCacheMutex.Unlock()
fake.CreateVolumeForTaskCacheStub = nil
fake.createVolumeForTaskCacheReturns = struct {
result1 worker.Volume
result2 error
}{result1, result2}
}
// CreateVolumeForTaskCacheReturnsOnCall sets returns for the i-th call only.
func (fake *FakeVolumeClient) CreateVolumeForTaskCacheReturnsOnCall(i int, result1 worker.Volume, result2 error) {
fake.createVolumeForTaskCacheMutex.Lock()
defer fake.createVolumeForTaskCacheMutex.Unlock()
fake.CreateVolumeForTaskCacheStub = nil
if fake.createVolumeForTaskCacheReturnsOnCall == nil {
fake.createVolumeForTaskCacheReturnsOnCall = make(map[int]struct {
result1 worker.Volume
result2 error
})
}
fake.createVolumeForTaskCacheReturnsOnCall[i] = struct {
result1 worker.Volume
result2 error
}{result1, result2}
}
// FindOrCreateCOWVolumeForContainer records the arguments and invocation,
// then answers from the stub, the per-call-index returns, or the defaults.
func (fake *FakeVolumeClient) FindOrCreateCOWVolumeForContainer(arg1 lager.Logger, arg2 worker.VolumeSpec, arg3 db.CreatingContainer, arg4 worker.Volume, arg5 int, arg6 string) (worker.Volume, error) {
fake.findOrCreateCOWVolumeForContainerMutex.Lock()
ret, specificReturn := fake.findOrCreateCOWVolumeForContainerReturnsOnCall[len(fake.findOrCreateCOWVolumeForContainerArgsForCall)]
fake.findOrCreateCOWVolumeForContainerArgsForCall = append(fake.findOrCreateCOWVolumeForContainerArgsForCall, struct {
arg1 lager.Logger
arg2 worker.VolumeSpec
arg3 db.CreatingContainer
arg4 worker.Volume
arg5 int
arg6 string
}{arg1, arg2, arg3, arg4, arg5, arg6})
stub := fake.FindOrCreateCOWVolumeForContainerStub
fakeReturns := fake.findOrCreateCOWVolumeForContainerReturns
fake.recordInvocation("FindOrCreateCOWVolumeForContainer", []interface{}{arg1, arg2, arg3, arg4, arg5, arg6})
fake.findOrCreateCOWVolumeForContainerMutex.Unlock()
if stub != nil {
return stub(arg1, arg2, arg3, arg4, arg5, arg6)
}
if specificReturn {
return ret.result1, ret.result2
}
return fakeReturns.result1, fakeReturns.result2
}
// FindOrCreateCOWVolumeForContainerCallCount reports the invocation count.
func (fake *FakeVolumeClient) FindOrCreateCOWVolumeForContainerCallCount() int {
fake.findOrCreateCOWVolumeForContainerMutex.RLock()
defer fake.findOrCreateCOWVolumeForContainerMutex.RUnlock()
return len(fake.findOrCreateCOWVolumeForContainerArgsForCall)
}
// FindOrCreateCOWVolumeForContainerCalls installs a stub for future calls.
func (fake *FakeVolumeClient) FindOrCreateCOWVolumeForContainerCalls(stub func(lager.Logger, worker.VolumeSpec, db.CreatingContainer, worker.Volume, int, string) (worker.Volume, error)) {
fake.findOrCreateCOWVolumeForContainerMutex.Lock()
defer fake.findOrCreateCOWVolumeForContainerMutex.Unlock()
fake.FindOrCreateCOWVolumeForContainerStub = stub
}
// FindOrCreateCOWVolumeForContainerArgsForCall returns the i-th call's args.
func (fake *FakeVolumeClient) FindOrCreateCOWVolumeForContainerArgsForCall(i int) (lager.Logger, worker.VolumeSpec, db.CreatingContainer, worker.Volume, int, string) {
fake.findOrCreateCOWVolumeForContainerMutex.RLock()
defer fake.findOrCreateCOWVolumeForContainerMutex.RUnlock()
argsForCall := fake.findOrCreateCOWVolumeForContainerArgsForCall[i]
return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4, argsForCall.arg5, argsForCall.arg6
}
// FindOrCreateCOWVolumeForContainerReturns sets defaults and clears any stub.
func (fake *FakeVolumeClient) FindOrCreateCOWVolumeForContainerReturns(result1 worker.Volume, result2 error) {
fake.findOrCreateCOWVolumeForContainerMutex.Lock()
defer fake.findOrCreateCOWVolumeForContainerMutex.Unlock()
fake.FindOrCreateCOWVolumeForContainerStub = nil
fake.findOrCreateCOWVolumeForContainerReturns = struct {
result1 worker.Volume
result2 error
}{result1, result2}
}
// FindOrCreateCOWVolumeForContainerReturnsOnCall sets the i-th call's returns.
func (fake *FakeVolumeClient) FindOrCreateCOWVolumeForContainerReturnsOnCall(i int, result1 worker.Volume, result2 error) {
fake.findOrCreateCOWVolumeForContainerMutex.Lock()
defer fake.findOrCreateCOWVolumeForContainerMutex.Unlock()
fake.FindOrCreateCOWVolumeForContainerStub = nil
if fake.findOrCreateCOWVolumeForContainerReturnsOnCall == nil {
fake.findOrCreateCOWVolumeForContainerReturnsOnCall = make(map[int]struct {
result1 worker.Volume
result2 error
})
}
fake.findOrCreateCOWVolumeForContainerReturnsOnCall[i] = struct {
result1 worker.Volume
result2 error
}{result1, result2}
}
// FindOrCreateVolumeForBaseResourceType records the arguments and invocation,
// then answers from the stub, the per-call-index returns, or the defaults.
func (fake *FakeVolumeClient) FindOrCreateVolumeForBaseResourceType(arg1 lager.Logger, arg2 worker.VolumeSpec, arg3 int, arg4 string) (worker.Volume, error) {
fake.findOrCreateVolumeForBaseResourceTypeMutex.Lock()
ret, specificReturn := fake.findOrCreateVolumeForBaseResourceTypeReturnsOnCall[len(fake.findOrCreateVolumeForBaseResourceTypeArgsForCall)]
fake.findOrCreateVolumeForBaseResourceTypeArgsForCall = append(fake.findOrCreateVolumeForBaseResourceTypeArgsForCall, struct {
arg1 lager.Logger
arg2 worker.VolumeSpec
arg3 int
arg4 string
}{arg1, arg2, arg3, arg4})
stub := fake.FindOrCreateVolumeForBaseResourceTypeStub
fakeReturns := fake.findOrCreateVolumeForBaseResourceTypeReturns
fake.recordInvocation("FindOrCreateVolumeForBaseResourceType", []interface{}{arg1, arg2, arg3, arg4})
fake.findOrCreateVolumeForBaseResourceTypeMutex.Unlock()
if stub != nil {
return stub(arg1, arg2, arg3, arg4)
}
if specificReturn {
return ret.result1, ret.result2
}
return fakeReturns.result1, fakeReturns.result2
}
// FindOrCreateVolumeForBaseResourceTypeCallCount reports the invocation count.
func (fake *FakeVolumeClient) FindOrCreateVolumeForBaseResourceTypeCallCount() int {
fake.findOrCreateVolumeForBaseResourceTypeMutex.RLock()
defer fake.findOrCreateVolumeForBaseResourceTypeMutex.RUnlock()
return len(fake.findOrCreateVolumeForBaseResourceTypeArgsForCall)
}
// FindOrCreateVolumeForBaseResourceTypeCalls installs a stub for future calls.
func (fake *FakeVolumeClient) FindOrCreateVolumeForBaseResourceTypeCalls(stub func(lager.Logger, worker.VolumeSpec, int, string) (worker.Volume, error)) {
fake.findOrCreateVolumeForBaseResourceTypeMutex.Lock()
defer fake.findOrCreateVolumeForBaseResourceTypeMutex.Unlock()
fake.FindOrCreateVolumeForBaseResourceTypeStub = stub
}
// FindOrCreateVolumeForBaseResourceTypeArgsForCall returns the i-th call's args.
func (fake *FakeVolumeClient) FindOrCreateVolumeForBaseResourceTypeArgsForCall(i int) (lager.Logger, worker.VolumeSpec, int, string) {
fake.findOrCreateVolumeForBaseResourceTypeMutex.RLock()
defer fake.findOrCreateVolumeForBaseResourceTypeMutex.RUnlock()
argsForCall := fake.findOrCreateVolumeForBaseResourceTypeArgsForCall[i]
return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4
}
// FindOrCreateVolumeForBaseResourceTypeReturns sets defaults, clearing any stub.
func (fake *FakeVolumeClient) FindOrCreateVolumeForBaseResourceTypeReturns(result1 worker.Volume, result2 error) {
fake.findOrCreateVolumeForBaseResourceTypeMutex.Lock()
defer fake.findOrCreateVolumeForBaseResourceTypeMutex.Unlock()
fake.FindOrCreateVolumeForBaseResourceTypeStub = nil
fake.findOrCreateVolumeForBaseResourceTypeReturns = struct {
result1 worker.Volume
result2 error
}{result1, result2}
}
// FindOrCreateVolumeForBaseResourceTypeReturnsOnCall sets the i-th call's returns.
func (fake *FakeVolumeClient) FindOrCreateVolumeForBaseResourceTypeReturnsOnCall(i int, result1 worker.Volume, result2 error) {
fake.findOrCreateVolumeForBaseResourceTypeMutex.Lock()
defer fake.findOrCreateVolumeForBaseResourceTypeMutex.Unlock()
fake.FindOrCreateVolumeForBaseResourceTypeStub = nil
if fake.findOrCreateVolumeForBaseResourceTypeReturnsOnCall == nil {
fake.findOrCreateVolumeForBaseResourceTypeReturnsOnCall = make(map[int]struct {
result1 worker.Volume
result2 error
})
}
fake.findOrCreateVolumeForBaseResourceTypeReturnsOnCall[i] = struct {
result1 worker.Volume
result2 error
}{result1, result2}
}
// FindOrCreateVolumeForContainer records the arguments and invocation, then
// answers from the stub, the per-call-index returns, or the defaults.
func (fake *FakeVolumeClient) FindOrCreateVolumeForContainer(arg1 lager.Logger, arg2 worker.VolumeSpec, arg3 db.CreatingContainer, arg4 int, arg5 string) (worker.Volume, error) {
fake.findOrCreateVolumeForContainerMutex.Lock()
ret, specificReturn := fake.findOrCreateVolumeForContainerReturnsOnCall[len(fake.findOrCreateVolumeForContainerArgsForCall)]
fake.findOrCreateVolumeForContainerArgsForCall = append(fake.findOrCreateVolumeForContainerArgsForCall, struct {
arg1 lager.Logger
arg2 worker.VolumeSpec
arg3 db.CreatingContainer
arg4 int
arg5 string
}{arg1, arg2, arg3, arg4, arg5})
stub := fake.FindOrCreateVolumeForContainerStub
fakeReturns := fake.findOrCreateVolumeForContainerReturns
fake.recordInvocation("FindOrCreateVolumeForContainer", []interface{}{arg1, arg2, arg3, arg4, arg5})
fake.findOrCreateVolumeForContainerMutex.Unlock()
if stub != nil {
return stub(arg1, arg2, arg3, arg4, arg5)
}
if specificReturn {
return ret.result1, ret.result2
}
return fakeReturns.result1, fakeReturns.result2
}
// FindOrCreateVolumeForContainerCallCount reports the invocation count.
func (fake *FakeVolumeClient) FindOrCreateVolumeForContainerCallCount() int {
fake.findOrCreateVolumeForContainerMutex.RLock()
defer fake.findOrCreateVolumeForContainerMutex.RUnlock()
return len(fake.findOrCreateVolumeForContainerArgsForCall)
}
// FindOrCreateVolumeForContainerCalls installs a stub for future calls.
func (fake *FakeVolumeClient) FindOrCreateVolumeForContainerCalls(stub func(lager.Logger, worker.VolumeSpec, db.CreatingContainer, int, string) (worker.Volume, error)) {
fake.findOrCreateVolumeForContainerMutex.Lock()
defer fake.findOrCreateVolumeForContainerMutex.Unlock()
fake.FindOrCreateVolumeForContainerStub = stub
}
// FindOrCreateVolumeForContainerArgsForCall returns the i-th call's args.
func (fake *FakeVolumeClient) FindOrCreateVolumeForContainerArgsForCall(i int) (lager.Logger, worker.VolumeSpec, db.CreatingContainer, int, string) {
fake.findOrCreateVolumeForContainerMutex.RLock()
defer fake.findOrCreateVolumeForContainerMutex.RUnlock()
argsForCall := fake.findOrCreateVolumeForContainerArgsForCall[i]
return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4, argsForCall.arg5
}
// FindOrCreateVolumeForContainerReturns sets defaults and clears any stub.
func (fake *FakeVolumeClient) FindOrCreateVolumeForContainerReturns(result1 worker.Volume, result2 error) {
fake.findOrCreateVolumeForContainerMutex.Lock()
defer fake.findOrCreateVolumeForContainerMutex.Unlock()
fake.FindOrCreateVolumeForContainerStub = nil
fake.findOrCreateVolumeForContainerReturns = struct {
result1 worker.Volume
result2 error
}{result1, result2}
}
// FindOrCreateVolumeForContainerReturnsOnCall sets the i-th call's returns.
func (fake *FakeVolumeClient) FindOrCreateVolumeForContainerReturnsOnCall(i int, result1 worker.Volume, result2 error) {
fake.findOrCreateVolumeForContainerMutex.Lock()
defer fake.findOrCreateVolumeForContainerMutex.Unlock()
fake.FindOrCreateVolumeForContainerStub = nil
if fake.findOrCreateVolumeForContainerReturnsOnCall == nil {
fake.findOrCreateVolumeForContainerReturnsOnCall = make(map[int]struct {
result1 worker.Volume
result2 error
})
}
fake.findOrCreateVolumeForContainerReturnsOnCall[i] = struct {
result1 worker.Volume
result2 error
}{result1, result2}
}
// FindOrCreateVolumeForResourceCerts records the argument and invocation,
// then answers from the stub, the per-call-index returns, or the defaults.
func (fake *FakeVolumeClient) FindOrCreateVolumeForResourceCerts(arg1 lager.Logger) (worker.Volume, bool, error) {
fake.findOrCreateVolumeForResourceCertsMutex.Lock()
ret, specificReturn := fake.findOrCreateVolumeForResourceCertsReturnsOnCall[len(fake.findOrCreateVolumeForResourceCertsArgsForCall)]
fake.findOrCreateVolumeForResourceCertsArgsForCall = append(fake.findOrCreateVolumeForResourceCertsArgsForCall, struct {
arg1 lager.Logger
}{arg1})
stub := fake.FindOrCreateVolumeForResourceCertsStub
fakeReturns := fake.findOrCreateVolumeForResourceCertsReturns
fake.recordInvocation("FindOrCreateVolumeForResourceCerts", []interface{}{arg1})
fake.findOrCreateVolumeForResourceCertsMutex.Unlock()
if stub != nil {
return stub(arg1)
}
if specificReturn {
return ret.result1, ret.result2, ret.result3
}
return fakeReturns.result1, fakeReturns.result2, fakeReturns.result3
}
// FindOrCreateVolumeForResourceCertsCallCount reports the invocation count.
func (fake *FakeVolumeClient) FindOrCreateVolumeForResourceCertsCallCount() int {
fake.findOrCreateVolumeForResourceCertsMutex.RLock()
defer fake.findOrCreateVolumeForResourceCertsMutex.RUnlock()
return len(fake.findOrCreateVolumeForResourceCertsArgsForCall)
}
// FindOrCreateVolumeForResourceCertsCalls installs a stub for future calls.
func (fake *FakeVolumeClient) FindOrCreateVolumeForResourceCertsCalls(stub func(lager.Logger) (worker.Volume, bool, error)) {
fake.findOrCreateVolumeForResourceCertsMutex.Lock()
defer fake.findOrCreateVolumeForResourceCertsMutex.Unlock()
fake.FindOrCreateVolumeForResourceCertsStub = stub
}
// FindOrCreateVolumeForResourceCertsArgsForCall returns the i-th call's argument.
func (fake *FakeVolumeClient) FindOrCreateVolumeForResourceCertsArgsForCall(i int) lager.Logger {
fake.findOrCreateVolumeForResourceCertsMutex.RLock()
defer fake.findOrCreateVolumeForResourceCertsMutex.RUnlock()
argsForCall := fake.findOrCreateVolumeForResourceCertsArgsForCall[i]
return argsForCall.arg1
}
// FindOrCreateVolumeForResourceCertsReturns sets defaults and clears any stub.
func (fake *FakeVolumeClient) FindOrCreateVolumeForResourceCertsReturns(result1 worker.Volume, result2 bool, result3 error) {
fake.findOrCreateVolumeForResourceCertsMutex.Lock()
defer fake.findOrCreateVolumeForResourceCertsMutex.Unlock()
fake.FindOrCreateVolumeForResourceCertsStub = nil
fake.findOrCreateVolumeForResourceCertsReturns = struct {
result1 worker.Volume
result2 bool
result3 error
}{result1, result2, result3}
}
// FindOrCreateVolumeForResourceCertsReturnsOnCall sets the i-th call's returns.
func (fake *FakeVolumeClient) FindOrCreateVolumeForResourceCertsReturnsOnCall(i int, result1 worker.Volume, result2 bool, result3 error) {
fake.findOrCreateVolumeForResourceCertsMutex.Lock()
defer fake.findOrCreateVolumeForResourceCertsMutex.Unlock()
fake.FindOrCreateVolumeForResourceCertsStub = nil
if fake.findOrCreateVolumeForResourceCertsReturnsOnCall == nil {
fake.findOrCreateVolumeForResourceCertsReturnsOnCall = make(map[int]struct {
result1 worker.Volume
result2 bool
result3 error
})
}
fake.findOrCreateVolumeForResourceCertsReturnsOnCall[i] = struct {
result1 worker.Volume
result2 bool
result3 error
}{result1, result2, result3}
}
// FindVolumeForResourceCache records the arguments and invocation, then
// answers from the stub, the per-call-index returns, or the defaults.
func (fake *FakeVolumeClient) FindVolumeForResourceCache(arg1 lager.Logger, arg2 db.ResourceCache) (worker.Volume, bool, error) {
fake.findVolumeForResourceCacheMutex.Lock()
ret, specificReturn := fake.findVolumeForResourceCacheReturnsOnCall[len(fake.findVolumeForResourceCacheArgsForCall)]
fake.findVolumeForResourceCacheArgsForCall = append(fake.findVolumeForResourceCacheArgsForCall, struct {
arg1 lager.Logger
arg2 db.ResourceCache
}{arg1, arg2})
stub := fake.FindVolumeForResourceCacheStub
fakeReturns := fake.findVolumeForResourceCacheReturns
fake.recordInvocation("FindVolumeForResourceCache", []interface{}{arg1, arg2})
fake.findVolumeForResourceCacheMutex.Unlock()
if stub != nil {
return stub(arg1, arg2)
}
if specificReturn {
return ret.result1, ret.result2, ret.result3
}
return fakeReturns.result1, fakeReturns.result2, fakeReturns.result3
}
// FindVolumeForResourceCacheCallCount reports the invocation count.
func (fake *FakeVolumeClient) FindVolumeForResourceCacheCallCount() int {
fake.findVolumeForResourceCacheMutex.RLock()
defer fake.findVolumeForResourceCacheMutex.RUnlock()
return len(fake.findVolumeForResourceCacheArgsForCall)
}
// FindVolumeForResourceCacheCalls installs a stub for future calls.
func (fake *FakeVolumeClient) FindVolumeForResourceCacheCalls(stub func(lager.Logger, db.ResourceCache) (worker.Volume, bool, error)) {
fake.findVolumeForResourceCacheMutex.Lock()
defer fake.findVolumeForResourceCacheMutex.Unlock()
fake.FindVolumeForResourceCacheStub = stub
}
// FindVolumeForResourceCacheArgsForCall returns the i-th call's arguments.
func (fake *FakeVolumeClient) FindVolumeForResourceCacheArgsForCall(i int) (lager.Logger, db.ResourceCache) {
fake.findVolumeForResourceCacheMutex.RLock()
defer fake.findVolumeForResourceCacheMutex.RUnlock()
argsForCall := fake.findVolumeForResourceCacheArgsForCall[i]
return argsForCall.arg1, argsForCall.arg2
}
// FindVolumeForResourceCacheReturns sets defaults and clears any stub.
func (fake *FakeVolumeClient) FindVolumeForResourceCacheReturns(result1 worker.Volume, result2 bool, result3 error) {
fake.findVolumeForResourceCacheMutex.Lock()
defer fake.findVolumeForResourceCacheMutex.Unlock()
fake.FindVolumeForResourceCacheStub = nil
fake.findVolumeForResourceCacheReturns = struct {
result1 worker.Volume
result2 bool
result3 error
}{result1, result2, result3}
}
// FindVolumeForResourceCacheReturnsOnCall sets the i-th call's returns.
func (fake *FakeVolumeClient) FindVolumeForResourceCacheReturnsOnCall(i int, result1 worker.Volume, result2 bool, result3 error) {
fake.findVolumeForResourceCacheMutex.Lock()
defer fake.findVolumeForResourceCacheMutex.Unlock()
fake.FindVolumeForResourceCacheStub = nil
if fake.findVolumeForResourceCacheReturnsOnCall == nil {
fake.findVolumeForResourceCacheReturnsOnCall = make(map[int]struct {
result1 worker.Volume
result2 bool
result3 error
})
}
fake.findVolumeForResourceCacheReturnsOnCall[i] = struct {
result1 worker.Volume
result2 bool
result3 error
}{result1, result2, result3}
}
// FindVolumeForTaskCache records the arguments and invocation, then answers
// from the stub, the per-call-index returns, or the defaults.
func (fake *FakeVolumeClient) FindVolumeForTaskCache(arg1 lager.Logger, arg2 int, arg3 int, arg4 string, arg5 string) (worker.Volume, bool, error) {
fake.findVolumeForTaskCacheMutex.Lock()
ret, specificReturn := fake.findVolumeForTaskCacheReturnsOnCall[len(fake.findVolumeForTaskCacheArgsForCall)]
fake.findVolumeForTaskCacheArgsForCall = append(fake.findVolumeForTaskCacheArgsForCall, struct {
arg1 lager.Logger
arg2 int
arg3 int
arg4 string
arg5 string
}{arg1, arg2, arg3, arg4, arg5})
stub := fake.FindVolumeForTaskCacheStub
fakeReturns := fake.findVolumeForTaskCacheReturns
fake.recordInvocation("FindVolumeForTaskCache", []interface{}{arg1, arg2, arg3, arg4, arg5})
fake.findVolumeForTaskCacheMutex.Unlock()
if stub != nil {
return stub(arg1, arg2, arg3, arg4, arg5)
}
if specificReturn {
return ret.result1, ret.result2, ret.result3
}
return fakeReturns.result1, fakeReturns.result2, fakeReturns.result3
}
// FindVolumeForTaskCacheCallCount reports the invocation count.
func (fake *FakeVolumeClient) FindVolumeForTaskCacheCallCount() int {
fake.findVolumeForTaskCacheMutex.RLock()
defer fake.findVolumeForTaskCacheMutex.RUnlock()
return len(fake.findVolumeForTaskCacheArgsForCall)
}
// FindVolumeForTaskCacheCalls installs a stub for future calls.
func (fake *FakeVolumeClient) FindVolumeForTaskCacheCalls(stub func(lager.Logger, int, int, string, string) (worker.Volume, bool, error)) {
fake.findVolumeForTaskCacheMutex.Lock()
defer fake.findVolumeForTaskCacheMutex.Unlock()
fake.FindVolumeForTaskCacheStub = stub
}
// FindVolumeForTaskCacheArgsForCall returns the i-th call's arguments.
func (fake *FakeVolumeClient) FindVolumeForTaskCacheArgsForCall(i int) (lager.Logger, int, int, string, string) {
fake.findVolumeForTaskCacheMutex.RLock()
defer fake.findVolumeForTaskCacheMutex.RUnlock()
argsForCall := fake.findVolumeForTaskCacheArgsForCall[i]
return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4, argsForCall.arg5
}
// FindVolumeForTaskCacheReturns sets defaults and clears any stub.
func (fake *FakeVolumeClient) FindVolumeForTaskCacheReturns(result1 worker.Volume, result2 bool, result3 error) {
fake.findVolumeForTaskCacheMutex.Lock()
defer fake.findVolumeForTaskCacheMutex.Unlock()
fake.FindVolumeForTaskCacheStub = nil
fake.findVolumeForTaskCacheReturns = struct {
result1 worker.Volume
result2 bool
result3 error
}{result1, result2, result3}
}
// FindVolumeForTaskCacheReturnsOnCall sets the i-th call's returns.
func (fake *FakeVolumeClient) FindVolumeForTaskCacheReturnsOnCall(i int, result1 worker.Volume, result2 bool, result3 error) {
fake.findVolumeForTaskCacheMutex.Lock()
defer fake.findVolumeForTaskCacheMutex.Unlock()
fake.FindVolumeForTaskCacheStub = nil
if fake.findVolumeForTaskCacheReturnsOnCall == nil {
fake.findVolumeForTaskCacheReturnsOnCall = make(map[int]struct {
result1 worker.Volume
result2 bool
result3 error
})
}
fake.findVolumeForTaskCacheReturnsOnCall[i] = struct {
result1 worker.Volume
result2 bool
result3 error
}{result1, result2, result3}
}
// LookupVolume records the arguments and invocation, then answers from the
// stub, the per-call-index returns, or the defaults.
func (fake *FakeVolumeClient) LookupVolume(arg1 lager.Logger, arg2 string) (worker.Volume, bool, error) {
fake.lookupVolumeMutex.Lock()
ret, specificReturn := fake.lookupVolumeReturnsOnCall[len(fake.lookupVolumeArgsForCall)]
fake.lookupVolumeArgsForCall = append(fake.lookupVolumeArgsForCall, struct {
arg1 lager.Logger
arg2 string
}{arg1, arg2})
stub := fake.LookupVolumeStub
fakeReturns := fake.lookupVolumeReturns
fake.recordInvocation("LookupVolume", []interface{}{arg1, arg2})
fake.lookupVolumeMutex.Unlock()
if stub != nil {
return stub(arg1, arg2)
}
if specificReturn {
return ret.result1, ret.result2, ret.result3
}
return fakeReturns.result1, fakeReturns.result2, fakeReturns.result3
}
// LookupVolumeCallCount reports the invocation count.
func (fake *FakeVolumeClient) LookupVolumeCallCount() int {
fake.lookupVolumeMutex.RLock()
defer fake.lookupVolumeMutex.RUnlock()
return len(fake.lookupVolumeArgsForCall)
}
// LookupVolumeCalls installs a stub for future calls.
func (fake *FakeVolumeClient) LookupVolumeCalls(stub func(lager.Logger, string) (worker.Volume, bool, error)) {
fake.lookupVolumeMutex.Lock()
defer fake.lookupVolumeMutex.Unlock()
fake.LookupVolumeStub = stub
}
// LookupVolumeArgsForCall returns the i-th call's arguments.
func (fake *FakeVolumeClient) LookupVolumeArgsForCall(i int) (lager.Logger, string) {
fake.lookupVolumeMutex.RLock()
defer fake.lookupVolumeMutex.RUnlock()
argsForCall := fake.lookupVolumeArgsForCall[i]
return argsForCall.arg1, argsForCall.arg2
}
// LookupVolumeReturns sets defaults and clears any stub.
func (fake *FakeVolumeClient) LookupVolumeReturns(result1 worker.Volume, result2 bool, result3 error) {
fake.lookupVolumeMutex.Lock()
defer fake.lookupVolumeMutex.Unlock()
fake.LookupVolumeStub = nil
fake.lookupVolumeReturns = struct {
result1 worker.Volume
result2 bool
result3 error
}{result1, result2, result3}
}
// LookupVolumeReturnsOnCall sets the i-th call's returns.
func (fake *FakeVolumeClient) LookupVolumeReturnsOnCall(i int, result1 worker.Volume, result2 bool, result3 error) {
fake.lookupVolumeMutex.Lock()
defer fake.lookupVolumeMutex.Unlock()
fake.LookupVolumeStub = nil
if fake.lookupVolumeReturnsOnCall == nil {
fake.lookupVolumeReturnsOnCall = make(map[int]struct {
result1 worker.Volume
result2 bool
result3 error
})
}
fake.lookupVolumeReturnsOnCall[i] = struct {
result1 worker.Volume
result2 bool
result3 error
}{result1, result2, result3}
}
// Invocations returns a shallow copy of every recorded call, keyed by method
// name. Every per-method mutex is read-locked for the duration so the
// snapshot is consistent across methods.
func (fake *FakeVolumeClient) Invocations() map[string][][]interface{} {
fake.invocationsMutex.RLock()
defer fake.invocationsMutex.RUnlock()
fake.createVolumeMutex.RLock()
defer fake.createVolumeMutex.RUnlock()
fake.createVolumeForTaskCacheMutex.RLock()
defer fake.createVolumeForTaskCacheMutex.RUnlock()
fake.findOrCreateCOWVolumeForContainerMutex.RLock()
defer fake.findOrCreateCOWVolumeForContainerMutex.RUnlock()
fake.findOrCreateVolumeForBaseResourceTypeMutex.RLock()
defer fake.findOrCreateVolumeForBaseResourceTypeMutex.RUnlock()
fake.findOrCreateVolumeForContainerMutex.RLock()
defer fake.findOrCreateVolumeForContainerMutex.RUnlock()
fake.findOrCreateVolumeForResourceCertsMutex.RLock()
defer fake.findOrCreateVolumeForResourceCertsMutex.RUnlock()
fake.findVolumeForResourceCacheMutex.RLock()
defer fake.findVolumeForResourceCacheMutex.RUnlock()
fake.findVolumeForTaskCacheMutex.RLock()
defer fake.findVolumeForTaskCacheMutex.RUnlock()
fake.lookupVolumeMutex.RLock()
defer fake.lookupVolumeMutex.RUnlock()
copiedInvocations := map[string][][]interface{}{}
for key, value := range fake.invocations {
copiedInvocations[key] = value
}
return copiedInvocations
}
// recordInvocation appends one call's arguments under the given method name,
// lazily initialising the map and the per-method slice on first use.
func (fake *FakeVolumeClient) recordInvocation(key string, args []interface{}) {
fake.invocationsMutex.Lock()
defer fake.invocationsMutex.Unlock()
if fake.invocations == nil {
fake.invocations = map[string][][]interface{}{}
}
if fake.invocations[key] == nil {
fake.invocations[key] = [][]interface{}{}
}
fake.invocations[key] = append(fake.invocations[key], args)
}
// Compile-time assertion that FakeVolumeClient satisfies worker.VolumeClient.
var _ worker.VolumeClient = new(FakeVolumeClient)
|
from . import LOBDeepPP_model
from . import LOBDeepPP_params_files
class StockPricePredictor:
    """Loads a LOBDeepPP model plus its parameter file and exposes a
    price-movement prediction helper over order-book data.

    Errors and missing preconditions are reported by printing a message and
    returning None (best-effort style), matching how callers use this class.
    """

    def __init__(self, model_path, params_path):
        # Paths are stored eagerly; nothing is loaded until load_* is called.
        self.model_path = model_path
        self.params_path = params_path
        self.model = None
        self.parameters = None

    def load_model(self):
        """Populate self.model from model_path, printing the outcome."""
        try:
            self.model = LOBDeepPP_model.load_model(self.model_path)
            print("Model loaded successfully.")
        except Exception as exc:
            print(f"Error loading model: {exc}")

    def load_parameters(self):
        """Populate self.parameters from params_path, printing the outcome."""
        try:
            self.parameters = LOBDeepPP_params_files.load_parameters(self.params_path)
            print("Parameters loaded successfully.")
        except Exception as exc:
            print(f"Error loading parameters: {exc}")

    def predict_price_movement(self, order_book_data):
        """Run the loaded model on order_book_data and return a movement label.

        Returns None (after printing a notice) when no model is loaded or
        when the model raises.
        """
        if self.model is None:
            print("Model not loaded. Please load the model first.")
            return
        try:
            _raw = self.model.predict(order_book_data)
            # Perform post-processing and return the predicted movement
            return "up"  # Replace with actual prediction logic
        except Exception as exc:
            print(f"Error predicting price movement: {exc}")

    def get_model_parameters(self):
        """Return the loaded parameters, or None (with a notice) if absent."""
        if self.parameters is None:
            print("Parameters not loaded. Please load the parameters first.")
            return
        return self.parameters
"""Demonstrate splitting a URL into scheme, host and path with urllib."""
import urllib.parse

url = 'https://www.example.com/path/to/file'
parts = urllib.parse.urlparse(url)

protocol = parts.scheme        # "https"
domain_name = parts.netloc     # "www.example.com"
path = parts.path              # "/path/to/file"

# Report each component on its own line.
for label, value in (("Protocol", protocol),
                     ("Domain name", domain_name),
                     ("Path", path)):
    print(f"{label}: {value}")

# Output:
# Protocol: https
# Domain name: www.example.com
# Path: /path/to/file
# Install the InSights BitBucket agent service for the OS named in $1.
# Case-insensitive matching is done with explicit character-class patterns
# (alternative: shopt -s nocasematch).
opt=$1
echo "$opt"
case $opt in
[lL][Ii][nN][uU][Xx])
	echo "BitBucket Running on Linux..."
	# SysV init: install the script, enable it, then restart to pick it up.
	sudo cp -xp InSightsBitBucketAgentAllbranch.sh /etc/init.d/InSightsBitBucketAgentAllbranch
	sudo chmod +x /etc/init.d/InSightsBitBucketAgentAllbranch
	sudo chkconfig InSightsBitBucketAgentAllbranch on
	sudo service InSightsBitBucketAgentAllbranch status
	sudo service InSightsBitBucketAgentAllbranch stop
	sudo service InSightsBitBucketAgentAllbranch status
	sudo service InSightsBitBucketAgentAllbranch start
	sudo service InSightsBitBucketAgentAllbranch status
	# Fixed typo: "installaton" -> "installation".
	echo "Service installation steps completed"
	;;
[uU][bB][uU][nN][tT][uU])
	echo "BitBucket Running on Ubuntu..."
	# systemd: install the unit file, enable and start it.
	sudo cp -xp InSightsBitBucketAgentAllbranch.service /etc/systemd/system
	sudo systemctl enable InSightsBitBucketAgentAllbranch
	sudo systemctl start InSightsBitBucketAgentAllbranch
	echo "Service installation steps completed"
	;;
[cC][eE][nN][tT][oO][sS])
	# Made the pattern case-insensitive like the others; fixed "centso" typo.
	echo "BitBucket Running on CentOS..."
	;;
*)
	echo "Please provide correct OS input"
esac
#!/bin/bash
# Emit "export VAR=..." lines defining release/tag names for later eval.
# Falls back to the commit SHA when the build is not for a git tag.
set -Eeuo pipefail
if [[ -z "${CIRCLE_TAG-}" ]]; then
	echo "# This is not a git tag, reverting to the git hash"
	echo "export RELEASE_NAME=${CIRCLE_SHA1}"
	echo "export DOCKER_FULL_TAG=${CIRCLE_SHA1}"
	echo "export DOCKER_SHORT_TAG=${CIRCLE_SHA1}"
	echo "export GORELEASER_CURRENT_TAG=${CIRCLE_SHA1}"
	exit 0
fi
# Look up the GitHub release whose tag matches CIRCLE_TAG.
release_list=$(curl -s "https://api.github.com/repos/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME/releases")
release_name=$(jq -c -r '.[] | select(.tag_name == "'"${CIRCLE_TAG}"'") | .name' <(echo "${release_list}"))
if [[ ${release_name} == "${CIRCLE_TAG}"* ]]; then
	echo "export RELEASE_NAME=${release_name}"
	# Docker tags may not contain '+', so it is replaced or truncated away.
	echo "export DOCKER_FULL_TAG=$(echo "${CIRCLE_TAG}" | tr '+' '_')"
	echo "export DOCKER_SHORT_TAG=$(echo "${CIRCLE_TAG}" | cut -d '+' -f1)"
	echo "export GORELEASER_CURRENT_TAG=$(echo "${CIRCLE_TAG}" | cut -d '+' -f1)"
else
	# Fixed typo: "Exected" -> "Expected".
	echo "Expected the release title to contain the git tag which was not the case!"
	printf "Release title:\t\t%s\n" "${release_name}"
	printf "Git tag:\t\t%s\n" "${CIRCLE_TAG}"
	printf "Release:\t\t%s\n" "$(jq -r '.[] | select(.tag_name == "'"${CIRCLE_TAG}"'") | .' <(echo "${release_list}"))"
	exit 1
fi
|
// Fake database for demo
const db = {};
const get = (username, key) => {
db[username] = db[username] || {};
return db[username][key];
};
const set = (username, key, value) => {
db[username] = db[username] || {};
db[username][key] = value;
};
module.exports = {
get,
set,
};
|
#!/bin/bash
# Print the versions of every toolchain and app installed in this environment.
# First source the per-toolchain environment setup scripts.
. ~/.local/bin/env/configureJvmEnv.sh
. ~/.local/bin/env/configureN.sh
. ~/.cargo/env
. ~/.local/bin/env/configureDeno.sh
echo -e "\n\nListing software versions:"
echo -e "\nOpenVSCode Server: "
# The server has no --version flag; read it from its package.json instead.
grep version ~/.local/openvscode-server/latest/package.json
echo -e "\ndocker:"
docker --version
# Confirm the compose plugin is available by grepping docker's help output.
docker --help | grep compose
echo -e "\njava:"
java --version
javac --version
mvn --version
echo -e "\nnode:"
echo "n $(n --version)"
echo "node $(node --version)"
echo "npm $(npm --version)"
echo "tsc $(tsc --version)"
echo -e "\nrust:"
rustup --version
rustc --version
cargo --version
echo -e "\ndeno:"
deno --version
echo -e "\nApps:"
git --version
echo "virt-manager $(virt-manager --version)"
firefox --version
google-chrome --version
|
import * as React from "react";
import styled from "styled-components";
// Intrinsic elements the Text component can render as.
type Headers = "h1" | "h2" | "h3";
type OtherTypes = "p" | "span";
type TextType = Headers | OtherTypes;
// Props accepted by Text/StyledText; styling props are consumed by the
// styled-components wrapper, not by the plain Text component.
interface ITextProps {
  children: React.ReactChild | React.ReactChild[];
  type: TextType;
  fontWeight?: "normal" | "bold";
  fontStyle?: "italic";
  color?: string;
  className?: string;
}
// Render children inside the intrinsic element named by props.type.
// className is forwarded so styled-components can attach generated styles.
const Text = (props: ITextProps) => {
  const { type, className, children } = props;
  switch (type) {
    case "h1":
      return <h1 className={className}>{children}</h1>;
    case "h2":
      return <h2 className={className}>{children}</h2>;
    case "h3":
      return <h3 className={className}>{children}</h3>;
    case "p":
      return <p className={className}>{children}</p>;
    case "span":
      return <span className={className}>{children}</span>;
  }
};
// Resolve the numeric CSS font-weight for a Text block:
// explicit bold wins (700); headers default to a light 200, body text to 400.
function calculateFontWeight(props: ITextProps) {
  if (props.fontWeight === "bold") {
    return 700;
  }
  const isHeader =
    props.type === "h1" || props.type === "h2" || props.type === "h3";
  return isHeader ? 200 : 400;
}
// Styled wrapper applying weight/style/color from props.
// Omitted optional props are `undefined`, never `null`, so the original
// `!== null` checks always passed and emitted e.g. `font-style: undefined`;
// `??` falls back correctly for both null and undefined.
const StyledText = styled<ITextProps>(Text)`
  font-weight: ${props => calculateFontWeight(props)};
  font-style: ${props => props.fontStyle ?? "normal"};
  color: ${props => props.color ?? "hsla(0, 0%, 0%, 0.7)"};
`;

export default StyledText;
|
#include "catch.hpp"
#include "test_helpers.hpp"
using namespace duckdb;
using namespace std;
// Verifies snapshot isolation: con holds a transaction open while con2
// inserts, deletes and updates; con must keep seeing the original SUM(a)=6
// until it commits.
TEST_CASE("Test mix of updates inserts and deletes", "[update]") {
	unique_ptr<QueryResult> result;
	DuckDB db(nullptr);
	Connection con(db), con2(db);
	REQUIRE_NO_FAIL(con.Query("CREATE TABLE test (a INTEGER);"));
	REQUIRE_NO_FAIL(con.Query("INSERT INTO test VALUES (1), (2), (3);"));
	result = con.Query("SELECT SUM(a) FROM test");
	REQUIRE(CHECK_COLUMN(result, 0, {Value::BIGINT(6)}));
	REQUIRE_NO_FAIL(con.Query("BEGIN TRANSACTION"));
	// append from con2 — auto-committed, invisible to con's open transaction
	REQUIRE_NO_FAIL(con2.Query("INSERT INTO test VALUES (4), (5), (6);"));
	result = con.Query("SELECT SUM(a) FROM test");
	REQUIRE(CHECK_COLUMN(result, 0, {Value::BIGINT(6)}));
	result = con2.Query("SELECT SUM(a) FROM test");
	REQUIRE(CHECK_COLUMN(result, 0, {Value::BIGINT(21)}));
	// delete from con2 — removes 1,2,3; con still sees the old snapshot
	REQUIRE_NO_FAIL(con2.Query("DELETE FROM test WHERE a < 4"));
	result = con.Query("SELECT SUM(a) FROM test");
	REQUIRE(CHECK_COLUMN(result, 0, {Value::BIGINT(6)}));
	result = con2.Query("SELECT SUM(a) FROM test");
	REQUIRE(CHECK_COLUMN(result, 0, {Value::BIGINT(15)}));
	// update from con2 — 4,5,6 become 1,2,3 so both sums are 6 again
	REQUIRE_NO_FAIL(con2.Query("UPDATE test SET a=a-3"));
	result = con.Query("SELECT SUM(a) FROM test");
	REQUIRE(CHECK_COLUMN(result, 0, {Value::BIGINT(6)}));
	result = con2.Query("SELECT SUM(a) FROM test");
	REQUIRE(CHECK_COLUMN(result, 0, {Value::BIGINT(6)}));
	// now commit — con leaves its snapshot and sees con2's final state
	REQUIRE_NO_FAIL(con.Query("COMMIT"));
	result = con.Query("SELECT SUM(a) FROM test");
	REQUIRE(CHECK_COLUMN(result, 0, {Value::BIGINT(6)}));
	result = con2.Query("SELECT SUM(a) FROM test");
	REQUIRE(CHECK_COLUMN(result, 0, {Value::BIGINT(6)}));
}
// Verifies that two concurrent transactions may update and delete the same
// tuples without conflicting, each seeing only its own changes until commit.
TEST_CASE("Test update and delete of the same tuple", "[transactions]") {
	unique_ptr<QueryResult> result;
	DuckDB db(nullptr);
	Connection con(db), con2(db);
	// on a normal table, we can update and delete the same tuple concurrently without a conflict
	REQUIRE_NO_FAIL(con.Query("CREATE TABLE test (a INTEGER);"));
	REQUIRE_NO_FAIL(con.Query("INSERT INTO test VALUES (1), (2), (3);"));
	REQUIRE_NO_FAIL(con.Query("BEGIN TRANSACTION;"));
	REQUIRE_NO_FAIL(con2.Query("BEGIN TRANSACTION;"));
	REQUIRE_NO_FAIL(con.Query("UPDATE test SET a=a+1;"));
	REQUIRE_NO_FAIL(con2.Query("DELETE FROM test"));
	// each transaction sees only its own effect pre-commit
	result = con.Query("SELECT * FROM test ORDER BY a");
	REQUIRE(CHECK_COLUMN(result, 0, {2, 3, 4}));
	result = con2.Query("SELECT * FROM test ORDER BY a");
	REQUIRE(CHECK_COLUMN(result, 0, {}));
	REQUIRE_NO_FAIL(con.Query("COMMIT;"));
	REQUIRE_NO_FAIL(con2.Query("COMMIT;"));
	REQUIRE_NO_FAIL(con.Query("DROP TABLE test;"));
}
|
<reponame>andriykuba/reactivemongo-shortcuts-play-json
package com.github.andriykuba.play.reactivemongo.shortcuts
import scala.concurrent.Future
import scala.concurrent.ExecutionContext
import scala.concurrent.Await
import scala.concurrent.duration._
import play.api.inject.guice.GuiceApplicationBuilder
import play.api.Configuration
import play.api.libs.json.Json
import play.api.libs.json.JsObject
import play.modules.reactivemongo.ReactiveMongoApi
import org.junit.runner.RunWith
import org.scalatest._
import org.scalatest.junit.JUnitRunner
import org.scalatest.mockito.MockitoSugar
import org.scalatestplus.play.OneAppPerSuite
import org.mongodb.scala.MongoClient
import org.mongodb.scala.MongoDatabase
import org.mongodb.scala.MongoCollection
import org.mongodb.scala.Document
import org.mongodb.scala.Completed
import reactivemongo.bson.BSONDocument
import com.github.simplyscala.MongoEmbedDatabase
import com.github.simplyscala.MongodProps
import com.github.andriykuba.play.reactivemongo.shortcuts.exceptions.NotUniqueDocumentException
import com.github.andriykuba.play.reactivemongo.shortcuts.Collection.Folder
import com.github.andriykuba.play.reactivemongo.shortcuts.Collection.FolderM
import com.github.andriykuba.play.reactivemongo.shortcuts.exceptions.FieldNotFoundException
import reactivemongo.core.actors.Exceptions.ClosedException
import com.github.andriykuba.play.reactivemongo.shortcuts.exceptions.DocumentAlreadyExists
// Integration suite for the ReactiveMongo shortcut Collection trait, run
// against an embedded MongoDB seeded with 100 documents.
@RunWith(classOf[JUnitRunner])
class CollectionTest
  extends FlatSpec
  with Matchers
  with MockitoSugar
  with MongoEmbedDatabase
  with BeforeAndAfterAll
  with OneAppPerSuite{
  // Embedded-Mongo connection settings shared by every test in this suite.
  val testMongoProt = 12345
  val testDatabaseName = "shortcuts"
  val testCollectionName = "app.users"
  val testTimeout = 15 seconds
  // Guice injector configured to point ReactiveMongo at the embedded instance.
  lazy val injector =
    new GuiceApplicationBuilder()
      .configure(Map(
        "mongodb.uri" ->
          ("mongodb://localhost:" + testMongoProt.toString + "/" + testDatabaseName),
        "mongo-async-driver.akka.loglevel" -> "WARNING"))
      .bindings(new play.modules.reactivemongo.ReactiveMongoModule)
      .injector
  implicit lazy val mongo = injector.instanceOf[ReactiveMongoApi]
  implicit lazy val context = injector.instanceOf[ExecutionContext]
  // Shortcut collection under test, bound to the seeded "app.users" collection.
  object TestCollection extends Collection{
    val collectionName = testCollectionName
  }
  var mongoProps: MongodProps = null
  override def beforeAll() = {
    mongoProps = mongoStart(testMongoProt)
    prepareTestData()
  }
  override def afterAll() = {
    mongoStop(mongoProps)
  }
  // Seeds documents name_1..name_100 with an alternating "even" flag, using
  // the synchronous mongo-scala driver (independent of the code under test).
  def prepareTestData() = {
    val mongoClient: MongoClient =
      MongoClient("mongodb://localhost:" + testMongoProt.toString)
    val database: MongoDatabase =
      mongoClient.getDatabase(testDatabaseName)
    val collection: MongoCollection[Document] =
      database.getCollection(testCollectionName);
    val documents = (1 to 100) map { i: Int => {
      val even = i % 2 == 0
      Document(
        "name" -> ("name_" + i.toString),
        "even" -> even)
    }}
    collection.insertMany(documents).subscribe(
      (res: Completed) => Unit,
      (e: Throwable) => e.printStackTrace(),
      () => mongoClient.close()
    )
  }
  "Shorcuts" should "extend collection" in {
    object App extends Collection {
      val collectionName = "app"
    }
    App.collectionName should be ("app")
  }
  it should "define sub collection name" in {
    object App extends Collection {
      val collectionName = "app"
    }
    object Users extends Collection{
      val collectionName = App.defineSubCollectionName("users")
    }
    Users.collectionName should be ("app.users")
  }
  it should "get collection" in {
    val result = for{
      c <- TestCollection.collection()
      count <- c.count()
    } yield {
      c.name should be (testCollectionName)
      count should be (100)
    }
    Await.result(result, testTimeout)
  }
  // --- query shortcuts: all / one / first -----------------------------------
  it should "find all documents" in
    Await.result(
      TestCollection
        .all(Json.obj("even" -> true))
        .map(r => r.size should be (50)),
      testTimeout)
  it should "find a document" in
    Await.result(
      TestCollection
        .one(Json.obj("name" -> "name_15"))
        .map(r => r.isDefined should be (true)),
      testTimeout)
  it should "not find a document" in
    Await.result(
      TestCollection
        .one(Json.obj("name" -> "name_150"))
        .map(r => r.isDefined should be (false)),
      testTimeout)
  it should "throw NotUniqueDocumentException exception" in {
    val exception = intercept[NotUniqueDocumentException]{
      Await.result(
        TestCollection
          .one(Json.obj("even" -> true))
          .map(r => r),
        testTimeout)
    }
    exception.getMessage should equal ("There are 50 documents.")
  }
  it should "find a first document" in
    Await.result(
      TestCollection
        .first(Json.obj("name" -> "name_15"))
        .map(r => r.isDefined should be (true)),
      testTimeout)
  it should "not find a first document" in
    Await.result(
      TestCollection
        .first(Json.obj("name" -> "name_150"))
        .map(r => r.isDefined should be (false)),
      testTimeout)
  it should "find a first document and not thrown an exceptin" in
    Await.result(
      TestCollection
        .first(Json.obj("even" -> true))
        .map(r => r.isDefined should be (true)),
      testTimeout)
  // --- folding --------------------------------------------------------------
  it should "fold the collection" in {
    val folder = Folder(0, (count: Int, doc: JsObject) => {
      count + 1
    })
    Await.result(
      TestCollection
        .fold(Json.obj(), folder)
        .map(r => r should be (100)),
      testTimeout)
  }
  it should "fold the collection asynchroniously" in {
    val folderM = FolderM(0, (count: Int, doc: JsObject) => {
      Future(count + 1)
    })
    Await.result(
      TestCollection
        .foldM(Json.obj(), folderM)
        .map(r => r should be (100)),
      testTimeout)
  }
  // --- field accessors ------------------------------------------------------
  it should "get an optional field from a document" in
    Await.result(
      TestCollection
        .fieldOpt[Boolean](Json.obj("name"->"name_2"), "even")
        .map(r => r.get should be (true)),
      testTimeout)
  it should "not get an field value if filed is not exist" in
    Await.result(
      TestCollection
        .fieldOpt[Boolean](Json.obj("name"->"name_2"), "fake")
        .map(r => r.isDefined should be (false)),
      testTimeout)
  it should "thrown an error if more than one document exist" in {
    val exception = intercept[NotUniqueDocumentException]{
      Await.result(
        TestCollection
          .fieldOpt[Boolean](Json.obj("even"->true), "name")
          .map(r => r),
        testTimeout)
    }
    exception.getMessage should equal ("There are 50 documents.")
  }
  it should "get a field from a document" in
    Await.result(
      TestCollection
        .field[Boolean](Json.obj("name"->"name_2"), "even")
        .map(r => r should be (true)),
      testTimeout)
  it should "thrown an error if field is not present" in {
    val exception = intercept[FieldNotFoundException]{
      Await.result(
        TestCollection
          .field[Boolean](Json.obj("name"->"name_2"), "fake")
          .map(r => r),
        testTimeout)
    }
    exception.getMessage should equal ("No such field: fake")
  }
  it should "get a string field from a document" in
    Await.result(
      TestCollection
        .fieldStringOrEmpty(Json.obj("name"->"name_2"), "name")
        .map(r => r should be ("name_2")),
      testTimeout)
  it should "get an empty string if field is not present" in
    Await.result(
      TestCollection
        .fieldStringOrEmpty(Json.obj("name"->"name_2"), "fake")
        .map(r => r should be ("")),
      testTimeout)
  // --- mutation: update / remove / insert / createUnique --------------------
  it should "update only one document" in
    Await.result(
      TestCollection
        .update(Json.obj("even"->true), Json.obj(
          "$set" -> Json.obj("odd" -> false)))
        .flatMap(r => {
          TestCollection
            .all(Json.obj("odd" -> false))
            .map(r => r.size should be (1))
        }),
      testTimeout)
  it should "remove only one doucment" in
    Await.result(
      TestCollection
        .remove(Json.obj("even"->true))
        .flatMap(r => {
          TestCollection
            .all(Json.obj())
            .map(r => r.size should be (99))
        }),
      testTimeout)
  it should "insert a document" in
    Await.result(
      TestCollection
        .insert(Json.obj("name" -> "unlisted_1"))
        .flatMap(r => {
          TestCollection
            .all(Json.obj("name" -> "unlisted_1"))
            .map(r => r.size should be (1))
        }),
      testTimeout)
  it should "create a document" in
    Await.result(
      TestCollection
        .createUnique(Json.obj("name" -> "unlisted_2"), Json.obj("name" -> "unlisted_2"))
        .flatMap(r => {
          TestCollection
            .all(Json.obj("name" -> "unlisted_2"))
            .map(r => r.size should be (1))
        }),
      testTimeout)
  it should "throw DocumentAlreadyExists" in {
    val exception = intercept[DocumentAlreadyExists]{
      Await.result(
        TestCollection
          .createUnique(Json.obj("name" -> "unlisted_2"), Json.obj("name" -> "unlisted_2"))
          .flatMap(r => {
            TestCollection
              .all(Json.obj("name" -> "unlisted_2"))
              .map(r => r.size should be (1))
          }),
        testTimeout)
    }
    (exception.oldDocument \ "name").as[String] should be ("unlisted_2")
  }
  // Closing the connection first, so any shortcut call must surface the
  // driver's ClosedException. Must run last: it kills the shared connection.
  it should "throw error" in {
    mongo.connection.close()
    val exception = intercept[ClosedException]{
      Await.result(
        TestCollection
          .fieldStringOrEmpty(Json.obj("name"->"name_2"), "name")
          .map(r => r should be ("name_2")),
        testTimeout)
    }
    exception.getMessage should include ("This MongoConnection is closed")
  }
}
#pragma once
#include <chrono>
#include <string>
#include "ChatWindow.hpp"
class Network;
class Peer;
// Chat window bound to a Network rather than a single peer.
// NOTE(review): presumably messages sent here go to the whole network —
// confirm against the Network/ChatWindow implementations.
class NetworkChatWindow : public ChatWindow {
	Q_OBJECT
	const Network * netw;  // network this window is attached to (not owned)
public:
	NetworkChatWindow(const Network *);
	// Bring the base-class overloads into scope alongside the override below.
	using ChatWindow::receiveMessage;
	void receiveMessage(std::chrono::system_clock::time_point time, const Peer&, std::string);
	void sendMessage(std::string);
	void setDestinationPtr(void *);
};
|
#! /bin/bash
# Entrypoint: all services except the config server and service discovery
# wait for the config server port before starting the Spring Boot jar.
if [[ "$MOD" != "config-server" && "$MOD" != "service-discovery" ]];
then
	# Poll until configserver:8091 accepts TCP connections.
	while ! nc -z configserver 8091;
	do
		echo "waiting for configserver to come up"
		sleep 3;
	done
	# while ! nc -z service-discovery 8090;
	# do
	# 	echo "waiting for service-discovery to come up"
	# 	sleep 3;
	# done
fi
# Start the app with Eureka and config-server locations passed as system properties.
java -Deureka.client.serviceUrl.defaultZone=http://service-discovery:8090/eureka -Dspring.cloud.config.uri=http://configserver:8091 -jar /app/app.jar
|
<reponame>irenicaa/go-dice-generator<filename>http-utils/http_utils.go<gh_stars>0
package httputils
import (
"encoding/json"
"errors"
"fmt"
"net/http"
"strconv"
)
// Logger is the minimal logging dependency of this package: anything with a
// Print method (e.g. *log.Logger) satisfies it.
type Logger interface {
	Print(arguments ...interface{})
}
// GetIntFormValue ...
func GetIntFormValue(
request *http.Request,
key string,
min int,
max int,
) (int, error) {
value := request.FormValue(key)
if value == "" {
return 0, errors.New("key is missed")
}
valueAsInt, err := strconv.Atoi(value)
if err != nil {
return 0, fmt.Errorf("value is incorrect: %v", err)
}
if valueAsInt < min {
return 0, errors.New("value too less")
}
if valueAsInt > max {
return 0, errors.New("value too greater")
}
return valueAsInt, nil
}
// HandleError logs a formatted error message and mirrors it to the client
// with the given HTTP status code.
func HandleError(
	writer http.ResponseWriter,
	logger Logger,
	status int,
	format string,
	arguments ...interface{},
) {
	text := fmt.Sprintf(format, arguments...)
	logger.Print(text)
	// Status must be written before the body.
	writer.WriteHeader(status)
	writer.Write([]byte(text)) // best-effort: write errors are not recoverable here
}
// HandleJSON marshals data and writes it as an application/json response.
// On marshalling failure it delegates to HandleError with a 500 status.
func HandleJSON(writer http.ResponseWriter, logger Logger, data interface{}) {
	payload, err := json.Marshal(data)
	if err != nil {
		HandleError(
			writer,
			logger,
			http.StatusInternalServerError,
			"unable to marshal the data: %v",
			err,
		)
		return
	}
	// Header must be set before the implicit 200 from the first Write.
	writer.Header().Set("Content-Type", "application/json")
	writer.Write(payload) // best-effort: write errors are not recoverable here
}
|
<reponame>LeticiaISilveira/python-boilerplate
from django.conf.urls import url
from django import http
def index_view(request):
    # Static landing page proving the dockerized Django setup works; the HTML
    # is returned verbatim, so its text must not be altered.
    return http.HttpResponse("""<!DOCTYPE html>
<html>
<head>
<title>Dockerized Django Test Server</title>
<!-- This stylesheet is served by Nginx! -->
<link rel="stylesheet" href="/static/style.css">
</head>
<body>
<h1>Congratulations!</h1>
<p>Your docker container is working! Of course you must still configure it
for your application. The best way to do it is to create a Dockerfile that
is based on this image:
<code><pre>
# Dockerfile
FROM pythonboilerplate/django:base
# Tell docker the python path of your application
ENV GUNICORN_WSGI_APPLICATION=my_application.wsgi
# Assuming that your project files are under src/:
ADD . /app
</pre></code>
</p>
<p>Enjoy :)</p>
</body>
</html>
""")
# Route everything at the site root to the landing page.
urlpatterns = [
    url(r'^$', index_view),
]
|
"""
Function in Python to return the number of possible paths from the top-left to the bottom-right in an mxn matrix
"""
def num_of_paths(m, n):
    """Count monotone lattice paths from the top-left to the bottom-right
    cell of an m x n grid, moving only right or down.

    Args:
        m: number of rows (>= 1).
        n: number of columns (>= 1).

    Returns:
        Number of distinct paths reaching cell (m-1, n-1).
    """
    # DP table with m rows and n columns. The original allocated it
    # transposed (n rows x m columns) and then indexed it as m x n, which
    # crashed or returned wrong results for every non-square grid.
    count = [[0] * n for _ in range(m)]
    # Exactly one way to reach any cell in the first column (straight down)...
    for i in range(m):
        count[i][0] = 1
    # ...and any cell in the first row (straight right).
    for j in range(n):
        count[0][j] = 1
    # Every other cell is reachable from above or from the left.
    # (Add count[i-1][j-1] as well to allow diagonal movements.)
    for i in range(1, m):
        for j in range(1, n):
            count[i][j] = count[i - 1][j] + count[i][j - 1]
    return count[m - 1][n - 1]
# Driver code: count right/down paths through a 3x3 grid (expected: 6).
m = 3
n = 3
print(num_of_paths(m, n))
# test errors operating on bignum
# Each block triggers a specific exception and prints its name, so expected
# output doubles as the test oracle. Exception types must not change.
i = 1 << 65

# negative shift counts are rejected
try:
    i << -1
except ValueError:
    print("ValueError")

# ints have no len()
try:
    len(i)
except TypeError:
    print("TypeError")

# ints do not support membership tests
try:
    1 in i
except TypeError:
    print("TypeError")

# overflow because rhs of >> is being converted to machine int
try:
    1 >> i
except OverflowError:
    print('OverflowError')

# to test conversion of negative mpz to machine int
# (we know << will convert to machine int, even though it fails to do the shift)
try:
    i << (-(i >> 40))
except ValueError:
    print('ValueError')

# division and modulo by zero on a bignum
try:
    i // 0
except ZeroDivisionError:
    print('ZeroDivisionError')
try:
    i % 0
except ZeroDivisionError:
    print('ZeroDivisionError')
#!/bin/sh
# Build the mongodb PHP extension with developer flags and coverage enabled;
# optionally pin the SSL library via $SSL_VERSION.
CONFIGURE_OPTS="${CONFIGURE_OPTS} --enable-mongodb-developer-flags --enable-mongodb-coverage"
if [ -n "${SSL_VERSION}" ]; then
	CONFIGURE_OPTS="${CONFIGURE_OPTS} --with-mongodb-ssl=${SSL_VERSION}"
fi
phpize
./configure ${CONFIGURE_OPTS}
|
class User < ApplicationRecord
  # Merged the two separate validates calls for :name into one.
  validates :name, presence: true, uniqueness: true

  has_many :posts
  has_many :comments, foreign_key: :author_id, class_name: "Comment"
  # NOTE(review): `source: :author` makes commented_posts return Users, not
  # Posts — this looks like it should be `source: :post`; confirm against the
  # Comment model before changing, so it is left as-is here.
  has_many :commented_posts, through: :comments, source: :author
  # Follow rows where this user is the follower, and the users they follow.
  has_many :followed_users, foreign_key: :follower_id, class_name: 'Follow'
  has_many :followees, through: :followed_users
  # Follow rows where this user is the followee, and the users following them.
  has_many :following_users, foreign_key: :followee_id, class_name: 'Follow'
  has_many :followers, through: :following_users

  # Include default devise modules. Others available are:
  # :confirmable, :lockable, :timeoutable, :trackable and :omniauthable
  devise :database_authenticatable, :registerable,
         :recoverable, :rememberable, :validatable
  devise :omniauthable, :omniauth_providers => [:facebook]

  # Finds or creates the user matching an OmniAuth payload. A random token is
  # used as password so Devise validations pass; this restores the expression
  # lost to the `<PASSWORD>` redaction artifact, which was not valid Ruby.
  def self.from_omniauth(auth)
    where(provider: auth.provider, uid: auth.uid).first_or_create do |user|
      user.email = auth.info.email
      user.name = auth.info.name
      user.password = Devise.friendly_token[0, 20]
    end
  end

  # Create a Follow row pointing at user_id.
  def follow(user_id)
    followed_users.create(followee_id: user_id)
  end

  # Destroy the Follow row pointing at user_id (raises if none exists).
  def unfollow(user_id)
    followed_users.find_by(followee_id: user_id).destroy
  end
end
|
<reponame>cristiancobo/ExpressParty
package com.ceiba.reserva.adaptador.repositorio;
import com.ceiba.infraestructura.jdbc.CustomNamedParameterJdbcTemplate;
import com.ceiba.infraestructura.jdbc.sqlstatement.SqlStatement;
import com.ceiba.reserva.modelo.entidad.Reserva;
import com.ceiba.reserva.puerto.repositorio.RepositorioReserva;
import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
import org.springframework.stereotype.Repository;
/**
 * MySQL-backed implementation of {@link RepositorioReserva}; SQL statements
 * are injected from the "reserva" namespace via {@link SqlStatement}.
 */
@Repository
public class RepositorioReservaMysql implements RepositorioReserva {

    private final CustomNamedParameterJdbcTemplate customNamedParameterJdbcTemplate;

    @SqlStatement(namespace="reserva", value="crear")
    private static String sqlCrear;
    @SqlStatement(namespace="reserva", value="actualizar")
    private static String sqlActualizar;
    @SqlStatement(namespace="reserva", value="eliminar")
    private static String sqlEliminar;
    @SqlStatement(namespace="reserva", value="existe_reserva_por_id_persona")
    private static String sqlExisteReservaIdPersona;
    @SqlStatement(namespace="reserva", value="existe")
    private static String sqlExiste;

    public RepositorioReservaMysql(CustomNamedParameterJdbcTemplate customNamedParameterJdbcTemplate) {
        this.customNamedParameterJdbcTemplate = customNamedParameterJdbcTemplate;
    }

    /** Inserts the reservation and returns its generated id. */
    @Override
    public Long crear(Reserva reserva) {
        return this.customNamedParameterJdbcTemplate.crear(reserva,sqlCrear);
    }

    /** Updates the persisted reservation with the entity's current state. */
    @Override
    public void actualizar(Reserva reserva) {
        this.customNamedParameterJdbcTemplate.actualizar(reserva,sqlActualizar);
    }

    /** Deletes the reservation with the given id (no-op if absent). */
    @Override
    public void eliminar(Long id) {
        MapSqlParameterSource mapSqlParameterSource = new MapSqlParameterSource();
        mapSqlParameterSource.addValue("id",id);
        this.customNamedParameterJdbcTemplate.getNamedParameterJdbcTemplate().update(sqlEliminar,mapSqlParameterSource);
    }

    /** Returns true when a reservation exists for the given person id. */
    @Override
    public boolean existeReservaConIdPersona(String idPersonaReserva) {
        MapSqlParameterSource paramSource = new MapSqlParameterSource();
        paramSource.addValue("idPersonaReserva",idPersonaReserva);
        return this.customNamedParameterJdbcTemplate.getNamedParameterJdbcTemplate().queryForObject(sqlExisteReservaIdPersona,paramSource, Boolean.class);
    }

    /** Returns true when a reservation with the given id exists. */
    @Override
    public boolean existe(Long id) {
        MapSqlParameterSource paramSource = new MapSqlParameterSource();
        paramSource.addValue("id",id);
        return this.customNamedParameterJdbcTemplate.getNamedParameterJdbcTemplate().queryForObject(sqlExiste,paramSource, Boolean.class);
    }
}
#!/bin/bash
# test.sh
# Create a default rails application, install blacklight, and run all the tests.
# If null or empty, use default value
RAILS_VERSION=${RAILS_VERSION:-"~> 3.2"}
JETTY_URL=${JETTY_URL:-"https://github.com/projectblacklight/blacklight-jetty/zipball/v3.5.0"}
before="$(date +%s)"
# Print elapsed wall-clock time since the script started.
benchmark()
{
	after="$(date +%s)"
	elapsed_seconds="$(expr $after - $before)"
	echo "Total Time: ${elapsed_seconds} sec"
	# as a bonus, make our script exit with the right error code.
}
# Abort with a message when the previous step failed.
check_errs()
{
	# Function. Parameter 1 is the return code
	# Param. 2 is text to display on failure.
	if [ "${1}" -ne "0" ]; then
		echo "ERROR # ${1} : ${2}"
		benchmark
		exit 1
	fi
}
# Make sure we are in the blacklight directory
if [ ! -f "blacklight.gemspec" ]
then
	echo "You must execute test.sh from the root of your blacklight checkout."
	exit 1
fi
# Clear out the tmp/ directory.
rm -rf tmp/test_app
mkdir -p tmp/test_app
cd tmp
# Optional arguments select an RVM ruby to run under.
if [[ $# -gt 0 ]]
then
	# Make certain rvm will work correctly.
	# Load RVM into a shell session *as a function*
	if [[ -s "$HOME/.rvm/scripts/rvm" ]] ; then
		# First try to load from a user install
		source "$HOME/.rvm/scripts/rvm"
	elif [[ -s "/usr/local/rvm/scripts/rvm" ]] ; then
		# Then try to load from a root install
		source "/usr/local/rvm/scripts/rvm"
	else
		echo "WARNING: An RVM installation was not found.\n"
		exit 1
	fi
	rvm use "$@" --create
	check_errs $? "rvm failed. please run 'rvm install $@', and then re-run these tests."
fi
# Install rails and bundler only when missing.
if ! gem query -n rails -v "$RAILS_VERSION" --installed > /dev/null; then
	gem install --no-rdoc --no-ri 'rails' -v "$RAILS_VERSION"
fi
if ! gem query -n bundler -v ">=1.0" --installed > /dev/null; then
	gem install --no-rdoc --no-ri 'bundler'
fi
# Generate a throwaway rails app and point its Gemfile at this checkout.
rails "_${RAILS_VERSION}_" new test_app
cd test_app
rm public/index.html
echo "
platforms :jruby do
  gem 'jruby-openssl'
  gem 'activerecord-jdbcsqlite3-adapter'
  gem 'jdbc-sqlite3'
  gem 'mediashelf-loggable', '>= 0.4.8'
  gem 'therubyrhino'
end

platforms :ruby do
  gem 'sqlite3'
  gem 'execjs'
  gem 'therubyracer'
end

gem 'blacklight', :path => '../../'
gem 'jquery-rails'

group :assets do
  gem 'compass-rails', '~> 1.0.0'
  gem 'compass-susy-plugin', '~> 0.9.0'
end

# For testing
group :development, :test do
  gem 'rspec'
  gem 'rspec-rails'
  gem 'generator_spec'
  gem 'cucumber-rails'
  gem 'database_cleaner'
  gem 'capybara'
  gem 'rcov', :platform => :mri_18
  gem 'simplecov', :platform => :mri_19
  gem 'simplecov-rcov', :platform => :mri_19
end

gem 'jettywrapper', '>= 1.2.0'
gem \"devise\"
" >> Gemfile
bundle install
check_errs $? "Bundle install failed."
rails generate blacklight -d
check_errs $? "Blacklight generator failed"
bundle exec rake db:migrate
check_errs $? "Rake Migration failed"
rails g cucumber:install &> /dev/null
cp ../../test_support/alternate_controller.rb app/controllers/
# add routing for the alternate_controller:
#     resources :alternate do
#        member do
#          get :facet
#        end
#      end
ruby -pi.bak -e 'gsub(/devise_for :users/, "devise_for :users\n  resources :alternate do\n    member do\n      get :facet\n    end\n  end")' config/routes.rb
# Download and cache the blacklight-jetty zip under /tmp, then install solr.
jetty_zip=$( echo $JETTY_URL | awk '{split($0,a,"/"); print "/tmp/blacklight_jetty_"a[length(a)]}')
if [ ! -f $jetty_zip ]
then
	curl -L $JETTY_URL -o $jetty_zip
	check_errs $? "Jetty file does not exist, and cannot be downloaded."
fi
rails g blacklight:jetty test_jetty -e test -d $jetty_zip
check_errs $? "Jetty setup failed."
# Index the fixture data and run the full CI test task.
bundle exec rake solr:marc:index_test_data RAILS_ENV=test
bundle exec rake blacklight:hudson
check_errs $? "Tests failed."
benchmark
|
const { startServer, stopServer } = require('../../lib/server');
const { request } = require('../scripts/helpers');
const generator = require('../scripts/generator');
const mock = require('../scripts/mock-core-registry');
// Error-path tests for the API: each case makes the mocked core registry
// misbehave in one specific way and asserts the resulting status code.
describe('API requests', () => {
    beforeEach(async () => {
        mock.mockAll();
        await startServer();
    });

    afterEach(async () => {
        await stopServer();
        mock.cleanAll();
    });

    // --- core /user endpoint failures -> 500 --------------------------------
    test('should fail if core returns net error while fetching user', async () => {
        mock.mockAll({ core: { netError: true } });
        const res = await request({
            uri: '/',
            method: 'POST',
            headers: {
                'X-Auth-Token': 'blablabla'
            }
        });

        expect(res.statusCode).toEqual(500);
        expect(res.body.success).toEqual(false);
    });

    test('should fail if core returns garbage while fetching user', async () => {
        mock.mockAll({ core: { badResponse: true } });
        const res = await request({
            uri: '/',
            method: 'GET',
            headers: {
                'X-Auth-Token': 'blablabla'
            }
        });

        expect(res.statusCode).toEqual(500);
        expect(res.body.success).toEqual(false);
    });

    // --- permission-fetch failures -> 500 -----------------------------------
    test('should fail if core returns net error while fetching permissions', async () => {
        mock.mockAll({ mainPermissions: { netError: true } });
        const res = await request({
            uri: '/',
            method: 'POST',
            headers: {
                'X-Auth-Token': 'blablabla'
            }
        });

        expect(res.statusCode).toEqual(500);
        expect(res.body.success).toEqual(false);
    });

    test('should fail if core returns garbage while fetching permissions', async () => {
        mock.mockAll({ mainPermissions: { badResponse: true } });
        const res = await request({
            uri: '/',
            method: 'GET',
            headers: {
                'X-Auth-Token': 'blablabla'
            }
        });

        expect(res.statusCode).toEqual(500);
        expect(res.body.success).toEqual(false);
    });

    test('should return 500 if core returned unsuccessful response', async () => {
        mock.mockAll({ core: { unsuccessfulResponse: true } });
        const event = await generator.createEvent();
        const res = await request({
            uri: '/events/' + event.id + '/applications',
            method: 'GET',
            headers: {
                'X-Auth-Token': 'blablabla'
            }
        });

        expect(res.statusCode).toEqual(500);
        expect(res.body.success).toEqual(false);
    });

    test('should return 500 if core returned unsuccessful response for permissions', async () => {
        mock.mockAll({ mainPermissions: { unsuccessfulResponse: true } });
        const event = await generator.createEvent();
        const res = await request({
            uri: '/events/' + event.id + '/applications',
            method: 'GET',
            headers: {
                'X-Auth-Token': 'blablabla'
            }
        });

        expect(res.statusCode).toEqual(500);
        expect(res.body.success).toEqual(false);
    });

    test('should return 500 if core returned unsuccessful response for approve permissions', async () => {
        mock.mockAll({ approvePermissions: { unsuccessfulResponse: true } });
        const event = await generator.createEvent();
        const res = await request({
            uri: '/events/' + event.id + '/applications',
            method: 'GET',
            headers: {
                'X-Auth-Token': 'blablabla'
            }
        });

        expect(res.statusCode).toEqual(500);
        expect(res.body.success).toEqual(false);
    });

    // --- unauthorized responses propagate as 401 ----------------------------
    test('should return 401 if the user request returned 401 for auth-only endpoint', async () => {
        mock.mockAll({ core: { unauthorized: true } });
        const event = await generator.createEvent();
        const res = await request({
            uri: '/events/' + event.id + '/applications',
            method: 'GET',
            headers: {
                'X-Auth-Token': 'blablabla'
            }
        });

        expect(res.statusCode).toEqual(401);
        expect(res.body.success).toEqual(false);
    });

    test('should return 401 if the permission request returned 401 for auth-only endpoint', async () => {
        mock.mockAll({ mainPermissions: { unauthorized: true } });
        const event = await generator.createEvent();
        const res = await request({
            uri: '/events/' + event.id + '/applications',
            method: 'GET',
            headers: {
                'X-Auth-Token': 'blablabla'
            }
        });

        expect(res.statusCode).toEqual(401);
        expect(res.body.success).toEqual(false);
    });

    test('should return 401 if the approve permissions request returned 401 for auth-only endpoint', async () => {
        mock.mockAll({ approvePermissions: { unauthorized: true } });
        const event = await generator.createEvent();
        const res = await request({
            uri: '/events/' + event.id + '/applications',
            method: 'GET',
            headers: {
                'X-Auth-Token': 'blablabla'
            }
        });

        expect(res.statusCode).toEqual(401);
        expect(res.body.success).toEqual(false);
    });

    test('should fail if core returns garbage while fetching approve permissions', async () => {
        mock.mockAll({ approvePermissions: { badResponse: true } });
        const event = await generator.createEvent({});
        const res = await request({
            uri: '/events/' + event.id,
            method: 'GET',
            headers: {
                'X-Auth-Token': 'blablabla'
            }
        });

        expect(res.statusCode).toEqual(500);
        expect(res.body.success).toEqual(false);
    });

    // --- request-level errors -----------------------------------------------
    test('should fail if body is not JSON', async () => {
        const res = await request({
            uri: '/',
            method: 'POST',
            headers: {
                'X-Auth-Token': 'blablabla',
                'Content-Type': 'application/json'
            },
            body: 'Totally not JSON'
        });

        expect(res.statusCode).toEqual(400);
        expect(res.body.success).toEqual(false);
    });

    test('should fail on accessing non-existant endpoint', async () => {
        const res = await request({
            uri: '/nonexistant',
            headers: { 'X-Auth-Token': 'blablabla' }
        });

        expect(res.statusCode).toEqual(404);
        expect(res.body.success).toEqual(false);
    });
});
|
const chai = require( 'chai' )
const chaiAsPromised = require( 'chai-as-promised' )
chai.use( chaiAsPromised )
const expect = chai.expect ;
const { ethers, waffle } = require( 'hardhat' )
const { loadFixture } = waffle
const { getTestCasesByFunction, generateTestCase } = require( '../fail-test-module' )
const {
contract_deployer_name,
token_owner_name,
proxy_user_name,
wl_user1_name,
wl_user2_name,
user1_name,
user2_name,
ERROR,
THROW,
CST,
} = require( '../test-var-module' )
// For activating or de-activating test cases.
// Each METHODS flag gates the suite for one contract method; each USE_CASES
// flag gates a whole pass (happy-path vs. fuzzed invalid input) below.
const TEST = {
  EVENTS : {
  },
  METHODS : {
    allowance : true,
    approve : true,
    balanceOf : true,
    totalSupply : true,
    transfer : true,
    transferFrom : true,
    mint : true,
    mintBatch : true,
    mintBatch_ol : true, // "_ol" = overloaded variant taking per-recipient amounts
  },
  USE_CASES : {
    CORRECT_INPUT : true,
    INVALID_INPUT : true,
  },
}
// For contract data.
// SIGNATURE strings are the full Solidity selectors (needed to disambiguate the
// two overloaded mintBatch functions via contract.functions[SIGNATURE]);
// PARAMS names are consumed by the invalid-input fuzzing module.
const CONTRACT = {
  EVENTS : {
    Transfer : 'Transfer',
    Approval : 'Approval',
  },
  METHODS : {
    allowance : {
      SIGNATURE : 'allowance(address,address)',
      PARAMS : [ 'owner_', 'spender_' ],
    },
    approve : {
      SIGNATURE : 'approve(address,uint256)',
      PARAMS : [ 'spender_', 'amount_' ],
    },
    balanceOf : {
      SIGNATURE : 'balanceOf(address)',
      PARAMS : [ 'account_' ],
    },
    totalSupply : {
      SIGNATURE : 'totalSupply()',
      PARAMS : [],
    },
    transfer : {
      SIGNATURE : 'transfer(address,uint256)',
      PARAMS : [ 'recipient_', 'amount_' ],
    },
    transferFrom : {
      SIGNATURE : 'transferFrom(address,address,uint256)',
      PARAMS : [ 'owner_', 'recipient_', 'amount_' ],
    },
    mint : {
      SIGNATURE : 'mint(address,uint256)',
      PARAMS : [ 'recipient_', 'amount_' ],
    },
    mintBatch : {
      SIGNATURE : 'mintBatch(address[],uint256)',
      PARAMS : [ 'recipients_', 'amount_' ],
    },
    mintBatch_ol : {
      SIGNATURE : 'mintBatch(address[],uint256[])',
      PARAMS : [ 'recipients_', 'amounts_' ],
    },
  },
}
/**
 * Shared behavior suite for ERC20Base implementations.
 *
 * @param {Function} fixture - waffle fixture returning { test_contract, test_contract_deployer }
 * @param {Object} contract_params - per-contract parameters (e.g. INIT_SUPPLY)
 *
 * Fixes applied in review:
 * - the signer destructuring assigned the first signer to an undeclared
 *   global `x` (leaks across test files); it is now captured in the
 *   declared `contract_deployer` (overwritten per-test by the fixture).
 * - `defaultArgs` was an implicit global; it is now declared in the
 *   'Invalid input' describe scope.
 * - a `describe` callback was declared `async`; mocha describe callbacks
 *   must be synchronous.
 */
const shouldBehaveLikeERC20Base = function( fixture, contract_params ) {
  describe( 'Should behave like ERC20Base', function() {
    let contract_deployer_address
    let contract_deployer
    let token_owner_address
    let token_owner
    let proxy_user_address
    let proxy_user
    let wl_user1_address
    let wl_user1
    let wl_user2_address
    let wl_user2
    let contract_address
    let contract
    let user1_address
    let user1
    let user2_address
    let user2
    let addrs

    before( async function() {
      // FIX: the first signer was previously assigned to an undeclared
      // global `x`; capture it in the declared variable instead.
      [
        contract_deployer,
        token_owner,
        proxy_user,
        wl_user1,
        wl_user2,
        user1,
        user2,
        ...addrs
      ] = await ethers.getSigners()
      token_owner_address = token_owner.address
      proxy_user_address = proxy_user.address
      wl_user1_address = wl_user1.address
      wl_user2_address = wl_user2.address
      user1_address = user1.address
      user2_address = user2.address
    })

    beforeEach( async function() {
      // Fresh contract (and its deployer) for every test via the shared fixture.
      const { test_contract, test_contract_deployer } = await loadFixture( fixture )
      contract = test_contract
      contract_deployer = test_contract_deployer
      contract_deployer_address = test_contract_deployer.address
      contract_address = test_contract.address
    })

    describe( 'Correct input ...', function() {
      if ( TEST.USE_CASES.CORRECT_INPUT ) {
        describe( CONTRACT.METHODS.totalSupply.SIGNATURE, function() {
          if ( TEST.METHODS.totalSupply ) {
            it( 'Total supply should be ' + contract_params.INIT_SUPPLY, async function() {
              expect( await contract.totalSupply() ).to.equal( contract_params.INIT_SUPPLY )
            })
          }
        })
        describe( CONTRACT.METHODS.balanceOf.SIGNATURE, function() {
          if ( TEST.METHODS.balanceOf ) {
            it( 'Balance of ' + contract_deployer_name + ' should be ' + contract_params.INIT_SUPPLY, async function() {
              expect( await contract.balanceOf( contract_deployer_address ) ).to.equal( contract_params.INIT_SUPPLY )
            })
          }
        })
        describe( CONTRACT.METHODS.allowance.SIGNATURE, function() {
          if ( TEST.METHODS.allowance ) {
            it( 'Allowance when no tokens exist should be 0', async function() {
              expect( await contract.allowance( contract_deployer_address, user1_address ) ).to.equal( 0 )
            })
            it( 'Trying to get allowance of tokens owned by the null address should be reverted with ' + ERROR.IERC20_NULL_ADDRESS_OWNER, async function() {
              await expect( contract.allowance( CST.ADDRESS_ZERO, user1_address ) ).to.be.revertedWith( ERROR.IERC20_NULL_ADDRESS_OWNER )
            })
          }
        })
        describe( CONTRACT.METHODS.transfer.SIGNATURE, function() {
          if ( TEST.METHODS.transfer ) {
            it( 'Trying to transfer tokens when balance is zero should be reverted with ' + ERROR.IERC20_INSUFFICIENT_BALANCE, async function() {
              await expect( contract.connect( user1 ).transfer( user2_address, 1 ) ).to.be.revertedWith( ERROR.IERC20_INSUFFICIENT_BALANCE )
            })
          }
        })
        describe( CONTRACT.METHODS.mint.SIGNATURE, function() {
          if ( TEST.METHODS.mint ) {
            // All suites below run with one extra token minted to TokenOwner.
            beforeEach( async function() {
              await contract.mint( token_owner_address, 1 )
            })
            describe( CONTRACT.METHODS.totalSupply.SIGNATURE, function() {
              if ( TEST.METHODS.totalSupply ) {
                it( 'Total supply should be ' + ( contract_params.INIT_SUPPLY + 1 ).toString(), async function() {
                  expect( await contract.totalSupply() ).to.equal( contract_params.INIT_SUPPLY + 1 )
                })
              }
            })
            describe( CONTRACT.METHODS.balanceOf.SIGNATURE, function() {
              if ( TEST.METHODS.balanceOf ) {
                it( 'Balance of ' + contract_deployer_name + ' should be ' + contract_params.INIT_SUPPLY, async function() {
                  expect( await contract.balanceOf( contract_deployer_address ) ).to.equal( contract_params.INIT_SUPPLY )
                })
                it( 'Balance of ' + token_owner_name + ' should be 1', async function() {
                  expect( await contract.balanceOf( token_owner_address ) ).to.equal( 1 )
                })
              }
            })
            describe( CONTRACT.METHODS.allowance.SIGNATURE, function() {
              if ( TEST.METHODS.allowance ) {
                it( 'Allowance of ' + token_owner_name + ' should not be required, expect 0', async function() {
                  expect( await contract.allowance( token_owner_address, token_owner_address ) ).to.equal( 0 )
                })
                it( 'Allowance of non owner should be 0', async function() {
                  expect( await contract.allowance( token_owner_address, user1_address ) ).to.equal( 0 )
                })
              }
            })
            describe( CONTRACT.METHODS.transfer.SIGNATURE, function() {
              if ( TEST.METHODS.transfer ) {
                it( 'Transfering tokens to null address should be reverted with ' + ERROR.IERC20_NULL_ADDRESS_TRANSFER, async function() {
                  await expect( contract.connect( token_owner ).transfer( CST.ADDRESS_ZERO, 1 ) ).to.be.revertedWith( ERROR.IERC20_NULL_ADDRESS_TRANSFER )
                })
                it( 'Trying to transfer more tokens than owned should be reverted with ' + ERROR.IERC20_INSUFFICIENT_BALANCE, async function() {
                  await expect( contract.connect( token_owner ).transfer( user1_address, 2 ) ).to.be.revertedWith( ERROR.IERC20_INSUFFICIENT_BALANCE )
                })
                it( 'Contract should emit a ' + CONTRACT.EVENTS.Transfer + ' event mentioning a token was transfered from TokenOwner to User1', async function() {
                  await expect( contract.connect( token_owner ).transfer( user1_address, 1 ) ).to.emit( contract, CONTRACT.EVENTS.Transfer ).withArgs( token_owner_address, user1_address, 1 )
                })
                it( 'Transfer of token to User1 should be successful', async function() {
                  await contract.connect( token_owner ).transfer( user1_address, 1 )
                  expect( await contract.balanceOf( token_owner_address ) ).to.equal( 0 )
                  expect( await contract.balanceOf( user1_address ) ).to.equal( 1 )
                  expect( await contract.totalSupply() ).to.equal( contract_params.INIT_SUPPLY + 1 )
                })
              }
            })
            describe( CONTRACT.METHODS.approve.SIGNATURE, function() {
              if ( TEST.METHODS.approve ) {
                it( 'Trying to approve self should be reverted with ' + ERROR.IERC20_APPROVE_OWNER, async function() {
                  await expect( contract.connect( token_owner ).approve( token_owner_address, 1 ) ).to.be.revertedWith( ERROR.IERC20_APPROVE_OWNER )
                })
                it( 'Contract should emit an ' + CONTRACT.EVENTS.Approval + ' event mentioning User1 is now allowed to spend 2 token in behalf of TokenOwner', async function() {
                  await expect( contract.connect( token_owner ).approve( user1_address, 1 ) ).to.emit( contract, CONTRACT.EVENTS.Approval ).withArgs( token_owner_address, user1_address, 1 )
                })
                // FIX: describe callbacks must be synchronous (was `async function`).
                describe( 'TokenOwner approves User1 to spend 2 tokens in their behalf', function() {
                  beforeEach( async function() {
                    await contract.connect( token_owner ).approve( user1_address, 2 )
                  })
                  describe( CONTRACT.METHODS.allowance.SIGNATURE, function() {
                    if ( TEST.METHODS.allowance ) {
                      it( 'User1 should be allowed to spend 2 token in behalf of TokenOwner', async function() {
                        const allowance = await contract.allowance( token_owner_address, user1_address )
                        expect( allowance ).to.equal( 2 )
                      })
                    }
                  })
                  describe( CONTRACT.METHODS.transferFrom.SIGNATURE, function() {
                    it( 'Contract should emit a ' + CONTRACT.EVENTS.Transfer + ' event mentioning a token was transfered from TokenOwner to User1', async function() {
                      await expect( contract.connect( user1 ).transferFrom( token_owner_address, user1_address, 1 ) ).to.emit( contract, CONTRACT.EVENTS.Transfer ).withArgs( token_owner_address, user1_address, 1 )
                    })
                    if ( TEST.METHODS.transferFrom ) {
                      it( 'User1 should be able to transfer a token owned by TokenOwner', async function() {
                        await contract.connect( user1 ).transferFrom( token_owner_address, user1_address, 1 )
                        expect( await contract.balanceOf( token_owner_address ) ).to.equal( 0 )
                        expect( await contract.balanceOf( user1_address ) ).to.equal( 1 )
                        expect( await contract.totalSupply() ).to.equal( contract_params.INIT_SUPPLY + 1 )
                      })
                      it( 'Trying to transfer tokens from the null address should be reverted with ' + ERROR.IERC20_NULL_ADDRESS_OWNER, async function() {
                        await expect( contract.connect( user1 ).transferFrom( CST.ADDRESS_ZERO, user1_address, 1 ) ).to.be.revertedWith( ERROR.IERC20_NULL_ADDRESS_OWNER )
                      })
                      it( 'Trying to transfer tokens to the null address should be reverted with ' + ERROR.IERC20_NULL_ADDRESS_TRANSFER, async function() {
                        await expect( contract.connect( user1 ).transferFrom( token_owner_address, CST.ADDRESS_ZERO, 1 ) ).to.be.revertedWith( ERROR.IERC20_NULL_ADDRESS_TRANSFER )
                      })
                      it( 'User1 trying to transfer more tokens than TokenOwner owns should be reverted with ' + ERROR.IERC20_INSUFFICIENT_BALANCE, async function() {
                        await expect( contract.connect( user1 ).transferFrom( token_owner_address, user1_address, 2 ) ).to.be.revertedWith( ERROR.IERC20_INSUFFICIENT_BALANCE )
                      })
                      it( 'User1 trying to transfer more tokens than allowed should be reverted with ' + ERROR.IERC20_CALLER_NOT_ALLOWED, async function() {
                        await expect( contract.connect( user1 ).transferFrom( token_owner_address, user1_address, 10 ) ).to.be.revertedWith( ERROR.IERC20_CALLER_NOT_ALLOWED )
                      })
                    }
                  })
                })
              }
            })
            describe( CONTRACT.METHODS.transferFrom.SIGNATURE, function() {
              if ( TEST.METHODS.transferFrom ) {
                it( 'Trying to transfer from a token owner while not allowed should be reverted with ' + ERROR.IERC20_CALLER_NOT_ALLOWED, async function() {
                  await expect( contract.connect( user1 ).transferFrom( token_owner_address, user1_address, 1 ) ).to.be.revertedWith( ERROR.IERC20_CALLER_NOT_ALLOWED )
                })
              }
            })
          }
        })
        describe( CONTRACT.METHODS.mintBatch.SIGNATURE, function() {
          if ( TEST.METHODS.mintBatch ) {
            it( 'Airdropping 10 tokens to multiple users', async function() {
              let recipients = [ token_owner_address, user1_address ]
              await contract.functions[ CONTRACT.METHODS.mintBatch.SIGNATURE ]( recipients, 10 )
              expect( await contract.balanceOf( token_owner_address ) ).to.equal( 10 )
              expect( await contract.balanceOf( user1_address ) ).to.equal( 10 )
              expect( await contract.totalSupply() ).to.equal( 20 )
            })
          }
        })
        describe( CONTRACT.METHODS.mintBatch_ol.SIGNATURE, function() {
          if ( TEST.METHODS.mintBatch_ol ) {
            it( 'Airdropping different number of tokens to multiple users', async function() {
              let recipients = [ token_owner_address, user1_address ]
              let amounts = [ 10, 1 ]
              await contract.functions[ CONTRACT.METHODS.mintBatch_ol.SIGNATURE ]( recipients, amounts )
              expect( await contract.balanceOf( token_owner_address ) ).to.equal( 10 )
              expect( await contract.balanceOf( user1_address ) ).to.equal( 1 )
              expect( await contract.totalSupply() ).to.equal( 11 )
            })
            it( 'Trying to airdrop tokens to the null address should be reverted with ' + ERROR.IERC20_NULL_ADDRESS_MINT, async function() {
              let recipients = [ token_owner_address, CST.ADDRESS_ZERO ]
              let amounts = [ 1, 2 ]
              await expect( contract.functions[ CONTRACT.METHODS.mintBatch_ol.SIGNATURE ]( recipients, amounts ) ).to.be.revertedWith( ERROR.IERC20_NULL_ADDRESS_MINT )
            })
            it( 'Input arrays with different numbers of parameters should be reverted with ' + ERROR.IERC20_ARRAY_LENGTH_MISMATCH, async function() {
              await expect( contract.functions[ CONTRACT.METHODS.mintBatch_ol.SIGNATURE ]( [user1_address, user2_address], [1] ) ).to.be.revertedWith( ERROR.IERC20_ARRAY_LENGTH_MISMATCH )
            })
          }
        })
      }
    })

    describe( 'Invalid input ...', function() {
      if ( TEST.USE_CASES.INVALID_INPUT ) {
        // FIX: declared here instead of leaking as an implicit global.
        let defaultArgs
        beforeEach( async function() {
          defaultArgs = {}
          defaultArgs[ CONTRACT.METHODS.allowance.SIGNATURE ] = {
            err : null,
            args : [
              contract_deployer_address,
              user1_address,
            ]
          }
          defaultArgs[ CONTRACT.METHODS.approve.SIGNATURE ] = {
            err : null,
            args : [
              user1_address,
              10,
            ]
          }
          defaultArgs[ CONTRACT.METHODS.balanceOf.SIGNATURE ] = {
            err : null,
            args : [
              user1_address,
            ]
          }
          defaultArgs[ CONTRACT.METHODS.totalSupply.SIGNATURE ] = {
            err : null,
            args : []
          }
          defaultArgs[ CONTRACT.METHODS.transfer.SIGNATURE ] = {
            err : null,
            args : [
              user1_address,
              10,
            ]
          }
          defaultArgs[ CONTRACT.METHODS.transferFrom.SIGNATURE ] = {
            err : null,
            args : [
              contract_deployer_address,
              user1_address,
              10,
            ]
          }
          defaultArgs[ CONTRACT.METHODS.mint.SIGNATURE ] = {
            err : null,
            args : [
              token_owner_address,
              10,
            ]
          }
          defaultArgs[ CONTRACT.METHODS.mintBatch.SIGNATURE ] = {
            err : null,
            args : [
              [ token_owner_address ],
              10,
            ]
          }
          defaultArgs[ CONTRACT.METHODS.mintBatch_ol.SIGNATURE ] = {
            err : null,
            args : [
              [ token_owner_address ],
              [ 10 ],
            ]
          }
        })
        // Fuzz every declared method with generated invalid-argument cases.
        Object.entries( CONTRACT.METHODS ).forEach( function( [ prop, val ] ) {
          describe( val.SIGNATURE, function() {
            const testSuite = getTestCasesByFunction( val.SIGNATURE, val.PARAMS )
            testSuite.forEach( testCase => {
              it( testCase.test_description, async function() {
                await generateTestCase( contract, testCase, defaultArgs, prop, val )
              })
            })
          })
        })
      }
    })
  })
}
module.exports = { shouldBehaveLikeERC20Base }
|
#!/bin/bash
set -e

# Force AWS CLI to sign S3 requests with Signature Version 4 when requested
# (needed by some S3-compatible endpoints / newer AWS regions).
if [ "${S3_S3V4}" = "yes" ]; then
  aws configure set default.s3.signature_version s3v4
fi

# When SCHEDULE is the sentinel "**None**", run a single backup immediately.
# Otherwise register backup.sh as a cron entry (appended to any existing
# crontab) and run crond in the foreground to keep the container alive.
if [ "${SCHEDULE}" = "**None**" ]; then
  sh backup.sh
else
  echo "$SCHEDULE /bin/sh $(pwd)/backup.sh" > crontab-fragment.txt
  crontab -l | cat - crontab-fragment.txt > crontab.txt && crontab crontab.txt
  crond -f
fi
<gh_stars>0
package com.company;
public class Message {
    // Message type discriminator; known values (from the original note):
    //   initialize – the initial board state, player information
    //   move – a list of effects
    int type;
    // Serialized message payload.
    String body;

    /**
     * Creates a network message.
     *
     * @param type message type discriminator (see field comment for known values)
     * @param body serialized payload
     */
    public Message(int type, String body) {
        /*
        Message types
        initialize – the initial board state, player information
        move – a list of effects
        */
        this.type = type;
        this.body = body;
    }
}
|
<filename>BOJ/10000/10828.java
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
public class Main {
    // Stack state: index of the current top element (-1 when empty) and a
    // fixed-capacity backing array (problem guarantees at most 10000 commands).
    static int top;
    static int[] stack;

    /**
     * BOJ 10828 — reads stack commands from stdin and writes each query's
     * result to stdout.
     */
    public static void main(String[] args) throws NumberFormatException, IOException {
        BufferedReader reader = new BufferedReader(new InputStreamReader(System.in));
        BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(System.out));
        top = -1;
        stack = new int[10001];
        int commandCount = Integer.parseInt(reader.readLine());
        for (int i = 0; i < commandCount; i++) {
            String[] tokens = reader.readLine().split(" ");
            switch (tokens[0]) {
                case "push":
                    push(Integer.parseInt(tokens[1]));
                    break;
                case "pop":
                    writer.write(pop() + "\n");
                    break;
                case "size":
                    writer.write(size() + "\n");
                    break;
                case "empty":
                    writer.write(empty() + "\n");
                    break;
                case "top":
                    writer.write(top() + "\n");
                    break;
            }
        }
        writer.flush();
    }

    /** Pushes a value onto the stack. */
    public static void push(int num) {
        stack[++top] = num;
    }

    /** Removes and returns the top value, or -1 if the stack is empty. */
    public static int pop() {
        if (top > -1) {
            return stack[top--];
        }
        return -1;
    }

    /** Returns the number of stored elements. */
    public static int size() {
        return top + 1;
    }

    /** Returns 1 if the stack is empty, 0 otherwise. */
    public static int empty() {
        if (top == -1) {
            return 1;
        }
        return 0;
    }

    /** Returns the top value without removing it, or -1 if the stack is empty. */
    public static int top() {
        if (top != -1) {
            return stack[top];
        }
        return -1;
    }
}
// Function to calculate the Euclidean distance between two points
double line_distance(double p1[2], double p2[2]) {
return sqrt(pow(p2[0] - p1[0], 2) + pow(p2[1] - p1[1], 2));
}
/* Determine whether two circles touch or intersect.
 *
 * p1, r1: center and radius of the first circle
 * p2, r2: center and radius of the second circle
 *
 * Two circles touch or intersect iff the distance between their centers
 * does not exceed the sum of their radii. The original spelled this as
 * `if (cond) return true; else return false;` — return the condition directly.
 */
bool are_circles_touching_or_intersecting(double p1[2], double r1, double p2[2], double r2) {
    return line_distance(p1, p2) <= (r1 + r2);
}
#!/bin/bash
# Start a Selenium Grid node for Safari in the background, discarding all output.
# NOTE(review): assumes the standalone jar is in the working directory and
# ~/selenium/safari.json exists — confirm both paths on the target host.
java -jar selenium-server-standalone-3.141.59.jar -role node -nodeConfig ~/selenium/safari.json > /dev/null 2>&1 &
def is_subsequence(string1, string2):
    """Return True if string1 is a subsequence of string2.

    A subsequence preserves relative order but need not be contiguous.
    Scans string2 once with an iterator; ``ch in it`` advances the iterator
    past the first match, so matches are consumed left to right.
    """
    remaining = iter(string2)
    return all(ch in remaining for ch in string1)
# BUG FIX: the original called is_subsequence(string1, string2) with names
# that were never defined, raising NameError at import. Provide example
# inputs and guard the demo so importing the module stays side-effect free.
if __name__ == "__main__":
    string1 = "ace"
    string2 = "abcde"
    result = is_subsequence(string1, string2)
    print(result)
import Algorithmia
import tabpy_client
ALGORITHMIA_API_KEY = 'YOUR_API_KEY'  # obtain from algorithmia.com/user#credentials
TABPY_SERVER_URL = 'http://localhost:9004/'  # TabPy server the wrapper is deployed to
DEBUG = True  # when True, prints inputs, results and errors to stdout
def algorithmia(algorithm_name, input):
    """Run an Algorithmia algorithm and return its result.

    Args:
        algorithm_name: fully qualified algorithm name (e.g. 'util/Echo/0.2.1').
        input: payload forwarded to the algorithm's pipe() call.

    Raises:
        Exception: re-raised with the underlying client error message on failure.
    """
    # BUG FIX: the debug format string was "algorithm: %sinput: %s\n", which ran
    # the two fields together; separate them onto their own lines.
    if DEBUG: print("algorithm: %s\ninput: %s\n" % (algorithm_name, input))
    try:
        client = Algorithmia.client(ALGORITHMIA_API_KEY)
        algo = client.algo(algorithm_name)
        result = algo.pipe(input).result
    except Exception as x:
        if DEBUG: print(x)
        # NOTE(review): wrapping in a plain Exception discards the original
        # type and traceback — confirm whether TabPy requires this shape.
        raise Exception(str(x))
    if DEBUG: print("result: %s" % result)
    return result
# Publish the wrapper to TabPy so Tableau can invoke it as an external function.
tabpy_conn = tabpy_client.Client(TABPY_SERVER_URL)
tabpy_conn.deploy('algorithmia', algorithmia, 'Run a function on Algorithmia: algorithmia(algorithm_name, input)', override=True)
#!/bin/bash
# Install the stable/airflow chart as release "airflow" in the airflow
# namespace, using the local values.yaml overrides.
# NOTE(review): --name is Helm v2 syntax; Helm v3 takes the release name as a
# positional argument — confirm the Helm version in use.
helm install --name "airflow" --namespace airflow -f values.yaml stable/airflow
<reponame>JoeWrightss/sdk-go
package client
import (
"github.com/cloudevents/sdk-go/pkg/cloudevents"
"github.com/cloudevents/sdk-go/pkg/cloudevents/types"
"github.com/google/go-cmp/cmp"
"testing"
"time"
)
// TestDefaultIDToUUIDIfNotSet checks that DefaultIDToUUIDIfNotSet produces a
// non-empty event ID for events without one, across spec versions v0.1–v0.3,
// and that events with a pre-set ID keep a non-empty ID.
func TestDefaultIDToUUIDIfNotSet(t *testing.T) {
	testCases := map[string]struct {
		event cloudevents.Event
	}{
		"nil context": {
			event: cloudevents.Event{},
		},
		"v0.1 empty": {
			event: cloudevents.Event{
				Context: cloudevents.EventContextV01{},
			},
		},
		"v0.2 empty": {
			event: cloudevents.Event{
				Context: cloudevents.EventContextV02{},
			},
		},
		"v0.3 empty": {
			event: cloudevents.Event{
				Context: cloudevents.EventContextV03{},
			},
		},
		"v0.1 no change": {
			event: cloudevents.Event{
				Context: cloudevents.EventContextV01{EventID: "abc"}.AsV01(),
			},
		},
		"v0.2 no change": {
			event: cloudevents.Event{
				Context: cloudevents.EventContextV02{ID: "abc"}.AsV02(),
			},
		},
		"v0.3 no change": {
			event: cloudevents.Event{
				Context: cloudevents.EventContextV03{ID: "abc"}.AsV03(),
			},
		},
	}
	for n, tc := range testCases {
		t.Run(n, func(t *testing.T) {
			got := DefaultIDToUUIDIfNotSet(tc.event)
			// Read the ID through the v0.2 view so all versions are checked uniformly.
			if got.Context != nil && got.Context.AsV02().ID == "" {
				t.Errorf("failed to generate an id for event")
			}
		})
	}
}
// TestDefaultIDToUUIDIfNotSetImmutable checks that DefaultIDToUUIDIfNotSet
// returns a copy with an ID set while leaving the input event untouched,
// and that the spec version is preserved.
func TestDefaultIDToUUIDIfNotSetImmutable(t *testing.T) {
	event := cloudevents.Event{
		Context: cloudevents.EventContextV01{},
	}
	got := DefaultIDToUUIDIfNotSet(event)
	want := "0.1"
	if diff := cmp.Diff(want, got.SpecVersion()); diff != "" {
		t.Errorf("unexpected (-want, +got) = %v", diff)
	}
	// The original event must not have been mutated.
	if event.Context.AsV01().EventID != "" {
		t.Errorf("modified the original event")
	}
	// The returned copy must carry a generated ID.
	if got.Context.AsV01().EventID == "" {
		t.Errorf("failed to generate an id for event")
	}
}
// TestDefaultTimeToNowIfNotSet checks that DefaultTimeToNowIfNotSet produces a
// non-zero timestamp for events without one, across spec versions v0.1–v0.3,
// and that events with a pre-set time keep a valid time.
func TestDefaultTimeToNowIfNotSet(t *testing.T) {
	testCases := map[string]struct {
		event cloudevents.Event
	}{
		"nil context": {
			event: cloudevents.Event{},
		},
		"v0.1 empty": {
			event: cloudevents.Event{
				Context: cloudevents.EventContextV01{},
			},
		},
		"v0.2 empty": {
			event: cloudevents.Event{
				Context: cloudevents.EventContextV02{},
			},
		},
		"v0.3 empty": {
			event: cloudevents.Event{
				Context: cloudevents.EventContextV03{},
			},
		},
		"v0.1 no change": {
			event: cloudevents.Event{
				Context: cloudevents.EventContextV01{EventTime: &types.Timestamp{Time: time.Now()}}.AsV01(),
			},
		},
		"v0.2 no change": {
			event: cloudevents.Event{
				Context: cloudevents.EventContextV02{Time: &types.Timestamp{Time: time.Now()}}.AsV02(),
			},
		},
		"v0.3 no change": {
			event: cloudevents.Event{
				Context: cloudevents.EventContextV03{Time: &types.Timestamp{Time: time.Now()}}.AsV03(),
			},
		},
	}
	for n, tc := range testCases {
		t.Run(n, func(t *testing.T) {
			// BUG FIX: the original body was copy/pasted from the ID test —
			// it called DefaultIDToUUIDIfNotSet and asserted on the event ID,
			// so DefaultTimeToNowIfNotSet was never exercised here. Call the
			// time defaulter and assert on the timestamp instead.
			got := DefaultTimeToNowIfNotSet(tc.event)
			if got.Context != nil {
				if tm := got.Context.AsV02().Time; tm == nil || tm.IsZero() {
					t.Errorf("failed to set a time for event")
				}
			}
		})
	}
}
// TestDefaultTimeToNowIfNotSetImmutable checks that DefaultTimeToNowIfNotSet
// returns a copy with a timestamp set while leaving the input event untouched,
// and that the spec version is preserved.
func TestDefaultTimeToNowIfNotSetImmutable(t *testing.T) {
	event := cloudevents.Event{
		Context: cloudevents.EventContextV01{},
	}
	got := DefaultTimeToNowIfNotSet(event)
	want := "0.1"
	if diff := cmp.Diff(want, got.SpecVersion()); diff != "" {
		t.Errorf("unexpected (-want, +got) = %v", diff)
	}
	// The original event must not have been mutated.
	if event.Context.AsV01().EventTime != nil {
		t.Errorf("modified the original event")
	}
	// The returned copy must carry a generated (non-zero) timestamp.
	if got.Context.AsV01().EventTime.IsZero() {
		t.Errorf("failed to generate an id for event")
	}
}
|
/*!
* Peer packet handler
*
* Used to handle packets sent from other peers in the network.
*/
const __ = require('./const')
const Debugger = require('./fn.debugger')
const Receiver = require('./receiver')
const WebAccount = require('./web.account')
const WebFileServer = require('./web.file.server')
const WebPost = require('./web.post')
const Acc = require('./data/acc')
const Peer = require('./data/peer.extended')
const Post = require('./data/post')
const PostPointer = require('./data/post.pointer')
const PostLike = require('./data/post.like')
const Result = require('./data/result')
const SignKey = require('./data/key.sign')
/**
 * Peer command handler.
 *
 * Fixes applied in review:
 * - the malformed-packet guard compared data[2] with `!== 'number' || !== 'string'`,
 *   which is true for every value, so every packet was rejected.
 * - the media-request reply referenced an undefined `media` variable (ReferenceError).
 * - the account-update validation had a misplaced parenthesis,
 *   `!Array.isArray(data[7] || ...)`, disabling two of the checks.
 * - the 'following' list was overwritten with Array.prototype.push's return
 *   value (the new length) instead of the updated array.
 * - the like handler built a PostLike from an undefined `d` instead of `data`.
 * - the post handler wrote an undefined `postData` instead of the validated post.
 * - the media size check called `.length` on a number, so it never fired.
 */
const Handler = class {
  /** @type {Receiver} */
  receiver
  /** @type {WebAccount} */
  webAccount
  /** @type {WebFileServer} */
  webFileServer
  /** @type {WebPost} */
  webPost
  /**
   * Handle one decoded packet from a remote peer.
   * @param {Peer} peer - Sending peer
   * @param {Result} result - Decoded packet; result.data is the command array
   */
  async handle (peer, result) {
    if (!result.success)
      return Debugger.error(result.message)
    let allow = true
    let data = result.data
    let receiver = this.receiver
    let storage = receiver.storage.promise
    let address = `${peer.ip}:${peer.port}`
    // FIX: data[2] may legitimately be a number (post index) or a string
    // (section name); the original `!== 'number' || !== 'string'` was a
    // tautology that rejected every packet.
    if (typeof data[0] !== 'string' ||
      typeof data[1] !== 'string' ||
      (typeof data[2] !== 'number' &&
      typeof data[2] !== 'string') )
      return Debugger.warn(`${address} sent malformed packet.`)
    if (!await storage.access(data[1])) // account not exist
      allow = false
    Debugger.log(`${address}://${data[0]}/${data[1]}/${data[2]}`)
    /**
     * Peer high-permission action section.
     *
     * Commands that do not have [0],[1],[2], will use this general parameters:
     * [0]:string social Command (such as post, like, comment, share, mention)
     * [1]:string account public key
     * [2]:number|string account section: avatar, cover, post (number)
     * :
     * [n]:any
     */
    switch (data[0]) {
      case 'request':
        /**
         * Peer make a request for specific resources.
         * [3]:string What resource to request
         */
        if (!allow)
          return
        switch (data[3]) {
          case 'account':
            /**
             * Account Request
             */
            let requestAcc = new Acc(await storage.read(data[1]))
            receiver.send(peer, [
              'account',
              requestAcc.description,
              requestAcc.key.public,
              requestAcc.name,
              requestAcc.img.avatar,
              requestAcc.img.cover,
              requestAcc.public,
              requestAcc.tag,
              requestAcc.signature
            ])
            return
          case 'media':
            /**
             * Media request
             * [4]:string media index
             */
            let requestMediaLocation = `${data[1]}.${data[2]}${typeof data[4] === 'number' ? `.${data[4]}`: ''}`
            if (!await storage.access(requestMediaLocation))
              return
            receiver.sendMedia(peer, {
              owner: data[1],
              index: data[2],
              // FIX: `media` was an undefined identifier; the requested
              // media index travels in data[4].
              media: typeof data[4] === 'number' ? data[4] : undefined
            })
            return
          case 'post':
            /**
             * Post request
             */
            if (typeof data[2] !== 'number')
              return receiver.send(peer, ['invalidPostFormat', data[1]])
            let requestPostLocation = `${data[1]}.${data[2]}`
            if (!await storage.access(requestPostLocation))
              return receiver.send(peer, ['postNotFound', data[1]])
            let requestPost = new Post(await storage.read(requestPostLocation))
            receiver.send(peer, [
              'post',
              data[1],
              requestPost.number,
              requestPost.media,
              requestPost.mediaType,
              requestPost.mention,
              requestPost.tag,
              requestPost.text,
              requestPost.time,
              requestPost.signature
            ])
            return
        }
        return
      case 'account':
        /**
         * Account create & update
         * [1]:string account public key
         * [2]:string account description
         * [3]:string account name
         * [4]:string account avatar (hash)
         * [5]:string account cover (hash)
         * [6]:boolean account public (anyone can post to this)
         * [7]:string[] account tag
         * [8]:string account signature
         */
        // FIX: misplaced parenthesis — `!Array.isArray(data[7] || ...)` folded
        // the signature check inside the isArray argument, disabling both.
        if (typeof data[3] !== 'string' ||
          typeof data[4] !== 'string' ||
          typeof data[5] !== 'string' ||
          typeof data[6] !== 'boolean' ||
          !Array.isArray(data[7]) ||
          typeof data[8] !== 'string' )
          return
        if (this.webAccount.followPending.indexOf(data[1]) < 0 && !await storage.access(data[1]))
          return
        let newAcc = new Acc([
          data[2],
          new SignKey([
            '',
            '',
            data[1]
          ]),
          data[3],
          [
            data[4],
            data[5]
          ],
          0,
          data[6],
          data[7],
          data[8]
        ])
        if (!newAcc.valid)
          return
        await storage.write(data[1], newAcc.exportPub())
        // FIX: Array.prototype.push returns the new length, not the array —
        // the original persisted a number over the 'following' list.
        let following = await storage.read('following')
        following.push(data[1])
        await storage.write('following', following)
        /**
         * Retrieve data request data from peer
         * @param {string} target
         * @returns
         */
        let picRequestFn = async target => {
          if (typeof target !== 'string')
            return Debugger.error(`picRequestFn() got invalid 'target' (type ${typeof target})`)
          let awaitForData = resolve => {
            /** @type {NodeJS.Timeout} */
            let interval
            let counter = __.MAX_TRIAL + 1
            let intervalFn = async () => {
              if (await storage.access(`${data[1]}.${target}`)) {
                clearInterval(interval)
                resolve(true)
              } else if (counter > 0)
                counter--
              else {
                clearInterval(interval)
                resolve(false)
              }
              receiver.send(peer, [
                'request',
                data[1],
                target,
                'media'
              ])
            }
            interval = setInterval(intervalFn, 10000)
            intervalFn()
          }
          return await new Promise(awaitForData)
        }
        if (newAcc.img.avatar.length > 0) {
          if (!await picRequestFn('avatar'))
            return Debugger.error(`Can't retrieve avatar image for account ${data[1]}`)
        }
        if (newAcc.img.cover.length > 0) {
          if (!await picRequestFn('cover'))
            return Debugger.error(`Can't retrieve cover image for account ${data[1]}`)
        }
        return
      case 'like':
        /**
         * Like a post
         * [3]:string like owner
         * [4]:number like time
         * [5]:string like signature
         */
        let likeFile = `${data[1]}.${data[2]}.like.${data[3]}`
        if (await storage.access(likeFile)) //like file exists
          return
        // FIX: the original built the PostLike from an undefined `d`.
        let like = new PostLike([
          data[3],
          data[4],
          data[0],
          data[1],
          data[5]
        ])
        if (!like.valid)
          return
        if (!await storage.write(likeFile, like.export()))
          return
        let likeCount = 0
        let likeCountFileLocation = `${data[1]}.${data[2]}.likes`
        if (await storage.access(likeCountFileLocation))
          likeCount = await storage.read(likeCountFileLocation)
        likeCount++
        await storage.write(likeCountFileLocation, likeCount)
        await receiver.broadcast(data[1], __.BROADCAST_AMOUNT, data)
        return
      case 'post':
        /**
         * Make a new post, can also be used for comments (mention)
         * [3]:string[] post media
         * [4]:string[] post media type
         * [5]:PostPointer post mention (exported as array)
         * [6]:string[] post tag (public key)
         * [7]:string post text
         * [8]:nubmer post time
         * [9]:string post signature
         */
        if (!Array.isArray(data[3]) ||
          !Array.isArray(data[4]) ||
          //Ignore 5, not needed
          !Array.isArray(data[6]) ||
          typeof data[7] !== 'string' ||
          typeof data[8] !== 'number' ||
          typeof data[9] !== 'string' )
          return
        let newPostLocation = `${data[1]}.${data[2]}`
        if (await storage.access(newPostLocation)) //post exists
          return
        let newPost = new Post([
          data[2],
          data[1],
          data[3],
          data[4],
          Array.isArray(data[5]) ? new PostPointer(data[5]) : undefined,
          data[6],
          data[7],
          data[8],
          data[9]
        ])
        if (!newPost.valid)
          return
        // FIX: `postData` was undefined; persist the validated post, matching
        // the like.export() pattern above.
        // NOTE(review): confirm Post.export() produces the stored wire shape.
        if (!await storage.write(newPostLocation, newPost.export()))
          return
        // TODO: categorize post by tag, and add it to timeline.
        // * If the post is the mention request, don't add it to timeline
        await receiver.broadcast(data[1], __.BROADCAST_AMOUNT, data)
        return
      case 'media':
        /**
         * !! UNSTABLE, NOT TESTED !!
         *
         * Incoming media stream
         * [3]:number media index
         * [4]:number media total packets that will be received
         */
        if (!allow)
          return
        if (typeof peer.mediaStream !== 'undefined')
          return receiver.send(peer, [__.MEDIA_STREAM_NOT_READY])
        if (typeof data[3] !== 'number' ||
          typeof data[4] !== 'number' )
          return receiver.send(peer, [__.MEDIA_STREAM_INFO_INVALID])
        // FIX: data[4] is a number (packet count); the original compared
        // `data[4].length`, which is undefined, so this guard never fired.
        // NOTE(review): confirm the intended unit of the MAX_PAYLOAD bound.
        if (data[4] > __.MAX_PAYLOAD || data[4] > 65536)
          return receiver.send(peer, [__.MEDIA_STREAM_FILE_TOO_LARGE])
        /** @type {string} */
        let mediaHash
        let requestMediaLocation = `${data[1]}.${data[2]}`
        switch (data[2]) {
          case 'avatar':
          case 'cover':
            mediaHash = new Acc(await storage.read(data[1])).img[data[2]]
            requestMediaLocation += '.png'
            break
          default:
            if (!await storage.access(requestMediaLocation))
              return receiver.send(peer, [__.MEDIA_STREAM_POST_NOT_FOUND])
            mediaHash = new Post(await storage.read(requestMediaLocation)).media[data[3]]
            requestMediaLocation += `.${data[3]}`
            break
        }
        if (!mediaHash)
          return receiver.send(peer, [__.MEDIA_STREAM_NO_MEDIA])
        if (await storage.bin.access(requestMediaLocation))
          return receiver.send(peer, [__.MEDIA_STREAM_MEDIA_FOUND])
        await peer.openMediaStream(requestMediaLocation, mediaHash, data[4])
        receiver.send(peer, [__.MEDIA_STREAM_READY])
        return
      // Hello test
      case 'hello':
        receiver.send(peer, [`what`])
        return
    }
  }
}
module.exports = Handler |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.