text
stringlengths 1
1.05M
|
|---|
package com.benmu.framework.event.shorage;
import android.content.Context;
import android.text.TextUtils;
import com.benmu.framework.manager.ManagerFactory;
import com.benmu.framework.manager.StorageManager;
import com.benmu.framework.utils.JsPoster;
import com.taobao.weex.bridge.JSCallback;
import java.util.ArrayList;
/**
 * Event handler that reads a value from local storage by key and returns it
 * to the JS layer, either asynchronously via a callback or synchronously.
 * <p>
 * Created by Carry on 2017/5/21.
 */
public class EventGetData {

    /**
     * Asynchronously fetches the value stored under the first key in
     * {@code paramsList} and posts the result back through {@code jscallback}.
     *
     * @param context    Android context handed to the storage manager
     * @param paramsList parameter list from JS; element 0 is the storage key
     * @param jscallback callback receiving success (the value) or failure
     */
    public void getData(Context context, ArrayList<String> paramsList, JSCallback jscallback) {
        String key = paramsList.get(0);
        StorageManager storageManager = ManagerFactory.getManagerService(StorageManager.class);
        String result = storageManager.getData(context, key);
        if (TextUtils.isEmpty(result)) {
            JsPoster.postFailed(jscallback);
        } else {
            JsPoster.postSuccess(result, jscallback);
        }
    }

    /**
     * Synchronous variant of {@link #getData}.
     * <p>
     * Fix: treat an empty string as "no data" via {@link TextUtils#isEmpty},
     * so the sync and async paths agree — previously only {@code null}
     * produced the failure payload here while the async path also failed on
     * empty strings.
     *
     * @param context Android context handed to the storage manager
     * @param list    parameter list from JS; element 0 is the storage key
     * @return JsPoster failure object when no value exists, success object otherwise
     */
    public Object getDataSync(Context context, ArrayList<String> list) {
        String key = list.get(0);
        StorageManager storageManager = ManagerFactory.getManagerService(StorageManager.class);
        String result = storageManager.getData(context, key);
        return TextUtils.isEmpty(result) ? JsPoster.getFailed() : JsPoster.getSuccess(result);
    }
}
|
<filename>app/platform/PlatformBuilder.js
/*
*SPDX-License-Identifier: Apache-2.0
*/
var Platform = require('./fabric/Platform.js');
/**
 * Factory that builds and initializes a blockchain platform adaptor.
 */
class PlatformBuilder {
    /**
     * Builds and initializes a platform instance for the given type.
     * @param {string} pltfrm platform identifier; only 'fabric' is supported
     * @returns {Promise<Platform>} an initialized platform instance
     * @throws {Error} when the platform type is not supported
     */
    static async build(pltfrm) {
        if (pltfrm === 'fabric') {
            const platform = new Platform();
            await platform.initialize();
            return platform;
        }
        // Fix: throw a real Error object (not a bare string) so callers
        // get a stack trace and instanceof checks work.
        throw new Error('Invalid Platform');
    }
}

module.exports = PlatformBuilder;
|
package log
import (
"fmt"
"os"
"path/filepath"
"time"
)
//file logger print log message to a specified file, log file will rotate to new
//file daily, and will cleanup old log files, the file logger engine cached one
//month's log data and will remove older log files
// file is a Logger implementation that appends log records to a file on
// disk. The log file rotates daily (see rotate) and old files are removed
// by sweep. Records are buffered through the cache channel and written by
// a single goroutine (run).
type file struct {
	level    int           // minimum log level this logger reports (see Level)
	path     string        // directory holding the log files
	duration time.Duration // time remaining until the next midnight rotation
	filename string        // basename of the currently open log file
	file     *os.File      // open log file handle; nil until rotate runs
	cache    chan *Record  // buffered queue of pending records
	quit     chan bool     // signals the run goroutine to stop
}
// NewFileLogger creates a file-backed Logger writing at the given level.
// An optional single string argument selects the directory for log files;
// when absent (or not a string) the current directory is used. The writer
// goroutine is started before returning.
func NewFileLogger(level int, args ...interface{}) Logger {
	dir := ""
	if len(args) == 1 {
		if s, ok := args[0].(string); ok {
			dir = s
		}
	}
	os.MkdirAll(dir, 0770)
	logger := &file{
		level:    level,
		path:     dir,
		filename: "",
		file:     nil,
		cache:    make(chan *Record, BUFFER_CAPACITY),
		quit:     make(chan bool),
	}
	go logger.run()
	return logger
}
// run is the logger's single writer goroutine. It drains buffered records,
// rotates the log file when the rotation timer fires (duration is the time
// until the next local midnight, recomputed by rotate), and exits on quit.
// sweep runs in its own goroutine after each rotation so old-file cleanup
// never blocks writing.
func (f *file) run() {
	f.rotate()
	for {
		select {
		case rec := <-f.cache:
			f.write(rec)
		case <-time.After(f.duration):
			f.rotate()
			go f.sweep()
		case <-f.quit:
			return
		}
	}
}
// rotate closes the current log file (if any), opens/creates the file named
// after today's date, and recomputes duration as the time remaining until
// the next local midnight, when the next rotation is due.
//
// Fix: build the file path with filepath.Join instead of manual "/"
// concatenation — with an empty path the old code produced an absolute
// path in the filesystem root (e.g. "/2006-01-02.log").
func (f *file) rotate() error {
	if f.file != nil {
		f.file.Close()
	}
	f.filename = time.Now().Format("2006-01-02") + ".log"
	fname := filepath.Join(f.path, f.filename)
	var err error
	f.file, err = os.OpenFile(fname, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0660)
	if err != nil {
		return err
	}
	day := time.Now().AddDate(0, 0, 1)
	day = time.Date(day.Year(), day.Month(), day.Day(), 0, 0, 0, 0, time.Local)
	f.duration = day.Sub(time.Now())
	return nil
}
// sweep removes log files older than 30 days from the log directory.
//
// Fix: filepath.Walk already passes the full path of each entry, so the
// old os.Remove(f.path + "/" + path) pointed at a doubled, non-existent
// path and never deleted anything. Directories (including the log
// directory itself) are now skipped explicitly.
func (f *file) sweep() {
	filepath.Walk(f.path, func(path string, fi os.FileInfo, err error) error {
		if fi == nil {
			return err
		}
		if fi.IsDir() {
			return nil
		}
		if fi.ModTime().Add(time.Hour * 24 * 30).Before(time.Now()) {
			os.Remove(path)
		}
		return nil
	})
}
// write formats a record as "YYYY/MM/DD HH:MM:SS [ LEVEL ]: message" and
// appends it to the current log file.
//
// Fix: Go's reference layout uses "05" for seconds; the original layout
// "15:04:04" printed the minutes field twice and never the seconds.
func (f *file) write(record *Record) {
	timestr := record.timestamp.Format("2006/01/02 15:04:05")
	fmt.Fprintln(f.file, timestr, "[", levelstr[record.level], "]:", record.message)
}
// flush synchronously drains every record still buffered in the cache
// channel, writing each one, and returns as soon as the channel is empty.
// Called from Close so queued records are not lost on shutdown.
func (f *file) flush() {
	for {
		select {
		case rec := <-f.cache:
			f.write(rec)
		default:
			// no buffered records left
			return
		}
	}
}
// Level returns the log level this logger was configured with.
func (f *file) Level() int {
	return f.level
}
// Write queues a record for the writer goroutine to persist. It blocks
// when the cache buffer is full.
func (f *file) Write(record *Record) {
	f.cache <- record
}
// Close stops the writer goroutine, drains any still-buffered records to
// the file, closes both channels, and finally closes the file handle (via
// the deferred func).
//
// NOTE(review): a Write racing with Close can panic on the closed cache
// channel — confirm callers stop logging before calling Close.
func (f *file) Close() {
	defer func() {
		if f.file != nil {
			f.file.Close()
		}
	}()
	f.quit <- true
	f.flush()
	close(f.cache)
	close(f.quit)
}
|
#!/usr/bin/env bash
# Install Google Cloud Storage tooling: copy the local .boto credentials
# file to the home directory where gsutil looks for it, then install
# gsutil with the Python 2 pip.
# NOTE(review): assumes .boto exists in the working directory and pip2 is
# on PATH — confirm for the target environment.
cp .boto ~/.boto
pip2 install gsutil
|
<reponame>abanvill/42-Projects
#include "../includes/ping.h"
/*
** Alarm handler (fires once per second). When the deadline option is
** active it decrements the remaining deadline; once the counter wraps
** past zero (compares equal to 0xFFFFFFFF) it prints the closing summary
** and exits successfully. Otherwise it sends the next echo request and
** re-arms the one-second alarm.
**
** NOTE(review): printf-style output from signal context is not
** async-signal-safe — presumably acceptable because the program is
** single-threaded; confirm.
*/
void alarm_callback(int signbr) {
	t_ping *ping;

	(void)signbr;
	ping = require_data();
	if (ping->opts.flag.deadline) {
		ping->opts.deadline--;
		if (ping->opts.deadline == 0xFFFFFFFF) {
			print_outro(ping);
			exit(EXIT_SUCCESS);
		}
	}
	send_packet(ping);
	alarm(1);
}
/*
** Signal handler: reports that a kill-type signal was received and
** terminates with a failure status.
*/
void kill_callback(int signbr) {
	(void)signbr;
	printf("Kill signal\n");
	exit(EXIT_FAILURE);
}
/*
** Signal handler: reports that a termination signal was received and
** terminates with a failure status.
*/
void term_callback(int signbr) {
	(void)signbr;
	printf("Term signal\n");
	exit(EXIT_FAILURE);
}
/*
** Signal handler: reports that a quit signal was received and terminates
** with a failure status.
** TODO (from original author): must print the statistics line before
** exiting — currently it does not.
*/
void quit_callback(int signbr) {
	(void)signbr;
	printf("Quit signal\n");
	exit(EXIT_FAILURE);
}
/*
** Signal handler: reports that an abort signal was received and
** terminates with a failure status.
*/
void abort_callback(int signbr) {
	(void)signbr;
	printf("Abort signal\n");
	exit(EXIT_FAILURE);
}
/*
** Interrupt handler (presumably registered for SIGINT — confirm at the
** registration site): finalizes the statistics, prints the closing
** summary, and exits successfully — the normal way a ping run ends.
*/
void interrupt_callback(int signbr) {
	t_ping *ping;

	(void)signbr;
	ping = require_data();
	update_stats(ping, ended);
	print_outro(ping);
	exit(EXIT_SUCCESS);
}
|
# Returns the longest whitespace-separated word in +str+.
# Ties keep the first occurrence; a blank string yields "".
def get_longest_word(str)
  # max_by returns the first maximum, matching the original strict ">"
  # scan; split(" ") splits on runs of whitespace, so blank input -> [].
  str.split(" ").max_by(&:length) || ""
end

puts get_longest_word("This is a long sentence with some long words")
|
#
# To run on the ZCU102 board, copy the package/sd_card directory onto the SD card, plug it into the board and power it up.
# When the Linux prompt appears, run this script by entering the following command:
# source /mnt/sd-mmcblk0p1/run_app.sh
#
# Mount the SD card's first partition and run the accelerator application
# from it, pointing the Xilinx runtime at the FPGA binary (ydma.xclbin).
mount /dev/mmcblk0p1 /mnt
cd /mnt
# XRT reads the platform description from /etc/xocl.txt
cp platform_desc.txt /etc/xocl.txt
export XILINX_XRT=/usr
export XILINX_VITIS=/mnt
./app.exe ydma.xclbin
echo "INFO: press Ctrl+a x to exit qemu"
|
def process_default_source(env_vars):
    """Describe the DEFAULT_SOURCE setting found in ``env_vars``.

    Args:
        env_vars: mapping of environment-variable names to values.

    Returns:
        str: a message naming the default source, or a "not specified"
        message when the key is absent or equals the sentinel
        ``'undefined'``.
    """
    source = env_vars.get('DEFAULT_SOURCE', 'undefined')
    return (
        'No default source specified'
        if source == 'undefined'
        else f'Default source is {source}'
    )
|
#!/bin/bash
# Install the local 'interactiontransformer' R package from source,
# keeping source references for debugging (--with-keep.source) and
# building only for the current architecture (--no-multiarch).
R CMD INSTALL --no-multiarch --with-keep.source interactiontransformer
|
#coding:utf-8
# Demonstrates several ways to deduplicate NumPy arrays (section 4.2).
import numpy as np
np.set_printoptions(linewidth=200,suppress=True)
# 4.2 Removing duplicate elements
# 4.2.1 Directly with the library function
a = np.array((1, 2, 3, 4, 5, 5, 7, 3, 2, 2, 8, 8))
print('原始数组:', a)
# Use np.unique
b = np.unique(a)
print('去重后:', b)
# 4.2.2 Deduplicating a 2-D array — is the result what you expect?
# (np.unique flattens its input, so it dedupes scalars, not rows.)
c = np.array(((1, 2), (3, 4), (5, 6), (1, 3), (3, 4), (7, 6)))
print('二维数组:\n', c)
print('去重后:', np.unique(c))
# 4.2.3 Option 1: pack each row into a complex number (real, imag) so
# rows become scalars that np.unique can deduplicate.
r, i = np.split(c, (1,), axis=1)
x = r + i * 1j
# x = c[:, 0] + c[:, 1] * 1j
print('转换成虚数:', x)
print('虚数去重后:', np.unique(x))
print(np.unique(x, return_index=True))  # return_index also gives the positions of first occurrences
idx = np.unique(x, return_index=True)[1]
print('二维数组去重:\n', c[idx])
# 4.2.3 Option 2: use a Python set of row tuples
print('去重方案2:\n', np.array(list(set([tuple(t) for t in c]))))
|
<gh_stars>0
package dev.vality.sink.common.handle.machineevent.eventpayload.impl;
import dev.vality.damsel.payment_processing.EventPayload;
import dev.vality.damsel.payment_processing.InvoiceChange;
import dev.vality.machinegun.eventsink.MachineEvent;
import dev.vality.sink.common.handle.machineevent.eventpayload.PaymentEventHandler;
import dev.vality.sink.common.handle.machineevent.eventpayload.change.InvoiceChangeEventHandler;
import lombok.RequiredArgsConstructor;
import java.util.List;
@RequiredArgsConstructor
public class InvoiceChangePaymentMachineEventHandler implements PaymentEventHandler {

    // Delegates, each deciding per-change whether it applies.
    private final List<InvoiceChangeEventHandler> eventHandlers;

    /**
     * Accepts only payloads that carry invoice changes.
     */
    @Override
    public boolean accept(EventPayload payload) {
        return payload.isSetInvoiceChanges();
    }

    /**
     * Dispatches every invoice change in the payload to each delegate that
     * accepts it, passing along the change's position within the payload.
     */
    @Override
    public void handle(EventPayload payload, MachineEvent baseEvent) {
        int position = 0;
        for (InvoiceChange currentChange : payload.getInvoiceChanges()) {
            for (InvoiceChangeEventHandler handler : eventHandlers) {
                if (handler.accept(currentChange)) {
                    handler.handle(currentChange, baseEvent, position);
                }
            }
            position++;
        }
    }
}
|
#!/usr/bin/env bash
# Spec tests for OSH debugging features: --debug-file, the debug-completion
# option, the 'repr' builtin, and crash dumps (OSH_CRASH_DUMP_DIR).
# Format: '####' starts a test case; '##' lines declare expected
# status/stdout and are assertions, not comments.
#### --debug-file
$SH --debug-file $TMP/debug.txt -c 'true'
grep 'Debug file' $TMP/debug.txt >/dev/null && echo yes
## stdout: yes
#### debug-completion option
set -o debug-completion
## status: 0
#### debug-completion from command line
$SH -o debug-completion
## status: 0
#### repr
x=42
repr x
echo status=$?
repr nonexistent
echo status=$?
## STDOUT:
x = (value.Str s:42)
status=0
'nonexistent' is not defined
status=1
## END
#### crash dump
rm -f $TMP/*.json
OSH_CRASH_DUMP_DIR=$TMP $SH -c '
g() {
  local glocal="glocal"
  echo $(( 1 / 0 ))
}
f() {
  local flocal="flocal"
  shift
  FOO=bar g
}
readonly array=(A B C)
f "${array[@]}"
' dummy a b c
echo status=$?
# Just check that we can parse it. TODO: Test properties.
python -m json.tool $TMP/*.json > /dev/null
echo status=$?
## STDOUT:
status=1
status=0
## END
#### crash dump with source
# TODO: The failure is not propagated through 'source'. Failure only happens
# on 'errexit'.
#rm -f $TMP/*.json
OSH_CRASH_DUMP_DIR=$TMP $SH -c '
set -o errexit
source spec/testdata/crash.sh
'
echo status=$?
python -m json.tool $TMP/*.json > /dev/null
echo status=$?
## STDOUT:
status=1
status=0
## END
# NOTE: strict-arith has one case in arith.test.sh), strict-word-eval has a case in var-op-other.
|
from fastapi import FastAPI

import edgedb

app = FastAPI()


@app.on_event("startup")
async def startup() -> None:
    """Create the EdgeDB pool and ensure the schema exists.

    Fix: the original used ``await`` at module import time, which is a
    SyntaxError outside an async function; the connection setup now runs
    in FastAPI's startup hook, so ``app.state.db`` is available before
    any request is served.
    """
    # NOTE(review): get_settings() is not defined in this module —
    # presumably imported elsewhere; confirm its origin.
    settings = get_settings()
    # Connect to EdgeDB
    app.state.db = await edgedb.create_async_pool(settings.edgedb_dsn)
    # Define the Account entity in the EdgeDB schema
    async with app.state.db.acquire() as conn:
        await conn.execute("""
            CREATE SCALAR TYPE balance_t EXTENDING decimal {
                CREATE CONSTRAINT exclusive;
            };
            CREATE TYPE Account {
                required property name -> str;
                required property balance -> balance_t;
            };
        """)
# Create account endpoint
@app.post("/create_account")
async def create_account(name: str, initial_balance: float):
async with app.state.db.acquire() as conn:
account = await conn.query_single("""
INSERT Account {
name := <str>$name,
balance := <balance_t>$initial_balance
};
""", name=name, initial_balance=initial_balance)
return account
# Deposit endpoint
@app.post("/deposit/{account_id}")
async def deposit(account_id: str, amount: float):
async with app.state.db.acquire() as conn:
await conn.execute("""
UPDATE Account
FILTER .id = <uuid>$account_id
SET {
balance := .balance + <balance_t>$amount
};
""", account_id=account_id, amount=amount)
return {"message": "Deposit successful"}
# Withdraw endpoint
@app.post("/withdraw/{account_id}")
async def withdraw(account_id: str, amount: float):
async with app.state.db.acquire() as conn:
account = await conn.query_single("""
SELECT Account {
balance
}
FILTER .id = <uuid>$account_id;
""", account_id=account_id)
if account.balance >= amount:
await conn.execute("""
UPDATE Account
FILTER .id = <uuid>$account_id
SET {
balance := .balance - <balance_t>$amount
};
""", account_id=account_id, amount=amount)
return {"message": "Withdrawal successful"}
else:
return {"message": "Insufficient funds"}
# Balance endpoint
@app.get("/balance/{account_id}")
async def get_balance(account_id: str):
async with app.state.db.acquire() as conn:
account = await conn.query_single("""
SELECT Account {
balance
}
FILTER .id = <uuid>$account_id;
""", account_id=account_id)
return {"balance": account.balance}
|
from rest_framework.serializers import ModelSerializer
from .models import Operation


class OperationSerializer(ModelSerializer):
    """DRF serializer exposing the core fields of an Operation record."""

    class Meta:
        # Fields are serialized exactly as declared on the model.
        model = Operation
        fields = ('id', 'direction', 'amount', 'fee', 'sender', 'receiver', 'payment', 'account',)
|
"""Provides unit tests to verify that the graph merging algorithm is functioning correctly."""
import unittest
import copy
from ..pygraph import UndirectedGraph, merge_graphs, build_triangle_graph
from . import utility_functions
class MergeGraphsTest(unittest.TestCase):
    """Unit tests for ``merge_graphs``: merging empty, simple and complex
    graphs, and verifying the returned node/edge id mappings and the
    topology of the combined graph."""

    def test_empty_graphs(self):
        """Does the ''merge_graphs'' function return an empty graph when given empty graphs?"""
        main_graph = UndirectedGraph()
        addition_graph = UndirectedGraph()
        node_map, edge_map = merge_graphs(main_graph, addition_graph)
        # --We expect no mapping whatsoever
        self.assertEqual({}, node_map)
        self.assertEqual({}, edge_map)
        # --We expect no nodes or edges in the main graph
        self.assertEqual(0, len(main_graph.nodes))
        self.assertEqual(0, len(main_graph.edges))

    def test_empty_addition_graph(self):
        """Does the ''merge_graphs'' function return a duplicate of the main
        graph when given an empty addition graph?"""
        original_graph = utility_functions.build_simple_test_graph()
        # deepcopy so assertions can compare against the pristine original
        main_graph = copy.deepcopy(original_graph)
        addition_graph = UndirectedGraph()
        node_map, edge_map = merge_graphs(main_graph, addition_graph)
        # --We expect no mapping whatsoever
        self.assertEqual({}, node_map)
        self.assertEqual({}, edge_map)
        # --There should be the same number of nodes
        self.assertEqual(len(original_graph.get_all_node_ids()), len(main_graph.get_all_node_ids()))
        # --There should be the same number of edges
        self.assertEqual(len(original_graph.get_all_edge_ids()), len(main_graph.get_all_edge_ids()))
        # --All the nodes should match
        for node_id in original_graph.get_all_node_ids():
            original_node = original_graph.get_node(node_id)
            new_node = main_graph.get_node(node_id)
            # Verify each node has the proper number of edges
            self.assertEqual(len(original_node['edges']), len(new_node['edges']))
            # Verify each node has the right edges
            for edge_id in original_node['edges']:
                self.assertIn(edge_id, new_node['edges'])
        for edge_id in original_graph.get_all_edge_ids():
            original_edge = original_graph.get_edge(edge_id)
            new_edge = main_graph.get_edge(edge_id)
            # Verify each edge has the correct targets
            self.assertEqual(original_edge['vertices'], new_edge['vertices'])

    def test_empty_main_graph(self):
        """Does the ''merge_graphs'' function return a duplicate of the addition
        graph when given an empty main graph?"""
        original_graph = utility_functions.build_simple_test_graph()
        main_graph = UndirectedGraph()
        addition_graph = copy.deepcopy(original_graph)
        node_map, edge_map = merge_graphs(main_graph, addition_graph)
        # --We expect a 1-1 identity mapping
        expected_node_map = dict([(node_id, node_id) for node_id in original_graph.get_all_node_ids()])
        expected_edge_map = dict([(edge_id, edge_id) for edge_id in original_graph.get_all_edge_ids()])
        self.assertEqual(expected_node_map, node_map)
        self.assertEqual(expected_edge_map, edge_map)
        # --There should be the same number of nodes
        self.assertEqual(len(original_graph.get_all_node_ids()), len(main_graph.get_all_node_ids()))
        # --There should be the same number of edges
        self.assertEqual(len(original_graph.get_all_edge_ids()), len(main_graph.get_all_edge_ids()))
        # --All the nodes should match
        for node_id in original_graph.get_all_node_ids():
            original_node = original_graph.get_node(node_id)
            new_node = main_graph.get_node(node_id)
            # Verify each node has the proper number of edges
            self.assertEqual(len(original_node['edges']), len(new_node['edges']))
            # Verify each node has the right edges
            for edge_id in original_node['edges']:
                self.assertIn(edge_id, new_node['edges'])
        for edge_id in original_graph.get_all_edge_ids():
            original_edge = original_graph.get_edge(edge_id)
            new_edge = main_graph.get_edge(edge_id)
            # Verify each edge has the correct targets
            self.assertEqual(original_edge['vertices'], new_edge['vertices'])

    def test_simple_graph_copy(self):
        """Does the ''merge_graphs'' function produce a combined graph that maintains
        the correct topology for both components?"""
        original_graph = utility_functions.build_2_node_graph()
        addition_graph = build_triangle_graph()
        self.graph_copy_integrity_checker(original_graph, addition_graph)

    def test_complex_graph_copy(self):
        """Does the ''merge_graphs'' function produce a combined graph that maintains
        the correct topology for both components?"""
        original_graph = utility_functions.build_fully_biconnected_test_graph()
        addition_graph = build_triangle_graph()
        self.graph_copy_integrity_checker(original_graph, addition_graph)

    def graph_copy_integrity_checker(self, original_graph, addition_graph):
        """Utility function to test the integrity of a graph copy."""
        main_graph = copy.deepcopy(original_graph)
        node_map, edge_map = merge_graphs(main_graph, addition_graph)
        # --Verify that the updated graph has all the nodes and edges from both graphs
        expected_node_count = len(original_graph.get_all_node_ids()) + len(addition_graph.get_all_node_ids())
        expected_edge_count = len(original_graph.get_all_edge_ids()) + len(addition_graph.get_all_edge_ids())
        self.assertEqual(expected_node_count, len(main_graph.get_all_node_ids()))
        self.assertEqual(expected_edge_count, len(main_graph.get_all_edge_ids()))
        # --Verify that the original graph nodes and edges are still in-place
        for node_id in original_graph.get_all_node_ids():
            original_node = original_graph.get_node(node_id)
            new_node = main_graph.get_node(node_id)
            # Verify each node has the proper number of edges
            self.assertEqual(len(original_node['edges']), len(new_node['edges']))
            # Verify each node has the right edges
            for edge_id in original_node['edges']:
                self.assertIn(edge_id, new_node['edges'])
        for edge_id in original_graph.get_all_edge_ids():
            original_edge = original_graph.get_edge(edge_id)
            new_edge = main_graph.get_edge(edge_id)
            # Verify each edge has the correct targets
            self.assertEqual(original_edge['vertices'], new_edge['vertices'])
        # --Verify that the new nodes and edges exist and have the correct topology
        # (addition-graph ids are translated through node_map/edge_map)
        for node_id in addition_graph.get_all_node_ids():
            original_node = addition_graph.get_node(node_id)
            new_node = main_graph.get_node(node_map[node_id])
            # Verify each node has the proper number of edges
            self.assertEqual(len(original_node['edges']), len(new_node['edges']))
            # Verify each node has the right edges
            for edge_id in original_node['edges']:
                self.assertIn(edge_map[edge_id], new_node['edges'])
        for edge_id in addition_graph.get_all_edge_ids():
            original_edge = addition_graph.get_edge(edge_id)
            new_edge = main_graph.get_edge(edge_map[edge_id])
            # Verify each edge has the correct targets
            original_vertex_a, original_vertex_b = original_edge['vertices']
            mapped_new_vertices = (node_map[original_vertex_a], node_map[original_vertex_b])
            self.assertEqual(mapped_new_vertices, new_edge['vertices'])
|
def is_anagram(str1, str2):
    """Return True when ``str2`` is an anagram of ``str1``.

    Two strings are anagrams when they contain exactly the same characters
    with the same multiplicities (case-sensitive).

    Args:
        str1: first string.
        str2: second string.

    Returns:
        bool: True if the strings are anagrams of each other.
    """
    from collections import Counter
    # Counter equality compares the character multisets directly, which
    # subsumes the original length check and both frequency-count passes.
    return Counter(str1) == Counter(str2)
|
package search;
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.StringTokenizer;
/**
 *
 * @author minchoba
 * Baekjoon 15805: "Tree country tour guide" — reconstructs a tree's
 * parent array from the given visiting order of node labels.
 *
 * @see https://www.acmicpc.net/problem/15805/
 *
 */
public class Boj15805 {
	private static final String NEW_LINE = "\n";
	private static final String SPACE = " ";

	// Pair of (parent label p, child label c) recorded while scanning the
	// visiting order.
	private static class Node{
		int p;
		int c;

		public Node(int p, int c) {
			this.p = p;
			this.c = c;
		}
	}

	public static void main(String[] args) throws Exception{
		BufferedReader br = new BufferedReader(new InputStreamReader(System.in));

		int N = Integer.parseInt(br.readLine());
		StringTokenizer st = new StringTokenizer(br.readLine());

		int[] path = new int[N];
		// Read the visiting order of node labels.
		for(int i = 0; i < N; i++) {
			path[i] = Integer.parseInt(st.nextToken());
		}

		System.out.println(makeMap(N, path));
	}

	/**
	 * Builds the output: the number of distinct nodes followed by each
	 * node's parent label (root's parent is -1), derived from the visiting
	 * order in {@code arr}.
	 */
	private static StringBuilder makeMap(int n, int[] arr) {
		StringBuilder sb = new StringBuilder();
		ArrayList<Node> parent = new ArrayList<>();
		boolean[] visit = new boolean[n];

		// The first visited node is the root; its parent is -1.
		parent.add(new Node(-1, arr[0]));
		visit[arr[0]] = true;

		for(int i = 1; i < n; i++) {
			if(visit[arr[i]]) continue; // an already-visited node may still become some later node's parent
			visit[arr[i]] = true;
			parent.add(new Node(arr[i - 1], arr[i])); // the previously seen label is the current node's parent
		}

		int[] result = new int[parent.size()];
		sb.append(result.length).append(NEW_LINE);

		for(Node node: parent) { // store each node's parent label, indexed by the node's own label
			result[node.c] = node.p;
		}

		for(int i = 0; i < result.length; i++) {
			sb.append(result[i]).append(SPACE);
		}
		return sb;
	}
}
|
// Log how many elements the sample array contains.
const array = [3, 5, 6, 7, 2, 1];
const count = array.length;
console.log(count);
|
#ifndef __CUSTOM_FILE_REQUEST_H__
#define __CUSTOM_FILE_REQUEST_H__
#include "cocos2d.h"
#include "network/HttpRequest.h"
#include <string>
#include <vector>
class FileRequestListener;
// Wrapper around a cocos2d-x HttpRequest that fans results out to a set of
// FileRequestListener observers.
class CustomFileRequest
{
private:
    cocos2d::network::HttpRequest *request;      // underlying cocos2d-x HTTP request
    std::vector<FileRequestListener*> listeners; // observers registered for this request
public:
    CustomFileRequest();
    ~CustomFileRequest();
    // Returns the wrapped cocos2d-x request.
    cocos2d::network::HttpRequest* getRequest() const;
    // Fluent setters: each returns *this so configuration calls can chain.
    CustomFileRequest &addFileRequestListener(FileRequestListener *listener);
    CustomFileRequest &setUrl(const std::string &url);
    CustomFileRequest &setRequestType(cocos2d::network::HttpRequest::Type type);
    // Sends the request as configured, or after first setting the URL.
    void toSend();
    void toSend(const std::string &url);
};
#endif//__CUSTOM_FILE_REQUEST_H__
|
#!/bin/bash
# Builds and publishes the application's docker images to an Azure
# Container Registry (ACR) using 'az acr build'. Optionally limits the
# build to a comma-separated subset via --services.
# Color theming
if [ -f ~/clouddrive/aspnet-learn/setup/theme.sh ]
then
  . <(cat ~/clouddrive/aspnet-learn/setup/theme.sh)
fi
echo
echo "Building images to ACR"
echo "======================"
# Load ESHOP_* environment variables saved by the ACR creation step.
if [ -f ~/clouddrive/aspnet-learn/create-acr-exports.txt ]
then
  eval $(cat ~/clouddrive/aspnet-learn/create-acr-exports.txt)
fi
pushd ~/clouddrive/aspnet-learn/src/deploy/k8s > /dev/null
if [ -z "$ESHOP_REGISTRY" ] || [ -z "$ESHOP_ACRNAME" ]
then
  echo "One or more required environment variables are missing:"
  echo "- ESHOP_REGISTRY.: $ESHOP_REGISTRY"
  echo "- ESHOP_ACRNAME..: $ESHOP_ACRNAME"
  exit 1
fi
# Parse command-line options (--services svc1,svc2).
while [ "$1" != "" ]; do
  case $1 in
    --services) shift
      services=$1
      ;;
    * ) echo "Invalid param: $1"
      exit 1
  esac
  shift
done
echo
echo "Building and publishing docker images to $ESHOP_REGISTRY"
# This is the list of {service}:{image}>{dockerfile} of the application
appServices=$(cat ./build-to-acr.services)
# Default to every service when --services was not given.
if [ -z "$services" ]
then
  serviceList=$(echo "${appServices}" | sed -e 's/:.*//')
else
  serviceList=${services//,/ }
fi
pushd ../.. > /dev/null
for service in $serviceList
do
  # Split the "{service}:{image}>{dockerfile}" line on ':' and '>'.
  line=$(echo "${appServices}" | grep "$service:")
  tokens=(${line//[:>]/ })
  service=${tokens[0]}
  image=${tokens[1]}
  dockerfile=${tokens[2]}
  echo
  echo "Building image \"$image\" for service \"$service\" with \"$dockerfile.acr\"..."
  serviceCmd="az acr build -r $ESHOP_ACRNAME -t $ESHOP_REGISTRY/$image:linux-latest -f $dockerfile.acr ."
  echo "${newline} > ${azCliCommandStyle}$serviceCmd${defaultTextStyle}${newline}"
  eval $serviceCmd
done
popd > /dev/null
popd > /dev/null
|
import sbClient from "~/lib/supabase";
import type { Page } from "@linkto/core";
import type { GetStaticPaths } from "next";
/**
 * Builds the static-generation path list: one entry per subdomain plus
 * one per custom domain.
 *
 * Fix: the subdomain query now excludes NULL values — mirroring the
 * custom-domain query below — so pages without a subdomain cannot inject
 * a null `page` param into the path list.
 */
export const getStaticPaths: GetStaticPaths = async () => {
  // get all sites that have subdomains set up
  const subdomains = await sbClient
    .from<Page>("pages")
    .select("subdomain")
    .not("subdomain", "is", null);
  // get all sites that have custom domains set up
  const customDomains = await sbClient
    .from<Page>("pages")
    .select("custom_domain")
    .not("custom_domain", "is", null);
  const paths = [
    ...(subdomains.data || []).map((p) => {
      return p.subdomain;
    }),
    ...(customDomains.data || []).map((p) => {
      return p.custom_domain;
    }),
  ];
  return {
    paths: paths.map((path) => {
      return { params: { page: path } };
    }),
    // fallback: true lets pages not rendered at build time be generated
    // on first request.
    fallback: true,
  };
};
|
from django.contrib.auth.models import User
from django.db import models
class UserProfile(models.Model):
    """Profile linking a unique email address to an optional auth User.

    The ``user`` link stays empty until the registration is confirmed
    (see ``__str__``).
    """

    # Fix: on_delete is required on relational fields since Django 2.0.
    # SET_NULL (valid because null=True) keeps the profile row when the
    # linked auth User is deleted.
    user = models.OneToOneField(User, blank=True, null=True, on_delete=models.SET_NULL)
    email = models.EmailField(blank=False, unique=True)

    def __str__(self):
        # Confirmed profiles show the user's name; unconfirmed ones show
        # a placeholder with the email address.
        if self.user:
            return self.user.get_full_name() + ' (' + self.user.get_username() + ')'
        else:
            return "Awaiting Confirmation" + ' (' + self.email + ')'
|
# -*- coding: utf-8 -*-
"""
Azure Resource Manager (ARM) PostgreSQL Server Operations Execution Module
.. versionadded:: 2.0.0
.. versionchanged:: 4.0.0
:maintainer: <<EMAIL>>
:configuration: This module requires Azure Resource Manager credentials to be passed as keyword arguments
to every function or via acct in order to work properly.
Required provider parameters:
if using username and password:
* ``subscription_id``
* ``username``
* ``password``
if using a service principal:
* ``subscription_id``
* ``tenant``
* ``client_id``
* ``secret``
Optional provider parameters:
**cloud_environment**: Used to point the cloud driver to different API endpoints, such as Azure GovCloud.
Possible values:
* ``AZURE_PUBLIC_CLOUD`` (default)
* ``AZURE_CHINA_CLOUD``
* ``AZURE_US_GOV_CLOUD``
* ``AZURE_GERMAN_CLOUD``
"""
# Python libs
from __future__ import absolute_import
import logging
import datetime
# Azure libs
HAS_LIBS = False
try:
import azure.mgmt.rdbms.postgresql.models # pylint: disable=unused-import
from msrestazure.azure_exceptions import CloudError
from msrest.exceptions import ValidationError
HAS_LIBS = True
except ImportError:
pass
__func_alias__ = {"list_": "list"}
log = logging.getLogger(__name__)
async def create(
    hub,
    ctx,
    name,
    resource_group,
    location,
    sku=None,
    version=None,
    ssl_enforcement=None,
    minimal_tls_version=None,
    infrastructure_encryption=None,
    public_network_access=None,
    storage_profile=None,
    login=None,
    login_password=None,
    tags=None,
    **kwargs,
):
    """
    .. versionadded:: 2.0.0

    .. versionchanged:: 4.0.0

    Creates a new server, or will overwrite an existing server.

    :param name: The name of the server.

    :param resource_group: The name of the resource group. The name is case insensitive.

    :param location: The location the resource resides in.

    :param sku: The name of the SKU (pricing tier) of the server. Typically, the name of the sku is in the form
        tier_family_cores, e.g. B_Gen4_1, GP_Gen5_8.

    :param version: Server version. Possible values include: "9.5", "9.6", "10", "10.0", "10.2", "11".

    :param ssl_enforcement: Enable ssl enforcement or not when connect to server. Possible values include: "Enabled",
        "Disabled".

    :param minimal_tls_version: Enforce a minimal tls version for the server. Possible values include: "TLS1_0",
        "TLS1_1", "TLS1_2", "TLSEnforcementDisabled".

    :param infrastructure_encryption: Status showing whether the server enabled infrastructure encryption. Possible
        values include: "Enabled", "Disabled".

    :param public_network_access: Whether or not public network access is allowed for this server. Possible values
        include: "Enabled", "Disabled".

    :param storage_profile: A dictionary representing the storage profile of a server. Parameters include:

        - ``backup_retention_days``: Backup retention days for the server.
        - ``geo_redundant_backup``: Enable Geo-redundant or not for server backup. Possible values include:
          'Enabled', 'Disabled'.
        - ``storage_mb``: Max storage allowed for a server.
        - ``storage_autogrow``: Enable Storage Auto Grow. Possible values include: 'Enabled', 'Disabled'.

    :param login: The administrator's login name of a server. This value is immutable once set.

    :param login_password: The password of the administrator login.

    :param tags: Application-specific metadata in the form of key-value pairs.

    CLI Example:

    .. code-block:: bash

        azurerm.postgresql.server.create test_name test_group test_location test_sku

    """
    result = {}
    postconn = await hub.exec.azurerm.utils.get_client(ctx, "postgresql", **kwargs)

    # Accept a bare SKU name string as a convenience.
    if sku and not isinstance(sku, dict):
        sku = {"name": sku}

    try:
        # Fix: pass the login_password parameter — the original body had a
        # "<PASSWORD>" redaction artifact here, which is a syntax error.
        propsmodel = await hub.exec.azurerm.utils.create_object_model(
            "rdbms.postgresql",
            "ServerPropertiesForDefaultCreate",
            version=version,
            ssl_enforcement=ssl_enforcement,
            storage_profile=storage_profile,
            minimal_tls_version=minimal_tls_version,
            infrastructure_encryption=infrastructure_encryption,
            public_network_access=public_network_access,
            administrator_login=login,
            administrator_login_password=login_password,
            **kwargs,
        )
    except TypeError as exc:
        result = {
            "error": "The object model could not be built. ({0})".format(str(exc))
        }
        return result

    try:
        servermodel = await hub.exec.azurerm.utils.create_object_model(
            "rdbms.postgresql",
            "ServerForCreate",
            sku=sku,
            location=location,
            properties=propsmodel,
            tags=tags,
        )
    except TypeError as exc:
        result = {
            "error": "The object model could not be built. ({0})".format(str(exc))
        }
        return result

    try:
        # servers.create returns a long-running-operation poller.
        server = postconn.servers.create(
            server_name=name, resource_group_name=resource_group, parameters=servermodel
        )
        server.wait()
        result = server.result().as_dict()
    except CloudError as exc:
        await hub.exec.azurerm.utils.log_cloud_error("postgresql", str(exc), **kwargs)
        result = {"error": str(exc)}
    except ValidationError as exc:
        result = {"error": str(exc)}

    return result
async def delete(hub, ctx, name, resource_group, **kwargs):
    """
    .. versionadded:: 2.0.0

    Deletes a server.

    :param name: The name of the server.

    :param resource_group: The name of the resource group. The name is case insensitive.

    CLI Example:

    .. code-block:: bash

        azurerm.postgresql.server.delete test_name test_group

    """
    postconn = await hub.exec.azurerm.utils.get_client(ctx, "postgresql", **kwargs)

    try:
        # servers.delete returns a long-running-operation poller.
        poller = postconn.servers.delete(
            server_name=name, resource_group_name=resource_group,
        )
        poller.wait()
        return True
    except CloudError as exc:
        await hub.exec.azurerm.utils.log_cloud_error("postgresql", str(exc), **kwargs)
        return {"error": str(exc)}
async def get(hub, ctx, name, resource_group, **kwargs):
    """
    .. versionadded:: 2.0.0

    Gets information about a server.

    :param name: The name of the server.

    :param resource_group: The name of the resource group. The name is case insensitive.

    CLI Example:

    .. code-block:: bash

        azurerm.postgresql.server.get test_name test_group

    """
    postconn = await hub.exec.azurerm.utils.get_client(ctx, "postgresql", **kwargs)

    try:
        server = postconn.servers.get(
            server_name=name, resource_group_name=resource_group,
        )
        return server.as_dict()
    except CloudError as exc:
        await hub.exec.azurerm.utils.log_cloud_error("postgresql", str(exc), **kwargs)
        return {"error": str(exc)}
async def list_(hub, ctx, resource_group=None, **kwargs):
    """
    .. versionadded:: 2.0.0

    .. versionchanged:: 4.0.0

    List all the servers in a given subscription.

    :param resource_group: The name of the resource group to limit the results.

    CLI Example:

    .. code-block:: bash

        azurerm.postgresql.server.list

    """
    result = {}
    postconn = await hub.exec.azurerm.utils.get_client(ctx, "postgresql", **kwargs)

    try:
        # Scope to the resource group when one was given, otherwise list
        # everything in the subscription.
        if resource_group:
            pager = postconn.servers.list_by_resource_group(
                resource_group_name=resource_group
            )
        else:
            pager = postconn.servers.list()
        servers = await hub.exec.azurerm.utils.paged_object_to_list(pager)
        result = {server["name"]: server for server in servers}
    except CloudError as exc:
        await hub.exec.azurerm.utils.log_cloud_error("postgresql", str(exc), **kwargs)
        result = {"error": str(exc)}

    return result
async def restart(hub, ctx, name, resource_group, **kwargs):
    """
    .. versionadded:: 2.0.0

    Restarts a server.

    :param name: The name of the server.

    :param resource_group: The name of the resource group. The name is case insensitive.

    CLI Example:

    .. code-block:: bash

        azurerm.postgresql.server.restart test_name test_group

    """
    postconn = await hub.exec.azurerm.utils.get_client(ctx, "postgresql", **kwargs)

    try:
        # servers.restart returns a long-running-operation poller.
        poller = postconn.servers.restart(
            server_name=name, resource_group_name=resource_group,
        )
        poller.wait()
        return True
    except CloudError as exc:
        await hub.exec.azurerm.utils.log_cloud_error("postgresql", str(exc), **kwargs)
        return {"error": str(exc)}
async def update(
    hub,
    ctx,
    name,
    resource_group,
    sku=None,
    version=None,
    ssl_enforcement=None,
    minimal_tls_version=None,
    infrastructure_encryption=None,
    public_network_access=None,
    storage_profile=None,
    login_password=None,
    tags=None,
    **kwargs,
):
    """
    .. versionadded:: 2.0.0

    .. versionchanged:: 4.0.0

    Creates a new server, or will overwrite an existing server.

    :param name: The name of the server.

    :param resource_group: The name of the resource group. The name is case insensitive.

    :param sku: The name of the SKU (pricing tier) of the server. The name of the sku is in the form tier_family_cores,
        e.g. B_Gen4_1, GP_Gen5_8.

    :param version: Server version. Possible values include: "9.5", "9.6", "10", "10.0", "10.2", "11".

    :param ssl_enforcement: Enable ssl enforcement or not when connect to server. Possible values include: "Enabled",
        "Disabled".

    :param minimal_tls_version: Enforce a minimal tls version for the server. Possible values include: "TLS1_0",
        "TLS1_1", "TLS1_2", "TLSEnforcementDisabled".

    :param infrastructure_encryption: Status showing whether the server enabled infrastructure encryption. Possible
        values include: "Enabled", "Disabled".

    :param public_network_access: Whether or not public network access is allowed for this server. Possible values
        include: "Enabled", "Disabled".

    :param storage_profile: A dictionary representing the storage profile of a server. Parameters include:
        - ``backup_retention_days``: Backup retention days for the server.
        - ``geo_redundant_backup``: Enable Geo-redundant or not for server backup. Possible values include:
          'Enabled', 'Disabled'.
        - ``storage_mb``: Max storage allowed for a server.
        - ``storage_autogrow``: Enable Storage Auto Grow. Possible values include: 'Enabled', 'Disabled'.

    :param login_password: The password of the administrator login.

    :param tags: Application-specific metadata in the form of key-value pairs.

    CLI Example:

    .. code-block:: bash

        azurerm.postgresql.server.update test_name test_group test_updated_params

    """
    result = {}
    postconn = await hub.exec.azurerm.utils.get_client(ctx, "postgresql", **kwargs)

    # Allow the SKU to be passed as a plain name string.
    if sku and not isinstance(sku, dict):
        sku = {"name": sku}

    try:
        paramsmodel = await hub.exec.azurerm.utils.create_object_model(
            "rdbms.postgresql",
            "ServerUpdateParameters",
            sku=sku,
            version=version,
            ssl_enforcement=ssl_enforcement,
            minimal_tls_version=minimal_tls_version,
            infrastructure_encryption=infrastructure_encryption,
            public_network_access=public_network_access,
            storage_profile=storage_profile,
            # BUG FIX: this argument previously contained a "<PASSWORD>"
            # placeholder (invalid syntax); forward the login_password
            # parameter to the SDK model.
            administrator_login_password=login_password,
            tags=tags,
            **kwargs,
        )
    except TypeError as exc:
        result = {
            "error": "The object model could not be built. ({0})".format(str(exc))
        }
        return result

    try:
        # servers.update returns a long-running-operation poller; wait for
        # completion and return the updated server as a plain dict.
        server = postconn.servers.update(
            server_name=name, resource_group_name=resource_group, parameters=paramsmodel
        )
        server.wait()
        result = server.result().as_dict()
    except CloudError as exc:
        await hub.exec.azurerm.utils.log_cloud_error("postgresql", str(exc), **kwargs)
        result = {"error": str(exc)}
    return result
|
import React from "react";
import Cesium, { Viewer as CesiumViewer } from "cesium";
import createCesiumComponent, { EventkeyMap } from "./core/CesiumComponent";
import EventManager from "./core/EventManager";
// Cesium Viewer properties that can be updated after construction; they are
// forwarded to the Viewer instance whenever the React props change.
export interface ViewerCesiumProps {
  terrainProvider?: Cesium.TerrainProvider;
  terrainShadows?: Cesium.ShadowMode;
  clockTrackedDataSource?: Cesium.DataSource;
  targetFrameRate?: number;
  useDefaultRenderLoop?: boolean;
  resolutionScale?: number;
  allowDataSourcesToSuspendAnimation?: boolean;
  trackedEntity?: Cesium.Entity;
  selectedEntity?: Cesium.Entity;
  shadows?: boolean;
}
// Cesium Viewer constructor options that cannot be changed after the Viewer
// has been created (read-only once constructed).
export interface ViewerCesiumReadonlyProps {
  animation?: boolean;
  baseLayerPicker?: boolean;
  fullscreenButton?: boolean;
  vrButton?: boolean;
  geocoder?: boolean;
  homeButton?: boolean;
  infoBox?: boolean;
  sceneModePicker?: boolean;
  selectionIndicator?: boolean;
  timeline?: boolean;
  navigationHelpButton?: boolean;
  navigationInstructionsInitiallyVisible?: boolean;
  scene3DOnly?: boolean;
  shouldAnimate?: boolean;
  clockViewModel?: Cesium.ClockViewModel;
  selectedImageryProviderViewModel?: Cesium.ProviderViewModel;
  imageryProviderViewModels?: Cesium.ProviderViewModel[];
  selectedTerrainProviderViewModel?: Cesium.ProviderViewModel;
  terrainProviderViewModels?: Cesium.ProviderViewModel[];
  imageryProvider?: Cesium.ImageryProvider;
  skyBox?: Cesium.SkyBox;
  skyAtmosphere?: Cesium.SkyAtmosphere;
  fullscreenElement?: Element | string;
  showRenderLoopErrors?: boolean;
  automaticallyTrackDataSourceClocks?: boolean;
  contextOptions?: any;
  sceneMode?: Cesium.SceneMode;
  mapProjection?: Cesium.MapProjection;
  globe?: Cesium.Globe;
  orderIndependentTranslucency?: boolean;
  creditContainer?: Element | string;
  creditViewport?: Element | string;
  dataSources?: Cesium.DataSourceCollection;
  terrainExaggeration?: number;
  mapMode2D?: Cesium.MapMode2D;
  projectionPicker?: boolean;
  requestRenderMode?: boolean;
  maximumRenderTimeChange?: number;
}
// React-style event handler props, mapped to the Viewer's Cesium events
// by the cesiumEventProps table below.
export interface ViewerCesiumEvents {
  onSelectedEntityChange?: () => void;
  onTrackedEntityChange?: () => void;
}
// Mutable Viewer properties (see ViewerCesiumProps).
const cesiumProps: Array<keyof ViewerCesiumProps> = [
  "terrainProvider",
  "terrainShadows",
  "clockTrackedDataSource",
  "targetFrameRate",
  "useDefaultRenderLoop",
  "resolutionScale",
  "allowDataSourcesToSuspendAnimation",
  "trackedEntity",
  "selectedEntity",
  "shadows",
];

// Construction-time-only Viewer options (see ViewerCesiumReadonlyProps).
const cesiumReadonlyProps: Array<keyof ViewerCesiumReadonlyProps> = [
  "animation",
  "baseLayerPicker",
  "fullscreenButton",
  "vrButton",
  "geocoder",
  "homeButton",
  "infoBox",
  "sceneModePicker",
  "selectionIndicator",
  "timeline",
  "navigationHelpButton",
  "navigationInstructionsInitiallyVisible",
  "scene3DOnly",
  "shouldAnimate",
  "clockViewModel",
  "selectedImageryProviderViewModel",
  "imageryProviderViewModels",
  "selectedTerrainProviderViewModel",
  "terrainProviderViewModels",
  "imageryProvider",
  "skyBox",
  "skyAtmosphere",
  "fullscreenElement",
  "showRenderLoopErrors",
  "automaticallyTrackDataSourceClocks",
  "contextOptions",
  "sceneMode",
  "mapProjection",
  "globe",
  "orderIndependentTranslucency",
  "creditContainer",
  "creditViewport",
  "dataSources",
  "terrainExaggeration",
  "mapMode2D",
  "projectionPicker",
  "requestRenderMode",
  "maximumRenderTimeChange",
];

// Maps Cesium event names on the Viewer to the React handler prop names.
const cesiumEventProps: EventkeyMap<CesiumViewer, keyof ViewerCesiumEvents> = {
  selectedEntityChanged: "onSelectedEntityChange",
  trackedEntityChanged: "onTrackedEntityChange",
};
// Public props of the Viewer React component: all Cesium props plus
// container/styling props and optional viewer mixins.
export interface ViewerProps
  extends ViewerCesiumProps,
    ViewerCesiumReadonlyProps,
    ViewerCesiumEvents {
  className?: string;
  id?: string;
  style?: React.CSSProperties;
  // When true, the container div is absolutely positioned to fill its parent.
  full?: boolean;
  containerProps?: any;
  // One or more Cesium viewer mixins applied right after construction.
  extend?: CesiumViewer.ViewerMixin[] | CesiumViewer.ViewerMixin;
  children?: React.ReactNode;
}
// Context object exposed to descendant components (see provide() below).
export interface ViewerContext {
  viewer: CesiumViewer;
  cesiumWidget: Cesium.CesiumWidget;
  dataSourceCollection: Cesium.DataSourceCollection;
  entityCollection: Cesium.EntityCollection;
  scene: Cesium.Scene;
  camera: Cesium.Camera;
}
// Resium Viewer component: creates a Cesium Viewer attached to the rendered
// <div>, applies any user-supplied mixins, and shares the viewer's
// sub-objects with child components through context.
const Viewer = createCesiumComponent<
  CesiumViewer,
  ViewerProps,
  {},
  ViewerContext | {},
  HTMLDivElement
>({
  name: "Viewer",
  createRef: true,
  create(cprops, props, context, ref) {
    // ref is not always undefined
    const v = new CesiumViewer((ref as React.RefObject<HTMLDivElement>).current as any, cprops);
    // Apply user-supplied viewer mixins (a single mixin or an array of them).
    if (v && props.extend) {
      if (Array.isArray(props.extend)) {
        props.extend.forEach(e => {
          v.extend(e, {});
        });
      } else {
        v.extend(props.extend, {});
      }
    }
    // common event manager for managing events of Entity and Primitives
    let state: any;
    if (v) {
      state = new EventManager(v.scene, v.canvas);
    }
    // [cesium element, component state] — consumed by createCesiumComponent.
    return [v, state];
  },
  render(element, props, mounted, ref) {
    // Container div for the viewer; children render only once the Cesium
    // element exists. `full` stretches the div over its positioned ancestor.
    return (
      <div
        className={props.className}
        id={props.id}
        ref={ref}
        style={{
          ...(props.full
            ? {
                position: "absolute",
                bottom: "0",
                left: "0",
                right: "0",
                top: "0",
              }
            : {}),
          ...props.style,
        }}
        {...props.containerProps}>
        {element ? props.children : null}
      </div>
    );
  },
  unmount(element, cprops, props, ref, state) {
    // Destroy the event manager first, then the viewer itself, guarding
    // against double destruction in both cases.
    if (element && state) {
      const em = state as EventManager;
      if (!em.isDestroyed()) {
        em.destroy();
      }
    }
    if (element && !element.isDestroyed()) {
      element.destroy();
    }
  },
  provide(element, props, state) {
    if (!element) {
      return {};
    }
    // Context made available to descendant Resium components.
    return {
      viewer: element,
      cesiumWidget: element.cesiumWidget,
      dataSourceCollection: element.dataSources,
      entityCollection: element.entities,
      scene: element.scene,
      camera: element.scene.camera,
      imageryLayerCollection: element.scene.globe.imageryLayers,
      primitiveCollection: element.scene.primitives,
      __RESIUM_EVENT_MANAGER: state, // EventManager
    };
  },
  cesiumProps,
  cesiumReadonlyProps,
  cesiumEventProps,
});

export default Viewer;
|
#!/bin/bash
#
# Use this file to quickly change the app version.
# It will also tag, commit the change and push it.
#
# Usage: ./version.sh 1.2.0

# Require the version argument. BUG FIX: previously the script only printed
# the message and kept going, writing an empty version into the package files.
if [ -z "$1" ]
then
    echo "Version is required."
    exit 1
fi

# Replace version in package.json files
sed -i.bak "s/\"version\": \".*\"/\"version\": \"$1\"/g" ./package.json
sed -i.bak "s/\"version\": \".*\"/\"version\": \"$1\"/g" ./src/package.json
sed -i.bak "s/download\/v.*\/iPasuruhanKab/download\/v$1\/iPasuruhanKab/g" ./src/package.json

# Clean up sed's .bak backups
rm ./package.json.bak
rm ./src/package.json.bak

# Edit CHANGELOG interactively before committing
vim ./CHANGELOG

# Git commit and tag
# NOTE(review): the header says the script pushes, but no `git push` is run
# here — confirm whether a push step is missing.
git add .
git commit -m "New version v$1"
git tag -a "v$1" -m "v$1"

# TODO Paste all commits since the last tag into CHANGELOG
|
<reponame>KonstHardy/docusaurus
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
import {Joi} from '@docusaurus/utils-validation';
import type {ThemeConfig, Validate, ValidationResult} from '@docusaurus/types';
// Defaults applied by the Joi schema below when the site config omits a value.
export const DEFAULT_CONFIG = {
  contextualSearch: false, // future: maybe we want to enable this by default
  // By default, all Docusaurus sites are using the same AppId
  // This has been designed on purpose with Algolia.
  appId: 'BH4D9OD16A',
  searchParameters: {},
};
// Validation schema for themeConfig.algolia; apiKey and indexName are the
// only required fields, everything else falls back to DEFAULT_CONFIG.
export const Schema = Joi.object({
  algolia: Joi.object({
    // Docusaurus attributes
    contextualSearch: Joi.boolean().default(DEFAULT_CONFIG.contextualSearch),
    externalUrlRegex: Joi.string().optional(),
    // Algolia attributes
    appId: Joi.string().default(DEFAULT_CONFIG.appId),
    apiKey: Joi.string().required(),
    indexName: Joi.string().required(),
    searchParameters: Joi.object()
      .default(DEFAULT_CONFIG.searchParameters)
      .unknown(),
  })
    .label('themeConfig.algolia')
    .required()
    .unknown(), // DocSearch 3 is still alpha: don't validate the rest for now
});
/**
 * Validate the site's `themeConfig` against the Algolia theme schema.
 */
export function validateThemeConfig(options: {
  validate: Validate<ThemeConfig>;
  themeConfig: ThemeConfig;
}): ValidationResult<ThemeConfig> {
  const {validate, themeConfig} = options;
  return validate(Schema, themeConfig);
}
|
import { before, after } from 'mocha';
import * as sinon from 'sinon';
import * as cmr from 'util/cmr';
// Names of the `cmr` module members that tests are allowed to stub.
type CmrMethodName = 'cmrSearchBase' | 'fetchPost' | 'cmrPostSearchBase' | 'getCollectionsByIds' | 'getVariablesByIds' | 'getVariablesForCollection' | 'queryGranulesForCollection' | 'belongsToGroup' | 'cmrApiConfig';
/**
 * Replace a function in the `cmr` module with a stub that resolves to the
 * given response. This is needed because `replay` does not handle POSTs to
 * the CMR correctly.
 *
 * @param functionName - The name of the function to be stubbed
 * @param response - The response the function should return
 */
function stubCmr(functionName: CmrMethodName, response: object): void {
  sinon.stub(cmr, functionName).resolves(response);
}
/**
* Remove a stub from the `cmr` module
*
* @param functionName - The name of the function to reset
*/
function unStubCmr(functionName: string): void {
if (cmr[functionName].restore) cmr[functionName].restore();
}
/**
 * Adds before / after hooks in mocha that replace a function in the
 * `cmr` module with a function that generates the given response.
 *
 * Example: (`cmrPostSearchBase` returns a 400 status with error message)
 * ```
 * hookCmr('cmrPostSearchBase',
 *   { status: 400,
 *     data: { errors: ['Corrupt zip file'] }
 *   });
 * ```
 * @param functionName - The name of the function to stub
 * @param response - The desired response
 */
export default function hookCmr(functionName: CmrMethodName, response: object): void {
  before(() => {
    stubCmr(functionName, response);
  });
  after(() => {
    unStubCmr(functionName);
  });
}
|
#!/usr/bin/env bash
set -e

# Tag the current commit with the given version and push all tags.
# Usage: release.sh 0.0.n

# BUG FIX: quote "$1" — unquoted, the test word-splits (and an argument with
# spaces would break the [ ] expression).
if [ -z "$1" ]; then
  echo "usage: release.sh 0.0.n"
  exit 1
fi

git tag "$1"
git push --tags
|
<reponame>minwook94/seesoTest
// Development server: bundles the given entry with Parcel, serves it over
// Express, and opens a browser tab once listening.
// Usage: node <script> <bundlePath> <port>
const Bundler = require('parcel-bundler');
const express = require('express');
const http = require('http');
const open = require('open');

const app = express();
const bundlePath = process.argv[2];
const port = process.argv[3];

// COOP/COEP headers make every response cross-origin isolated
// (needed e.g. for SharedArrayBuffer in modern browsers).
app.use((req, res, next) => {
  res.setHeader('Cross-Origin-Embedder-Policy', 'require-corp');
  res.setHeader('Cross-Origin-Opener-Policy', 'same-origin');
  next()
})

// Serve the Parcel bundle through its Express middleware.
const bundler = new Bundler(bundlePath);
app.use(bundler.middleware());

const server = http.createServer(app);
server.listen(port);
server.on('error', (err) => console.error(err));
server.on('listening', () => {
  console.info('Server is running');
  console.info(`  NODE_ENV=[${process.env.NODE_ENV}]`);
  console.info(`  Port=[${port}]`);
  open(`http://localhost:${port}`);
});
|
#!/bin/bash
# Recreate the "ftm-test" Brownie network entry, pointing it at the
# Fantom Opera testnet RPC endpoint (chain id 4002).
brownie networks delete ftm-test
brownie networks add "Fantom Opera" ftm-test host='https://rpc.testnet.fantom.network' name='Testnet' chainid=4002 explorer='https://api-testnet.ftmscan.com/api'
|
<reponame>r-pai/logserver<gh_stars>10-100
var path = require('path');
var _ = require('lodash');
var webpack = require('webpack');
var HtmlWebpackPlugin = require('html-webpack-plugin');
var ProgressBarPlugin = require('progress-bar-webpack-plugin');
var AssetsPlugin = require('assets-webpack-plugin');
var common = require('./common')();
// Webpack config for the "ui-loader" bundle.
//dev specific loaders
var loaders = [{
        test: /\.js$/,
        include: [common.UI_LOADER_PATH],
        loaders: common.babelLoader,
    },
    {
        test: /\.js$/, // include .js files
        exclude: /node_modules|vendor/, // exclude any and all files in the node_modules folder
        use: [
            {
                loader: 'eslint-loader',
                options: {
                    failOnError: true,
                }
            }
        ],
        enforce: 'pre',
    }];

module.exports = {
    entry: {'ui-loader': common.UI_LOADER_PATH},
    output: {
        path: common.DIST_PATH + '/ui-loader',
        publicPath: common.outputUrl,
        // Content hash in the name busts caches on each build.
        filename: '[hash].[name].js',
    },
    module: {
        rules: loaders,
    },
    devtool: 'source-map',
    devServer: _.extend(common.devServer, {hot: false}),
    resolve: common.resolve.resolve,
    plugins: [
        new ProgressBarPlugin(),
        new webpack.DefinePlugin(common.definePlugin),
        // Keep only the English moment.js locale in the bundle.
        new webpack.ContextReplacementPlugin(/moment[\/\\]locale$/, /en/),
        new webpack.optimize.UglifyJsPlugin({
            mangle: false,
        }),
        new HtmlWebpackPlugin(_.extend({}, common.htmlWebpackPlugin, {
            hash: true,
            minify: {},
            chunks: ['ui-loader'],
        })),
        // Writes loader-manifest.json mapping "file" to the CDN URL of the
        // hashed ui-loader bundle.
        new AssetsPlugin({
            path: path.resolve(common.ROOT_PATH, 'dist'),
            filename: 'loader-manifest.json',
            processOutput: function (assets) {
                var result = {
                    file: '',
                };
                _.forEach(assets, function (asset, key) {
                    if (key === 'ui-loader') {
                        result.file = '//app.stratoscale.com/ui-loader' + asset.js;
                    }
                });
                return JSON.stringify(result);
            },
        }),
    ],
};
|
PREFIX_EFFECTIVE=$PREFIX

# Strip PREFIX down to the package level — needed when conda environments
# are used (prefix contains ".../envs/<name>"); a no-op otherwise.
# Rewritten idiomatically: the old form captured grep -q's (empty) output
# into an unused variable and tested $? by hand.
while echo "$PREFIX_EFFECTIVE" | grep -q envs; do
    PREFIX_EFFECTIVE=$(dirname "$PREFIX_EFFECTIVE")
done

# Paths inside the installed package.
# NOTE(review): the build string "py27_1" is hard-coded; it must match the
# actual conda build string or these paths will not exist — confirm.
INSTALL_DIR=$PREFIX_EFFECTIVE/pkgs/$PKG_NAME-$PKG_VERSION-py27_1/gromacs
INSTALL_BIN=$INSTALL_DIR/bin
INSTALL_SHARE=$INSTALL_DIR/share

# Point the GMXRC dispatcher at the installed shell-specific RC files.
sed -i "s|.*GMXRC.bash.*|. $INSTALL_BIN/GMXRC.bash|" "$INSTALL_BIN/GMXRC"
sed -i "s|.*GMXRC.csh.*|. $INSTALL_BIN/GMXRC.csh|" "$INSTALL_BIN/GMXRC"
sed -i "s|.*GMXRC.zsh.*|source $INSTALL_BIN/GMXRC.zsh|" "$INSTALL_BIN/GMXRC"

# Rewrite the GROMACS environment variables for each supported shell.
# BASH
sed -i "s|GMXBIN=.*|GMXBIN=$INSTALL_DIR/bin|" "$INSTALL_BIN/GMXRC.bash"
sed -i "s|GMXLDLIB=.*|GMXLDLIB=$INSTALL_DIR/lib|" "$INSTALL_BIN/GMXRC.bash"
sed -i "s|GMXMAN=.*|GMXMAN=$INSTALL_DIR/share/man|" "$INSTALL_BIN/GMXRC.bash"
sed -i "s|GMXDATA=.*|GMXDATA=$INSTALL_DIR/share/gromacs|" "$INSTALL_BIN/GMXRC.bash"

# CSH
sed -i "s|setenv GMXBIN.*|setenv GMXBIN $INSTALL_DIR/bin|" "$INSTALL_BIN/GMXRC.csh"
sed -i "s|setenv GMXLDLIB.*|setenv GMXLDLIB $INSTALL_DIR/lib|" "$INSTALL_BIN/GMXRC.csh"
sed -i "s|setenv GMXMAN.*|setenv GMXMAN $INSTALL_DIR/share/man|" "$INSTALL_BIN/GMXRC.csh"
sed -i "s|setenv GMXDATA.*|setenv GMXDATA $INSTALL_DIR/share/gromacs|" "$INSTALL_BIN/GMXRC.csh"

# ZSH
sed -i "s|GMXBIN=.*|GMXBIN=$INSTALL_DIR/bin|" "$INSTALL_BIN/GMXRC.zsh"
sed -i "s|GMXLDLIB=.*|GMXLDLIB=$INSTALL_DIR/lib|" "$INSTALL_BIN/GMXRC.zsh"
sed -i "s|GMXMAN=.*|GMXMAN=$INSTALL_DIR/share/man|" "$INSTALL_BIN/GMXRC.zsh"
sed -i "s|GMXDATA=.*|GMXDATA=$INSTALL_DIR/share/gromacs|" "$INSTALL_BIN/GMXRC.zsh"

# copy the path finding tool to $HOME for convenience of the user
cp "$INSTALL_DIR/get_gmx" "$HOME"/
echo 4.6.7,$INSTALL_BIN/GMXRC >> ~/.gromacs_versions
|
# Regenerates the tesseract-tx test fixtures; each command writes the tool's
# output for a fixed input into a .hex or .json fixture file.

# Blank transactions
../../../src/tesseract-tx -create nversion=1 > blanktxv1.hex
../../../src/tesseract-tx -json -create nversion=1 > blanktxv1.json
../../../src/tesseract-tx -json -create > blanktxv2.json

# Basic create with inputs and address outputs
../../../src/tesseract-tx -create in=5897de6bd6027a475eadd57019d4e6872c396d0716c4875a5f1a6fcfdf385c1f:0 in=bf829c6bcf84579331337659d31f89dfd138f7f7785802d5501c92333145ca7c:18 in=22a6f904655d53ae2ff70e701a0bbd90aa3975c0f40bfc6cc996a9049e31cdfc:1 outaddr=0.18:TSs1GJrEhq4HhD4aciMhnJQ6zy1GTG2RjhYn outaddr=4:TSs1MPyrBHf1BecmLagtEbibg7g7ysHdqNgJ > txcreate1.hex
../../../src/tesseract-tx -json -create in=5897de6bd6027a475eadd57019d4e6872c396d0716c4875a5f1a6fcfdf385c1f:0 in=bf829c6bcf84579331337659d31f89dfd138f7f7785802d5501c92333145ca7c:18 in=22a6f904655d53ae2ff70e701a0bbd90aa3975c0f40bfc6cc996a9049e31cdfc:1 outaddr=0.18:TSs1GJrEhq4HhD4aciMhnJQ6zy1GTG2RjhYn outaddr=4:TSs1MPyrBHf1BecmLagtEbibg7g7ysHdqNgJ > txcreate1.json
../../../src/tesseract-tx -create outscript=0: > txcreate2.hex
../../../src/tesseract-tx -json -create outscript=0: > txcreate2.json
# NOTE(review): the two commands below overwrite the txcreate2.* fixtures
# produced just above — confirm this is intentional.
../../../src/tesseract-tx 02000000000100000000000000000000000000 > txcreate2.hex
../../../src/tesseract-tx -json 02000000000100000000000000000000000000 > txcreate2.json

# Script outputs (plain, P2SH, witness, witness-in-P2SH)
../../../src/tesseract-tx -create outscript=0:OP_DROP nversion=1 > txcreatescript1.hex
../../../src/tesseract-tx -json -create outscript=0:OP_DROP nversion=1 > txcreatescript1.json
../../../src/tesseract-tx -create outscript=0:OP_DROP:S nversion=1 > txcreatescript2.hex
../../../src/tesseract-tx -json -create outscript=0:OP_DROP:S nversion=1 > txcreatescript2.json
../../../src/tesseract-tx -create outscript=0:OP_DROP:W nversion=1 > txcreatescript3.hex
../../../src/tesseract-tx -json -create outscript=0:OP_DROP:W nversion=1 > txcreatescript3.json
../../../src/tesseract-tx -create outscript=0:OP_DROP:WS nversion=1 > txcreatescript4.hex
../../../src/tesseract-tx -json -create outscript=0:OP_DROP:WS nversion=1 > txcreatescript4.json

# Signing
../../../src/tesseract-tx -create nversion=1 in=4d49a71ec9da436f71ec4ee231d04f292a29cd316f598bb7068feccabdc59485:0 set=privatekeys:["L4c2QCozPnfZvBAsFw1scC58JTqzwbqqV2w53pcQ6MaTqdkUBxHe"] set=prevtxs:[{"txid":"4d49a71ec9da436f71ec4ee231d04f292a29cd316f598bb7068feccabdc59485","vout":0,"scriptPubKey":"76a9148c1c9c1369c188d044a7a78fb00ae825082803bf88ac"}] sign=ALL outaddr=0.001:TSs1MPyrBHf1BecmLagtEbibg7g7ysHdqNgJ > txcreatesignv1.hex
../../../src/tesseract-tx -json -create nversion=1 in=4d49a71ec9da436f71ec4ee231d04f292a29cd316f598bb7068feccabdc59485:0 set=privatekeys:["L4c2QCozPnfZvBAsFw1scC58JTqzwbqqV2w53pcQ6MaTqdkUBxHe"] set=prevtxs:[{"txid":"4d49a71ec9da436f71ec4ee231d04f292a29cd316f598bb7068feccabdc59485","vout":0,"scriptPubKey":"76a9148c1c9c1369c188d044a7a78fb00ae825082803bf88ac"}] sign=ALL outaddr=0.001:TSs1MPyrBHf1BecmLagtEbibg7g7ysHdqNgJ > txcreatesignv1.json
../../../src/tesseract-tx -create in=4d49a71ec9da436f71ec4ee231d04f292a29cd316f598bb7068feccabdc59485:0 set=privatekeys:["L4c2QCozPnfZvBAsFw1scC58JTqzwbqqV2w53pcQ6MaTqdkUBxHe"] set=prevtxs:[{"txid":"4d49a71ec9da436f71ec4ee231d04f292a29cd316f598bb7068feccabdc59485","vout":0,"scriptPubKey":"76a9148c1c9c1369c188d044a7a78fb00ae825082803bf88ac"}] sign=ALL outaddr=0.001:TSs1MPyrBHf1BecmLagtEbibg7g7ysHdqNgJ > txcreatesignv2.hex

# Pubkey outputs
../../../src/tesseract-tx -create outpubkey=0:02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397 nversion=1 > txcreateoutpubkey1.hex
../../../src/tesseract-tx -json -create outpubkey=0:02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397 nversion=1 > txcreateoutpubkey1.json
../../../src/tesseract-tx -create outpubkey=0:02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397:W nversion=1 > txcreateoutpubkey2.hex
../../../src/tesseract-tx -json -create outpubkey=0:02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397:W nversion=1 > txcreateoutpubkey2.json
../../../src/tesseract-tx -create outpubkey=0:02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397:WS nversion=1 > txcreateoutpubkey3.hex
../../../src/tesseract-tx -json -create outpubkey=0:02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397:WS nversion=1 > txcreateoutpubkey3.json

# OP_RETURN data outputs
../../../src/tesseract-tx -create in=5897de6bd6027a475eadd57019d4e6872c396d0716c4875a5f1a6fcfdf385c1f:0 outaddr=0.18:TSs1MPyrBHf1BecmLagtEbibg7g7ysHdqNgJ outdata=4:54686973204f505f52455455524e207472616e73616374696f6e206f7574707574207761732063726561746564206279206d6f646966696564206372656174657261777472616e73616374696f6e2e > txcreatedata1.hex
../../../src/tesseract-tx -json -create nversion=1 in=5897de6bd6027a475eadd57019d4e6872c396d0716c4875a5f1a6fcfdf385c1f:0 outaddr=0.18:TSa98YsFSerZQdTNGqExZu273MVxprNwZUsw outdata=4:54686973204f505f52455455524e207472616e73616374696f6e206f7574707574207761732063726561746564206279206d6f646966696564206372656174657261777472616e73616374696f6e2e > txcreatedata1.json
../../../src/tesseract-tx -create in=5897de6bd6027a475eadd57019d4e6872c396d0716c4875a5f1a6fcfdf385c1f:0 outaddr=0.18:TSa98YsFSerZQdTNGqExZu273MVxprNwZUsw outdata=54686973204f505f52455455524e207472616e73616374696f6e206f7574707574207761732063726561746564206279206d6f646966696564206372656174657261777472616e73616374696f6e2e > txcreatedata2.hex
../../../src/tesseract-tx -json -create in=5897de6bd6027a475eadd57019d4e6872c396d0716c4875a5f1a6fcfdf385c1f:0 outaddr=0.18:TSa98YsFSerZQdTNGqExZu273MVxprNwZUsw outdata=54686973204f505f52455455524e207472616e73616374696f6e206f7574707574207761732063726561746564206279206d6f646966696564206372656174657261777472616e73616374696f6e2e > txcreatedata2.json

# Sequence-number handling
../../../src/tesseract-tx -create in=5897de6bd6027a475eadd57019d4e6872c396d0716c4875a5f1a6fcfdf385c1f:0:4294967293 outaddr=0.18:TSa98YsFSerZQdTNGqExZu273MVxprNwZUsw > txcreatedata_seq0.hex
../../../src/tesseract-tx -json -create in=5897de6bd6027a475eadd57019d4e6872c396d0716c4875a5f1a6fcfdf385c1f:0:4294967293 outaddr=0.18:TSa98YsFSerZQdTNGqExZu273MVxprNwZUsw > txcreatedata_seq0.json
../../../src/tesseract-tx 01000000011f5c38dfcf6f1a5f5a87c416076d392c87e6d41970d5ad5e477a02d66bde97580000000000fdffffff0180a81201000000001976a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac00000000 in=5897de6bd6027a475eadd57019d4e6872c396d0716c4875a5f1a6fcfdf385c1f:0:1 > txcreatedata_seq1.hex
../../../src/tesseract-tx -json 01000000011f5c38dfcf6f1a5f5a87c416076d392c87e6d41970d5ad5e477a02d66bde97580000000000fdffffff0180a81201000000001976a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac00000000 in=5897de6bd6027a475eadd57019d4e6872c396d0716c4875a5f1a6fcfdf385c1f:0:1 > txcreatedata_seq1.json

# Multisig outputs (plain, P2SH, witness, witness-in-P2SH, uncompressed key)
../../../src/tesseract-tx -create outmultisig=1:2:3:02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397:021ac43c7ff740014c3b33737ede99c967e4764553d1b2b83db77c83b8715fa72d:02df2089105c77f266fa11a9d33f05c735234075f2e8780824c6b709415f9fb485 nversion=1 > txcreatemultisig1.hex
../../../src/tesseract-tx -json -create outmultisig=1:2:3:02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397:021ac43c7ff740014c3b33737ede99c967e4764553d1b2b83db77c83b8715fa72d:02df2089105c77f266fa11a9d33f05c735234075f2e8780824c6b709415f9fb485 nversion=1 > txcreatemultisig1.json
../../../src/tesseract-tx -create outmultisig=1:2:3:02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397:021ac43c7ff740014c3b33737ede99c967e4764553d1b2b83db77c83b8715fa72d:02df2089105c77f266fa11a9d33f05c735234075f2e8780824c6b709415f9fb485:S nversion=1 > txcreatemultisig2.hex
../../../src/tesseract-tx -json -create outmultisig=1:2:3:02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397:021ac43c7ff740014c3b33737ede99c967e4764553d1b2b83db77c83b8715fa72d:02df2089105c77f266fa11a9d33f05c735234075f2e8780824c6b709415f9fb485:S nversion=1 > txcreatemultisig2.json
../../../src/tesseract-tx -create outmultisig=1:2:3:02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397:021ac43c7ff740014c3b33737ede99c967e4764553d1b2b83db77c83b8715fa72d:02df2089105c77f266fa11a9d33f05c735234075f2e8780824c6b709415f9fb485:W nversion=1 > txcreatemultisig3.hex
../../../src/tesseract-tx -json -create outmultisig=1:2:3:02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397:021ac43c7ff740014c3b33737ede99c967e4764553d1b2b83db77c83b8715fa72d:02df2089105c77f266fa11a9d33f05c735234075f2e8780824c6b709415f9fb485:W nversion=1 > txcreatemultisig3.json
../../../src/tesseract-tx -create outmultisig=1:2:3:02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397:021ac43c7ff740014c3b33737ede99c967e4764553d1b2b83db77c83b8715fa72d:02df2089105c77f266fa11a9d33f05c735234075f2e8780824c6b709415f9fb485:WS nversion=1 > txcreatemultisig4.hex
../../../src/tesseract-tx -json -create outmultisig=1:2:3:02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397:021ac43c7ff740014c3b33737ede99c967e4764553d1b2b83db77c83b8715fa72d:02df2089105c77f266fa11a9d33f05c735234075f2e8780824c6b709415f9fb485:WS nversion=1 > txcreatemultisig4.json
../../../src/tesseract-tx -json -create outmultisig=1:2:3:02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397:021ac43c7ff740014c3b33737ede99c967e4764553d1b2b83db77c83b8715fa72d:047d1368ba7ae01c94bc32293efd70bd7e3be7aa7912d07d0b1c659c1008d179b8642f5fb90f47580feb29f045e216ff5a4716d3a0fed36da414d332046303c44a:S > txcreatemultisig5.json
|
# Run purge-database.sql against the "aisland" database.
# NOTE(review): the root password is hard-coded on the command line; it is
# visible in the process list and shell history — consider using an option
# file (~/.my.cnf) or prompting instead.
/usr/local/mysql/bin/mysql -u root --password='Aszxqw1234' aisland <purge-database.sql
|
<reponame>JetBrains-Research/ReSplit
from contextlib import redirect_stdout
import beniget
import gast as ast
class ChainExtractor:
    """Extracts def-use chains from Python source using gast/beniget.

    Parsing failures are not raised; they are collected per exception
    class name in ``parsing_error_log``.
    """

    # Exception types tolerated by parse_code; order matters for bucketing
    # subclasses (e.g. IndentationError logs under 'SyntaxError', matching
    # the behavior of the previous one-clause-per-type handlers).
    _LOGGED_ERRORS = (ValueError, SyntaxError, AssertionError, IndexError)

    def __init__(self):
        # (starting_node, final_user_node) pairs filled by extract_chains().
        self.pair_list = []
        # Parsing failures bucketed by exception class name.
        self.parsing_error_log = {
            'ValueError': [],
            'SyntaxError': [],
            'AssertionError': [],
            'IndexError': []
        }

    def parse_code(self, code):
        """Parse ``code`` and build def-use chains and ancestor links.

        Returns ``(module, def_use_chains, ancestors)`` on success, or
        ``(None, None, None)`` when parsing fails; the failure is recorded
        in ``parsing_error_log``. stdout is suppressed while beniget runs.
        """
        with redirect_stdout(None):
            try:
                module = ast.parse(code)
                du = beniget.DefUseChains()
                du.visit(module)
                ancestors = beniget.Ancestors()
                ancestors.visit(module)
            except self._LOGGED_ERRORS as exc:
                # Consolidated handler (was four duplicated except clauses);
                # bucket the error under the first matching base type so that
                # subclasses land in the same bucket as before.
                for etype in self._LOGGED_ERRORS:
                    if isinstance(exc, etype):
                        self.parsing_error_log[etype.__name__].append(exc)
                        break
                return None, None, None
        return module, du, ancestors

    def extract_chains(self, du, ancestors):
        """Return [start, final_use] pairs, each mapped to its first ancestor."""
        self.pair_list = []
        for chain in du.chains:
            self.__traverse_chain(du.chains[chain], {}, chain)
        anc_pair_list = [self._node_to_first_ancestor(pair, ancestors) for pair in self.pair_list]
        return anc_pair_list

    def __traverse_chain(self, chain, visited, starting_node):
        # Depth-first walk of the use chain; a user with no further users is
        # a terminal use and yields a (start, terminal) pair.
        if chain.node in visited:
            return None
        else:
            visited[chain.node] = len(visited)
        for node in chain.users():
            if len(node.users()) == 0:
                self.pair_list.append((starting_node, node.node))
            else:
                self.__traverse_chain(node, visited, starting_node)

    @staticmethod
    def _node_to_first_ancestor(node_list, ancestors):
        # Replace each node with its second ancestor (the statement-level
        # parent) when one exists, otherwise keep the node itself.
        ancestor_list = [ancestors.parents(node)[1] if (len(ancestors.parents(node)) > 1) else node
                         for node in node_list]
        return ancestor_list

    @staticmethod
    def _node_to_cell(node_list, cell_mapping):
        # Map each node to its owning cell via the supplied mapping.
        cell_list = [cell_mapping[node] for node in node_list]
        return cell_list
|
import tkinter as tk
from infi.systray import SysTrayIcon
from traylert.traylert_crypto import encrypt, decrypt

# Tray-icon demo that toggles between an "Encrypt" and a "Decrypt" menu
# entry, reading text from stdin and printing the transformed result.
# NOTE(review): input() inside a tray-menu callback blocks the systray
# worker thread until the user types a line — confirm this is intended.
def on_quit_callback(systray):
    # Tear down the tray icon and its worker thread.
    systray.shutdown()
def on_encrypt_callback(systray):
    input_text = input("Enter the text to encrypt: ")
    encrypted_text = encrypt(input_text)
    # NOTE(review): update(menu_options=...) replaces the whole menu and drops
    # the "Quit" entry added at startup; also confirm the installed
    # infi.systray version supports a menu_options kwarg on update().
    systray.update(menu_options=[("Decrypt", None, on_decrypt_callback)])
    print("Encrypted text:", encrypted_text)
def on_decrypt_callback(systray):
    input_text = input("Enter the text to decrypt: ")
    decrypted_text = decrypt(input_text)
    systray.update(menu_options=[("Encrypt", None, on_encrypt_callback)])
    print("Decrypted text:", decrypted_text)
# Build the initial menu, start the tray icon, then hand the main thread to
# Tk's event loop so the process stays alive.
menu_options = [("Encrypt", None, on_encrypt_callback), ("Quit", None, on_quit_callback)]
systray = SysTrayIcon("icon.ico", "Encryption App", menu_options)
systray.start()
tk.mainloop()
|
<filename>unique.js
/**
 * Performance note: prefer `new Set` when the runtime supports it, because
 * repeated linear scans over large arrays are expensive — so the
 * forEach/indexOf approach is not recommended for big inputs.
 */
// Approach 1: classic linear scan — keep only the first occurrence of each
// value (indexOf uses strict equality, so NaN values are never deduplicated).
function unique(arr) {
  const seen = []
  for (const item of arr) {
    if (seen.indexOf(item) < 0) {
      seen.push(item)
    }
  }
  return seen
}
// Approach 2: Set-based — a Set stores each value at most once and preserves
// insertion order, so converting back to an array deduplicates in one pass.
function uniqueTwo(arr) {
  return Array.from(new Set(arr))
}
// Demo: run both implementations on the same sample array.
const newArr = [10, 20, 30, 10, 40, 10]
const result = unique(newArr)
const resultTwo = uniqueTwo(newArr);
console.log('数组去重结果', result, resultTwo);
|
<filename>javatests/dagger/functional/assisted/AssistedFactoryParameterizedTest.java
/*
* Copyright (C) 2020 The Dagger Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dagger.functional.assisted;
import static com.google.common.truth.Truth.assertThat;
import dagger.Component;
import dagger.assisted.Assisted;
import dagger.assisted.AssistedFactory;
import dagger.assisted.AssistedInject;
import javax.inject.Inject;
import javax.inject.Provider;
import javax.inject.Singleton;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
@RunWith(JUnit4.class)
public final class AssistedFactoryParameterizedTest {
  // Component shared by all tests in this class; each accessor resolves the
  // assisted factory with a different type parameterization.
  @Singleton
  @Component
  interface ParentComponent {
    // Tests a parameterized Factory with unique @Assisted types
    ParameterizedFooFactory<Dep2, AssistedDep2> uniqueParameterizedFooFactory();

    // Tests a parameterized Factory with duplicate @Assisted types in its resolved request type.
    // Note: this is fine since the @Assisted types are still unique on the @AssistedInject and
    // @AssistedFactory types, so that the generated code can correctly matches types.
    ParameterizedFooFactory<Dep1, AssistedDep1> dupeParameterizedFooFactory();

    // Tests a parameterized Factory with same type as binding
    ParameterizedFooFactory<Dep1, Dep1> bindingParameterizedFooFactory();

    // Tests a parameterized Factory with fixed type parameters
    FixedParameterizedFooFactory fixedParameterizedFooFactory();

    // Tests a parameterized Factory that extends an interface with a parameterized return type
    ExtendedFooFactory<Dep2, AssistedDep2> extendedParameterizedFooFactory();

    // Tests a request of factories from another binding.
    SomeEntryPoint someEntryPoint();
  }
  // Plain @Inject-constructed dependencies forming a small injection chain
  // (Dep1 -> Dep2 -> Dep3 -> Dep4).
  static final class Dep1 {
    @Inject
    Dep1(Dep2 dep2, Dep3 dep3) {}
  }

  static final class Dep2 {
    @Inject
    Dep2(Dep3 dep3) {}
  }

  static final class Dep3 {
    @Inject
    Dep3(Dep4 dep4) {}
  }

  static final class Dep4 {
    @Inject
    Dep4() {}
  }
  // A base interface to test that factories can reference subclasses of the assisted parameter.
  interface AssistedDep {}

  static final class AssistedDep1 implements AssistedDep {}

  static final class AssistedDep2 implements AssistedDep {}

  // Superclass with a field injection, used to check that members injection
  // into a base class works for assisted types.
  abstract static class BaseFoo {
    @Inject Dep4 dep4;
  }
  // Assisted-injected type under test: mixes a concrete assisted parameter
  // (AssistedDep1), a generic assisted parameter (AssistedDepT), a primitive
  // assisted value, constructor- and field-injected dependencies, and a
  // reference back to its own factory.
  static final class ParameterizedFoo<DepT, AssistedDepT> extends BaseFoo {
    private final Dep1 dep1;
    private final Provider<DepT> depTProvider;
    private final AssistedDep1 assistedDep1;
    private final AssistedDepT assistedDepT;
    private final int assistedInt;
    private final ParameterizedFooFactory<DepT, AssistedDepT> factory;

    @Inject Dep3 dep3;

    @AssistedInject
    ParameterizedFoo(
        Dep1 dep1,
        @Assisted AssistedDep1 assistedDep1,
        Provider<DepT> depTProvider,
        @Assisted AssistedDepT assistedDepT,
        @Assisted int assistedInt,
        ParameterizedFooFactory<DepT, AssistedDepT> factory) {
      this.dep1 = dep1;
      this.depTProvider = depTProvider;
      this.assistedDep1 = assistedDep1;
      this.assistedDepT = assistedDepT;
      this.assistedInt = assistedInt;
      this.factory = factory;
    }
  }
@AssistedFactory
interface ParameterizedFooFactory<DepT, AssistedDepT> {
ParameterizedFoo<DepT, AssistedDepT> create(
AssistedDep1 assistedDep1, AssistedDepT assistedDepT, int assistedInt);
}
@Test
public void testUniqueParameterizedFooFactory() {
AssistedDep1 assistedDep1 = new AssistedDep1();
AssistedDep2 assistedDep2 = new AssistedDep2();
int assistedInt = 7;
ParameterizedFoo<Dep2, AssistedDep2> parameterizedFoo =
DaggerAssistedFactoryParameterizedTest_ParentComponent.create()
.uniqueParameterizedFooFactory()
.create(assistedDep1, assistedDep2, assistedInt);
assertThat(parameterizedFoo.dep1).isNotNull();
assertThat(parameterizedFoo.depTProvider).isNotNull();
assertThat(parameterizedFoo.depTProvider.get()).isNotNull();
assertThat(parameterizedFoo.dep3).isNotNull();
assertThat(parameterizedFoo.dep4).isNotNull();
assertThat(parameterizedFoo.assistedDep1).isEqualTo(assistedDep1);
assertThat(parameterizedFoo.assistedDepT).isEqualTo(assistedDep2);
assertThat(parameterizedFoo.assistedInt).isEqualTo(assistedInt);
assertThat(parameterizedFoo.factory).isNotNull();
}
@Test
public void testDupeParameterizedFooFactory() {
AssistedDep1 assistedDep1 = new AssistedDep1();
int assistedInt = 7;
ParameterizedFoo<Dep1, AssistedDep1> parameterizedFoo =
DaggerAssistedFactoryParameterizedTest_ParentComponent.create()
.dupeParameterizedFooFactory()
.create(assistedDep1, assistedDep1, assistedInt);
assertThat(parameterizedFoo.dep1).isNotNull();
assertThat(parameterizedFoo.depTProvider).isNotNull();
assertThat(parameterizedFoo.depTProvider.get()).isNotNull();
assertThat(parameterizedFoo.dep3).isNotNull();
assertThat(parameterizedFoo.dep4).isNotNull();
assertThat(parameterizedFoo.assistedDep1).isEqualTo(assistedDep1);
assertThat(parameterizedFoo.assistedDepT).isEqualTo(assistedDep1);
assertThat(parameterizedFoo.assistedInt).isEqualTo(assistedInt);
assertThat(parameterizedFoo.factory).isNotNull();
}
@Test
public void testBindingParameterizedFooFactory() {
AssistedDep1 assistedDep1 = new AssistedDep1();
Dep1 dep1 = new Dep1(new Dep2(new Dep3(new Dep4())), new Dep3(new Dep4()));
int assistedInt = 7;
ParameterizedFoo<Dep1, Dep1> parameterizedFoo =
DaggerAssistedFactoryParameterizedTest_ParentComponent.create()
.bindingParameterizedFooFactory()
.create(assistedDep1, dep1, assistedInt);
assertThat(parameterizedFoo.dep1).isNotNull();
assertThat(parameterizedFoo.depTProvider).isNotNull();
assertThat(parameterizedFoo.depTProvider.get()).isNotNull();
assertThat(parameterizedFoo.dep3).isNotNull();
assertThat(parameterizedFoo.dep4).isNotNull();
assertThat(parameterizedFoo.assistedDep1).isEqualTo(assistedDep1);
assertThat(parameterizedFoo.assistedDepT).isEqualTo(dep1);
assertThat(parameterizedFoo.assistedInt).isEqualTo(assistedInt);
assertThat(parameterizedFoo.factory).isNotNull();
}
@AssistedFactory
interface FixedParameterizedFooFactory {
ParameterizedFoo<Dep2, AssistedDep2> create(
AssistedDep1 assistedDep1, AssistedDep2 assistedDep2, int assistedInt);
}
@Test
public void testFixedParameterizedFooFactory() {
AssistedDep1 assistedDep1 = new AssistedDep1();
AssistedDep2 assistedDep2 = new AssistedDep2();
int assistedInt = 7;
ParameterizedFoo<Dep2, AssistedDep2> parameterizedFoo =
DaggerAssistedFactoryParameterizedTest_ParentComponent.create()
.fixedParameterizedFooFactory()
.create(assistedDep1, assistedDep2, assistedInt);
assertThat(parameterizedFoo.dep1).isNotNull();
assertThat(parameterizedFoo.depTProvider).isNotNull();
assertThat(parameterizedFoo.depTProvider.get()).isNotNull();
assertThat(parameterizedFoo.dep3).isNotNull();
assertThat(parameterizedFoo.dep4).isNotNull();
assertThat(parameterizedFoo.assistedDep1).isEqualTo(assistedDep1);
assertThat(parameterizedFoo.assistedDepT).isEqualTo(assistedDep2);
assertThat(parameterizedFoo.assistedInt).isEqualTo(assistedInt);
assertThat(parameterizedFoo.factory).isNotNull();
}
interface ParameterizedFactory<ReturnT, DepT, AssistedDepT> {
// Use different parameter names than Foo to make sure we're not assuming they're the same.
ReturnT create(
AssistedDep1 factoryAssistedDep1, AssistedDepT factoryAssistedDepT, int factoryAssistedInt);
}
@AssistedFactory
interface ExtendedFooFactory<DepT, AssistedDepT>
extends ParameterizedFactory<ParameterizedFoo<DepT, AssistedDepT>, DepT, AssistedDepT> {}
@Test
public void testExtendedFooFactory() {
AssistedDep1 assistedDep1 = new AssistedDep1();
AssistedDep2 assistedDep2 = new AssistedDep2();
int assistedInt = 7;
ParameterizedFoo<Dep2, AssistedDep2> parameterizedFoo =
DaggerAssistedFactoryParameterizedTest_ParentComponent.create()
.extendedParameterizedFooFactory()
.create(assistedDep1, assistedDep2, assistedInt);
assertThat(parameterizedFoo.dep1).isNotNull();
assertThat(parameterizedFoo.depTProvider).isNotNull();
assertThat(parameterizedFoo.depTProvider.get()).isNotNull();
assertThat(parameterizedFoo.dep3).isNotNull();
assertThat(parameterizedFoo.dep4).isNotNull();
assertThat(parameterizedFoo.assistedDep1).isEqualTo(assistedDep1);
assertThat(parameterizedFoo.assistedDepT).isEqualTo(assistedDep2);
assertThat(parameterizedFoo.assistedInt).isEqualTo(assistedInt);
assertThat(parameterizedFoo.factory).isNotNull();
}
static class SomeEntryPoint {
private final ParameterizedFooFactory<Dep1, AssistedDep1> dupeParameterizedFooFactory;
@Inject
SomeEntryPoint(ParameterizedFooFactory<Dep1, AssistedDep1> dupeParameterizedFooFactory) {
this.dupeParameterizedFooFactory = dupeParameterizedFooFactory;
}
}
@Test
public void testParameterizedFooFactoryFromSomeEntryPoint() {
AssistedDep1 assistedDep1 = new AssistedDep1();
int assistedInt = 7;
ParameterizedFoo<Dep1, AssistedDep1> parameterizedFoo =
DaggerAssistedFactoryParameterizedTest_ParentComponent.create()
.someEntryPoint()
.dupeParameterizedFooFactory
.create(assistedDep1, assistedDep1, assistedInt);
assertThat(parameterizedFoo.dep1).isNotNull();
assertThat(parameterizedFoo.depTProvider).isNotNull();
assertThat(parameterizedFoo.depTProvider.get()).isNotNull();
assertThat(parameterizedFoo.dep3).isNotNull();
assertThat(parameterizedFoo.dep4).isNotNull();
assertThat(parameterizedFoo.assistedDep1).isEqualTo(assistedDep1);
assertThat(parameterizedFoo.assistedDepT).isEqualTo(assistedDep1);
assertThat(parameterizedFoo.assistedInt).isEqualTo(assistedInt);
assertThat(parameterizedFoo.factory).isNotNull();
}
}
|
<gh_stars>0
/**
* Copyright 2018 hubohua
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.demoncat.dcapp.widget.slidebutton;
/**
* @Class: SlidingDeleteView
* @Description: Sliding delete menu view
* @Author: hubohua
* @CreateDate: 2018/4/17
*/
import android.content.Context;
import android.util.AttributeSet;
import android.util.Log;
import android.view.MotionEvent;
import android.view.View;
import android.widget.HorizontalScrollView;
import android.widget.TextView;
import com.demoncat.dcapp.R;
/**
 * @Class: SlidingDeleteView
 * @Description: Horizontal scroll view that reveals a delete button when the
 *               user slides the row, snapping fully open or closed on release.
 *               FIX: the sliding-state listener is now null-checked before each
 *               callback; previously a touch or openMenu() before
 *               setSlidingStateChangeListener() threw a NullPointerException.
 * @Author: hubohua
 * @CreateDate: 2018/4/17
 */
public class SlidingDeleteView extends HorizontalScrollView {
    private static final String TAG = SlidingDeleteView.class.getSimpleName();

    protected Context mContext;
    protected TextView mTvDelete; // delete button revealed by sliding
    protected int mScrollWidth; // width of the delete button == max scroll distance
    protected OnSlidingStateChangeListener mOnSlidingClickListener;
    protected boolean mSlideEnabled = true;
    protected Boolean mOpen = false; // whether the delete menu is currently shown
    protected Boolean mOnce = false; // guards the one-time child lookup in onMeasure

    public SlidingDeleteView(Context context) {
        this(context, null);
    }

    public SlidingDeleteView(Context context, AttributeSet attrs) {
        this(context, attrs, 0);
    }

    public SlidingDeleteView(Context context, AttributeSet attrs, int defStyleAttr) {
        super(context, attrs, defStyleAttr);
        setOverScrollMode(OVER_SCROLL_NEVER);
        mContext = context;
    }

    /**
     * Could slide or not
     * @param slideEnabled true to allow the delete menu to be revealed by touch
     */
    public void setSlideEnabled(boolean slideEnabled) {
        this.mSlideEnabled = slideEnabled;
    }

    @Override
    protected void onMeasure(int widthMeasureSpec, int heightMeasureSpec) {
        super.onMeasure(widthMeasureSpec, heightMeasureSpec);
        measures(widthMeasureSpec, heightMeasureSpec);
    }

    protected void measures(int widthMeasureSpec, int heightMeasureSpec) {
        // Look the delete button up only once. Assumes the inflated content
        // contains a child with id tv_delete -- layout() below NPEs otherwise.
        if (!mOnce) {
            mTvDelete = (TextView) findViewById(R.id.tv_delete);
            mOnce = true;
        }
    }

    @Override
    protected void onLayout(boolean changed, int l, int t, int r, int b) {
        super.onLayout(changed, l, t, r, b);
        layout(changed, l, t, r, b);
    }

    protected void layout(boolean changed, int l, int t, int r, int b) {
        if (changed) {
            // Start closed and remember how far we must scroll to fully open.
            this.scrollTo(0, 0);
            mScrollWidth = mTvDelete.getWidth();
            Log.i(TAG, "mScrollWidth:" + mScrollWidth);
        }
    }

    @Override
    public boolean onTouchEvent(MotionEvent ev) {
        int action = ev.getAction();
        if (mSlideEnabled) { // sliding is enabled
            switch (action) {
                case MotionEvent.ACTION_DOWN:
                case MotionEvent.ACTION_MOVE:
                    // Guard: no listener may have been registered yet.
                    if (mOnSlidingClickListener != null) {
                        mOnSlidingClickListener.onDownOrMove(this);
                    }
                    break;
                case MotionEvent.ACTION_UP:
                case MotionEvent.ACTION_CANCEL:
                    changeScrollx();
                    return true;
                default:
                    break;
            }
            return super.onTouchEvent(ev);
        } else {
            // sliding is disabled: consume the event without scrolling
            return true;
        }
    }

    @Override
    protected void onScrollChanged(int l, int t, int oldl, int oldt) {
        super.onScrollChanged(l, t, oldl, oldt);
        scrollChanged(l, t, oldl, oldt);
    }

    protected void scrollChanged(int l, int t, int oldl, int oldt) {
        // mTvDelete.setTranslationX(l - mScrollWidth);
    }

    /**
     * Calculate the scroll x for auto open or close.
     * Half of the delete button width is the change limit.
     */
    public void changeScrollx() {
        if (getScrollX() >= (mScrollWidth / 2)) {
            this.smoothScrollTo(mScrollWidth, 0);
            mOpen = true;
            if (mOnSlidingClickListener != null) {
                mOnSlidingClickListener.onMenuIsOpen(this);
            }
        } else {
            this.smoothScrollTo(0, 0);
            mOpen = false;
        }
    }

    /**
     * Open menu to show delete button
     */
    public void openMenu() {
        if (mOpen) {
            return;
        }
        this.smoothScrollTo(mScrollWidth, 0);
        mOpen = true;
        if (mOnSlidingClickListener != null) {
            mOnSlidingClickListener.onMenuIsOpen(this);
        }
    }

    /**
     * Close menu to hide delete button
     */
    public void closeMenu() {
        if (!mOpen) {
            return;
        }
        this.smoothScrollTo(0, 0);
        mOpen = false;
    }

    /**
     * Set listener of sliding delete view
     * @param listener callback notified on touch and when the menu opens
     */
    public void setSlidingStateChangeListener(OnSlidingStateChangeListener listener) {
        mOnSlidingClickListener = listener;
    }

    /**
     * On sliding button open or close listener
     */
    public interface OnSlidingStateChangeListener<T extends SlidingDeleteView> {
        void onMenuIsOpen(View view);

        void onDownOrMove(T slidingView);
    }
}
|
#!/bin/bash
# Run the lab1/task5 Kotlin entry point with the matrix and vector input files.
args="files/lab1/task5-matrix.txt files/lab1/task5-vector.txt"
# "$@" (quoted) forwards caller-supplied arguments intact even if they contain
# spaces; the unquoted $@ split them. ${args} is intentionally unquoted so it
# expands to two separate file paths.
kotlin -classpath target/classes at.doml.anc.lab1.MainKt "$@" ${args}
|
<filename>spring-petclinic-cdk8s-config/src/main/java/imports/k8s/CsiNode.java<gh_stars>1-10
package imports.k8s;
/**
 * CSINode holds information about all CSI drivers installed on a node.
 * <p>
 * CSI drivers do not need to create the CSINode object directly. As long as they use the node-driver-registrar sidecar container, the kubelet will automatically populate the CSINode object for the CSI driver as part of kubelet plugin registration. CSINode has the same name as a node. If the object is missing, it means either there are no CSI Drivers available on the node, or the Kubelet version is low enough that it doesn't create this object. CSINode has an OwnerReference that points to the corresponding node object.
 * <p>
 * NOTE: this class is generated by jsii-pacmak (see the {@code @Generated}
 * annotation below); avoid manual edits, as they will be lost on regeneration.
 */
@javax.annotation.Generated(value = "jsii-pacmak/1.14.1 (build 828de8a)", date = "2020-11-30T16:28:27.861Z")
@software.amazon.jsii.Jsii(module = imports.k8s.$Module.class, fqn = "k8s.CsiNode")
public class CsiNode extends org.cdk8s.ApiObject {

    // Deserialization constructor used by the jsii runtime.
    protected CsiNode(final software.amazon.jsii.JsiiObjectRef objRef) {
        super(objRef);
    }

    // Initialization-mode constructor used by the jsii runtime.
    protected CsiNode(final software.amazon.jsii.JsiiObject.InitializationMode initializationMode) {
        super(initializationMode);
    }

    /**
     * Defines a "io.k8s.api.storage.v1beta1.CSINode" API object.
     * <p>
     * @param scope the scope in which to define this object. This parameter is required.
     * @param name a scope-local name for the object. This parameter is required.
     * @param options configuration options. This parameter is required.
     */
    public CsiNode(final @org.jetbrains.annotations.NotNull software.constructs.Construct scope, final @org.jetbrains.annotations.NotNull java.lang.String name, final @org.jetbrains.annotations.NotNull imports.k8s.CsiNodeOptions options) {
        super(software.amazon.jsii.JsiiObject.InitializationMode.JSII);
        software.amazon.jsii.JsiiEngine.getInstance().createNewObject(this, new Object[] { java.util.Objects.requireNonNull(scope, "scope is required"), java.util.Objects.requireNonNull(name, "name is required"), java.util.Objects.requireNonNull(options, "options is required") });
    }

    /**
     * A fluent builder for {@link imports.k8s.CsiNode}.
     */
    public static final class Builder implements software.amazon.jsii.Builder<imports.k8s.CsiNode> {
        /**
         * @return a new instance of {@link Builder}.
         * @param scope the scope in which to define this object. This parameter is required.
         * @param name a scope-local name for the object. This parameter is required.
         */
        public static Builder create(final software.constructs.Construct scope, final java.lang.String name) {
            return new Builder(scope, name);
        }

        private final software.constructs.Construct scope;
        private final java.lang.String name;
        private final imports.k8s.CsiNodeOptions.Builder options;

        private Builder(final software.constructs.Construct scope, final java.lang.String name) {
            this.scope = scope;
            this.name = name;
            this.options = new imports.k8s.CsiNodeOptions.Builder();
        }

        /**
         * spec is the specification of CSINode.
         * <p>
         * @return {@code this}
         * @param spec spec is the specification of CSINode. This parameter is required.
         */
        public Builder spec(final imports.k8s.CsiNodeSpec spec) {
            this.options.spec(spec);
            return this;
        }

        /**
         * metadata.name must be the Kubernetes node name.
         * <p>
         * @return {@code this}
         * @param metadata metadata.name must be the Kubernetes node name. This parameter is required.
         */
        public Builder metadata(final imports.k8s.ObjectMeta metadata) {
            this.options.metadata(metadata);
            return this;
        }

        /**
         * @return a newly built instance of {@link imports.k8s.CsiNode}.
         */
        @Override
        public imports.k8s.CsiNode build() {
            return new imports.k8s.CsiNode(
                this.scope,
                this.name,
                this.options.build()
            );
        }
    }
}
|
<reponame>acoshift/session<gh_stars>1-10
package main
import (
"fmt"
"net/http"
"time"
"github.com/moonrhythm/session"
"github.com/moonrhythm/session/store"
)
// main wires the session middleware (in-memory store, signed cookies) around
// handler and serves on :8080.
func main() {
	h := session.New(session.Config{
		Store:    new(store.Memory),
		HTTPOnly: true,
		Secret:   []byte("supersalt"),
		Keys:     [][]byte{[]byte("supersecret")},
		Path:     "/",
		Rolling:  true,
		MaxAge:   time.Hour,
		SameSite: http.SameSiteLaxMode,
		Secure:   session.PreferSecure,
		Proxy:    true,
	}).Middleware()(http.HandlerFunc(handler))
	// ListenAndServe only returns on failure; previously its error was
	// silently discarded, so a bind failure exited with status 0.
	if err := http.ListenAndServe(":8080", h); err != nil {
		panic(err)
	}
}
// handler serves the root path only and responds with a per-session view
// counter stored under the "cnt" key.
func handler(w http.ResponseWriter, r *http.Request) {
	if r.URL.Path != "/" {
		http.NotFound(w, r)
		return
	}
	// The error was previously discarded with `sess, _ :=`; a failed lookup
	// would then dereference a nil session. Surface it as a 500 instead.
	sess, err := session.Get(r.Context(), "sess")
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	cnt := sess.GetInt("cnt")
	cnt++
	sess.Set("cnt", cnt)
	fmt.Fprintf(w, "%d views", cnt)
}
|
#!/bin/bash
#
# Reverse proxy needs to deploy last in order for nginx
# to be able to resolve the DNS domains of all the services
# at startup.
# Unfortunately - the data-portal wants to connect to the reverse-proxy
# at startup time, so there's a chicken-egg thing going on, so
# will probably need to restart the data-portal pods first time
# the commons comes up.
#
set -e

source "${GEN3_HOME}/gen3/lib/utils.sh"
gen3_load "gen3/gen3setup"

#current_namespace=$(g3kubectl config view -o jsonpath={.contexts[].context.namespace})
current_namespace=$(gen3 db namespace)

scriptDir="${GEN3_HOME}/kube/services/revproxy"

# Build the --from-file argument list for the revproxy-nginx-subconf
# configmap: one nginx sub-config per running service that has a
# matching file under gen3.nginx.conf/.
declare -a confFileList=()
confFileList+=("--from-file" "$scriptDir/gen3.nginx.conf/README.md")
for name in $(g3kubectl get services -o json | jq -r '.items[] | .metadata.name'); do
  filePath="$scriptDir/gen3.nginx.conf/${name}.conf"
  #echo "$filePath"
  if [[ -f "$filePath" ]]; then
    #echo "$filePath exists in $BASHPID!"
    confFileList+=("--from-file" "$filePath")
    #echo "${confFileList[@]}"
  fi
done

# Prometheus and grafana sub-configs are only wired up when running in the
# default namespace.
if g3kubectl get namespace prometheus > /dev/null 2>&1;
then
  if [[ $current_namespace == "default" ]];
  then
    for prometheus in $(g3kubectl get services -n prometheus -o jsonpath='{.items[*].metadata.name}');
    do
      filePath="$scriptDir/gen3.nginx.conf/${prometheus}.conf"
      if [[ -f "$filePath" ]]; then
        confFileList+=("--from-file" "$filePath")
      fi
    done
  fi
fi
#echo "${confFileList[@]}" $BASHPID

if g3kubectl get namespace grafana > /dev/null 2>&1;
then
  if [[ $current_namespace == "default" ]];
  then
    for grafana in $(g3kubectl get services -n grafana -o jsonpath='{.items[*].metadata.name}');
    do
      filePath="$scriptDir/gen3.nginx.conf/${grafana}.conf"
      # Render the conf template with the grafana admin basic-auth credential
      # substituted for the CREDS placeholder.
      touch "${XDG_RUNTIME_DIR}/${grafana}.conf"
      tmpCredsFile="${XDG_RUNTIME_DIR}/${grafana}.conf"
      adminPass=$(g3kubectl get secrets grafana-admin -o json |jq .data.credentials -r |base64 -d)
      adminCred=$(echo -n "admin:${adminPass}" | base64 --wrap=0)
      sed "s/CREDS/${adminCred}/" ${filePath} > ${tmpCredsFile}
      if [[ -f "${tmpCredsFile}" ]]; then
        confFileList+=("--from-file" "${tmpCredsFile}")
      fi
      # NOTE(review): the rendered creds file is deliberately NOT removed here --
      # it is still referenced via confFileList when the configmap is created
      # below. It does linger in XDG_RUNTIME_DIR afterwards; consider cleaning
      # up after the `g3kubectl create configmap` call.
      #rm -f ${tmpCredsFile}
    done
  fi
fi

gen3 kube-setup-secrets

gen3 update_config revproxy-nginx-conf "${scriptDir}/nginx.conf"
gen3 update_config revproxy-helper-js "${scriptDir}/helpers.js"

# Recreate the sub-conf configmap from the list assembled above.
if g3kubectl get configmap revproxy-nginx-subconf > /dev/null 2>&1; then
  g3kubectl delete configmap revproxy-nginx-subconf
fi
g3kubectl create configmap revproxy-nginx-subconf "${confFileList[@]}"

gen3 roll revproxy

if ! g3kubectl get services revproxy-service > /dev/null 2>&1; then
  g3kubectl apply -f "$scriptDir/revproxy-service.yaml"
else
  #
  # Do not do this automatically as it will trigger an elb
  # change in existing commons
  #
  echo "Ensure the commons DNS references the -elb revproxy which support http proxy protocol"
fi

#
# If set do not actually apply the revproxy service.yaml -
# just process the template and echo the yaml that would
# be set to kubectl without --dry-run.
# Mostly useful for debugging or verifying that some change
# will not re-create the AWS load balancer (and force a DNS change)
#
DRY_RUN=${DRY_RUN:-""}
if [[ "$1" =~ ^-*dry-run ]]; then
  DRY_RUN="--dry-run"
fi

export LOGGING_CONFIG=""
bucketName=$(g3kubectl get configmap global --output=jsonpath='{.data.logs_bucket}')
if [[ $? -eq 0 && -n "$bucketName" ]]; then
  LOGGING_CONFIG=$(cat - <<EOM
service.beta.kubernetes.io/aws-load-balancer-access-log-enabled: "true"
service.beta.kubernetes.io/aws-load-balancer-access-log-emit-interval: "60"
# The interval for publishing the access logs. You can specify an interval of either 5 or 60 (minutes).
service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-name: "$bucketName"
service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-prefix: "logs/lb/revproxy"
EOM
)
fi
#
# DISABLE LOGGING
# TODO: We need to give the controller S3 permissions before we
# can auto-apply S3 logging. Will have to enable logging by hand util we fix that ...
#
LOGGING_CONFIG=""

export ARN=$(g3kubectl get configmap global --output=jsonpath='{.data.revproxy_arn}')
#
# We do this hacky thing where we toggle between different configurations
# based on the value of the 'revproxy_arn' field of the global configmap
#
# Configure revproxy-service-elb - the main external load balancer service
# which targets the revproxy-deployment:
# * TARGET_PORT_HTTPS == the load-balancer target for https traffic
# * TARGET_PORT_HTTP == load-balancer target for http traffic
# Default AWS setup - k8s revproxy-service-elb manifests itself
# as an AWS ELB that terminates HTTPS requests, and
# forwards http and https traffic to the
# revproxy deployment using http proxy protocol.
#
# port 81 == proxy-protocol listener - main service entry
export TARGET_PORT_HTTPS=81
# port 82 == proxy-protocol listener - redirects to https
export TARGET_PORT_HTTP=82
if [[ "$ARN" == "GCP" ]]; then
  # port 443 - https listener - main service entry
  export TARGET_PORT_HTTPS=443
  # port 83 - http listener - redirects to https
  export TARGET_PORT_HTTP=83
elif [[ "$ARN" == "ONPREM" ]]; then
  # port 80 - http listener - main service entry
  export TARGET_PORT_HTTPS=80
  # port 83 - http listener - redirects to https
  export TARGET_PORT_HTTP=83
elif [[ ! "$ARN" =~ ^arn ]]; then
  echo "WARNING: global configmap not configured with TLS certificate ARN"
fi

if [[ -z "$DRY_RUN" ]]; then
  envsubst <$scriptDir/revproxy-service-elb.yaml | g3kubectl apply -f -
else
  echo "DRY RUN"
  envsubst <$scriptDir/revproxy-service-elb.yaml
  echo "DRY RUN"
fi
# Don't automatically apply this right now
#kubectl apply -f $scriptDir/revproxy-service.yaml
|
#!/bin/bash
# Copyright 2017-2020 Authors of Cilium
# SPDX-License-Identifier: Apache-2.0

# Rewrites the CILIUM_BUILDER_IMAGE reference in every images/*/Dockerfile that
# uses it to the current tag of the dev builder image. With CHECK=true it
# instead verifies (via a git diff) that all references are already up to date.
set -o xtrace
set -o errexit
set -o pipefail
set -o nounset

script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
root_dir="$(git rev-parse --show-toplevel)"

cd "${root_dir}"

image="docker.io/cilium/cilium-builder-dev"
# Tag is derived from the state of the images/builder tree by make-image-tag.sh.
image_tag="$(WITHOUT_SUFFIX=1 "${script_dir}/make-image-tag.sh" images/builder)"

# shellcheck disable=SC2207
used_by=($(git grep -l CILIUM_BUILDER_IMAGE= images/*/Dockerfile))

for i in "${used_by[@]}" ; do
  # sed -i is not portable across GNU/BSD; write to a temp file and move it
  # into place only if the substitution succeeded.
  sed "s|\(CILIUM_BUILDER_IMAGE=\)${image}:.*\$|\1${image}:${image_tag}|" "${i}" > "${i}.sedtmp" && mv "${i}.sedtmp" "${i}"
done

do_check="${CHECK:-false}"

if [ "${do_check}" = "true" ] ; then
  git diff --exit-code "${used_by[@]}"
fi
|
<reponame>1Mathias/PublicNLPA
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd
# importing the modules
from IPython.display import display
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer

# Amharic news snippets used as the toy corpus; one document per list element.
docs = ["የኢትዮጵያ ቤት ኪንግ ፕሪሚዬር ሊግ አሸናፊው ፋሲል ከነማ ትናንት በአዲስ አበባ ሸራተን አዲስ ሆቴል የገቢ ማሰባሰቢያ ቴሌቶን ማዘጋጀቱ ይታወሳል",
        "በገቢ ማሰባሰቢያ ዝግጅቱ ከፍተኛ የመንግሥት የሥራ ኃላፊዎችን ጨምሮ የተለያዩ የኅብረተሰብ ክፍሎች ተሳትፈዋል፡፡ ስለ ተደረገው የገቢ ማሰባሰቢያ ቴሌቶን መግለጫ የሰጡት የክለቡ ፕሬዚዳንትና የጎንደር ከተማ ከንቲባ አቶ ሞላ መልካሙ ቴሌቶኑ ኢትዮጵያዊነት አምሮና ደምቆ የታየበትና የስፖርት ዓላማን ያሳካ ነበር ብለዋል",
        # BUG FIX: the original list was missing the comma after the next
        # element, so Python's implicit string concatenation silently merged
        # documents 3 and 4 into a single document.
        "በቴሌቶኑ አሁንም በስልክና በተለያዩ አማራጮች ቃል የሚገቡ እንዳሉ ሆኖ ከ170 ሚሊዮን ብር በላይ መሰብሰቡም ተገልጿል",
        "ቀዳማዊት እመቤት ዝናሽ ታያቸው በሁሉም ክልሎች የክለቡ አምባሳደሮች መሰየማቸው ፋሲል ከነማ የኢትዮጵያ ክለብ መሆኑን የሚገልጽ ነው ብለዋል። በቴሌቶኑ ከሁሉም የኢትዮጵያ ክፍሎች ድጋፎች መደረጋቸው ሌላኛው ፍሲል የኢትዮጵያ ክለብ መሆኑን የሚያሳይ እንደሆነ ተናግረዋል። በድጋፉ ለተሳተፉ ሁሉም አካላት ምስጋናም አቅርበዋል",
        "በቀጣይ ክለቡ የያዛቸውን ትላልቅ ፕሮጀክቶች ከግብ ለማድረስና ክለቡ በአፍሪካ መድረክ ረዥም ርቀት እንዲጓዝ አሁንም የሁሉም ድጋፍ ያስፈልጋል ተብሏል።",
        "የክለቡ ሥራ አስኪያጅ አቶ አቢዮት ብርሃኑ ክለቡ በቀጣይ ከመንግሥት በጀት ተላቆ የራሱ ቋሚ ሀብት እንዲኖረው ሥራዎች በእቅድ እየተሠሩ ስለመሆናቸው ተናግረዋል",
        "ከቴሌቶኑ የሚገኘው ገቢ ለደሞዝና ለእለታዊ ወጭዎች ሳይሆን አካዳሚ መገንባት ጨምሮ ለተያያዙት ትላልቅ ፕሮጀክቶች እንደሚውልም ተጠቅሷል"
        ]

# Learn the vocabulary and idf weights over the corpus, then vectorize it.
tfidf_vectorizer = TfidfVectorizer(use_idf=True)
tfidf_vectorizer_vectors = tfidf_vectorizer.fit_transform(docs)

# tf-idf vector of the first document.
first_vector_tfidfvectorizer = tfidf_vectorizer_vectors[0]

# get_feature_names() was deprecated in scikit-learn 1.0 and removed in 1.2;
# prefer get_feature_names_out() but fall back so older versions still work.
if hasattr(tfidf_vectorizer, "get_feature_names_out"):
    feature_names = tfidf_vectorizer.get_feature_names_out()
else:
    feature_names = tfidf_vectorizer.get_feature_names()

# place tf-idf values in a pandas data frame
df = pd.DataFrame(first_vector_tfidfvectorizer.T.todense(), index=feature_names,
                  columns=["tfidf"])
d = df.sort_values(by=["tfidf"], ascending=False)
display(d)
|
# Evaluate the 512+0+512-HPMI model on wikitext-103 validation data with the
# shuffle-within-trigrams augmentation; one flag per line for readability.
python transformers/examples/language-modeling/run_language_modeling.py \
    --model_name_or_path train-outputs/512+0+512-HPMI/model \
    --tokenizer_name model-configs/1024-config \
    --eval_data_file ../data/wikitext-103-raw/wiki.valid.raw \
    --output_dir eval-outputs/512+0+512-HPMI/512+0+512-ST-first-256 \
    --do_eval \
    --per_device_eval_batch_size 1 \
    --dataloader_drop_last \
    --augmented \
    --augmentation_function shuffle_within_trigrams_first_half_quarter \
    --eval_function penultimate_quarter_eval
|
#!/bin/sh
# Authors: Cedric Halbronn <cedric.halbronn@sogeti.com>
# TAGS: Android, Device, HTC One, fastboot
#
# Bus 001 Device 002: ID 0bb4:0ff0 HTC (High Tech Computer Corp.)
# Commands below give you: /dev/ttyUSB0
#
# FIX: modprobe(8) takes options before the module name; the original
# `modprobe usbserial -r` put -r after it.
modprobe -r usbserial
modprobe usbserial vendor=0xbb4 product=0xff0
|
#!/bin/bash
# Rewrite the external-host placeholders in the compose config, then deploy
# the cloudns-refresher stack to the local Docker swarm.
DIR0=$(dirname "$0")
echo "DIR0=$DIR0"
# First non-loopback IPv4 address reported by ifconfig.
# NOTE(review): parsing ifconfig output is fragile and differs between
# GNU/BSD; `hostname -I` or `ip -4 addr` would be sturdier -- confirm targets.
EXTERNAL_HOST=$(ifconfig | grep -E "([0-9]{1,3}\.){3}[0-9]{1,3}" | grep -v 127.0.0.1 | awk '{ print $2 }' | cut -f2 -d: | head -n1)
# Quoting fixes word-splitting when the script path or host value contains
# spaces or is empty.
"$(which python3.9)" "$DIR0/scripts/set_external_hosts.py" "$(pwd)" "$EXTERNAL_HOST"
#docker-compose up -d
docker stack deploy -c docker-compose-stack.yml cloudns-refresher
|
<filename>flyway-core/src/main/java/com/googlecode/flyway/core/migration/Migration.java
/**
* Copyright (C) 2010-2012 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.googlecode.flyway.core.migration;
import com.googlecode.flyway.core.dbsupport.DbSupport;
import com.googlecode.flyway.core.util.jdbc.JdbcTemplate;
import java.sql.SQLException;
/**
 * A migration of a single version of the schema.
 *
 * <p>Concrete subclasses provide the migration type, the source location and the
 * actual {@link #migrate} logic; this base class carries the shared metadata
 * (target version, description, script name, checksum) and identity semantics.
 *
 * @author <NAME>
 */
public abstract class Migration implements Comparable<Migration> {
    /** The target schema version of this migration. */
    protected SchemaVersion schemaVersion = SchemaVersion.EMPTY;

    /** The description for the migration history. */
    protected String description;

    /** The script name for the migration history. */
    protected String script;

    /**
     * The checksum of the migration. Sql migrations use a crc-32 checksum of the sql script.
     * Java migrations use a custom checksum.
     */
    protected Integer checksum;

    /** @return The type of migration (INIT, SQL or JAVA) */
    public abstract MigrationType getMigrationType();

    /** @return The checksum of the migration. */
    public Integer getChecksum() {
        return checksum;
    }

    /** @return The schema version after the migration is complete. */
    public SchemaVersion getVersion() {
        return schemaVersion;
    }

    /** @return The description for the migration history. */
    public String getDescription() {
        return description;
    }

    /** @return The script name for the migration history. */
    public String getScript() {
        return script;
    }

    /** Orders migrations by their target schema version only. */
    public int compareTo(Migration o) {
        return getVersion().compareTo(o.getVersion());
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (!(o instanceof Migration)) {
            return false;
        }
        Migration other = (Migration) o;
        // Null-safe, field-by-field comparison -- same semantics as the
        // original single-expression form, unrolled for readability.
        if (checksum == null ? other.checksum != null : !checksum.equals(other.checksum)) {
            return false;
        }
        if (description == null ? other.description != null : !description.equals(other.description)) {
            return false;
        }
        if (schemaVersion == null ? other.schemaVersion != null : !schemaVersion.equals(other.schemaVersion)) {
            return false;
        }
        return script == null ? other.script == null : script.equals(other.script);
    }

    @Override
    public int hashCode() {
        // Standard 31-based combination over the same four fields in the same
        // order as before, so hash values are unchanged.
        int result = (schemaVersion == null) ? 0 : schemaVersion.hashCode();
        result = 31 * result + ((description == null) ? 0 : description.hashCode());
        result = 31 * result + ((script == null) ? 0 : script.hashCode());
        result = 31 * result + ((checksum == null) ? 0 : checksum.hashCode());
        return result;
    }

    /**
     * Performs the migration.
     *
     * @param jdbcTemplate To execute the migration statements.
     * @param dbSupport    The support for database-specific extensions.
     * @throws SQLException Thrown when the migration failed.
     */
    public abstract void migrate(JdbcTemplate jdbcTemplate, DbSupport dbSupport) throws SQLException;

    /**
     * Retrieves the location of the migration.
     *
     * @return source of this migration
     */
    public abstract String getLocation();
}
|
'use strict';
// Entry point: start the Cost Control app as soon as this script is loaded.
// NOTE(review): assumes CostControlApp was defined by a previously loaded
// script -- confirm the script include order in the host page.
CostControlApp.init();
|
#! /bin/bash
# SLURM batch job: REXI finite-difference scalability run (224 MPI ranks x 2
# OpenMP threads on the mpp2 cluster). Directives below must stay ahead of the
# first executable command.
#SBATCH -o /home/martin/workspace/sweet/benchmarks/rexi_tests_lrz_freq_waves/2015_12_27_scalability_rexi_fd/run_rexi_fd_par_m0128_t002_n0128_r0224_a1.txt
###SBATCH -e /home/martin/workspace/sweet/benchmarks/rexi_tests_lrz_freq_waves/2015_12_27_scalability_rexi_fd/run_rexi_fd_par_m0128_t002_n0128_r0224_a1.err
#SBATCH -J rexi_fd_par_m0128_t002_n0128_r0224_a1
#SBATCH --get-user-env
#SBATCH --clusters=mpp2
#SBATCH --ntasks=224
#SBATCH --cpus-per-task=2
#SBATCH --exclusive
#SBATCH --export=NONE
#SBATCH --time=03:00:00
# Pin the two OpenMP threads per rank close together (compact placement).
#declare -x NUMA_BLOCK_ALLOC_VERBOSITY=1
declare -x KMP_AFFINITY="granularity=thread,compact,1,0"
declare -x OMP_NUM_THREADS=2
echo "OMP_NUM_THREADS=$OMP_NUM_THREADS"
echo
# Swap in the exact toolchain/module set this benchmark binary was built with.
. /etc/profile.d/modules.sh
module unload gcc
module unload fftw
module unload python
module load python/2.7_anaconda_nompi
module unload intel
module load intel/16.0
module unload mpi.intel
module load mpi.intel/5.1
module load gcc/5
cd /home/martin/workspace/sweet/benchmarks/rexi_tests_lrz_freq_waves/2015_12_27_scalability_rexi_fd
cd ../../../
. local_software/env_vars.sh
# force to use FFTW WISDOM data
declare -x SWEET_FFTW_LOAD_WISDOM_FROM_FILE="FFTW_WISDOM_nofreq_T0"
time -p mpiexec.hydra -genv OMP_NUM_THREADS 2 -envall -ppn 14 -n 224 ./build/rexi_fd_par_m_tno_a1 --initial-freq-x-mul=2.0 --initial-freq-y-mul=1.0 -f 1 -g 1 -H 1 -X 1 -Y 1 --compute-error 1 -t 50 -R 4 -C 0.3 -N 128 -U 0 -S 0 --use-specdiff-for-complex-array 0 --rexi-h 0.8 --timestepping-mode 1 --staggering 0 --rexi-m=128 -C -5.0
|
<gh_stars>0
import { User } from './user'
import { Action } from './action'
/**
 * A chat message exchanged between users.
 *
 * Uses the `string` primitive instead of the boxed `String` object type:
 * the TypeScript handbook advises against the wrapper types, which do not
 * interoperate cleanly with string literals and primitives.
 */
export interface Message {
  /** Sender of the message, if known. */
  from?: User
  /** Display name to show for the sender. */
  nickname?: string
  /** Message body text. */
  message?: string
  /** Optional action attached to the message. */
  action?: Action
  /** Token identifying the sender; the only required field. */
  token: string
}
|
def sum(k, n):
    """Return the sum of the integers from ``k`` through ``n`` inclusive.

    Matches the original loop-based behavior exactly:
    - returns ``0`` (an int) when ``k > n`` (the range is empty),
    - otherwise returns a float, since each term was accumulated via ``float()``.

    Uses the closed-form arithmetic series instead of an O(n) loop.

    NOTE: this shadows the built-in ``sum``; the name is kept for interface
    compatibility with existing callers.
    """
    if k > n:
        # Empty range: the original loop never ran and returned the int 0.
        return 0
    # (first + last) * count / 2; true division yields a float, like the loop.
    return (k + n) * (n - k + 1) / 2
|
<reponame>mfunkie/react-native-swiper
'use strict';

jest
  .autoMockOff()
  .mock('../../lib/declareOpts')
  .mock('fs');

var fs = require('fs');
var AssetServer = require('../');
var Promise = require('bluebird');

describe('AssetServer', function() {
  pit('should work for the simple case', function() {
    var server = new AssetServer({
      projectRoots: ['/root'],
      assetExts: ['png'],
    });

    fs.__setMockFilesystem({
      'root': {
        imgs: {
          'b.png': 'b image',
          'b@2x.png': 'b2 image',
        }
      }
    });

    return Promise.all([
      server.get('imgs/b.png'),
      server.get('imgs/b@1x.png'),
    ]).then(function(resp) {
      resp.forEach(function(data) {
        expect(data).toBe('b image');
      });
    });
  });

  // BUG FIX: this was `pit.only(...)`, which silently skipped the other two
  // tests in this suite whenever it was run.
  pit('should pick the bigger one', function() {
    var server = new AssetServer({
      projectRoots: ['/root'],
      assetExts: ['png'],
    });

    fs.__setMockFilesystem({
      'root': {
        imgs: {
          'b@1x.png': 'b1 image',
          'b@2x.png': 'b2 image',
          'b@4x.png': 'b4 image',
          'b@4.5x.png': 'b4.5 image',
        }
      }
    });

    // Requesting @3x should resolve to the next larger available scale (@4x).
    return server.get('imgs/b@3x.png').then(function(data) {
      expect(data).toBe('b4 image');
    });
  });

  pit('should support multiple project roots', function() {
    var server = new AssetServer({
      projectRoots: ['/root'],
      assetExts: ['png'],
    });

    fs.__setMockFilesystem({
      'root': {
        imgs: {
          'b.png': 'b image',
        },
        'root2': {
          'newImages': {
            'imgs': {
              'b@1x.png': 'b1 image',
            },
          },
        },
      }
    });

    return server.get('newImages/imgs/b.png').then(function(data) {
      expect(data).toBe('b1 image');
    });
  });
});
#!/usr/bin/env bash
set -eo pipefail

# Aligns this repository's dependency versions with the given remote pom
# (kogito-runtimes, optaplanner, kogito-examples or optaplanner-quickstarts),
# optionally limited to a single Maven module.
#
# Usage: compare_dependencies.sh <groupId:artifactId> <version> [module]

REMOTE_POM=$1
REMOTE_POM_VERSION=$2
MODULE=$3

if [ -z "${REMOTE_POM}" ]; then
  echo "Please provide a remote pom to compare with (groupId:artifactId)"
  # BUG FIX: was `echo 1`, which printed "1" and let the script continue
  # with an empty REMOTE_POM.
  exit 1
fi
if [ -z "${REMOTE_POM_VERSION}" ]; then
  echo "Please provide the version of the remote pom"
  # BUG FIX: was `echo 1` here as well.
  exit 1
fi

mvnArgs="versions:compare-dependencies \
-DremotePom=${REMOTE_POM}:${REMOTE_POM_VERSION} \
-DupdatePropertyVersions=true \
-DupdateDependencies=true \
-DgenerateBackupPoms=false"

if [ -n "${MODULE}" ]; then
  mvnArgs="-pl :${MODULE} ${mvnArgs}"
fi
mvn ${mvnArgs}
|
#!/bin/bash
#
# Thie script runs a full test of our Snowdrift functionality.
#
# AFAIK, testing frameworks for bash don't really exist, so
# I'm gonna have to improvise here.
#
# Errors are fatal
set -e
#
# Define ANSI for our colors (and turning off the color)
#
RED="\033[0;31m"
GREEN="\033[0;32m"
NC="\033[0m"
FILTER='.*'
if test "$1" == "-h" -o "$1" == "--help"
then
echo "! "
echo "! Syntax: $0 [ filter ] "
echo "! "
exit 1
elif test "$1"
then
FILTER=$1
echo "# Setting filter to ${FILTER}..."
fi
#
# Search for a string from the test results, and return the value.
#
# Reads the global RESULTS (raw output of the test run) and greps it for the
# label passed as $1, printing the 5th whitespace-separated field of the
# matching line with ANSI color codes and CR/LF stripped.
#
function getMetric() {
local STR=$1
#
# The Perl code to remove the ANSI color coding was borrowed from:
#
# https://superuser.com/a/561105
#
# NOTE: METRIC is implicitly global; callers consume only the echoed value.
METRIC=$(echo "$RESULTS" | grep "${STR}" | awk '{print $5}' \
| perl -pe 's/\x1b\[[0-9;]*[mG]//g' | tr -d '\r' | tr -d '\n' )
echo "$METRIC"
} # End of getMetric()
#
# Print a colored PASS/FAIL line comparing an actual value to an expected one.
#
# $1 - The label to print
# $2 - The actual value
# $3 - The expected value
#
function compareValues() {
	local LABEL=$1
	local ACTUAL=$2
	local WANT=$3

	local COLOR=$RED
	local STATUS=FAIL
	if [[ "${ACTUAL}" == "${WANT}" ]]
	then
		COLOR=$GREEN
		STATUS=SUCCESS
	fi

	printf "%30s: %-14s ${COLOR}${STATUS}${NC}\n" "${LABEL}" "${ACTUAL} == ${WANT}"
} # End of compareValues()
# Change to this script's directory (paths quoted in case they hold spaces)
pushd "$(dirname "$0")" > /dev/null

#
# How many containers are currently up?
#
NUM=$(docker-compose ps | grep snowdrift | grep " Up " | wc -l | awk '{print $1}')
echo "# Current running containers: ${NUM}"

echo "# "
echo "# Starting up Docker containers..."
echo "# "
echo "# (If containers are being built for the first time, this..."
echo "# ...could take awhile.)"
echo "# "
docker-compose up -d

NUM2=$(docker-compose ps | grep snowdrift | grep " Up " | wc -l | awk '{print $1}')
echo "# Current running containers: ${NUM2}"

if test "$NUM" != "$NUM2"
then
	echo "# "
	echo "# Some containers were started (${NUM2} != ${NUM}),"
	echo "# so let's sleep for 10 seconds so everything spins up..."
	sleep 10
	echo "# ...continuing!"
	echo "# "
fi

echo "# "
echo "# Running Snowdrift tests..."
echo "# "

# NOTE(review): `mktemp -t snowdrift` without a XXXXXX template is a
# BSD/macOS form; confirm the target platform if this must run on GNU/Linux.
TMP=$(mktemp -t snowdrift)
TMP_TESTS="files/snowdrift-tests.txt.tmp"

cat files/snowdrift-tests.txt | grep "${FILTER}" > "${TMP_TESTS}" || true

if test ! -s "${TMP_TESTS}"
then
	echo "! "
	echo "! Zero tests were returned by your filter: ${FILTER}"
	echo "! "
	echo "! Please check your filter and try again, or remove it to run all tests."
	echo "! "
	exit 1
fi

docker-compose exec testing /mnt/snowdrift "/mnt/${TMP_TESTS}" | tee "$TMP"
RESULTS=$(cat "$TMP")
rm -f "$TMP" "$TMP_TESTS"

TOTAL_HOSTS_SUCCESS=$(getMetric "Total Successful Hosts: ")
TOTAL_HOSTS_FAILED=$(getMetric "Total Failed Hosts: ")
TOTAL_CONNS_SUCCESS=$(getMetric "Total Successful Connections: ")
TOTAL_CONNS_FAILED=$(getMetric "Total Failed Connections: ")

# Metric values are quoted: previously an empty metric made the unquoted
# argument vanish, shifting the expected value into the "actual" slot and
# producing a nonsense comparison instead of a clean FAIL.
compareValues "Total Hosts Successful" "$TOTAL_HOSTS_SUCCESS" "5"
compareValues "Total Hosts Failed" "$TOTAL_HOSTS_FAILED" "ZERO"
compareValues "Total Connections Successful" "$TOTAL_CONNS_SUCCESS" "21"
compareValues "Total Connections Failed" "$TOTAL_CONNS_FAILED" "12"

echo "# Done!"
|
/**
 * An axis-aligned rectangle with integer dimensions.
 */
public class Rectangle
{
    // Dimensions in arbitrary integer units.
    private int width;
    private int height;

    /**
     * Creates a rectangle with the given dimensions.
     *
     * @param width  the rectangle's width
     * @param height the rectangle's height
     */
    public Rectangle(int width, int height)
    {
        this.width = width;
        this.height = height;
    }

    /** Creates a degenerate rectangle of zero width and height. */
    public Rectangle()
    {
        this(0, 0);
    }

    /** @return the area (width times height) */
    public int area()
    {
        return width * height;
    }

    /** @return the perimeter (twice the sum of the two sides) */
    public int perimeter()
    {
        return 2 * (width + height);
    }
}
|
package handler
import (
"errors"
"io"
"io/ioutil"
"log"
"net/http"
"github.com/xeipuuv/gojsonschema"
"github.com/sand8080/d-data-transfer/internal/validator"
)
// Processor turns a raw (already schema-validated) JSON request body into a
// Response to be written back to the client.
type Processor func(data []byte) *Response

// JSONHandler validates incoming JSON requests against a schema and hands the
// valid ones to a Processor.
type JSONHandler struct {
	url       string               // endpoint path the schema was registered under
	schema    *gojsonschema.Schema // schema used to validate request bodies
	processor Processor            // business logic for valid requests
}
// NewJSONHandler builds a handler for url, fetching the validation schema for
// that url from the given SchemaProvider.  Returns the provider's error when
// no schema can be obtained.
func NewJSONHandler(url string, p *validator.SchemaProvider, proc Processor) (*JSONHandler, error) {
	schema, err := p.Get(url)
	if err != nil {
		return nil, err
	}
	handler := &JSONHandler{
		url:       url,
		schema:    schema,
		processor: proc,
	}
	return handler, nil
}
// Handle reads the request body, validates it against the handler's schema,
// and passes valid requests to the processor.  Every response — success or
// error — is written as JSON.
func (h JSONHandler) Handle(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")

	// Read the raw request body; the loader wraps the reader so the same
	// bytes can also be fed to the schema validator.
	dataLoader, reader := gojsonschema.NewReaderLoader(r.Body)
	reqData, err := ioutil.ReadAll(reader)
	if err != nil {
		log.Printf("Request data loading error: %v\n", err)
		write(w, NewErrorResponse(http.StatusInternalServerError, err))
		return
	}

	// Request validation.
	valResult, err := h.schema.Validate(dataLoader)
	if err != nil {
		log.Printf("Request validation error: %v\n", err)
		// An empty body surfaces as EOF; report it in user terms.
		// errors.Is also matches wrapped EOFs, unlike the previous `==`.
		if errors.Is(err, io.EOF) {
			err = errors.New("empty request")
		}
		write(w, NewErrorResponse(http.StatusBadRequest, err))
		return
	}
	if !valResult.Valid() {
		log.Printf("Request invalid\n")
		write(w, NewErrorResponseFromValidationErrors(http.StatusBadRequest, valResult.Errors()))
		return
	}

	// Request processing.
	result := h.processor(reqData)

	// TODO implement response validation

	// Writing response (the redundant trailing `return` was dropped).
	write(w, result)
}
// write serialises r to w, sending r.Code as the HTTP status.  WriteHeader
// must come first: the status line cannot change once body bytes are written.
// NOTE(review): the error from w.Write is ignored — presumably acceptable for
// this service, but worth confirming.
func write(w http.ResponseWriter, r *Response) {
	w.WriteHeader(r.Code)
	w.Write(r.Bytes())
}
|
# Load the OpenVINO 2021 environment (adds its libraries and tools to PATH).
source /opt/intel/openvino_2021/bin/setupvars.sh
# Run YOLOv5 object detection on the webcam feed ("cam") using the IR model.
cd Modules/object_detection_yolov5openvino
python3 yolo_openvino.py -m weights/yolov5s.xml -i cam -at yolov5
|
<reponame>dogballs/battle-city
import { Subject } from '../core';
import {
LevelEnemyDiedEvent,
LevelEnemyExplodedEvent,
LevelEnemyHitEvent,
LevelEnemySpawnCompletedEvent,
LevelEnemySpawnRequestedEvent,
LevelMapTileDestroyedEvent,
LevelPlayerDiedEvent,
LevelPlayerSpawnCompletedEvent,
LevelPlayerSpawnRequestedEvent,
LevelPowerupPickedEvent,
LevelPowerupSpawnedEvent,
} from './events';
/**
 * Pub/sub hub for in-level events: gameplay systems communicate through these
 * subjects instead of holding direct references to one another.  Subjects
 * without a type parameter emit no payload.
 */
export class LevelEventBus {
  // Base (the player's HQ) destroyed
  public baseDied = new Subject();

  // Enemy lifecycle
  public enemyAllDied = new Subject();
  public enemyDied = new Subject<LevelEnemyDiedEvent>();
  public enemyExploded = new Subject<LevelEnemyExplodedEvent>();
  public enemyHit = new Subject<LevelEnemyHitEvent>();
  public enemySpawnCompleted = new Subject<LevelEnemySpawnCompletedEvent>();
  public enemySpawnRequested = new Subject<LevelEnemySpawnRequestedEvent>();

  // Map changes
  public mapTileDestroyed = new Subject<LevelMapTileDestroyedEvent>();

  // Level flow: pause, game over, win
  public levelPaused = new Subject();
  public levelUnpaused = new Subject();
  public levelGameOverMoveBlocked = new Subject();
  public levelGameOverCompleted = new Subject();
  public levelWinCompleted = new Subject();

  // Player lifecycle and actions
  public playerDied = new Subject<LevelPlayerDiedEvent>();
  public playerFired = new Subject();
  public playerSlided = new Subject();
  public playerSpawnCompleted = new Subject<LevelPlayerSpawnCompletedEvent>();
  public playerSpawnRequested = new Subject<LevelPlayerSpawnRequestedEvent>();

  // Powerups
  public powerupSpawned = new Subject<LevelPowerupSpawnedEvent>();
  public powerupPicked = new Subject<LevelPowerupPickedEvent>();
  public powerupRevoked = new Subject();
}
|
#!/bin/bash
#
# Build-and-run helpers for the multiple-inheritance experiments.
# Each function compiles one .cc file and runs the resulting binary.
#
# Usage:
#   ./run.sh <function name>

set -o nounset
set -o pipefail
set -o errexit

# Naive solution
mult-inherit() {
g++ -o mult-inherit mult-inherit.cc
./mult-inherit
}

# Tried to fix it but I don't understand what went wrong
virtual() {
g++ -o virtual virtual.cc
./virtual
}

# After doing Python code gen
simple-mi() {
g++ -o simple-mi simple-mi.cc
./simple-mi
}

# StackOverflow
so1() {
g++ -std=c++11 -o so1 so1.cc
./so1
}

# Dispatch: run the function named by the first CLI argument.
"$@"
|
import discord  # run `pip install discord.py` in your terminal
from discord.ext import commands
import asyncio

# discord.py's prefixed-command framework is commands.Bot with the
# `command_prefix` keyword (discord.Bot(prefix=...) is not its API).
bot = commands.Bot(command_prefix="!")


# The original called bot.command(...) as a plain function, which registers
# nothing — it must be applied as a decorator to the coroutine.
@bot.command(aliases=["kek", "kek_command"])
async def kekw(ctx):
    """Ask for confirmation, then spam the KEKW message at the whole guild."""
    await ctx.message.reply(
        "<:KEKW:850745103215231036> Are you sure you want to KEK the server?",
        mention_author=True,
    )

    def check(m):
        # Only accept a "yes" from the member who invoked the command
        # (the original referenced an undefined `user`).
        if m.author.id == ctx.author.id and m.content.lower() == 'yes':
            return True
        return False

    try:
        # `client` was undefined — the bot instance here is named `bot`.
        await bot.wait_for('message', timeout=25.00, check=check)
    except asyncio.TimeoutError:
        await ctx.send('<:KEKW:850745103215231036> YOU DIN\'T ANSWER IN TIME! PLEASE BE QUICKER NIXT TIME!')
        return

    for member in ctx.guild.members:
        await member.send(f"<:KEKW:850745103215231036> yOu hAvE bEeN kEkeD by {ctx.author.mention}!")
        await asyncio.sleep(2)

    await ctx.send(f"<:KEKW:850745103215231036> yOu hAvE bEeN kEkeD by {ctx.author.mention}!")
    await asyncio.sleep(1)
    await ctx.send(f"<:KEKW:850745103215231036> yOu hAvE bEeN kEkeD by {ctx.author.mention}!")
    await asyncio.sleep(1)
    await ctx.send(f"<:KEKW:850745103215231036> yOu hAvE bEeN kEkeD by {ctx.author.mention}!")
    await asyncio.sleep(1)
    await ctx.send(f"<:KEKW:850745103215231036> yOu hAvE bEeN kEkeD by {ctx.author.mention}!")


# `bot.run(paste your discord bot token here)` was a syntax error — the token
# must be passed as a string.
bot.run("PASTE-YOUR-DISCORD-BOT-TOKEN-HERE")
|
<reponame>developit/dom-benchmark<gh_stars>1-10
import "./App.css";
import "./buttons.css";
import React, { Component, Fragment } from "react";
import GitHubForkRibbon from "react-github-fork-ribbon";
import ReactBenchmark from "./benchmarks/ReactBenchmark";
import VanillaBenchmark from "./benchmarks/VanillaBenchmark";
import SmartVanillaBenchmark from "./benchmarks/SmartVanillaBenchmark";
import PreactBenchmark from "./benchmarks/PreactBenchmark";
/**
 * Top-level page for the DOM benchmark site: intro copy followed by the
 * individual framework benchmarks.  Benchmarks are toggled by
 * (un)commenting them near the bottom of render().
 */
class App extends Component {
    render() {
        return (
            <Fragment>
                <GitHubForkRibbon href="https://github.com/Swizec/dom-benchmark">
                    Fork me on GitHub
                </GitHubForkRibbon>
                <div className="App">
                    <div className="App-heading App-flex">
                        <h2>
                            Let's benchmark the{" "}
                            <span className="App-react">DOM</span>
                        </h2>
                    </div>
                    <div className="App-instructions App-flex">
                        <p>
                            Hi 👋<br />I was recently asked to improve chatroom
                            performance. The longer chats became, the more users
                            complained that everything is sluggish.
                        </p>
                        <p>
                            The chatroom was built in Backbone and jQuery and{" "}
                            <a href="https://swizec.com/blog/build-list-virtualization/swizec/8167">
                                I tried many ways to make it better
                            </a>. Everything was hard and cumbersome. In the end
                            I realized that re-rendering the whole list of
                            messages, even without a smart framework, is fast
                            enough. That made me wonder
                        </p>
                        <p>
                            <em style={{ fontSize: "1.2em" }}>
                                "Did the DOM get fast?"
                            </em>{" "}
                            🧐
                        </p>
                        <p>
                            Below are a few benchmarks. Click buttons to see
                            your own results. Charts for what I saw. :)
                        </p>
                        <ul>
                            <li>create a long list</li>
                            <li>append to it</li>
                            <li>prepend to it</li>
                            <li>insert in the middle</li>
                            <li>remove elements</li>
                        </ul>
                        <p>
                            Benchmarks focus on long flat lists of nodes because
                            that's pretty common. Think chat window with
                            thousands of messages. Our goal is to find which is
                            faster
                        </p>
                        <ul>
                            <li>Raw DOM with vanilla JS</li>
                            <li>React</li>
                            <li>Vue</li>
                            <li>Preact</li>
                        </ul>
                        <p>
                            Don't worry, benchmarks are implemented in the
                            respective framework internally. I'm just using
                            React for the skeleton because it's what I'm used to
                            and <code>nwb</code> made it quick to set up
                            compiling and stuff. You can see{" "}
                            <a href="https://github.com/Swizec/dom-benchmark">
                                the code on GitHub
                            </a>.
                        </p>
                        <hr />
                    </div>
                    {/* Toggle individual benchmarks here. */}
                    {/* <ReactBenchmark /> */}
                    {/* <VanillaBenchmark /> */}
                    {/* <SmartVanillaBenchmark /> */}
                    <PreactBenchmark />
                    <h2>Vue coming soon ...</h2>
                    {/* NOTE(review): PreactBenchmark is rendered just above,
                        yet this heading still says "coming soon" — confirm
                        which is current. */}
                    <h2>Preact coming soon ...</h2>
                    <div style={{ padding: "3vh" }} />
                </div>
            </Fragment>
        );
    }
}

export default App;
|
package com.boot.feign.log.fallback;
import com.boot.feign.log.fallback.impl.LoginLogFallbackFeignImpl;
import com.boot.pojo.LoginLog;
import org.springframework.cloud.openfeign.FeignClient;
import org.springframework.stereotype.Component;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.ResponseBody;
import java.util.List;
// Splits fallback handling: the endpoints that need a custom fallback are
// implemented in LoginLogFallbackFeignImpl, while the rest are plain
// declarations — keeping the two concerns separate.
// (translated from the original Chinese comment)
@FeignClient(value = "cloud-yblog-log",fallback = LoginLogFallbackFeignImpl.class)
@Component
public interface LoginLogFallbackFeign {

    /**
     * Fetches a page of login-log data as the remote service's JSON string.
     * NOTE(review): response shape is defined by cloud-yblog-log — confirm
     * against that service before relying on a specific format.
     *
     * @param page  1-based page number (defaults to 1)
     * @param limit page size (defaults to 10)
     */
    @ResponseBody
    @RequestMapping(path = "/feign/loginlog/loginLogData")
    public String loginLogData(@RequestParam(value = "page", defaultValue = "1") int page,
                               @RequestParam(value = "limit", defaultValue = "10") int limit);

    /**
     * Fetches a page of login-log entities.
     *
     * @param page  page number
     * @param limit page size
     */
    @ResponseBody
    @GetMapping(path = "/feign/loginlog/selectLoginLogAll")
    public List<LoginLog> selectLoginLogAll(@RequestParam("page") int page,
                                            @RequestParam("limit") int limit);

    /** @return total number of login-log entries */
    @ResponseBody
    @GetMapping(path = "/feign/loginlog/loginLogCount")
    public int loginLogCount();
}
|
<filename>zod-lib/sensitive-service/sensitive-service-app/src/main/java/com/infamous/framework/sensitive/service/SaltedPasswordEncryptor.java<gh_stars>0
package com.infamous.framework.sensitive.service;
import com.infamous.framework.sensitive.core.MessageDigestAlgorithm;
import java.security.SecureRandom;
/**
 * Password encryptor that prefixes a random numeric salt and iterates the
 * digest.  Stored form: {@code "<salt>:<iterated-hash>"}.
 */
public class SaltedPasswordEncryptor extends DefaultPasswordEncryptor implements PasswordEncryptor {

    private static final int MAX_SEED = 99999999;
    private static final int HASH_ITERATIONS = 1000;

    private final SecureRandom RANDOM = new SecureRandom();

    /** Encrypts with a freshly generated random numeric salt. */
    @Override
    public String encrypt(MessageDigestAlgorithm algorithm, String sensitiveString) {
        String seed = Integer.toString(RANDOM.nextInt(MAX_SEED));
        return encrypt(algorithm, sensitiveString, seed);
    }

    /**
     * Encrypts with the supplied salt: hash(salt + password), iterated
     * HASH_ITERATIONS times, returned as "salt:hash".
     */
    public String encrypt(MessageDigestAlgorithm algorithm, String sensitiveString, String salt) {
        String digest = salt + sensitiveString;
        int round = 0;
        while (round < HASH_ITERATIONS) {
            digest = algorithm.hash(digest);
            round++;
        }
        return salt + ":" + digest;
    }

    /**
     * Re-encrypts passwordToCheck using the salt embedded in storedPassword
     * and compares (case-insensitively, as hex digests may differ in case).
     *
     * @throws IllegalArgumentException if either argument is null or the
     *         stored value carries no salt prefix
     */
    @Override
    public boolean matches(MessageDigestAlgorithm algorithm, String passwordToCheck, String storedPassword) {
        if (passwordToCheck == null) {
            throw new IllegalArgumentException("passwordToCheck can not be null");
        }
        if (storedPassword == null) {
            throw new IllegalArgumentException("storedPassword can not be null");
        }
        int separatorIdx = storedPassword.indexOf(':');
        if (separatorIdx < 1) {
            throw new IllegalArgumentException("storedPassword does not contain salt");
        }
        String embeddedSalt = storedPassword.substring(0, separatorIdx);
        return encrypt(algorithm, passwordToCheck, embeddedSalt).equalsIgnoreCase(storedPassword);
    }
}
|
import org.apache.ibatis.session.SqlSession;
import org.apache.ibatis.session.SqlSessionFactory;
import java.util.List;
/**
 * Read-side service for todo items, backed by MyBatis.
 */
public class ItemService {

    // Factory from which a fresh SQL session is opened per call.
    private SqlSessionFactory factory;

    /**
     * @param factory the MyBatis session factory to use
     */
    public ItemService(SqlSessionFactory factory) {
        this.factory = factory;
    }

    /**
     * Loads every item via the "io.github.mattn.todo.select" mapping.
     * The session is closed automatically by try-with-resources.
     *
     * @return all items found by the select mapping
     */
    public List<Item> getItemsAsJson() {
        try (SqlSession sqlSession = factory.openSession()) {
            return sqlSession.selectList("io.github.mattn.todo.select");
        }
    }
}
|
#!/usr/bin/env bash
# GDPR
#
# Daily cron job that prunes old logged IP addresses (data minimisation).
#
# How to install:
# $ sudo cp $THISFILE /etc/cron.daily/mlpvc-rr
# $ sudo chmod +x /etc/cron.daily/mlpvc-rr
# $ sudo editor /etc/cron.daily/mlpvc-rr

# Change path (no trailing slash)
SCRIPTS_DIR="/path/to/scripts"

# Refuse to run while the path above is missing or unconfigured.
if [ ! -d "$SCRIPTS_DIR" ]; then
>&2 echo "$SCRIPTS_DIR is not a folder"
exit 1
fi

/usr/bin/php -f "${SCRIPTS_DIR}/clear_old_logged_ips.php"
|
#!/usr/bin/env bash
#
# CI helper: verifies that generated artifacts (schema, examples, code
# formatting) are up to date.  On human PR branches it commits and pushes
# fixes; on protected/bot branches it fails the build instead.

set -euo pipefail

# Strip the refs/heads/ prefix to get the plain branch name.
GIT_BRANCH="${GITHUB_REF/refs\/heads\//}"

git checkout $GIT_BRANCH
echo "On branch $GIT_BRANCH."

# Only push on human pull request branches. Exclude release, prerelease, and bot branches.
if [ "$GIT_BRANCH" != "stable" ] && [ "$GIT_BRANCH" != "next" ] && [[ "$GIT_BRANCH" != dependabot/* ]]; then
  PUSH_BRANCH=true
  echo "Will try to push changes."
else
  PUSH_BRANCH=false
  echo "Will not push changes."
fi

echo ""
echo "------- Checking Schema -------"
echo ""

# Commit the schema if outdated
if ! git diff --exit-code ./build/vega-lite-schema.json; then
  if [ "$PUSH_BRANCH" = true ]; then
    git add ./build/vega-lite-schema.json
    git commit -m "chore: update schema [ci skip]"
  else
    echo "Outdated schema."
    exit 1
  fi
fi

echo ""
echo "------- Checking Examples -------"
echo ""

# Rebuilding every SVG is expensive, so it only happens when explicitly
# requested via a [SVG] marker in the latest commit message.
if git log -1 | grep "\[SVG\]" && [ "$PUSH_BRANCH" = true ]; then
  echo "As the latest commit includes [SVG]. Rebuilding all SVGs."
  yarn build:examples-full
else
  yarn build:examples
fi

# Commit examples if outdated
# Note: we need to add all files first so that new files are included in `git diff --cached` too.
git add examples

if [ "$PUSH_BRANCH" = true ]; then
  if ! git diff --cached --word-diff=color --exit-code examples; then
    git commit -m "chore: update examples [ci skip]"
  fi
else
  # Don't diff SVG as floating point calculation is not always consistent
  if ! git diff --cached --word-diff=color --exit-code './examples/compiled/*.vg.json' './examples/specs/normalized/*.vl.json'; then
    echo "Outdated examples."
    exit 1
  fi
fi

echo ""
echo "------- Checking Code Formatting -------"
echo ""

if [ "$PUSH_BRANCH" = true ]; then
  if ! git diff --exit-code site src test test-runtime; then
    git add --all
    git commit -m "style: auto-formatting [ci skip]"
  fi

  # should be empty
  git status

  # Then push all the changes (schema, examples, formatting)
  git pull --rebase
  git push
fi

exit 0
|
<reponame>handsomekuroji/handsomekuroji-gatsby
import React from 'react'
import { useStaticQuery, graphql } from 'gatsby'
import styled from 'styled-components'
import small from '~src/images/main/logo-small.svg'
// Styled wrapper that makes the AMP image scale fluidly to its container.
const Wrapper = styled.div`
  amp-img {
    height: auto;
    vertical-align: bottom;
    width: 100%;
  }
`
export default function Logo() {
const query = useStaticQuery(graphql`
query AmpLogoQuery {
site {
siteMetadata {
title
}
}
}
`).site.siteMetadata
return (
<Wrapper>
<amp-img src={small} width="60" height="58" alt={query.title} aria-hidden="true"></amp-img>
</Wrapper>
)
}
|
#!/bin/bash
#
# Interactive installer/updater for the tecobot discord bot.

echo "Are you sure to install the discord bot here? " && pwd
echo "Type yes or no"
read userinput

if [ "$userinput" = "yes" ]
then
	echo "Starting the installer"
	file="discord/tecobot/index.js"

	if [ -f "$file" ]
	then
		clear
		echo "$file already exists."
		echo "Do you want to try an update of the package"
		echo "Type yes or no"
		read userinput
		if [ "$userinput" = "yes" ]
		then
			# The update has to run inside the checkout: the original ran
			# `git fetch && git pull` in the current directory (not a git
			# repo) before cd'ing into discord/tecobot.
			cd discord/tecobot && git fetch && git pull && npm install
		else
			echo "Installer script is now stopped"
		fi
	else
		echo "$file does not exist. Installing package from github..."
		mkdir discord && cd discord && git clone https://github.com/ZombyMedia/tecobot.git && cd tecobot && npm install && echo "Installation completed"
	fi
else
	echo "Installer script is now stopped"
fi
|
const server = require("./server");
const { PORT } = require("./secrets.js");

// Start the HTTP server on the configured port.
server.listen(PORT, () => {
  console.log(`listening on port ${PORT}`);
});
|
package org.para.file.execute;
import org.para.file.FileParallelExecute;
/**
 * Byte-oriented variant of {@link FileParallelExecute}.
 *
 * Currently inherits all behaviour unchanged; it exists as a named
 * extension point for byte-based parallel file execution.
 * NOTE(review): no behaviour is added here — confirm whether specialisation
 * is still planned before keeping/removing this subclass.
 *
 * @author liuyan
 * @Email:<EMAIL>
 * @version 0.1
 * @Date: 2013-8-26
 * @Copyright: 2013 story All rights reserved.
 */
public class ByteFileParallelExecute extends FileParallelExecute {
}
|
/**
 * Build a flat <ul> menu from item descriptors.
 *
 * @param {Array<{itemKey: string, itemIcon: string, linkText: string}>} menuItems
 * @returns {string} HTML string with one <li><a> entry per item.
 */
function generateMenuMarkup(menuItems) {
  const entries = menuItems.map(
    item => `<li><a href="#${item.itemKey}"><i class="${item.itemIcon}"></i>${item.linkText}</a></li>`
  );
  return `<ul>${entries.join('')}</ul>`;
}
// Example usage
// NOTE(review): MenuItem and strings are assumed to be in scope (defined or
// imported elsewhere) — this snippet does not define them.
const menuItems = [
  {
    itemKey: MenuItem.Information,
    linkText: strings.InformationMenuLabel,
    itemIcon: 'ThumbnailView',
  },
  {
    itemKey: MenuItem.Achievements,
    linkText: strings.AchievementsMenuLabel,
    itemIcon: 'Trophy',
  },
  {
    itemKey: MenuItem.Performance,
    linkText: strings.PerformanceMenuLabel,
    itemIcon: 'Chart',
  },
];
const menuMarkup = generateMenuMarkup(menuItems);
console.log(menuMarkup);
|
<gh_stars>1-10
var assert = require('assert');
var artCli = require('..');

var plugins = new artCli.plugins();

describe('Plugins Tests', function () {
    beforeEach(function () {
        // Crude rate-limit between calls to the service.
        // NOTE(review): this busy-waits a full second per test; consider a
        // real async delay if the suite grows.
        function sleep(delay) {
            var start = new Date().getTime();
            while (new Date().getTime() < start + delay);
        };
        sleep(1000);
    });

    // Each test RETURNS its promise.  Previously the promises were left
    // dangling (rejections only logged), so a failed assertion or a rejected
    // call could never actually fail the test — mocha reported success
    // regardless of the outcome.
    it('get plugin code', function () {
        return plugins.getPluginCode("internalUser.groovy").then(function (result) {
            assert.notEqual(result, "");
        });
    });

    it('get plugin info', function () {
        return plugins.getPluginsInfo().then(function (result) {
            assert.notEqual(result, "");
        });
    });

    it('reload plugins', function () {
        return plugins.reloadPlugins().then(function (result) {
            assert.notEqual(result, "");
        });
    });
});
|
<gh_stars>1000+
/*
* Tencent is pleased to support the open source community by making 蓝鲸 available.
* Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.
* Licensed under the MIT License (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
* http://opensource.org/licenses/MIT
* Unless required by applicable law or agreed to in writing, software distributed under
* the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package types
import "fmt"
const (
	// skip 0.
	_ = iota
	// start error code from 100000001
	HttpRequestFailed = 100000000 + iota
)

// ErrorDetail is a code-plus-message error payload.
type ErrorDetail struct {
	// Blank field forces keyed struct literals, so new fields can be added
	// later without breaking callers.
	_ struct{}

	// error code.
	Code int
	// error message details.
	Message string
}

// Error implements the error interface, rendering both code and message.
func (e *ErrorDetail) Error() string {
	return fmt.Sprintf("error code: %d, error message: %s", e.Code, e.Message)
}
|
/* Routine optimized for shuffling a buffer for a type size of 2 bytes. */
/* De-interleaves 2-byte elements into two byte-planes: byte 0 of every
   element goes to the first total_elements bytes of dest, byte 1 to the
   second.  Processes 16 elements (32 bytes) per iteration with NEON
   vld2q/vst1q.  NOTE(review): assumes vectorizable_elements is a multiple
   of 16; any remainder must be handled by a scalar path elsewhere. */
static void
shuffle2_neon(uint8_t* const dest, const uint8_t* const src,
const size_t vectorizable_elements, const size_t total_elements)
{
size_t i, j, k;  /* j is declared but unused in this routine */
static const size_t bytesoftype = 2;
uint8x16x2_t r0;

for(i = 0, k = 0; i < vectorizable_elements*bytesoftype; i += 32, k++) {
/* Load (and permute) 32 bytes to the structure r0 */
/* vld2q_u8 de-interleaves: val[0] gets even bytes, val[1] odd bytes. */
r0 = vld2q_u8(src + i);
/* Store the results in the destination vector */
vst1q_u8(dest + total_elements*0 + k*16, r0.val[0]);
vst1q_u8(dest + total_elements*1 + k*16, r0.val[1]);
}
}
|
#!/usr/bin/env python
"""
Amazon Fire TV server
RESTful interface for communication over a network via ADB
with Amazon Fire TV devices with ADB Debugging enabled.
From https://developer.amazon.com/public/solutions/devices/fire-tv/docs/connecting-adb-over-network:
Turn on ADB Debugging:
1. From the main (Launcher) screen, select Settings.
2. Select System > Developer Options.
3. Select ADB Debugging.
Find device IP:
1. From the main (Launcher) screen, select Settings.
2. Select System > About > Network.
"""
import argparse
import os
import re
from os.path import expanduser
import yaml
import logging
from flask import Flask, jsonify, request, abort
from firetv import FireTV
app = Flask(__name__)

# device_id -> FireTV instance for every registered device.
devices = {}
# NOTE(review): config_data appears unused in this module — confirm before removing.
config_data = None

# Validation patterns: device ids are word chars/dashes; app ids are
# dot-separated alphanumerics (Android package names).
valid_device_id = re.compile('^[-\w]+$')
valid_app_id = re.compile('^[A-Za-z0-9\.]+$')
def is_valid_host(host):
    """ Check if host is valid.

    Performs two simple checks:
        - Has host and port separated by ':'.
        - Port is a positive digit.

    :param host: Host in <address>:<port> format.
    :returns: Valid or not.
    """
    parts = host.split(':')
    # Both conditions must hold, matching the docstring.  The previous `or`
    # accepted any "host:port"-shaped string regardless of the port, and
    # raised IndexError for strings without a ':' at all.
    return len(parts) == 2 and parts[1].isdigit()
def is_valid_device_id(device_id):
    """ Check if device identifier is valid.

    A valid device identifier contains only ascii word characters or dashes.

    :param device_id: Device identifier
    :returns: Valid or not.
    """
    match = valid_device_id.match(device_id)
    if match:
        return match
    logging.error("A valid device identifier contains "
                  "only ascii word characters or dashes. "
                  "Device '%s' not added.", device_id)
    return match
def is_valid_app_id(app_id):
    """ check if app identifier is valid.

    To restrict access a valid app is one with only a-z, A-Z, and '.'.
    It is possible to make this less restrictive using the regex above.

    :param app_id: Application identifier
    :returns: Valid or not
    """
    # NOTE(review): the regex also accepts digits (0-9), slightly wider than
    # the docstring states.
    return valid_app_id.match(app_id)
def add(device_id, host, adbkey='', adb_server_ip='', adb_server_port=5037):
    """ Add a device.

    Creates FireTV instance associated with device identifier.

    :param device_id: Device identifier.
    :param host: Host in <address>:<port> format.
    :param adbkey: The path to the "adbkey" file
    :param adb_server_ip: the IP address for the ADB server
    :param adb_server_port: the port for the ADB server
    :returns: Added successfully or not (truthy/falsy).
    """
    valid = is_valid_device_id(device_id) and is_valid_host(host)
    if not valid:
        # Preserve the exact falsy value for callers that serialise it.
        return valid
    devices[device_id] = FireTV(str(host), str(adbkey), str(adb_server_ip), str(adb_server_port))
    return valid
@app.route('/devices/add', methods=['POST'])
def add_device():
    """ Add a device via HTTP POST.

    POST JSON in the following format ::

        {
            "device_id": "<your_device_id>",
            "host": "<address>:<port>",
            "adbkey": "<path to the adbkey file>"
        }

    :returns: JSON ``{"success": <bool>}``.
    """
    req = request.get_json()
    success = False
    # Attempt the add only when both mandatory keys are present; optional keys
    # fall back to the same defaults add() itself uses.
    if 'device_id' in req and 'host' in req:
        success = add(req['device_id'], req['host'], req.get('adbkey', ''), req.get('adb_server_ip', ''), req.get('adb_server_port', 5037))
    return jsonify(success=success)
@app.route('/devices/list', methods=['GET'])
def list_devices():
    """ List devices via HTTP GET. """
    output = {
        device_id: {'host': device.host, 'state': device.state}
        for device_id, device in devices.items()
    }
    return jsonify(devices=output)
@app.route('/devices/state/<device_id>', methods=['GET'])
def device_state(device_id):
    """ Get device state via HTTP GET.

    :returns: JSON ``{"state": ...}``, or ``{"success": false}`` for unknown ids.
    """
    if device_id not in devices:
        return jsonify(success=False)
    return jsonify(state=devices[device_id].state)
@app.route('/devices/<device_id>/apps/current', methods=['GET'])
def current_app(device_id):
    """ Get currently running app.

    403 for malformed device ids, 404 for unknown devices or when no
    foreground app could be determined.
    """
    # NOTE: this function name shadows flask.current_app at module scope;
    # harmless here since this module never uses Flask's proxy of that name.
    if not is_valid_device_id(device_id):
        abort(403)
    if device_id not in devices:
        abort(404)
    current = devices[device_id].current_app
    if current is None:
        abort(404)
    return jsonify(current_app=current)
@app.route('/devices/<device_id>/apps/running', methods=['GET'])
def running_apps(device_id):
    """ Get running apps via HTTP GET.

    403 for malformed device ids, 404 for unknown devices.
    """
    if not is_valid_device_id(device_id):
        abort(403)
    if device_id not in devices:
        abort(404)
    return jsonify(running_apps=devices[device_id].running_apps)
@app.route('/devices/<device_id>/apps/state/<app_id>', methods=['GET'])
def get_app_state(device_id, app_id):
    """ Get the state of the requested app

    403 for malformed ids, 404 for unknown devices.
    """
    if not is_valid_app_id(app_id):
        abort(403)
    if not is_valid_device_id(device_id):
        abort(403)
    if device_id not in devices:
        abort(404)
    app_state = devices[device_id].app_state(app_id)
    # Both keys carry the same value — presumably kept for backward
    # compatibility with older clients; confirm before dropping either.
    return jsonify(state=app_state, status=app_state)
@app.route('/devices/<device_id>/apps/<app_id>/state', methods=['GET'])
def get_app_state_alt(device_id, app_id):
    """ Alternate URL layout for get_app_state (same checks, same payload). """
    return get_app_state(device_id, app_id)
@app.route('/devices/action/<device_id>/<action_id>', methods=['GET'])
def device_action(device_id, action_id):
    """ Initiate device action via HTTP GET. """
    device = devices.get(device_id)
    action = getattr(device, action_id, None) if device is not None else None
    if callable(action):
        action()
        return jsonify(success=True)
    return jsonify(success=False)
@app.route('/devices/<device_id>/apps/<app_id>/start', methods=['GET'])
def app_start(device_id, app_id):
    """ Starts an app with corresponding package name

    403 for malformed ids, 404 for unknown devices.
    """
    if not is_valid_app_id(app_id):
        abort(403)
    if not is_valid_device_id(device_id):
        abort(403)
    if device_id not in devices:
        abort(404)
    success = devices[device_id].launch_app(app_id)
    return jsonify(success=success)
@app.route('/devices/<device_id>/apps/<app_id>/stop', methods=['GET'])
def app_stop(device_id, app_id):
    """ stops an app with corresponding package name

    403 for malformed ids, 404 for unknown devices.
    """
    if not is_valid_app_id(app_id):
        abort(403)
    if not is_valid_device_id(device_id):
        abort(403)
    if device_id not in devices:
        abort(404)
    success = devices[device_id].stop_app(app_id)
    return jsonify(success=success)
@app.route('/devices/connect/<device_id>', methods=['GET'])
def device_connect(device_id):
    """ Force a connection attempt via HTTP GET. """
    device = devices.get(device_id)
    if device is None:
        return jsonify(success=False)
    device.connect()
    return jsonify(success=True)
def _parse_config(config_file_path):
    """ Parse Config File from yaml file.

    :param config_file_path: Path to the YAML configuration file.
    :returns: Parsed configuration (plain dicts/lists/scalars).
    """
    # safe_load: yaml.load without an explicit Loader is deprecated and can
    # instantiate arbitrary Python objects from the file; a config file only
    # needs plain YAML types.  `with` guarantees the handle is closed even if
    # parsing raises.
    with open(config_file_path, 'r') as config_file:
        return yaml.safe_load(config_file)
def _add_devices_from_config(args):
    """ Add devices from config.

    :param args: Parsed CLI args (uses ``args.config`` and ``args.default``).
    :raises ValueError: when the config clashes with the ``--default`` device:
        an entry may not be named "default" and may not reuse its host.
    """
    config = _parse_config(args.config)
    for device in config['devices']:
        if args.default:
            # Guard against colliding with the implicit "default" device that
            # main() registers for the --default host.
            if device == "default":
                raise ValueError('devicename "default" in config is not allowed if default param is set')
            if config['devices'][device]['host'] == args.default:
                raise ValueError('host set in default param must not be defined in config')
        add(device, config['devices'][device]['host'], config['devices'][device].get('adbkey', ''),
            config['devices'][device].get('adb_server_ip', ''), config['devices'][device].get('adb_server_port', 5037))
def main():
    """ Set up the server.

    Parses CLI options, registers devices from the config file and/or the
    ``--default`` host, then serves the Flask app.
    """
    parser = argparse.ArgumentParser(description='AFTV Server')
    parser.add_argument('-p', '--port', type=int, help='listen port', default=5556)
    parser.add_argument('-d', '--default', help='default Amazon Fire TV host', nargs='?')
    parser.add_argument('-c', '--config', type=str, help='Path to config file')
    args = parser.parse_args()
    if args.config:
        _add_devices_from_config(args)
    # Use the user's own adb key if one exists; otherwise connect without one.
    home = expanduser("~")
    adb_key = os.path.join(home, ".android", "adbkey")
    if not os.path.exists(adb_key):
        adb_key = ''
    if args.default and not add('default', args.default, adbkey=adb_key):
        exit('invalid hostname')
    # 0.0.0.0: listen on all interfaces.
    app.run(host='0.0.0.0', port=args.port)

if __name__ == '__main__':
    main()
|
<gh_stars>0
import React, { Component } from 'react';
import './CartDisplay.css'
export default class CartDisplay extends Component {
handleChange(index, event) {
let fieldName = this.props.productDetails.templateFields.fieldlist.field[index].fieldname;
let templateData = [...this.props.templateData];
templateData[index] = { templateDataName: fieldName, templateDataValue: event.target.value };
this.props.updateTemplateData(templateData)
}
render() {
var templateForm;
if (this.props.productDetails.hasTemplate) {
return this.props.productDetails.templateFields.fieldlist.field.map((value, index) => {
if (value.type === 'MULTILINE') {
return <input key={index} className={'TemplateField'} type="text" placeholder={`${value.fieldname}`} onChange={this.handleChange.bind(this, index)} />
} else if (value.type === 'SINGLELINE') {
return <input key={index} className={'TemplateField'} type="text" placeholder={`${value.fieldname}`} onChange={this.handleChange.bind(this, index)} />
} else if (value.type === 'SEPARATOR') {
//something is going on here that when this field's value is changed it causes a 400 error from the API.
//the API is getting the data in the same format as other fields in the form.
return <select key={index} className={'TemplateField'} onChange={this.handleChange.bind(this, index)}>
<option value="en-us">English - US</option>
<option value="de">German</option>
<option value="fr">French</option>
<option value="es">Spanish</option>
<option value="it">Italian</option>
</select>
}
})
}
return (
<div>
<p> {this.props.productInCart.name} </p>
<form> {templateForm} </form>
</div>
)
}
}
|
<gh_stars>10-100
package com.justinblank.strings.Search;
import java.util.Collection;
import java.util.List;
/**
 * Factory helpers for building string-search methods.
 */
public final class SearchMethods {

    private SearchMethods() {}

    /**
     * Builds an Aho-Corasick search method over the given strings, picking
     * the ASCII-specialised implementation when every string is pure ASCII.
     *
     * @throws IllegalArgumentException if the collection is empty
     */
    public static SearchMethod makeSearchMethod(Collection<String> strings) {
        if (strings.isEmpty()) {
            throw new IllegalArgumentException("Cannot create SearchMethod using empty list of strings");
        }
        return allAscii(strings)
                ? AsciiAhoCorasickBuilder.buildAhoCorasick(strings)
                : UnicodeAhoCorasickBuilder.buildAhoCorasick(strings);
    }

    // I keep double-checking StringUtils for where I missed this
    /** @return whether every char of s is in the 7-bit ASCII range */
    protected static boolean allAscii(String s) {
        return s.chars().allMatch(c -> c <= '\u007F');
    }

    /** @return whether every string in the collection is pure ASCII */
    protected static boolean allAscii(Collection<String> strings) {
        for (String candidate : strings) {
            if (!allAscii(candidate)) {
                return false;
            }
        }
        return true;
    }
}
|
// Copyright 2020 ChainSafe Systems
// SPDX-License-Identifier: Apache-2.0, MIT
package test
import (
"context"
"os"
"sort"
"testing"
"time"
"github.com/ChainSafe/fil-secondary-retrieval-markets/cache"
"github.com/ChainSafe/fil-secondary-retrieval-markets/client"
"github.com/ChainSafe/fil-secondary-retrieval-markets/network"
"github.com/ChainSafe/fil-secondary-retrieval-markets/provider"
"github.com/ChainSafe/fil-secondary-retrieval-markets/shared"
block "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
ds "github.com/ipfs/go-datastore"
blockstore "github.com/ipfs/go-ipfs-blockstore"
logging "github.com/ipfs/go-log/v2"
libp2p "github.com/libp2p/go-libp2p"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/stretchr/testify/require"
)
var testTimeout = time.Second * 30
// newTestNetwork spins up a fresh libp2p host, wraps it in a Network, and
// registers cleanup (network stop + host close) with the test's lifecycle.
func newTestNetwork(t *testing.T) *network.Network {
	ctx := context.Background()
	h, err := libp2p.New(ctx)
	require.NoError(t, err)

	net, err := network.NewNetwork(h)
	require.NoError(t, err)

	t.Cleanup(func() {
		require.NoError(t, net.Stop())
		require.NoError(t, h.Close())
	})
	return net
}
// newTestBlockstore returns an in-memory blockstore backed by a map datastore.
func newTestBlockstore() blockstore.Blockstore {
	nds := ds.NewMapDatastore()
	return blockstore.NewBlockstore(nds)
}
// mockRetrievalProviderStore is a minimal provider store backed directly by a
// blockstore.
type mockRetrievalProviderStore struct {
	bs blockstore.Blockstore
}

// newTestRetrievalProviderStore creates a store over a fresh in-memory blockstore.
func newTestRetrievalProviderStore() *mockRetrievalProviderStore {
	return &mockRetrievalProviderStore{
		bs: newTestBlockstore(),
	}
}

// Has reports whether the block identified by params.PayloadCID is stored locally.
func (s *mockRetrievalProviderStore) Has(params shared.Params) (bool, error) {
	return s.bs.Has(params.PayloadCID)
}
// basicTester collects query responses delivered via the client subscription.
type basicTester struct {
	respCh chan *shared.QueryResponse
}

// newBasicTester returns a tester with an unbuffered response channel.
func newBasicTester() *basicTester {
	return &basicTester{
		respCh: make(chan *shared.QueryResponse),
	}
}

// handleResponse forwards a received response to respCh. It blocks until a
// test goroutine reads from the channel (the channel is unbuffered).
func (bt *basicTester) handleResponse(resp shared.QueryResponse) {
	bt.respCh <- &resp
}
// TestMain raises log verbosity for the client and provider packages before
// running the suite.
func TestMain(m *testing.M) {
	err := logging.SetLogLevel("client", "debug")
	if err != nil {
		panic(err)
	}
	err = logging.SetLogLevel("provider", "debug")
	if err != nil {
		panic(err)
	}
	// NOTE(review): unconditional 10s delay before every test run — nothing
	// visible in this file depends on it; confirm whether it can be removed.
	time.Sleep(time.Second * 10)
	os.Exit(m.Run())
}
// TestBasic exercises the happy path: one provider holding one block answers
// a single client's query with the default pricing terms.
func TestBasic(t *testing.T) {
	pnet := newTestNetwork(t)
	cnet := newTestNetwork(t)
	s := newTestRetrievalProviderStore()
	err := pnet.Connect(cnet.AddrInfo())
	require.NoError(t, err)
	p := provider.NewProvider(pnet, s, cache.NewMockCache(0))
	c := client.NewClient(cnet)
	// add data block to blockstore
	b := block.NewBlock([]byte("noot"))
	testCid := b.Cid()
	err = s.bs.Put(b)
	require.NoError(t, err)
	params := shared.Params{PayloadCID: testCid}
	// start provider
	err = p.Start()
	require.NoError(t, err)
	// start client
	err = c.Start()
	require.NoError(t, err)
	// subscribe to responses
	bt := newBasicTester()
	unsubscribe := c.SubscribeToQueryResponses(bt.handleResponse, params)
	defer unsubscribe()
	// submit query
	err = c.SubmitQuery(context.Background(), params)
	require.NoError(t, err)
	// assert response was received with the provider's default terms
	expected := &shared.QueryResponse{
		Params:                  params,
		Provider:                pnet.PeerID(),
		PricePerByte:            provider.DefaultPricePerByte,
		PaymentInterval:         provider.DefaultPaymentInterval,
		PaymentIntervalIncrease: provider.DefaultPaymentIntervalIncrease,
	}
	select {
	case resp := <-bt.respCh:
		require.NotNil(t, resp)
		require.Equal(t, expected, resp)
	case <-time.After(testTimeout):
		t.Fatal("did not receive response")
	}
}
// TestMulti runs several clients against several providers; each client
// queries for a CID held by exactly one provider and must get that provider's
// response.
func TestMulti(t *testing.T) {
	numClients := 3
	numProviders := 3
	data := [][]byte{
		[]byte("noot"),
		[]byte("was"),
		[]byte("here"),
	}
	cids := make([]cid.Cid, len(data))
	clients := make([]*client.Client, numClients)
	providers := make([]*provider.Provider, numProviders)
	blockstores := make([]blockstore.Blockstore, numProviders)
	cnets := make([]*network.Network, numClients)
	pnets := make([]*network.Network, numProviders)
	// create and start clients
	for i := 0; i < numClients; i++ {
		net := newTestNetwork(t)
		c := client.NewClient(net)
		err := c.Start()
		require.NoError(t, err)
		clients[i] = c
		cnets[i] = net
	}
	// create and start providers
	for i := 0; i < numProviders; i++ {
		net := newTestNetwork(t)
		s := newTestRetrievalProviderStore()
		p := provider.NewProvider(net, s, cache.NewMockCache(0))
		err := p.Start()
		require.NoError(t, err)
		providers[i] = p
		blockstores[i] = s.bs
		pnets[i] = net
	}
	// connect clients to providers (full mesh)
	for _, cnet := range cnets {
		for _, pnet := range pnets {
			err := pnet.Connect(cnet.AddrInfo())
			require.NoError(t, err)
		}
	}
	// add data to blockstores: provider i holds data[i] only
	for i, bs := range blockstores {
		// add data block to blockstore
		b := block.NewBlock(data[i])
		cids[i] = b.Cid()
		err := bs.Put(b)
		require.NoError(t, err)
	}
	// each client queries for a different cid
	for i, c := range clients {
		params := shared.Params{
			PayloadCID: cids[i],
		}
		// subscribe to responses
		bt := newBasicTester()
		unsubscribe := c.SubscribeToQueryResponses(bt.handleResponse, params)
		// deferred to test end rather than loop end; fine for this small loop
		defer unsubscribe()
		// submit query
		err := c.SubmitQuery(context.Background(), params)
		require.NoError(t, err)
		// assert the response came from provider i with default terms
		expected := &shared.QueryResponse{
			Params:                  params,
			Provider:                pnets[i].PeerID(),
			PricePerByte:            provider.DefaultPricePerByte,
			PaymentInterval:         provider.DefaultPaymentInterval,
			PaymentIntervalIncrease: provider.DefaultPaymentIntervalIncrease,
		}
		select {
		case resp := <-bt.respCh:
			require.NotNil(t, resp)
			require.Equal(t, expected, resp)
		case <-time.After(testTimeout):
			t.Fatal("did not receive response")
		}
	}
}
// TestMultiProvider has two providers holding the same block; the client must
// receive a response from each of them.
func TestMultiProvider(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping TestMultiProvider")
	}
	pnet0 := newTestNetwork(t)
	pnet1 := newTestNetwork(t)
	cnet := newTestNetwork(t)
	s0 := newTestRetrievalProviderStore()
	s1 := newTestRetrievalProviderStore()
	// full mesh between both providers and the client
	err := pnet0.Connect(cnet.AddrInfo())
	require.NoError(t, err)
	err = pnet1.Connect(cnet.AddrInfo())
	require.NoError(t, err)
	err = pnet1.Connect(pnet0.AddrInfo())
	require.NoError(t, err)
	require.GreaterOrEqual(t, len(pnet0.Peers()), 2)
	require.GreaterOrEqual(t, len(pnet1.Peers()), 2)
	require.GreaterOrEqual(t, len(cnet.Peers()), 2)
	p0 := provider.NewProvider(pnet0, s0, cache.NewMockCache(0))
	p1 := provider.NewProvider(pnet1, s1, cache.NewMockCache(0))
	c := client.NewClient(cnet)
	// add data to both providers's blockstores
	b := block.NewBlock([]byte("noot"))
	testCid := b.Cid()
	err = s0.bs.Put(b)
	require.NoError(t, err)
	err = s1.bs.Put(b)
	require.NoError(t, err)
	// start providers and client
	err = p0.Start()
	require.NoError(t, err)
	err = p1.Start()
	require.NoError(t, err)
	err = c.Start()
	require.NoError(t, err)
	params := shared.Params{
		PayloadCID: testCid,
	}
	// query for CID, should receive responses from both providers
	bt := newBasicTester()
	unsubscribe := c.SubscribeToQueryResponses(bt.handleResponse, params)
	defer unsubscribe()
	// submit query
	err = c.SubmitQuery(context.Background(), params)
	require.NoError(t, err)
	// expected response with the Provider field blanked, since it differs
	// per responder and is compared separately below
	expected := &shared.QueryResponse{
		Params:                  params,
		PricePerByte:            provider.DefaultPricePerByte,
		PaymentInterval:         provider.DefaultPaymentInterval,
		PaymentIntervalIncrease: provider.DefaultPaymentIntervalIncrease,
	}
	receivedFrom := []peer.ID{}
	for i := 0; i < 2; i++ {
		select {
		case resp := <-bt.respCh:
			require.NotNil(t, resp)
			respProvider := resp.Provider
			resp.Provider = ""
			require.Equal(t, expected, resp)
			t.Log("received from", respProvider)
			receivedFrom = append(receivedFrom, respProvider)
		case <-time.After(testTimeout):
			t.Fatal("did not receive response")
		}
	}
	// assert response was received from providers 0 and 1 (order-independent:
	// both slices are sorted before comparison)
	expectedResponders := []peer.ID{pnet0.PeerID(), pnet1.PeerID()}
	sort.Slice(expectedResponders, func(i, j int) bool {
		return expectedResponders[i].String() < expectedResponders[j].String()
	})
	sort.Slice(receivedFrom, func(i, j int) bool {
		return receivedFrom[i].String() < receivedFrom[j].String()
	})
	require.Equal(t, expectedResponders, receivedFrom)
}
|
#!/bin/bash
# ----------------------------------------------------------------
# Continue from or start run based on xml files in directory <origin>
# Put new run in <origin>/<dest>
# ----------------------------------------------------------------
echo "--------------------------------------------"

# Require at least the <origin> argument.
if [ $# -lt 1 ]
then
    echo "usage: ./continue <origin>"
    echo " ./continue <origin> <dest>"
    echo " ./continue <origin> <dest> <nodetype> <time>"
    echo " ./continue <origin> <dest> <nodetype> <time> <smartds>"
    echo " "
    exit
fi

numprocs=8
executable=run_coupled

# Locate the build tree and the scripts dir relative to the i-emic checkout.
fullexec=`echo ${PWD} | sed 's/i-emic\/.*/i-emic\/build\/src\//'`main/$executable
scriptsdir=`echo ${PWD} | sed 's/i-emic\/.*/i-emic\/scripts\//'`
pwd=${PWD}

# Destination directory inside <origin> (default: continue).
if [ $# -ge 2 ]
then
    rdir=$2
else
    rdir=continue
fi

# Node type for the submit script (default: short).
if [ $# -ge 3 ]
then
    type=$3
else
    type=short
fi

# Wall-clock time for the submit script (default: 1 hour).
if [ $# -ge 4 ]
then
    time=$4
else
    time=01:00:00
fi

# <origin> must be an existing directory: use -d (the original used -s, which
# tests "exists and has non-zero size" and is meant for files).
if ! [[ -d $1 ]]
then
    echo "No such directory: "$pwd/$1
    exit
else
    echo "Next run can be found in" $1/$rdir
fi

echo $1"/"$rdir >> continue.log

cd $1
mkdir -p $rdir
mkdir -p $rdir/xml

# Verify the destination directory actually exists now (-d, not -s).
if ! [[ -d $rdir ]]
then
    echo "Directory creation failed"
    exit
fi

newds=0
# With 5 args ("smart ds"): derive a new continuation step size from the last
# two parameter values recorded in cdata.txt.
if [[ -s cdata.txt ]] && [ $# -ge 5 ]
then
    # compute new continuation step size
    par0=`tail -n 2 cdata.txt | awk '{print $1}' | head -n 1`
    par1=`tail -n 1 cdata.txt | awk '{print $1}'`
    newds=`awk -v a=$par1 -v b=$par0 'BEGIN{print ((a - b)*100) }'`
    echo "new continuation ds: " $newds
fi

# Put hdf5 files in <dest> directory
if ! [[ -s 'ocean_output.h5' ]]
then
    echo "No hdf5 files in <origin>="$1" to copy!"
else
    echo "Copying output hdf5 files in <origin>="$1" to <dest>="$rdir"."
    echo "If loading these files gives problems, try the backup files *_output.h5.bak."
    for i in ocean atmos seaice;
    do
        cp $i'_output.h5' $rdir/$i'_input.h5';
        #cp -v $i'_output.h5.bak' $rdir/$i'_input.h5';
    done
fi

# copy xmls twice and cp to continue dir
cp *.xml $rdir
cp *.xml $rdir/xml

cd $rdir

if [ $# -ge 5 ] && [ $newds -ne 0 ] # smart ds
then
    # adjust continuation step size and direction automatically
    echo "Adjust continuation step size and direction automatically based on cdata.txt"
    sed -i "s/initial step size.*value.*/initial step size\" type=\"double\" value=\"$newds\"\/>/" continuation_params.xml
fi

# Enable/disable loading of an existing state depending on whether input
# hdf5 files were copied into <dest>.
if ! [[ -s 'ocean_input.h5' ]]
then
    echo "No input hdf5 files in <dest>="$rdir" directory so we are not loading an existing state!"
    sed -i "s/Load state.*value.*/Load state\" type=\"bool\" value=\"false\"\/>/" ocean_params.xml
    sed -i "s/Load state.*value.*/Load state\" type=\"bool\" value=\"false\"\/>/" coupledmodel_params.xml
else
    echo "Found input hdf5 files in <dest>="$rdir" directory so we are loading an existing state!"
    sed -i "s/Load state.*value.*/Load state\" type=\"bool\" value=\"true\"\/>/" ocean_params.xml
    sed -i "s/Load state.*value.*/Load state\" type=\"bool\" value=\"true\"\/>/" coupledmodel_params.xml
fi
# run <numprocs> <executable> <nodetype> <time>
# Submit the job through slurm when sbatch is available; otherwise run the
# executable directly with mpirun.
function run
{
    if [ -x "$(command -v sbatch)" ]
    then
        echo "detected slurm, submitting job"
        echo " type: " $3 " time: " $4
        # Build submit.sh for the requested node type / wall time, submit it,
        # and extract the job id from sbatch's output.
        bash $scriptsdir/create_submit.sh $3 $4 1
        sbatch submit.sh $1 $2 > jobid
        sed -i "s/.*job //" jobid
        jobid=$(cat jobid)
        sleep 5
        # Confirm the job shows up in the queue for this user.
        if [ $(squeue -u ${LOGNAME} | grep $jobid | wc -l) -eq 1 ]
        then
            echo "job " $jobid " in progress"
        else
            echo "job submission failed"
        fi
    else
        echo "slurm not detected"
        mpirun -np $1 $2 > dump
    fi
}

# submit problem (see run() )
run $numprocs $fullexec $type $time
echo "--------------------------------------------"
|
package com.github.starter.grpc.server;
import io.grpc.stub.StreamObserver;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import java.util.function.Function;
/**
 * Bridges a reactive {@link Mono} source onto a gRPC {@link StreamObserver},
 * mapping the emitted value with the supplied function before forwarding it.
 *
 * @param <I> element type produced by the Mono
 * @param <O> element type expected by the observer
 */
public class StreamObserverAdapter<I, O> {

    /**
     * Subscribes to {@code input}, forwarding the mapped value, any error,
     * and the completion signal to {@code streamObserver}.
     */
    public StreamObserverAdapter(Mono<I> input, StreamObserver<O> streamObserver, Function<I, O> fn) {
        input.subscribe(
            data -> streamObserver.onNext(fn.apply(data)),
            streamObserver::onError,
            streamObserver::onCompleted
        );
    }

    /** Convenience factory: adapt {@code input} onto {@code streamObserver} via {@code fn}. */
    public static final <I, O> void transform(Mono<I> input, StreamObserver<O> streamObserver, Function<I, O> fn) {
        new StreamObserverAdapter<>(input, streamObserver, fn);
    }
}
|
<gh_stars>0
/** Represents the base object. Most structures in Derun derive from this class. */
export class BaseStructure {
    /** Snowflake ID of this structure (decimal string). */
    readonly id: string

    constructor(id: string) {
        this.id = id
    }

    /**
     * Creation timestamp in milliseconds, decoded from the snowflake.
     *
     * Uses BigInt for the 22-bit shift: 64-bit snowflakes exceed
     * Number.MAX_SAFE_INTEGER, so `Number(id) / 4194304` can lose precision
     * before the division. 1420070400000 is the Discord epoch offset.
     */
    get createdAt() {
        return Number(BigInt(this.id) >> 22n) + 1420070400000
    }

    toString() {
        return `[${this.constructor.name} ${this.id}]`
    }
}
|
package gov.cms.bfd.pipeline.bridge.util;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.util.Iterator;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
/**
 * Helper class for building the attribution sql script.
 *
 * <p>Reads a template file line by line; any line containing a marker of the
 * form %%formatstring%%iterations%% has the marker replaced by the format
 * string applied to up to 'iterations' values drawn from a {@link DataSampler}.
 *
 * <h3>Example script</h3>
 *
 * <p>[%%"%s",%%3%%]
 *
 * <p>This would print
 *
 * <p>["value1","value2","value3",]
 */
@Slf4j
@RequiredArgsConstructor
public class AttributionBuilder {

  // Named capture groups of the marker pattern.
  private static final String TEMPLATE_GROUP = "TemplateGroup";
  private static final String FORMAT_GROUP = "FormatString";
  private static final String COUNT_GROUP = "Iterations";

  // Matches %%<format>%%<count>%% ; <format> is matched greedily, so at most
  // one marker per line is handled.
  private static final Pattern attributionMarker =
      Pattern.compile(
          "(?<"
              + TEMPLATE_GROUP
              + ">%%(?<"
              + FORMAT_GROUP
              + ">.+)%%(?<"
              + COUNT_GROUP
              + ">\\d+)%%)");

  // Path of the template file to read.
  private final String attributionTemplate;
  // Path of the sql script file to write.
  private final String attributionScript;

  /**
   * Runs the attribution builder logic, reading the given template file and producing a new script
   * in the given file location, interpolated with the given {@link DataSampler} dataset.
   *
   * <p>NOTE(review): reader and writer use the platform default charset — confirm that is intended.
   *
   * @param dataSampler The {@link DataSampler} set to pull data from.
   */
  public void run(DataSampler<String> dataSampler) {
    try (BufferedReader reader = new BufferedReader(new FileReader(attributionTemplate));
        BufferedWriter writer = new BufferedWriter(new FileWriter(attributionScript))) {
      String line;
      while ((line = reader.readLine()) != null) {
        Matcher matcher = attributionMarker.matcher(line);
        if (matcher.find()) {
          String stringFormat = matcher.group(FORMAT_GROUP);
          long count = Long.parseLong(matcher.group(COUNT_GROUP));
          int startMatch = matcher.start(TEMPLATE_GROUP);
          int endMatch = matcher.end(TEMPLATE_GROUP);
          // Emit the text before the marker, then up to 'count' formatted
          // samples, then the text after the marker.
          writer.write(line.substring(0, startMatch));
          Iterator<String> attribution = dataSampler.iterator();
          long i = -1;
          while (attribution.hasNext() && ++i < count) {
            writer.write(String.format(stringFormat, attribution.next()));
          }
          writer.write(line.substring(endMatch));
        } else {
          writer.write(line);
        }
        writer.newLine();
      }
    } catch (IOException e) {
      log.error("Unable to create attribution sql script", e);
    }
  }
}
|
import { configLoader } from './configLoader';
import { run } from './pipeline';
import { pipelineFactory } from './pipelineFactory';
import { initCompilers } from './utils';
// Build a pipeline programmatically: one processor with per-file args.
const pipeline = pipelineFactory.create('Sample Pipeline');
pipeline.args.pipelineArg0 = 'pipelineArg0';
pipeline.addProcessor({
  name: 'proc0',
  modulePath: './test/pipelines/pipeline0/proc0.js',
  args: {
    fileArg0: 'fileArg0',
  },
});

// Run the programmatic pipeline under the babel compiler hook.
pipeline
  .runWithCompilers(['babel-core/register'])
  .then((result) => {
    console.log(JSON.stringify(result, null, 2));
  })
  .catch((err) => {
    console.error(err);
  });

// Load pipeline configs from disk (base configs, then a patch overlay merged
// on top), tweak pipeline0's args, and run it.
const runner = async () => {
  const compilers = ['babel-core/register'];
  return initCompilers(compilers)
    .then(() =>
      configLoader({ fileGlobs: ['../test/pipelines/**/*.config.js'], workingDirectory: __dirname })
    )
    .then((config: any) =>
      configLoader({
        fileGlobs: ['../test/patch/**/*.config.js'],
        workingDirectory: __dirname,
        existingConfig: config,
      })
    )
    .then((config: any) => {
      const pipeline0 = config.pipeline0;
      pipeline0.args.arg0 = 'testArg';
      return run(pipeline0);
    });
};

// Run the config-driven pipeline, then re-run the programmatic one.
runner()
  .then((result) => {
    console.log(JSON.stringify(result, null, 2));
  })
  .then(() => pipeline.run())
  .then((result) => {
    console.log(JSON.stringify(result, null, 2));
  })
  .catch((err) => {
    console.error(err);
  });
|
import React from 'react';
import {
StyleSheet,
View,
Text,
TextInput,
Button
} from 'react-native';
export default class App extends React.Component {
constructor(props) {
super(props);
this.state = {
location: '',
temperature: 0,
pressure: 0,
humidity: 0
};
this.getWeatherInfo = this.getWeatherInfo.bind(this);
}
getWeatherInfo() {
fetch(
`http://api.openweathermap.org/data/2.5/weather?q=${ this
.state
.location }&APPID=YOUR_API_KEY_HERE`
)
.then(response => response.json())
.then(data => {
this.setState({
temperature: data.main.temp,
pressure: data.main.pressure,
humidity: data.main.humidity
});
});
}
render() {
return (
<View style={styles.container}>
<Text>Weather in { this.state.location }</Text>
<Text>Temperature: { this.state.temperature }</Text>
<Text>Pressure: { this.state.pressure }</Text>
<Text>Humidity: { this.state.humidity }</Text>
<TextInput
style={styles.input}
placeholder="Location"
onChangeText={text => this.setState({
location: text
})}
/>
<Button
title="Get Weather Info"
onPress={this.getWeatherInfo}
/>
</View>
);
}
}
// Shared styles: center the content and give the location input a visible box.
const styles = StyleSheet.create({
  container: {
    flex: 1,
    justifyContent: 'center',
    alignItems: 'center'
  },
  input: {
    width: 200,
    height: 40,
    borderWidth: 1
  }
});
|
#!/bin/bash
# Rolling upgrade of the barak daemon across the default.upgrade group:
# open a secondary port, rebuild from develop, start the new barak in the
# background, then retire the old one.
set -euo pipefail
IFS=$'\n\t'

# Listen on the secondary port so the group stays reachable during the swap.
debora open "[::]:46661"
debora --group default.upgrade status
printf "\n\nShutting down barak default port...\n\n"
sleep 3
debora --group default.upgrade close "[::]:46660"
# Pull and rebuild, then launch the new barak with output captured under
# ~/.barak/logs via stdinwriter.
debora --group default.upgrade run -- bash -c "cd \$GOPATH/src/github.com/bdc/bdc; git pull origin develop; make"
debora --group default.upgrade run -- bash -c "cd \$GOPATH/src/github.com/bdc/bdc; mkdir -p ~/.barak/logs"
debora --group default.upgrade run --bg --label barak -- bash -c "cd \$GOPATH/src/github.com/bdc/bdc; barak --config=cmd/barak/seed 2>&1 | stdinwriter -outpath ~/.barak/logs/barak.log"
printf "\n\nTesting new barak...\n\n"
sleep 3
debora status
printf "\n\nShutting down old barak...\n\n"
sleep 3
debora --group default.upgrade quit
printf "Done!\n"
|
<reponame>hyperpape/needle
package com.justinblank.strings.Search;
import com.justinblank.strings.MatchResult;
import com.justinblank.strings.Matcher;
import java.util.Objects;
/**
 * {@link Matcher} implementation that delegates all operations to a
 * {@link SearchMethod} applied to a fixed target string.
 */
public class SearchMethodMatcher implements Matcher {

    private final SearchMethod method;
    private final String s;

    /**
     * @param method the search method to delegate to; must not be null
     * @param s the string to match against; must not be null
     * @throws NullPointerException if either argument is null
     */
    public SearchMethodMatcher(SearchMethod method, String s) {
        // The original call was requireNonNull(method, s), which used s as the
        // *message* argument and left s itself unchecked.
        this.method = Objects.requireNonNull(method, "method must not be null");
        this.s = Objects.requireNonNull(s, "target string must not be null");
    }

    /** Whether the entire string matches. */
    @Override
    public boolean matches() {
        return method.matches(s);
    }

    /** Whether a match is contained anywhere in the string. */
    @Override
    public boolean containedIn() {
        return method.containedIn(s);
    }

    /** Finds the first match in the whole string. */
    @Override
    public MatchResult find() {
        return method.find(s);
    }

    /** Finds the first match within [start, end). */
    @Override
    public MatchResult find(int start, int end) {
        return method.find(s, start, end);
    }
}
|
def factorial(n):
    """Return n! for a non-negative integer n.

    Uses an iterative product rather than recursion, so large n cannot hit
    Python's recursion limit.

    Args:
        n: non-negative integer.

    Returns:
        The factorial of n (1 when n is 0).

    Raises:
        ValueError: if n is negative.
    """
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for k in range(2, n + 1):
        result *= k
    return result


num = 9
factorial_num = factorial(num)
print(factorial_num)
|
#!/bin/bash
# Health/capacity probe for the fiddle Oracle instance.
# Exit 0 when the DB is started and under the schema cap, 1 otherwise.
# NOTE(review): credentials are hard-coded on the command line (visible in ps
# output) — confirm whether these should come from a secrets store.

# DB counts as started once the FIDDLEDATA tablespace is visible.
# 'REA'||'DY' keeps the literal READY out of the statement text so grep only
# matches the query result, not the echoed SQL.
# NOTE(review): the comparison assumes the grep'd line is exactly "READY" —
# confirm sqlplus output formatting.
STATUS=`su -p oracle -c "echo \"SELECT CASE WHEN count(*) > 0 THEN 'REA'||'DY' ELSE 'STARTING' END as status FROM dba_tablespaces WHERE tablespace_name = 'FIDDLEDATA';\" | sqlplus system/password as sysdba" | grep READY`
if [ "$STATUS" != "READY" ]
then
  echo "Not started yet"
  exit 1
fi

# Over capacity once 50 or more db_% schemas exist.
CAPACITY=`su -p oracle -c "echo \"SELECT CASE WHEN count(*) < 50 THEN 'REA'||'DY' ELSE 'OVERCAPACITY' END as status FROM (select distinct lower(replace(USERNAME, 'USER', 'DB')) as schema_name from all_users) tmp WHERE schema_name LIKE 'db_%';\" | sqlplus system/password as sysdba" | grep READY`
if [ "$CAPACITY" != "READY" ]
then
  echo "Overcapacity"
  exit 1
fi
exit 0
|
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 3 15:07:16 2017
@author: <NAME>
"""
from __future__ import division, print_function, unicode_literals, absolute_import
import unittest
import sys
sys.path.append("../../sidpy/")
from sidpy.base.string_utils import *
if sys.version_info.major == 3:
unicode = str
class TestCleanStringAtt(unittest.TestCase):
    """clean_string_att: numbers and numeric lists pass through unchanged;
    string sequences come back as numpy byte-string arrays."""

    def test_float(self):
        expected = 5.321
        self.assertEqual(expected, clean_string_att(expected))

    def test_str(self):
        expected = 'test'
        self.assertEqual(expected, clean_string_att(expected))

    def test_num_array(self):
        expected = [1, 2, 3.456]
        self.assertEqual(expected, clean_string_att(expected))

    def test_str_list(self):
        expected = ['a', 'bc', 'def']
        returned = clean_string_att(expected)
        # np is only available via the star import above — TODO: import numpy
        # explicitly in this module.
        expected = np.array(expected, dtype='S')
        for exp, act in zip(expected, returned):
            self.assertEqual(exp, act)

    def test_str_tuple(self):
        expected = ('a', 'bc', 'def')
        returned = clean_string_att(expected)
        expected = np.array(expected, dtype='S')
        for exp, act in zip(expected, returned):
            self.assertEqual(exp, act)
class TestFormattedStrToNum(unittest.TestCase):
    """formatted_str_to_number: parse '<number> <unit>' using parallel unit
    name/factor lists; bad argument types raise TypeError, bad values raise
    ValueError."""

    def test_typical(self):
        self.assertEqual(
            formatted_str_to_number("4.32 MHz", ["MHz", "kHz"], [1E+6, 1E+3]), 4.32E+6)

    def test_wrong_types(self):
        # Non-string separator, non-string input, scalar factor argument,
        # mixed-type unit names, and non-numeric factors all raise TypeError.
        with self.assertRaises(TypeError):
            _ = formatted_str_to_number("4.32 MHz", ["MHz", "kHz"],
                                        [1E+6, 1E+3], separator=14)
        with self.assertRaises(TypeError):
            _ = formatted_str_to_number({'dfdfd': 123}, ["MHz"], [1E+6])
        with self.assertRaises(TypeError):
            _ = formatted_str_to_number("dfdfdf", ["MHz"], 1E+6)
        with self.assertRaises(TypeError):
            _ = formatted_str_to_number("jkjk", ["MHz", 1234], [1E+6, 1E+4])
        with self.assertRaises(TypeError):
            _ = formatted_str_to_number("4.32 MHz", ["MHz", "kHz"], [{'dfdfd': 13}, 1E+3])

    def test_invalid(self):
        # Mismatched list lengths, wrong separator, unparseable numbers, and
        # a missing number all raise ValueError.
        with self.assertRaises(ValueError):
            _ = formatted_str_to_number("4.32 MHz", ["MHz"], [1E+6, 1E+3])
        with self.assertRaises(ValueError):
            _ = formatted_str_to_number("4.32 MHz", ["MHz", "kHz"], [1E+3])
        with self.assertRaises(ValueError):
            _ = formatted_str_to_number("4.32-MHz", ["MHz", "kHz"], [1E+6, 1E+3])
        with self.assertRaises(ValueError):
            _ = formatted_str_to_number("haha MHz", ["MHz", "kHz"], [1E+6, 1E+3])
        with self.assertRaises(ValueError):
            _ = formatted_str_to_number("1.2.3.4 MHz", ["MHz", "kHz"], [1E+6, 1E+3])
        with self.assertRaises(ValueError):
            _ = formatted_str_to_number("MHz", ["MHz", "kHz"], [1E+6, 1E+3])
class TestFormatQuantity(unittest.TestCase):
    """format_quantity: express a number using the largest applicable unit
    from parallel name/factor lists."""

    def test_typical(self):
        qty_names = ['sec', 'mins', 'hours', 'days']
        qty_factors = [1, 60, 3600, 3600*24]
        ret_val = format_quantity(315, qty_names, qty_factors)
        self.assertEqual(ret_val, '5.25 mins')
        ret_val = format_quantity(6300, qty_names, qty_factors)
        self.assertEqual(ret_val, '1.75 hours')

    def test_unequal_lengths(self):
        # Name and factor lists must be the same length.
        with self.assertRaises(ValueError):
            _ = format_quantity(315, ['sec', 'mins', 'hours'], [1, 60, 3600, 3600 * 24])
        with self.assertRaises(ValueError):
            _ = format_quantity(315, ['sec', 'mins', 'hours'], [1, 60])

    def test_incorrect_element_types(self):
        with self.assertRaises(TypeError):
            _ = format_quantity(315, ['sec', 14, 'hours'], [1, 60, 3600 * 24])

    def test_incorrect_number_to_format(self):
        with self.assertRaises(TypeError):
            _ = format_quantity('hello', ['sec', 'mins', 'hours'], [1, 60, 3600])

    def test_not_iterable(self):
        with self.assertRaises(TypeError):
            _ = format_quantity(315, 14, [1, 60, 3600])
        with self.assertRaises(TypeError):
            _ = format_quantity(315, ['sec', 'mins', 'hours'], slice(None))
class TestTimeSizeFormatting(unittest.TestCase):
    """format_time / format_size: human-readable durations and byte sizes."""

    def test_format_time(self):
        ret_val = format_time(315)
        self.assertEqual(ret_val, '5.25 mins')
        ret_val = format_time(6300)
        self.assertEqual(ret_val, '1.75 hours')

    def test_format_size(self):
        ret_val = format_size(15.23)
        self.assertEqual(ret_val, '15.23 bytes')
        ret_val = format_size(5830418104.32)
        self.assertEqual(ret_val, '5.43 GB')
class TestValidateStringArgs(unittest.TestCase):
    """validate_string_args: strip and validate one or more string arguments."""

    def test_empty(self):
        # Whitespace-only values are rejected.
        with self.assertRaises(ValueError):
            _ = validate_string_args([' '], ['meh'])

    def test_spaces(self):
        # Surrounding whitespace is stripped from returned values.
        expected = 'fd'
        [ret] = validate_string_args([' ' + expected + ' '], ['meh'])
        self.assertEqual(expected, ret)

    def test_single(self):
        # Bare strings (not wrapped in lists) are accepted.
        expected = 'fd'
        [ret] = validate_string_args(expected, 'meh')
        self.assertEqual(expected, ret)

    def test_multi(self):
        expected = ['abc', 'def']
        returned = validate_string_args([' ' + expected[0], expected[1] + ' '], ['meh', 'foo'])
        for exp, ret in zip(expected, returned):
            self.assertEqual(exp, ret)

    def test_not_string_lists(self):
        with self.assertRaises(TypeError):
            _ = validate_string_args([14], ['meh'])
        with self.assertRaises(TypeError):
            _ = validate_string_args(14, ['meh'])
        with self.assertRaises(TypeError):
            _ = validate_string_args({'dfdf': 14}, ['meh'])

    def test_name_not_string(self):
        # Only the values are validated; the names list is not type-checked.
        actual = ['ghghg']
        ret = validate_string_args(actual, [np.arange(3)])
        self.assertEqual(ret, actual)

    def test_unequal_lengths(self):
        # Extra values beyond the names list are ignored.
        expected = ['a', 'b']
        actual = validate_string_args(expected + ['c'], ['a', 'b'])
        for exp, ret in zip(expected, actual):
            self.assertEqual(exp, ret)

    def test_names_not_list_of_strings(self):
        with self.assertRaises(TypeError):
            _ = validate_string_args(['a', 'v'], {'a': 1, 'v': 43})
class TestStrToOther(unittest.TestCase):
    """str_to_other: parse a string into int / float / bool, falling back to
    the original string."""

    def test_invalid_input_obj_type(self):
        # Only strings are accepted as input.
        for val in [1.23, {'1we': 123}, ['dssd'], True, None]:
            with self.assertRaises(TypeError):
                str_to_other(val)

    def base_test(self, inputs, out_type):
        # Round-trip each value through str() and check value and type.
        for val in inputs:
            ret = str_to_other(str(val))
            self.assertEqual(val, ret)
            self.assertIsInstance(ret, out_type)

    def test_int(self):
        self.base_test([23, -235457842], int)

    def test_float(self):
        self.base_test([23.45643, -2354.57842], float)

    def test_exp(self):
        self.base_test([3.14E3, -4.3E-5], float)

    def test_str(self):
        self.base_test(['hello', '1fd353'], str)

    def test_bool(self):
        # Boolean strings are recognized case-insensitively.
        for val in ['true', 'TRUE', 'True']:
            ret = str_to_other(val)
            self.assertEqual(ret, True)
            self.assertIsInstance(ret, bool)
        for val in ['false', 'FALSE', 'False']:
            ret = str_to_other(val)
            self.assertEqual(ret, False)
            self.assertIsInstance(ret, bool)
class TestRemoveExtraDelimiters(unittest.TestCase):
    """remove_extra_delimiters: collapse repeated separators and trim
    leading/trailing ones."""

    def test_invalid_sep_type(self):
        for sep in [14, {'fdfd': 45}, [' ', ', '], True, (23, None)]:
            with self.assertRaises(TypeError):
                remove_extra_delimiters('fddfdf dfref', separator=sep)

    def test_invalid_line_type(self):
        for line in [14, {'fdfd': 45}, [' ', ', '], True, (23, None)]:
            with self.assertRaises(TypeError):
                remove_extra_delimiters(line, separator='-')

    def test_empty_delim(self):
        # An empty separator is meaningless and must be rejected.
        with self.assertRaises(ValueError):
            remove_extra_delimiters('this is a test', '')

    def typical_case(self, pad=False):
        # With pad=True, surround the line with extra separators that must be
        # stripped off.
        words = ['this', 'is', 'a', 'test']
        for sep in [' ', '-']:
            line = sep.join(words)
            if pad:
                dirty = sep * 4 + line + sep * 3
            else:
                dirty = line
            clean = remove_extra_delimiters(dirty, separator=sep)
            self.assertEqual(line, clean)
            self.assertIsInstance(clean, str)

    def test_single_delim(self):
        self.typical_case(pad=False)

    def test_delims_before_or_after(self):
        self.typical_case(pad=True)

    def test_multiple_consecutive_delims(self):
        # NOTE(review): source extraction may have collapsed repeated spaces in
        # this literal — the '=' case below still covers the multi-delimiter
        # path unambiguously.
        line = 'this is a test sentence'
        words = ['this', 'is', 'a', 'test', 'sentence']
        clean = remove_extra_delimiters(line, separator=' ')
        self.assertEqual(clean, ' '.join(words))
        line = 'this====is=a==test=========sentence'
        clean = remove_extra_delimiters(line, separator='=')
        self.assertEqual(clean, '='.join(words))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
# Toggle Android's rounded-corner overlay via adb, then exit.
# If the adb command fails, && short-circuits and the menu is shown again.
select yn in "Yes" "No"; do
    case $yn in
        # (the original had an unreachable 'break' after 'exit' — removed)
        Yes ) adb shell settings put system show_rounded_corners 1 && exit;;
        No ) adb shell settings put system show_rounded_corners 0 && exit;;
    esac
done
|
#!/usr/bin/env bash
# Build and publish the weewx image, tagged both with the version and latest.
# Fail fast: without this, a failed build would still be pushed/tagged.
set -euo pipefail

VERSION=4.4.0

docker build -t mitct02/weewx:$VERSION .
docker push mitct02/weewx:$VERSION
docker tag mitct02/weewx:$VERSION mitct02/weewx:latest
docker push mitct02/weewx:latest
|
#include"stdafx.h"
#include <iostream>
#include<cstdio>
#include<algorithm>
#include<random>
#include<math.h>
#include<vector>
#include<time.h>
#include<string.h>
#include<set>
#define NUM 5
using namespace std;
// method1: enumeration -- O(4^N)
// Enumerates, per subset size k, every achievable subset sum not exceeding
// half_sum; the answer is the largest sum reachable with exactly N of the 2N
// elements. Duplicate element values are harmless here because sums are
// deduplicated by the sets and each array slot is consumed once per pass
// (sizes are walked downward).
void closest_subset_e(int *dat, int N, int &res, int half_sum)
{
	// sums[k] = all subset sums achievable by picking exactly k elements.
	std::vector<std::set<int> > sums(N + 1);
	sums[0].insert(0);
	for (int idx = 0; idx < 2 * N; ++idx)
	{
		// Never pick more than N elements in total.
		int top = std::min(idx, N - 1);
		for (int size = top; size >= 0; --size)
		{
			for (int partial : sums[size])
			{
				int candidate = partial + dat[idx];
				if (candidate <= half_sum)
					sums[size + 1].insert(candidate);
			}
		}
	}
	// Best achievable sum using exactly N elements.
	for (int total : sums[N])
		res = std::max(res, total);
}
// method2: number_dp -- O(N*N*sum)
// dp[i][v] is true when some subset of the elements processed so far uses
// exactly i elements and sums to v (v <= half_sum). A scratch table dpt
// receives each element's new states so an array slot is never reused within
// the same round; the tables are merged after every element. This two-table
// scheme is what keeps repeated element *values* correct: an element already
// used in an i-element subset cannot be consumed again for (i+1).
void closest_subset_n(int *dat, int N, int &res, int half_sum)
{
	// RAII-managed tables (the original new[]'d raw arrays and never freed
	// them, leaking on every call). The debug dump of the dp table that the
	// original printed to stdout has also been removed.
	std::vector<std::vector<bool> > dp(N + 1, std::vector<bool>(half_sum + 1, false));
	std::vector<std::vector<bool> > dpt(N + 1, std::vector<bool>(half_sum + 1, false));
	dp[0][0] = dpt[0][0] = true;
	for (int k = 0; k < 2 * N; ++k)
	{
		// Using the first k elements, any subset size i up to N.
		for (int i = 1; (i <= k + 1 && i <= N); ++i)
		{
			for (int v = 0; v <= half_sum; ++v)
			{
				// Extend an (i-1)-element subset (built without element k).
				if (dp[i - 1][v] && v + dat[k] <= half_sum)
					dpt[i][v + dat[k]] = true;
			}
		}
		// Merge this element's new states into dp and resynchronize dpt.
		for (int s = 0; s <= N; ++s)
		{
			for (int t = 0; t <= half_sum; ++t)
			{
				dp[s][t] = dp[s][t] || dpt[s][t];
				dpt[s][t] = dp[s][t];
			}
		}
	}
	// Largest achievable sum using exactly N elements.
	for (int i = half_sum; i >= 0; --i)
	{
		if (dp[N][i])
		{
			res = i;
			return;
		}
	}
}
int main()
{
	srand(int(time(NULL)));
	int res = 0, res2 = 0,sums = 0;
	// Stress loop: generate random instances until the two algorithms
	// disagree (the loop repeats WHILE their results match).
	do {
		res = 0, sums = 0, res2 = 0;
		int N = 2 * (rand() % NUM + 1);
		int *dat = new int[N]; // NOTE(review): never delete[]d — leaks each iteration
		int min_num = INT_MAX;
		for (int i = 0; i < N; ++i)
		{
			dat[i] = rand() % (NUM)+NUM;
			min_num = min(min_num, dat[i]);
			sums += dat[i];
		}
		// If any element is negative, shift every element by a bias so all
		// values become non-negative (and adjust the total accordingly).
		if (min_num < 0)
		{
			for (int i = 0; i < N; ++i)
			{
				dat[i] -= min_num;
				sums -= min_num;
			}
		}
		sort(dat, dat + N);
		closest_subset_e(dat, N / 2, res, sums / 2);
		closest_subset_n(dat, N / 2, res2, sums / 2);
		for (int i = 0; i < N; ++i)
			cout << dat[i] << " ";
		cout << endl << "The data array's size is: " << N << endl;
		cout << "Closest subset's distance is:" << sums - 2 * res << " " << sums - 2 * res2 << endl;
	} while (res == res2);
	//int test[4] = { 0,2,3,3 }, res_t = 0;
	//closest_subset_n(test,2,res_t,4);
	//cout << res_t<<endl;
	return 0;
}
|
/** C#-style LIFO stack contract. */
export interface IStack<T> {
    /** Number of items currently on the stack. */
    readonly Count: number;
    /** Removes all items from the stack. */
    Clear(): void;
    /** Returns true when the given item is present. */
    Contains(item: T): boolean;
    /** Returns the top item without removing it. */
    Peek(): T;
    /** Removes and returns the top item. */
    Pop(): T;
    /** Places an item on top of the stack. */
    Push(item: T): void;
}
|
import BN from 'bn.js';
import React from 'react';
import { I18nProps } from '@polkadot/ui-app/types';
import translate from './translate';
import Details from './Details';
// Route props: the summary id arrives as a string path parameter.
type Props = I18nProps & {
  match: {
    params: {
      id: string
    }
  }
};

type State = {};

/** Thin route wrapper: parses the :id param into a BN and renders Details. */
export class Component extends React.PureComponent<Props, State> {
  state: State = {};

  render () {
    const { match: { params: { id } } } = this.props;

    return <Details id={new BN(id)} />;
  }
}

export default translate(Component);
|
#!/bin/bash
# Transition the Jira issues included in a deploy to "Deployed to <env>".
set -eu

# VARS EVAL.
# Resolve the tag to deploy and the Jira token from (possibly indirect)
# environment variables. NOTE(review): eval on env-provided values — confirm
# these variables are trusted CI inputs.
TAG_TO_DEPLOY=$(eval echo "$TAG")
JIRA_TOKEN=$(eval echo "$JIRA_AUTH_TOKEN")
get-acquia-key() {
local ACQUIA_KEY
if [[ -n ${ACQUIA_KEY_DEV} && -n ${ACQUIA_KEY_TEST} ]]; then
case "$ACSF_ENV" in
dev)
ACQUIA_KEY=${ACQUIA_KEY_DEV};;
test)
ACQUIA_KEY=${ACQUIA_KEY_TEST};;
prod)
ACQUIA_KEY=${ACQUIA_KEY_PROD};;
*)
ACQUIA_KEY=null
echo "Provided $ACSF_ENV is not a recognized Env."
;;
esac
echo "$ACQUIA_KEY"
else
echo "Please set the ACSF User key as an env variable for all your envs. IE: ACQUIA_KEY_DEV and ACQUIA_KEY_TEST".
fi
}
# Get the current tag deployed on acsf env.
# Queries the Site Factory VCS API and strips the 'tags/' prefix from the
# currently deployed ref.
get-current-tag() {
  local ACQUIA_KEY
  ACQUIA_KEY=$(get-acquia-key)
  curl -s -X GET https://www."${ACSF_ENV}"-"${ACSF_SITE}".acsitefactory.com/api/v1/vcs?type="sites" \
    -u "${ACSF_USER}":"${ACQUIA_KEY}" | jq -r '.current' | sed 's/tags\///'
}

CURRENT_TAG=$(get-current-tag)
echo "Current Tag on ${ACSF_ENV}: $CURRENT_TAG"
# With the current tag, get a list of issue IDs that were committed between
# current and latest: scan commit messages for KEY-123 style references and
# emit them as a comma-separated, de-duplicated list.
get-jira-issues() {
  local JIRA_ISSUES
  if [ -n "${CURRENT_TAG}" ]; then
    JIRA_ISSUES=$(git log "${CURRENT_TAG}".."${TAG_TO_DEPLOY}" | grep -e '[A-Z]\+-[0-9]\+' -o | sort -u | tr '\n' ',' | sed '$s/,$/\n/')
    echo "$JIRA_ISSUES"
  else
    echo "We were not able to get current tag deployed to ACSF Env. Please check the 'acsf-' parameters are correctly set."
  fi
}
# Jira API call to transition the issues.
# Exports JIRA_ISSUES into $BASH_ENV for later CI steps, then POSTs the
# configured transition (JIRA_TRANS_ID) for every included issue.
transition-issues() {
  JIRA_ISSUES=$(get-jira-issues)
  if [ -n "${JIRA_ISSUES}" ]; then
    echo "Included tickets between ${CURRENT_TAG} and ${TAG_TO_DEPLOY}: ${JIRA_ISSUES}"
    echo "export JIRA_ISSUES=$(get-jira-issues)" >> "$BASH_ENV"
    for issue in ${JIRA_ISSUES//,/ }
    do
      echo "Transitioning $issue..."
      ## Transition to "Deployed to ${ACSF_ENV}".
      curl \
        -X POST \
        -H "Authorization: Basic ${JIRA_TOKEN}" \
        -H "Content-Type: application/json" \
        --data '{"transition": { "id": "'"${JIRA_TRANS_ID}"'" }}' \
        "${JIRA_URL}"/rest/api/2/issue/"$issue"/transitions
    done
  else
    echo "There are no issues to transition."
    echo 'export JIRA_ISSUES="No Tickets"' >> "$BASH_ENV"
  fi
}

transition-issues
|
#!/bin/sh
set -e
set -u
set -o pipefail
# Report the script path and the line number of an unexpected failure.
on_error()
{
  local failed_line="$1"
  echo "$(realpath -mq "${0}"):${failed_line}: error: Unexpected failure"
}
trap 'on_error $LINENO' ERR
if [ -z ${FRAMEWORKS_FOLDER_PATH+x} ]; then
  # If FRAMEWORKS_FOLDER_PATH is not set, then there's nowhere for us to copy
  # frameworks to, so exit 0 (signalling the script phase was successful).
  exit 0
fi
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
# Parallel codesigning is opt-in; defaults to serial signing.
COCOAPODS_PARALLEL_CODE_SIGN="${COCOAPODS_PARALLEL_CODE_SIGN:-false}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
# Used as a return value for each invocation of `strip_invalid_archs` function.
STRIP_BINARY_RETVAL=0
# This protects against multiple targets copying the same framework dependency at the same time. The solution
# was originally proposed here: https://lists.samba.org/archive/rsync/2008-February/020158.html
RSYNC_PROTECT_TMP_FILES=(--filter "P .*.??????")
# Copies and strips a vendored framework
# $1: framework path, resolved against BUILT_PRODUCTS_DIR (full path, then
# basename) before being used as given. Copies the framework into the app's
# Frameworks folder, strips architectures not in $ARCHS, re-signs it, and on
# Xcode < 7 also embeds the Swift runtime dylibs the binary links against.
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
# Resolve a symlinked source so rsync copies the real framework contents.
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
# Use filter instead of exclude so missing patterns don't throw errors.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
# Fall back to a bare binary (no .framework bundle); resolve a symlinked
# destination binary to its target before stripping.
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
elif [ -L "${binary}" ]; then
echo "Destination binary is symlinked..."
dirname="$(dirname "${binary}")"
binary="${dirname}/$(readlink "${binary}")"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u)
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Copies and strips a vendored dSYM
# $1: path to a .framework.dSYM bundle. Copies it into DERIVED_FILES_DIR,
# strips architectures not being built, then moves the stripped bundle into
# DWARF_DSYM_FOLDER_PATH (or touches a placeholder when nothing was stripped).
install_dsym() {
local source="$1"
if [ -r "$source" ]; then
# Copy the dSYM into a the targets temp dir.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${DERIVED_FILES_DIR}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${DERIVED_FILES_DIR}"
local basename
basename="$(basename -s .framework.dSYM "$source")"
binary="${DERIVED_FILES_DIR}/${basename}.framework.dSYM/Contents/Resources/DWARF/${basename}"
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"Mach-O "*"dSYM companion"* ]]; then
strip_invalid_archs "$binary"
fi
# STRIP_BINARY_RETVAL == 1 means strip_invalid_archs actually processed the binary.
if [[ $STRIP_BINARY_RETVAL == 1 ]]; then
# Move the stripped file into its final destination.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${DERIVED_FILES_DIR}/${basename}.framework.dSYM\" \"${DWARF_DSYM_FOLDER_PATH}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${DERIVED_FILES_DIR}/${basename}.framework.dSYM" "${DWARF_DSYM_FOLDER_PATH}"
else
# The dSYM was not stripped at all, in this case touch a fake folder so the input/output paths from Xcode do not reexecute this script because the file is missing.
touch "${DWARF_DSYM_FOLDER_PATH}/${basename}.framework.dSYM"
fi
fi
}
# Copies the bcsymbolmap files of a vendored framework
# $1: path to a .bcsymbolmap file; copied into BUILT_PRODUCTS_DIR.
install_bcsymbolmap() {
local bcsymbolmap_path="$1"
local destination="${BUILT_PRODUCTS_DIR}"
# Escape the inner quotes in the traced command (the sibling install_* helpers
# do this; the original echo let the quotes terminate the string, so the
# logged command line misrepresented the quoting actually used).
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${bcsymbolmap_path}\" \"${destination}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${bcsymbolmap_path}" "${destination}"
}
# Signs a framework with the provided identity
# $1: path to the bundle/binary to sign. No-op unless an identity is set and
# code signing is both required and allowed by the build settings.
code_sign_if_enabled() {
# Chained [ ] && [ ] replaces the obsolescent `-a` test operator, whose
# parsing POSIX marks as unreliable with more than four arguments.
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY:-}" ] && [ "${CODE_SIGNING_REQUIRED:-}" != "NO" ] && [ "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
# Use the current code_sign_identity
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
local code_sign_cmd="/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS:-} --preserve-metadata=identifier,entitlements '$1'"
# Background the signing job when parallel signing is enabled; the script
# `wait`s for all jobs at the end.
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
code_sign_cmd="$code_sign_cmd &"
fi
echo "$code_sign_cmd"
eval "$code_sign_cmd"
fi
}
# Strip invalid architectures
# $1: path to a Mach-O binary. Removes (via lipo) every architecture not in
# $ARCHS. Sets STRIP_BINARY_RETVAL to 1 when the binary was processed, or to
# 0 when it shares no architecture with the current build.
strip_invalid_archs() {
binary="$1"
# Get architectures for current target binary
binary_archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | awk '{$1=$1;print}' | rev)"
# Intersect them with the architectures we are building for
# NOTE(review): ARCHS/binary_archs look like plain space-separated strings
# here; the [@] expansion is cosmetic — confirm ARCHS is never a real array.
intersected_archs="$(echo ${ARCHS[@]} ${binary_archs[@]} | tr ' ' '\n' | sort | uniq -d)"
# If there are no archs supported by this binary then warn the user
if [[ -z "$intersected_archs" ]]; then
echo "warning: [CP] Vendored binary '$binary' contains architectures ($binary_archs) none of which match the current build architectures ($ARCHS)."
STRIP_BINARY_RETVAL=0
return
fi
stripped=""
for arch in $binary_archs; do
if ! [[ "${ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary"
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
STRIP_BINARY_RETVAL=1
}
# Embed the pod frameworks for the active build configuration. Both Debug and
# Release install the same framework; any other configuration installs nothing.
case "$CONFIGURATION" in
  Debug|Release)
    install_framework "${BUILT_PRODUCTS_DIR}/SampleLib/SampleLib.framework"
    ;;
esac
# When codesigning jobs were backgrounded, wait for all of them to finish.
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
  wait
fi
|
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.getShapeType = void 0;
var util_1 = require("util");
/**
 * @ignore
 * Resolve the shape type from shape configuration data.
 * @param shapeCfg shape configuration; `shape` may be a string, or an array whose first element is the type
 * @param defaultShapeType fallback type used when no shape is configured
 * @returns the resolved shape type
 */
function getShapeType(shapeCfg, defaultShapeType) {
    var shapeType = defaultShapeType;
    if (shapeCfg.shape) {
        // Array.isArray replaces the deprecated util.isArray (Node DEP0044).
        shapeType = Array.isArray(shapeCfg.shape) ? shapeCfg.shape[0] : shapeCfg.shape;
    }
    return shapeType;
}
exports.getShapeType = getShapeType;
//# sourceMappingURL=get-shape-type.js.map
|
#!/bin/bash
# Health-check wrapper for a daemon (httpd by default): verifies the pidfile,
# TCP port ownership and SSL responsiveness, restarting the daemon if needed.
# Configure this part
mydaemon=httpd
# Derived, but also configurable
outfile=/tmp/check.${mydaemon}
pidfile=/var/run/${mydaemon}/${mydaemon}.pid
# Bookkeeping
mypid=$$
myname=$0
# $(...) replaces legacy backticks; quoting keeps paths with spaces intact.
mydir=$(dirname "${myname}")
# Import node-specific configuration
if [ ! -f "${mydir}/check.config.sh" ]
then
echo "${mydir}/check.config.sh is missing. I am lost."
exit 255
fi
. "${mydir}/check.config.sh"
# Import common functions after declaring configuration
if [ ! -f "${mydir}/check.functions.sh" ]
then
echo "${mydir}/check.functions.sh is missing. I am lost."
exit 255
fi
. "${mydir}/check.functions.sh"
#httpdcount=`ps -ef | grep -v $$ | grep -v grep | egrep '\/usr\/sbin\/httpd' | wc -l`
# Evaluate daemon health and restart the daemon when any check fails.
# Increments `bounce` once per failed check: (1) pidfile missing/empty,
# (2) no process owns the TCP port, (3) the port owner differs from the
# pidfile PID, (4) no SSL handshake response.
# Relies on helpers sourced from check.functions.sh (timestampit, datesay,
# getportholder, checkssl, bouncedaemon) and on ${apachetcpport} from
# check.config.sh — TODO confirm their exact contracts against those files.
main() {
bounce=0
timestampit
datesay Looking for pidfile ${pidfile}
ls -l ${pidfile}
# oldpid=0 marks "no pidfile", which counts as a reason to restart below.
if [ -f ${pidfile} ]; then oldpid=`cat ${pidfile}`; else oldpid=0; fi
datesay PID claims to be ${oldpid}
[ ${oldpid} -eq 0 ] && bounce=$(( ${bounce} + 1 ))
# presumably prints the PID listening on the port; verify in check.functions.sh
portholder=`getportholder ${apachetcpport}`
datesay PID ${portholder} owns TCP port ${apachetcpport}
[ ${portholder:-0} -eq 0 ] && bounce=$(( ${bounce} + 1 ))
[ ${portholder:-0} -ne ${oldpid:-0} ] && bounce=$(( ${bounce} + 1 ))
sslresponse=`checkssl ${apachetcpport}`
datesay GOT ${sslresponse} SSL handshake\(s\) from ${apachetcpport}
[ ${sslresponse:-0} -eq 0 ] && bounce=$(( ${bounce} + 1 ))
datesay ${mydaemon} has ${bounce} reasons to restart!
# Any failed check triggers a restart via the sourced bouncedaemon helper.
[ ${bounce} -gt 0 ] && bouncedaemon
}
# Make the report world-readable, then capture everything main emits into it.
touch "${outfile}"; chmod a+r "${outfile}"
# "$@" preserves argument word boundaries; the original unquoted ${@} re-split them.
main "$@" > "${outfile}" 2>&1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.