| id | content |
|---|---|
1643110
|
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
sns.set_style("white")
palette = "Set3"
def load_results(path):
loaded = pd.read_csv(path)
return loaded
def node_dependent_performance(df):
df2 = df[["N", "Py3plex", "Pymnet", "p"]]
df2 = pd.melt(df2, id_vars=['N', 'p'], value_vars=['Pymnet', 'Py3plex'])
df2 = df2.groupby(['p', 'N', 'variable']).mean().reset_index()
df2.columns = ["p", "|N|", "Library", "time (s)"]
print(df2)
# grid = sns.FacetGrid(df2, col="N", hue="variable", col_wrap=2)
# grid.map(sns.swarmplot, "E", "value",marker="o").add_legend()
df2['time (s)'] = df2['time (s)'].apply(np.log)
sns.boxplot(x="|N|",
y="time (s)",
data=df2,
hue="Library",
whis="range",
palette="vlag")
plt.ylabel("Log of time (s)")
plt.savefig("nodes.png", dpi=300)
plt.clf()
# df2['time (s)'] = df2['time (s)'].apply(np.log)
# df2['|E|'] = df2['|N|']* (df2['|N|']-1)
# sns.boxplot(x="|E|", y="time (s)", data=df2,hue="Library",palette="vlag")
# plt.ylabel("Log of time (s)")
# plt.savefig("edges.png", dpi = 300)
if __name__ == "__main__":
df = load_results("example_benchmark.csv")
node_dependent_performance(df)
|
1643136
|
class FirstPassOutput:
def __init__(self, lines=None):
self._second_pass_is_requested = False
self.is_second_pass = lines is not None
if lines is None:
lines = []
self._lines = lines
self._source = None
def __bool__(self):
return False
def request_second_pass(self):
self._second_pass_is_requested = True
@property
def second_pass_is_requested(self):
return not self.is_second_pass and self._second_pass_is_requested
@property
def lines(self):
self.request_second_pass()
return self._lines
@property
def source(self):
if self._source is None:
self._source = '\n'.join(self.lines)
return self._source
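# --- Usage sketch (added for illustration; `_render` is a hypothetical consumer, not
# part of the original class): a minimal two-pass flow.
def _render(output):
    # Touching .lines on the first pass flags that a second pass is needed.
    return "collected %d lines" % len(output.lines)

if __name__ == "__main__":
    first = FirstPassOutput()
    _render(first)                                    # first pass, no lines yet
    if first.second_pass_is_requested:
        second = FirstPassOutput(["line 1", "line 2"])
        print(_render(second))                        # -> collected 2 lines
        print(second.source)                          # lines joined into one string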
|
1643142
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from .base import BASE
class LRD2(BASE):
'''
META-LEARNING WITH DIFFERENTIABLE CLOSED-FORM SOLVERS
'''
def __init__(self, ebd_dim, args):
super(LRD2, self).__init__(args)
self.ebd_dim = ebd_dim
self.iters = args.lrd2_num_iters
# meta parameters to learn
self.lam = nn.Parameter(torch.tensor(-1, dtype=torch.float))
def _compute_w(self, XS, YS_inner):
'''
Use Newton's method to obtain w from support set XS, YS_inner
https://github.com/bertinetto/r2d2/blob/master/fewshots/models/lrd2.py
'''
for i in range(self.iters):
# use eta to store w_{i-1}^T X
if i == 0:
eta = torch.zeros_like(XS[:, 0]) # support_size
else:
eta = (XS @ w).squeeze(1)
mu = torch.sigmoid(eta)
s = mu * (1 - mu)
z = eta + (YS_inner - mu) / s
Sinv = torch.diag(1.0/s)
# Woodbury with regularization
w = XS.t() @ torch.inverse(XS @ XS.t() + (10. ** self.lam) * Sinv) @ z.unsqueeze(1)
return w
def forward(self, XS, YS, XQ, YQ):
'''
@param XS (support x): support_size x ebd_dim
@param YS (support y): support_size
@param XQ (query x): query_size x ebd_dim
@param YQ (query y): query_size
@return acc
@return loss
'''
# train with Newton's method on support set
YS, YQ = self.reidx_y(YS, YQ)
YS_onehot = self._label2onehot(YS)
YQ_onehot = self._label2onehot(YQ)
# 1 vs all
pred = torch.zeros_like(YQ_onehot)
for y in range(self.args.way):
# treat y as positive, all others as negative
YS_inner = YS_onehot[:, y]
w = self._compute_w(XS, YS_inner) # ebd_dim, 1
pred_inner = XQ @ w # query_size, 1
pred[:, y] = pred_inner.squeeze(1)
loss = F.cross_entropy(pred, YQ)
acc = BASE.compute_acc(pred, YQ)
return acc, loss
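# --- Standalone sketch (added for illustration): the IRLS/Woodbury update performed by
# `_compute_w`, rewritten on toy data with plain torch. Sizes and the ridge value are
# made up; this is not part of the original model.
def _irls_demo(iters=5, lam=-1.0):
    torch.manual_seed(0)
    XS = torch.randn(10, 4)                    # support_size x ebd_dim
    YS_inner = (torch.rand(10) > 0.5).float()  # binary one-vs-all targets
    w = None
    for i in range(iters):
        eta = torch.zeros_like(XS[:, 0]) if i == 0 else (XS @ w).squeeze(1)
        mu = torch.sigmoid(eta)
        s = mu * (1 - mu)
        z = eta + (YS_inner - mu) / s
        # Woodbury identity: invert a support_size x support_size matrix instead of
        # an ebd_dim x ebd_dim one.
        w = XS.t() @ torch.inverse(XS @ XS.t() + (10.0 ** lam) * torch.diag(1.0 / s)) @ z.unsqueeze(1)
    return w                                   # ebd_dim x 1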
|
1643164
|
import asyncio
import logging.config
from pathlib import Path
from symphony.bdk.core.activity.command import CommandActivity, CommandContext
from symphony.bdk.core.activity.form import FormReplyActivity, FormReplyContext
from symphony.bdk.core.config.loader import BdkConfigLoader
from symphony.bdk.core.service.message.message_service import MessageService
from symphony.bdk.core.symphony_bdk import SymphonyBdk
async def run():
async with SymphonyBdk(BdkConfigLoader.load_from_symphony_dir("config.yaml")) as bdk:
bdk.activities().register(SlashGifCommandActivity(bdk.messages()))
bdk.activities().register(ReplyFormReplyActivity(bdk.messages()))
await bdk.datafeed().start()
class SlashGifCommandActivity(CommandActivity):
def __init__(self, messages: MessageService):
self._messages = messages
def matches(self, context: CommandContext) -> bool:
return context.text_content.startswith("@" + context.bot_display_name + " /gif")
async def on_activity(self, context: CommandContext):
await self._messages.send_message(context.stream_id, load_gif_elements_form())
class ReplyFormReplyActivity(FormReplyActivity):
def __init__(self, messages: MessageService):
self.messages = messages
def matches(self, context: FormReplyContext) -> bool:
return context.form_id == "gif-category-form" \
and context.get_form_value("action") == "submit" \
and context.get_form_value("category")
async def on_activity(self, context: FormReplyContext):
category = context.get_form_value("category")
await self.messages.send_message(context.source_event.stream.stream_id,
"<messageML> You just submitted this category: " + category + "</messageML>")
def load_gif_elements_form():
return (Path(__file__).parent.parent / "resources/gif.mml.xml").read_text(encoding="utf-8")
logging.config.fileConfig(Path(__file__).parent.parent / "logging.conf", disable_existing_loggers=False)
try:
logging.info("Running activity example...")
asyncio.run(run())
except KeyboardInterrupt:
logging.info("Ending activity example")
|
1643173
|
from boa3.builtin import public
from boa3.builtin.interop.blockchain import get_transaction_height
from boa3.builtin.type import UInt256
@public
def main(hash_: UInt256) -> int:
return get_transaction_height(hash_)
|
1643182
|
import ffilib
import uctypes
import uarray
import uos
import os
import utime
from signal import *
libc = ffilib.libc()
librt = ffilib.open("librt")
CLOCK_REALTIME = 0
CLOCK_MONOTONIC = 1
SIGEV_SIGNAL = 0
sigval_t = {
"sival_int": uctypes.INT32 | 0,
"sival_ptr": (uctypes.PTR | 0, uctypes.UINT8),
}
sigevent_t = {
"sigev_value": (0, sigval_t),
"sigev_signo": uctypes.INT32 | 8,
"sigev_notify": uctypes.INT32 | 12,
}
timespec_t = {
"tv_sec": uctypes.INT32 | 0,
"tv_nsec": uctypes.INT64 | 8,
}
itimerspec_t = {
"it_interval": (0, timespec_t),
"it_value": (16, timespec_t),
}
__libc_current_sigrtmin = libc.func("i", "__libc_current_sigrtmin", "")
SIGRTMIN = __libc_current_sigrtmin()
timer_create_ = librt.func("i", "timer_create", "ipp")
timer_settime_ = librt.func("i", "timer_settime", "PiPp")
def new(sdesc):
buf = bytearray(uctypes.sizeof(sdesc))
s = uctypes.struct(uctypes.addressof(buf), sdesc, uctypes.NATIVE)
return s
def timer_create(sig_id):
sev = new(sigevent_t)
#print(sev)
sev.sigev_notify = SIGEV_SIGNAL
sev.sigev_signo = SIGRTMIN + sig_id
timerid = uarray.array('P', [0])
r = timer_create_(CLOCK_MONOTONIC, sev, timerid)
os.check_error(r)
#print("timerid", hex(timerid[0]))
return timerid[0]
def timer_settime(tid, hz):
period = 1000000000 // hz
new_val = new(itimerspec_t)
new_val.it_value.tv_nsec = period
new_val.it_interval.tv_nsec = period
#print("new_val:", bytes(new_val))
old_val = new(itimerspec_t)
#print(new_val, old_val)
r = timer_settime_(tid, 0, new_val, old_val)
os.check_error(r)
#print("old_val:", bytes(old_val))
#print("timer_settime", r)
class Timer:
def __init__(self, id, freq):
self.id = id
self.tid = timer_create(id)
self.freq = freq
def callback(self, cb):
self.cb = cb
timer_settime(self.tid, self.freq)
org_sig = signal(SIGRTMIN + self.id, self.handler)
#print("Sig %d: %s" % (SIGRTMIN + self.id, org_sig))
def handler(self, signum):
#print('Signal handler called with signal', signum)
self.cb(self)
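# --- Usage sketch (added for illustration; assumes a POSIX MicroPython/Pycopy port
# where the ffi-based modules imported above are available).
if __name__ == "__main__":
    def tick(timer):
        print("tick from timer", timer.id)
    t = Timer(0, 10)        # timer id 0, firing at 10 Hz
    t.callback(tick)
    while True:
        utime.sleep(1)      # the periodic signal interrupts the sleep and runs the handler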
|
1643188
|
from Interpreter.Expression.expression import Expression
class Arithmetic(Expression):
def __init__(self, left, right=None):
self.left = left
self.right = right
def getValue(self, env):
pass
def isNumeric(self, value):
return isinstance(value, int) or isinstance(value, float)
class Addition(Arithmetic):
def __init__(self, left, right):
Arithmetic.__init__(self, left, right)
def getValue(self, env):
leftValue = self.left.getValue(env)
rightValue = self.right.getValue(env)
areNums = self.isNumeric(leftValue) and self.isNumeric(rightValue)
if areNums:
return leftValue + rightValue
class Subtraction(Arithmetic):
def __init__(self, left, right):
Arithmetic.__init__(self, left, right)
def getValue(self, env):
leftValue = self.left.getValue(env)
rightValue = self.right.getValue(env)
areNums = self.isNumeric(leftValue) and self.isNumeric(rightValue)
if areNums:
return leftValue - rightValue
class Multiplication(Arithmetic):
def __init__(self, left, right):
Arithmetic.__init__(self, left, right)
def getValue(self, env):
leftValue = self.left.getValue(env)
rightValue = self.right.getValue(env)
areNums = self.isNumeric(leftValue) and self.isNumeric(rightValue)
if areNums:
return leftValue * rightValue
class Division(Arithmetic):
def __init__(self, left, right):
Arithmetic.__init__(self, left, right)
def getValue(self, env):
leftValue = self.left.getValue(env)
rightValue = self.right.getValue(env)
areNums = self.isNumeric(leftValue) and self.isNumeric(rightValue)
if areNums:
return leftValue / rightValue
class Power(Arithmetic):
def __init__(self, left, right):
Arithmetic.__init__(self, left, right)
def getValue(self, env):
leftValue = self.left.getValue(env)
rightValue = self.right.getValue(env)
areNums = self.isNumeric(leftValue) and self.isNumeric(rightValue)
if areNums:
return leftValue ** rightValue
class Modulo(Arithmetic):
def __init__(self, left, right):
Arithmetic.__init__(self, left, right)
def getValue(self, env):
leftValue = self.left.getValue(env)
rightValue = self.right.getValue(env)
areNums = self.isNumeric(leftValue) and self.isNumeric(rightValue)
if areNums:
return leftValue % rightValue
class UnaryMinus(Arithmetic):
def __init__(self, left):
Arithmetic.__init__(self, left)
def getValue(self, env):
value = self.left.getValue(env)
isNum = self.isNumeric(value)
if isNum:
return - value
class UnaryPlus(Arithmetic):
def __init__(self, left):
Arithmetic.__init__(self, left)
def getValue(self, env):
value = self.left.getValue(env)
isNum = self.isNumeric(value)
if isNum:
return - value if value < 0 else value
|
1643214
|
import os
import torch
import sys
from seq_encoding import one_hot_PLUS_blosum_encode
from config_parser import Config
from model import Model
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
def read_hla_sequences():
"""Read hla sequences from [CLUATAL_OMEGA_B_chains_aligned_FLATTEN.txt]
and [CLUATAL_OMEGA_A_chains_aligned_FLATTEN.txt]
"""
def read(f, d):
file_path = os.path.join(BASE_DIR, 'dataset', f)
with open(file_path, 'r') as in_file:
for line_num, line in enumerate(in_file):
if line_num == 0:
continue
info = line.strip('\n').split('\t')
d[info[0]] = info[1]
hla_sequence_A = {}
hla_sequence_B = {}
read('CLUATAL_OMEGA_A_chains_aligned_FLATTEN.txt', hla_sequence_A)
read('CLUATAL_OMEGA_B_chains_aligned_FLATTEN.txt', hla_sequence_B)
return hla_sequence_A, hla_sequence_B
def run(model_path, hla_a, hla_b, peptide):
"""Get ic50
"""
# load model
config = Config("config_main.json")
config.device = 'cpu'
state_dict = torch.load(os.path.join(BASE_DIR, model_path))
model = Model(config)
model.load_state_dict(state_dict)
model.eval()
peptide_encoded, pep_mask, pep_len = one_hot_PLUS_blosum_encode(peptide, config.max_len_pep)
hla_sequence_A, hla_sequence_B = read_hla_sequences()
hla_a_seq = hla_sequence_A[hla_a]
hla_b_seq = hla_sequence_B[hla_b]
hla_a_encoded, hla_a_mask, hla_a_len = one_hot_PLUS_blosum_encode(hla_a_seq, config.max_len_hla_A)
hla_b_encoded, hla_b_mask, hla_b_len = one_hot_PLUS_blosum_encode(hla_b_seq, config.max_len_hla_B)
pred_ic50, _ = model(
torch.stack([hla_a_encoded], dim=0),
torch.stack([hla_a_mask], dim=0),
torch.tensor([hla_a_len]),
torch.stack([hla_b_encoded], dim=0),
torch.stack([hla_b_mask], dim=0),
torch.tensor([hla_b_len]),
torch.stack([peptide_encoded], dim=0),
torch.stack([pep_mask], dim=0),
torch.tensor([pep_len]),
)
print("IC 50: ", pred_ic50.item())
if __name__ == '__main__':
run(
model_path=sys.argv[1],
hla_a=sys.argv[2],
hla_b=sys.argv[3],
peptide=sys.argv[4],
)
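# Example invocation (illustrative only; the script name, checkpoint path, allele names
# and peptide below are placeholders, not files or data shipped with this code):
#   python run_prediction.py checkpoints/main.pt DQA1*05:01 DQB1*02:01 APQPELPYPQPELPY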
|
1643215
|
def test_add_to_cart(desktop_web_driver):
desktop_web_driver.get('https://www.saucedemo.com/v1/inventory.html')
desktop_web_driver.find_element_by_class_name('btn_primary').click()
assert desktop_web_driver.find_element_by_class_name('shopping_cart_badge').text == '1'
desktop_web_driver.get('https://www.saucedemo.com/v1/cart.html')
expected = desktop_web_driver.find_elements_by_class_name('inventory_item_name')
assert len(expected) == 1
def test_add_two_to_cart(desktop_web_driver):
desktop_web_driver.get('https://www.saucedemo.com/v1/inventory.html')
desktop_web_driver.find_element_by_class_name('btn_primary').click()
desktop_web_driver.find_element_by_class_name('btn_primary').click()
assert desktop_web_driver.find_element_by_class_name('shopping_cart_badge').text == '2'
desktop_web_driver.get('https://www.saucedemo.com/v1/cart.html')
expected = desktop_web_driver.find_elements_by_class_name('inventory_item_name')
assert len(expected) == 2
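# --- Possible conftest.py fixture these tests rely on (illustrative sketch; the fixture
# name comes from the tests above, while the login flow, element ids and Selenium 3 style
# driver API are assumptions):
import pytest
from selenium import webdriver

@pytest.fixture
def desktop_web_driver():
    driver = webdriver.Chrome()
    driver.get('https://www.saucedemo.com/v1/')
    driver.find_element_by_id('user-name').send_keys('standard_user')
    driver.find_element_by_id('password').send_keys('secret_sauce')
    driver.find_element_by_id('login-button').click()
    yield driver
    driver.quit()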
|
1643283
|
from __future__ import absolute_import
from __future__ import print_function
from flytekit.sdk.tasks import inputs, outputs, dynamic_task
from flytekit.sdk.types import Types
from flytekit.sdk.workflow import workflow_class, Input, Output
from compose.inner import IdentityWorkflow, secondary_sibling_identity_lp
@workflow_class()
class StaticSubWorkflowCaller(object):
outer_a = Input(Types.Integer, default=5, help="Input for inner workflow")
identity_wf_execution = IdentityWorkflow(a=outer_a)
wf_output = Output(identity_wf_execution.outputs.task_output, sdk_type=Types.Integer)
# Note that this effectively creates a duplicate copy of the default launch plan, stored under the name id_lp
id_lp = IdentityWorkflow.create_launch_plan()
# Fetch a specific launch plan that has already been registered with Flyte Admin. Unlike the first example,
# running registration on this file will not register it again and no copy will be made.
# Also, a fetch call like this is a bit of an anti-pattern, since it requires access to the Flyte control plane
# from within a running task, something we try to avoid.
# from flytekit.common.launch_plan import SdkLaunchPlan
# fetched_identity_lp = SdkLaunchPlan.fetch('flyteeexamples', 'development',
# 'cookbook.sample_workflows.formula_1.inner.IdentityWorkflow',
# '8d291bdf163674dcb6ea8a047a4de6cc7cf4853f')
@workflow_class()
class StaticLaunchPlanCaller(object):
outer_a = Input(Types.Integer, default=5, help="Input for inner workflow")
identity_lp_execution = id_lp(a=outer_a)
wf_output = Output(identity_lp_execution.outputs.task_output, sdk_type=Types.Integer)
@inputs(num=Types.Integer)
@outputs(out=Types.Integer, imported_output=Types.Integer)
@dynamic_task
def lp_yield_task(wf_params, num, out, imported_output):
wf_params.logging.info("Running inner task... yielding a launchplan")
wf_params.logging.info("{} {}".format(id_lp._id, id_lp.workflow_id))
# Test one launch plan defined in this file, but from a workflow imported from another file
identity_lp_execution = id_lp(a=num)
yield identity_lp_execution
out.set(identity_lp_execution.outputs.task_output)
# Test another defined in another file and just imported.
imported_lp_execution = secondary_sibling_identity_lp()
yield imported_lp_execution
imported_output.set(imported_lp_execution.outputs.task_output)
# Test a launch plan that's fetched
# fetched_lp_execution = fetched_identity_lp(a=15)
# yield fetched_lp_execution
# fetched_output.set(fetched_lp_execution.outputs.task_output)
# Note that a launch plan created here, like SomeWorkflow.create_launch_plan() would never work, because
# it will never have been registered with the Flyte control plane (because the body of tasks like this one do
# not run at registration time).
@workflow_class
class DynamicLaunchPlanCaller(object):
outer_a = Input(Types.Integer, default=5, help="Input for inner workflow")
lp_task = lp_yield_task(num=outer_a)
wf_output = Output(lp_task.outputs.out, sdk_type=Types.Integer)
@inputs(num=Types.Integer)
@outputs(out=Types.Integer)
@dynamic_task
def sub_wf_yield_task(wf_params, num, out):
wf_params.logging.info("Running inner task... yielding a sub-workflow")
identity_wf_execution = IdentityWorkflow(a=num)
yield identity_wf_execution
out.set(identity_wf_execution.outputs.task_output)
@workflow_class
class DynamicSubWorkflowCaller(object):
outer_a = Input(Types.Integer, default=5, help="Input for inner workflow")
sub_wf_task = sub_wf_yield_task(num=outer_a)
wf_output = Output(sub_wf_task.outputs.out, sdk_type=Types.Integer)
|
1643306
|
import os
import torch
# Environment parameters
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
RESULT_FOLDER = PROJECT_ROOT + '/results'
STATS_FOLDER = RESULT_FOLDER + '/stats'
DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# Parameters for data loading
DATA_FOLDER = PROJECT_ROOT + '/data'
CIFAR10_NUM_TRN_SAMPLES = 50000
NUM_WORKERS = 4
# Network parameters
INPUT_SHAPE = (3, 32, 32) # Channels, height and width
NUM_CLASSES = 10
# Experiment configuration parameters
CONFIG_FAMILY_HEBB = 'hebb' # Key to identify configurations based on hebbian learning
CONFIG_FAMILY_GDES = 'gdes' # Key to identify configurations based on gradient descent learning
DEFAULT_CONFIG = 'gdes/config_base'
|
1643316
|
import random
from itertools import product
from bisect import bisect_right
from Q_functions_optimized import Q_US, Q_ES, Q_EA
from G_functions import powerset, union_set, G_EA_possible_distributions
from input_validator import validate_input2
# ----------------------------------------------------------------------------------------------------------------------
# G_ES_uniform
def G_ES_uniform(iterations, n, P, C, validate=True, Q_US_prev_values=None, Q_ES_prev_values=None):
"""
Wrapper for the optimized uniform version of G_ES
:param iterations: positive integer, number of formulae to generate
:param n: zero or positive integer, depth
:param P: set of strings, atomics
:param C: dict, keys: positive integers (arities), values: sets of strings (constants of that arity)
:param validate: if True, validates that the input provided is in the correct form
:param Q_US_prev_values: None or dict, saves the result of previous values so that it does not repeat calculations
:param Q_ES_prev_values: None or dict, saves the result of previous values so that it does not repeat calculations
:return: list of random formulae with exactly n depth and containing (all or some) P
"""
if Q_ES_prev_values is None:
Q_ES_prev_values = dict()
if Q_US_prev_values is None:
Q_US_prev_values = dict()
if validate:
validate_input2(n, P, C)
formulae = list()
for i in range(iterations):
f = G_ES_uniform2(n, P, C, Q_US_prev_values, Q_ES_prev_values)
Q_US_prev_values.update(f[1])
Q_ES_prev_values.update(f[2])
f = f[0]
formulae.append(f)
return formulae
def G_ES_uniform2(n, P, C, Q_US_prev_values, Q_ES_prev_values):
"""
Optimized uniform version of G_ES
:param n: zero or positive integer, depth
:param P: set of strings, atomics
:param C: dict, keys: positive integers (arities), values: sets of strings (constants of that arity)
:param Q_US_prev_values: dict, saves the result of previous values so that it does not repeat calculations
:param Q_ES_prev_values: dict, saves the result of previous values so that it does not repeat calculations
:return: tuple (random formula with exactly n depth and containing (all or some) P, prev_Q_US, prev_Q_ES)
"""
if n == 0:
return random.choice(tuple(P)), Q_US_prev_values, Q_ES_prev_values
else:
all_constants = list()
for c in C:
all_constants.extend(list(C[c]))
# Weighted choice of the logical constant
constant_weights = get_constant_weights(n, P, C, all_constants, Q_US_prev_values, Q_ES_prev_values)
Q_US_prev_values.update(constant_weights[1])
Q_ES_prev_values.update(constant_weights[2])
constant_weights = constant_weights[0]
denominator = constant_weights[-1]
try:
rnd = random.random() * denominator
except OverflowError:
# If the denominator is too large, Python cannot compute the float. The following will be almost uniform
rnd = random.randint(0, denominator-1)
chosen_index = bisect_right(constant_weights, rnd)
constant = all_constants[chosen_index]
arity = [a for a in C if constant in C[a]][0]
s_arguments = dict()
# Weighted generation of a distribution of depths
possible_depth_distributions = [x for x in product(range(0, n), repeat=arity) if n-1 in x]
depth_weights = get_depth_weights(possible_depth_distributions, P, C, Q_US_prev_values, Q_ES_prev_values)
Q_US_prev_values.update(depth_weights[1])
Q_ES_prev_values.update(depth_weights[2])
depth_weights = depth_weights[0]
denominator2 = depth_weights[-1]
try:
rnd2 = random.random() * denominator2
except OverflowError:
rnd2 = random.randint(0, denominator2-1)
chosen_index2 = bisect_right(depth_weights, rnd2)
chosen_depths = possible_depth_distributions[chosen_index2]
for x in range(0, len(chosen_depths)):
prev = G_ES_uniform2(chosen_depths[x], P, C, Q_US_prev_values, Q_ES_prev_values)
Q_US_prev_values.update(prev[1])
Q_ES_prev_values.update(prev[2])
s_arguments[x] = prev[0]
s = constant + '('
for y in sorted(s_arguments):
s += s_arguments[y] + ', '
s = s[:-2] + ')'
return s, Q_US_prev_values, Q_ES_prev_values
def get_constant_weights(n, P, C, all_constants, Q_US_prev_values, Q_ES_prev_values):
"""
:param n: zero or positive integer, depth
:param P: set of strings, atomics
:param C: dict, keys: positive integers (arities), values: sets of strings (constants of that arity)
:param all_constants: list of strings, all logical constants of all arities
:param Q_US_prev_values: None or dict, saves the result of previous values so that it does not repeat calculations
:param Q_ES_prev_values: None or dict, saves the result of previous values so that it does not repeat calculations
:return: tuple (list of integers, representing the accumulated numerators of the weighing function, prev_values)
"""
weights = []
totals = 0
for constant in all_constants:
constant_arity = [x for x in C if constant in C[x]][0]
num1 = Q_US(n-1, len(P), C, False, Q_US_prev_values)
Q_US_prev_values.update(num1[1])
num2 = Q_US(n-2, len(P), C, False, Q_US_prev_values)
Q_US_prev_values.update(num2[1])
num = num1[0] ** constant_arity - num2[0] ** constant_arity
totals += num
weights.append(totals)
return weights, Q_US_prev_values, Q_ES_prev_values
def get_depth_weights(possible_weight_distributions, P, C, Q_US_prev_values, Q_ES_prev_values):
"""
:param possible_weight_distributions: list of tuples of integers (representing depths of arguments)
:param P: set of strings, atomics
:param C: dict, keys: positive integers (arities), values: sets of strings (constants of that arity)
:param Q_US_prev_values: None or dict, saves the result of previous values so that it does not repeat calculations
:param Q_ES_prev_values: None or dict, saves the result of previous values so that it does not repeat calculations
:return: tuple (list of integers, representing the accumulated numerators of the weighing function, prev_values)
"""
weights = []
totals = 0
for distribution in possible_weight_distributions:
distrib_total = 1
for argument in distribution:
num = Q_ES(argument, len(P), C, False, Q_US_prev_values, Q_ES_prev_values)
Q_US_prev_values.update(num[1])
Q_ES_prev_values.update(num[2])
num = num[0]
distrib_total *= num
totals += distrib_total
weights.append(totals)
return weights, Q_US_prev_values, Q_ES_prev_values
# ----------------------------------------------------------------------------------------------------------------------
# G_US_uniform
def G_US_uniform(iterations, n, P, C, validate=True, Q_US_prev_values=None, Q_ES_prev_values=None):
"""
Optimized uniform version of G_US
:param iterations: positive integer, number of formulae to generate
:param n: zero or positive integer, depth
:param P: set of strings, atomics
:param C: dict, keys: positive integers (arities), values: sets of strings (constants of that arity)
:param validate: if True, validates that the input provided is in the correct form
:param Q_US_prev_values: None or dict, saves the result of previous values so that it does not repeat calculations
:param Q_ES_prev_values: None or dict, saves the result of previous values so that it does not repeat calculations
:return: list of random formulae with up to n depth and containing (all or some) P
"""
if Q_ES_prev_values is None:
Q_ES_prev_values = dict()
if Q_US_prev_values is None:
Q_US_prev_values = dict()
if validate:
validate_input2(n, P, C)
formulae = list()
for i in range(iterations):
# Weighted choice of depth
possible_depths = list(range(0, n+1))
depth_weights = get_depth_weights2(possible_depths, P, C, Q_US_prev_values, Q_ES_prev_values)
Q_US_prev_values.update(depth_weights[1])
Q_ES_prev_values.update(depth_weights[2])
depth_weights = depth_weights[0]
denominator = depth_weights[-1]
try:
rnd = random.random() * denominator
except OverflowError:
rnd = random.randint(0, denominator-1)
chosen_index = bisect_right(depth_weights, rnd)
chosen_depth = possible_depths[chosen_index]
f = G_ES_uniform2(chosen_depth, P, C, Q_US_prev_values, Q_ES_prev_values)
Q_US_prev_values.update(f[1])
Q_ES_prev_values.update(f[2])
f = f[0]
formulae.append(f)
return formulae
def get_depth_weights2(possible_depths, P, C, Q_US_prev_values, Q_ES_prev_values):
"""
:param possible_depths: list of integers
:param P: set of strings, atomics
:param C: dict, keys: positive integers (arities), values: sets of strings (constants of that arity)
:param Q_US_prev_values: dict, saves the result of previous values so that it does not repeat calculations
:param Q_ES_prev_values: dict, saves the result of previous values so that it does not repeat calculations
:return: tuple (list of integers, representing the accumulated numerators of the weighing function, prev_values)
"""
weights = []
totals = 0
for depth in possible_depths:
num = Q_ES(depth, len(P), C, False, Q_US_prev_values, Q_ES_prev_values)
Q_US_prev_values.update(num[1])
Q_ES_prev_values.update(num[2])
totals += num[0]
weights.append(totals)
return weights, Q_US_prev_values, Q_ES_prev_values
# ----------------------------------------------------------------------------------------------------------------------
# G_EA_uniform
def G_EA_uniform(iterations, n, P, C, validate=True, Q_US_prev_values=None, Q_ES_prev_values=None, Q_EA_prev_values=None):
"""
Wrapper for the optimized uniform version of G_EA
:param iterations: positive integer, number of formulae to generate
:param n: zero or positive integer, depth
:param P: set of strings, atomics
:param C: dict, keys: positive integers (arities), values: sets of strings (constants of that arity)
:param validate: if True, validates that the input provided is in the correct form
:param Q_US_prev_values: None or dict, saves the result of previous values so that it does not repeat calculations
:param Q_ES_prev_values: None or dict, saves the result of previous values so that it does not repeat calculations
:param Q_EA_prev_values: None or dict, saves the result of previous values so that it does not repeat calculations
:return: list of random formulae with exactly n depth and containing all P
"""
if Q_EA_prev_values is None:
Q_EA_prev_values = dict()
if Q_ES_prev_values is None:
Q_ES_prev_values = dict()
if Q_US_prev_values is None:
Q_US_prev_values = dict()
if validate:
validate_input2(n, P, C)
if len(P) > max(C) ** n:
raise ValueError("len(P) can be at most n ** max(C)")
formulae = list()
for i in range(iterations):
f = G_EA_uniform2(n, P, C, Q_US_prev_values, Q_ES_prev_values, Q_EA_prev_values)
Q_US_prev_values.update(f[1])
Q_ES_prev_values.update(f[2])
Q_EA_prev_values.update(f[3])
f = f[0]
formulae.append(f)
return formulae
def G_EA_uniform2(n, P, C, Q_US_prev_values, Q_ES_prev_values, Q_EA_prev_values):
"""
Optimized uniform version of G_EA
:param n: zero or positive integer, depth
:param P: set of strings, atomics
:param C: dict, keys: positive integers (arities), values: sets of strings (constants of that arity)
:param Q_US_prev_values: dict, saves the result of previous values so that it does not repeat calculations
:param Q_ES_prev_values: dict, saves the result of previous values so that it does not repeat calculations
:param Q_EA_prev_values: dict, saves the result of previous values so that it does not repeat calculations
:return: tuple (random formula with exactly n depth and containing (all or some) P, prev_Q_US, prev_Q_ES, prev_Q_EA)
"""
if n == 0:
return tuple(P)[0], Q_US_prev_values, Q_ES_prev_values, Q_EA_prev_values
else:
all_constants = list()
for c in C:
all_constants.extend(list(C[c]))
possible_distribs = list()
for arity in C:
possible_distribs.extend(G_EA_possible_distributions(arity, n - 1, P, max(C)))
weights = get_G_EA_weights(possible_distribs, C,
Q_US_prev_values, Q_ES_prev_values, Q_EA_prev_values)
Q_US_prev_values.update(weights[1])
Q_ES_prev_values.update(weights[2])
Q_EA_prev_values.update(weights[3])
weights = weights[0]
denominator = weights[-1]
try:
rnd = random.random() * denominator
except OverflowError:
rnd = random.randint(0, denominator - 1)
chosen_index = bisect_right(weights, rnd)
chosen_distrib = possible_distribs[chosen_index]
chosen_arity = len(chosen_distrib[0])
chosen_constant = random.choice(tuple(C[chosen_arity]))
formula_arguments = list()
for index in range(len(chosen_distrib[0])):
f = G_EA_uniform2(chosen_distrib[0][index], chosen_distrib[1][index], C,
Q_US_prev_values, Q_ES_prev_values, Q_EA_prev_values)
Q_US_prev_values.update(f[1])
Q_ES_prev_values.update(f[2])
Q_EA_prev_values.update(f[3])
f = f[0]
formula_arguments.append(f)
formula_arguments = ', '.join(formula_arguments)
formula = f'{chosen_constant}({formula_arguments})'
return formula, Q_US_prev_values, Q_ES_prev_values, Q_EA_prev_values
def get_G_EA_weights(possible_distribs, C, Q_US_prev_values, Q_ES_prev_values, Q_EA_prev_values):
"""
:param possible_distribs: list of the form returned in the function above
:param C: dict, keys: positive integers (arities), values: sets of strings (constants of that arity)
:param Q_US_prev_values: dict, saves the result of previous values so that it does not repeat calculations
:param Q_ES_prev_values: dict, saves the result of previous values so that it does not repeat calculations
:param Q_EA_prev_values: dict, saves the result of previous values so that it does not repeat calculations
:return: tuple (list of integers, representing the accumulated numerators of the weighing function, prev_values)
"""
weights = []
totals = 0
for distrib in possible_distribs:
num = 1
for index in range(len(distrib[0])):
val = Q_EA(distrib[0][index], len(distrib[1][index]), C, False,
Q_US_prev_values, Q_ES_prev_values, Q_EA_prev_values)
Q_US_prev_values.update(val[1])
Q_ES_prev_values.update(val[2])
Q_EA_prev_values.update(val[3])
num *= val[0]
num *= len(C[len(distrib[0])]) # Multiply by the number of connectives of that arity
totals += num
weights.append(totals)
return weights, Q_US_prev_values, Q_ES_prev_values, Q_EA_prev_values
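# ----------------------------------------------------------------------------------------------------------------------
# Example run (added for illustration; atom and connective names are arbitrary, and the
# Q_* helper modules imported at the top must be importable for this to execute):
if __name__ == "__main__":
    P = {'p', 'q'}
    C = {1: {'~'}, 2: {'&', 'v'}}
    print(G_ES_uniform(3, 2, P, C))  # three formulae of depth exactly 2
    print(G_US_uniform(3, 2, P, C))  # three formulae of depth at most 2
    print(G_EA_uniform(3, 2, P, C))  # three formulae of depth 2 containing every atom in P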
|
1643330
|
import pytest
import sqlalchemy as sa
from sqlalchemy_utils.types import locale
@pytest.fixture
def User(Base):
class User(Base):
__tablename__ = 'user'
id = sa.Column(sa.Integer, primary_key=True)
locale = sa.Column(locale.LocaleType)
def __repr__(self):
return 'User(%r)' % self.id
return User
@pytest.fixture
def init_models(User):
pass
@pytest.mark.skipif('locale.babel is None')
class TestLocaleType(object):
def test_parameter_processing(self, session, User):
user = User(
locale=locale.babel.Locale(u'fi')
)
session.add(user)
session.commit()
user = session.query(User).first()
def test_territory_parsing(self, session, User):
ko_kr = locale.babel.Locale(u'ko', territory=u'KR')
user = User(locale=ko_kr)
session.add(user)
session.commit()
assert session.query(User.locale).first()[0] == ko_kr
def test_coerce_territory_parsing(self, User):
user = User()
user.locale = 'ko_KR'
assert user.locale == locale.babel.Locale(u'ko', territory=u'KR')
def test_scalar_attributes_get_coerced_to_objects(self, User):
user = User(locale='en_US')
assert isinstance(user.locale, locale.babel.Locale)
def test_unknown_locale_throws_exception(self, User):
with pytest.raises(locale.babel.UnknownLocaleError):
User(locale=u'unknown')
def test_literal_param(self, session, User):
clause = User.locale == 'en_US'
compiled = str(clause.compile(compile_kwargs={'literal_binds': True}))
assert compiled == '"user".locale = \'en_US\''
|
1643331
|
import OpenEXR
import Imath
import numpy as np
import time
import data.util_exr as exr_utils
import os
def _crop(img, pos, size):
ow, oh = img.shape[0], img.shape[1]
x1, y1 = pos
tw = th = size
if (ow > tw or oh > th):
# return img.crop((x1, y1, x1 + tw, y1 + th)) #CHANGED
return img[x1:(x1 + tw), y1:(y1 + th), :]
return img
def get_distinct_prefix(dir_path):
names = set()
for f in os.listdir(dir_path):
if os.path.isfile(os.path.join(dir_path, f)):
names.add(f.split(".")[0].rsplit("-",1)[0])
return list(names)
# Divide variance by mean^2 to get relative variance
def CalcRelVar(data, var, calcLog, calcLum=True, calcMean=False):
if calcLum:
denom = np.expand_dims(CalcLuminance(data), axis=2)
elif calcMean:
denom = np.expand_dims(CalcMean(data), axis=2)
else:
denom = data
var = var / ((denom * denom) + 1.0e-5)
if calcLog:
var = LogTransform(var)
return var
# Calculate log transform (with an offset to map zero to zero)
def LogTransform(data):
assert(np.sum(data < 0) == 0)
return np.log(data + 1.0)
# Calculate luminance (3 channels in and 1 channel out)
def CalcLuminance(data):
return (0.2126*data[:,:,0] + 0.7152*data[:,:,1] + 0.0722*data[:,:,2])
# Calculate mean (3 channels in and 1 channel out)
def CalcMean(data):
return (0.3333*data[:,:,0] + 0.3333*data[:,:,1] + 0.3333*data[:,:,2])
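# --- Illustrative check (added): relative variance of a random 3-channel buffer with a
# made-up per-pixel variance, just to show the expected shapes.
def _rel_var_demo():
    rgb = np.random.rand(4, 4, 3).astype(np.float32)
    var = np.random.rand(4, 4, 1).astype(np.float32)
    return CalcRelVar(rgb, var, calcLog=True).shape  # -> (4, 4, 1)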
# for shading
def loadDisneyEXR_feature_shading(path, FEATURE_LIST):
# time0 = time.time()
prefix = path.split(".")[0]
# color_path = prefix + "_color.exr"
variance_path = prefix + "_variance.exr"
normal_path = prefix + "_normal.exr"
depth_path = prefix + "_depth.exr"
texture_path = prefix + "_texture.exr"
visibility_path = prefix + "_visibility.exr"
diffuse_path = prefix + "_diffuse.exr"
specular_path = prefix + "_specular.exr"
# inFile = exr_utils.open(variance_path)
# variance = inFile.get_all()["default"]
if "normal" in FEATURE_LIST:
try:
inFile = exr_utils.open(normal_path)
normal = inFile.get_all()["default"]
normal = _crop(normal, (1,1), 128)
except Exception:
normal = np.zeros((128,128,3))
if "depth" in FEATURE_LIST:
try:
inFile = exr_utils.open(depth_path)
depth = inFile.get_all()["default"]
depth = _crop(depth, (1,1), 128)
except Exception:
depth = np.zeros((128,128,1))
# if "albedo" in FEATURE_LIST: //always load in albedo
try:
inFile = exr_utils.open(texture_path)
texture = inFile.get_all()["default"]
texture = _crop(texture, (1,1), 128)
except Exception:
texture = np.zeros((128,128,3))
if "visibility" in FEATURE_LIST:
try:
inFile = exr_utils.open(visibility_path)
visibility = inFile.get_all()["default"]
visibility = _crop(visibility, (1,1), 128)
except Exception:
visibility = np.zeros((128,128,1))
if "diffuse" in FEATURE_LIST:
try:
inFile = exr_utils.open(diffuse_path)
diffuse = inFile.get_all()["default"]
diffuse = _crop(diffuse, (1,1), 128)
except Exception:
diffuse = np.zeros((128,128,3))
if "specular" in FEATURE_LIST:
try:
inFile = exr_utils.open(specular_path)
specular = inFile.get_all()["default"]
specular = _crop(specular, (1,1), 128)
except Exception:
specular = np.zeros((128,128,3))
# variance = CalcRelVar( (1+ color.copy()) , variance, False, False, True )
if "diffuse" in FEATURE_LIST:
diffuse[diffuse < 0.0] = 0.0
diffuse = diffuse / (texture + 0.00316)
diffuse = LogTransform(diffuse)
color = diffuse
if "specular" in FEATURE_LIST:
specular[specular < 0.0] = 0.0
specular = LogTransform(specular)
color = specular
feature_tuple = ()
if "normal" in FEATURE_LIST:
normal = np.nan_to_num(normal)
if "specular" in FEATURE_LIST:
normal = (normal + 1.0)*0.5
normal = np.maximum(np.minimum(normal,1.0),0.0)
feature_tuple += (normal,)
if "depth" in FEATURE_LIST:
# Normalize current frame depth to [0,1]
maxDepth = np.max(depth)
if maxDepth != 0:
depth /= maxDepth
feature_tuple += (depth,)
if "albedo" in FEATURE_LIST:
# texture = np.clip(texture,0.0,1.0)
feature_tuple += (texture, )
if "visibility" in FEATURE_LIST:
feature_tuple += (visibility, )
if len(feature_tuple) == 0:
return color, np.zeros(color.shape)
features = np.concatenate(feature_tuple, axis=2)
return color, features
def loadDisneyEXR_multi_ref_shading(path, FEATURE_LIST):
# time0 = time.time()
prefix = path.split(".")[0]
color_path = prefix + "_color.exr"
diffuse_path = prefix + "_diffuse.exr"
specular_path = prefix + "_specular.exr"
texture_path = prefix + "_texture.exr"
if "diffuse" in FEATURE_LIST:
try:
inFile = exr_utils.open(diffuse_path)
diffuse = inFile.get_all()["default"]
diffuse = _crop(diffuse, (1,1), 128)
except Exception:
diffuse = np.zeros((128,128,3))
if "specular" in FEATURE_LIST:
try:
inFile = exr_utils.open(specular_path)
specular = inFile.get_all()["default"]
specular = _crop(specular, (1,1), 128)
except Exception:
specular = np.zeros((128,128,3))
try:
inFile = exr_utils.open(texture_path)
texture = inFile.get_all()["default"]
texture = _crop(texture, (1,1), 128)
except Exception:
texture = np.zeros((128,128,3))
if "diffuse" in FEATURE_LIST:
diffuse[diffuse < 0.0] = 0.0
diffuse = diffuse / (texture + 0.00316)
diffuse = LogTransform(diffuse)
color = diffuse
if "specular" in FEATURE_LIST:
specular[specular < 0.0] = 0.0
specular = LogTransform(specular)
color = specular
return color
def loadDisneyEXR_ref(path):
inFile = exr_utils.open(path)
data = inFile.get_all()["default"]
data = LogTransform(data)
return data
# def loadDisneyEXR_feature_from_whole(path, channel=3):
# image = OpenEXR.InputFile(path)
# dataWindow = image.header()['dataWindow']
# size = (dataWindow.max.x - dataWindow.min.x + 1, dataWindow.max.y - dataWindow.min.y + 1)
# FLOAT = Imath.PixelType(Imath.PixelType.FLOAT)
# channel_to_extract = ["B","G","R",'colorVariance.Z','normal.B',"normal.G","normal.R",'depth.Z','albedo.B',"albedo.G","albedo.R",'visibility.Z']
# time0 = time.time()
# data = np.array([np.fromstring(image.channel(c, FLOAT), dtype=np.float32) for c in channel_to_extract])
# data = np.moveaxis(data, 0, -1)
# data = data.reshape(size[1], size[0], -1)
# time1 = time.time()
# color = data[:,:,:3]
# variance = data[:,:,3:4]
# normal = data[:,:,4:7]
# depth = data[:,:, 7:8]
# texture = data[:,:, 8:11]
# visibility = data[:,:, 11:12]
# time2 = time.time()
# variance = CalcRelVar( (1+ color.copy()) , variance, False, False, True )
# color = LogTransform(color)
# normal = (normal + 1.0)*0.5
# # Normalize current frame depth to [0,1]
# maxDepth = np.max(depth)
# if maxDepth != 0:
# depth /= maxDepth
# features = np.concatenate((variance,normal,depth,texture,visibility), axis=2)
# time3 = time.time()
# print("time 0 =%f, time1 = %f, time2 = %f " %(time1-time0, time2-time1,time3-time2))
# return color, features
# def loadDisneyEXR_feature(path, FEATURE_LIST):
# # time0 = time.time()
# prefix = path.split(".")[0]
# color_path = prefix + "_color.exr"
# variance_path = prefix + "_variance.exr"
# normal_path = prefix + "_normal.exr"
# depth_path = prefix + "_depth.exr"
# texture_path = prefix + "_texture.exr"
# # visibility_path = prefix + "_visibility.exr"
# diffuse_path = prefix + "_diffuse.exr"
# specular_path = prefix + "_specular.exr"
# try:
# inFile = exr_utils.open(color_path)
# color = inFile.get_all()["default"]
# color = _crop(color, (1,1), 128)
# except Exception:
# color = np.zeros((128,128,3))
# # inFile = exr_utils.open(variance_path)
# # variance = inFile.get_all()["default"]
# try:
# inFile = exr_utils.open(normal_path)
# normal = inFile.get_all()["default"]
# normal = _crop(normal, (1,1), 128)
# except Exception:
# normal = np.zeros((128,128,3))
# try:
# inFile = exr_utils.open(depth_path)
# depth = inFile.get_all()["default"]
# depth = _crop(depth, (1,1), 128)
# except Exception:
# depth = np.zeros((128,128,1))
# try:
# inFile = exr_utils.open(texture_path)
# texture = inFile.get_all()["default"]
# texture = _crop(texture, (1,1), 128)
# except Exception:
# texture = np.zeros((128,128,3))
# # try:
# # inFile = exr_utils.open(visibility_path)
# # visibility = inFile.get_all()["default"]
# # visibility = _crop(visibility
# # , (1,1), 128)
# # except Exception:
# # visibility = np.zeros((128,128,1))
# try:
# inFile = exr_utils.open(diffuse_path)
# diffuse = inFile.get_all()["default"]
# diffuse = _crop(diffuse, (1,1), 128)
# except Exception:
# diffuse = np.zeros((128,128,3))
# try:
# inFile = exr_utils.open(specular_path)
# specular = inFile.get_all()["default"]
# specular = _crop(specular, (1,1), 128)
# except Exception:
# specular = np.zeros((128,128,3))
# # variance = CalcRelVar( (1+ color.copy()) , variance, False, False, True )
# color[color < 0.0] = 0.0
# color = LogTransform(color)
# diffuse[diffuse < 0.0] = 0.0
# diffuse = LogTransform(diffuse)
# specular[specular < 0.0] = 0.0
# specular = LogTransform(specular)
# normal = np.nan_to_num(normal)
# normal = (normal + 1.0)*0.5
# normal = np.maximum(np.minimum(normal,1.0),0.0)
# # Normalize current frame depth to [0,1]
# maxDepth = np.max(depth)
# if maxDepth != 0:
# depth /= maxDepth
# # texture = np.clip(texture,0.0,1.0)
# # feautres = np.concatenate((variance, normal, depth, texture, visibility), axis=2)
# feautres = np.concatenate((normal, depth, texture), axis=2) #visibility
# return color, diffuse, specular, feautres
# # return np.concatenate((color, normal, depth, texture), axis=2)
# def loadDisneyEXR_multi_ref(path, FEATURE_LIST):
# # time0 = time.time()
# prefix = path.split(".")[0]
# color_path = prefix + "_color.exr"
# diffuse_path = prefix + "_diffuse.exr"
# specular_path = prefix + "_specular.exr"
# try:
# inFile = exr_utils.open(color_path)
# color = inFile.get_all()["default"]
# color = _crop(color, (1,1), 128)
# except Exception:
# color = np.zeros((128,128,3))
# try:
# inFile = exr_utils.open(diffuse_path)
# diffuse = inFile.get_all()["default"]
# diffuse = _crop(diffuse, (1,1), 128)
# except Exception:
# diffuse = np.zeros((128,128,3))
# try:
# inFile = exr_utils.open(specular_path)
# specular = inFile.get_all()["default"]
# specular = _crop(specular, (1,1), 128)
# except Exception:
# specular = np.zeros((128,128,3))
# color[color<0.0] = 0.0
# color = LogTransform(color)
# diffuse[diffuse < 0.0] = 0.0
# diffuse = LogTransform(diffuse)
# specular[specular < 0.0] = 0.0
# specular = LogTransform(specular)
# return color, diffuse, specular
|
1643385
|
from collections import defaultdict
from django.contrib.sites.models import Site
from django.core.management.base import BaseCommand
from django.core.urlresolvers import reverse
from symposion.proposals.kinds import get_kind_slugs, get_proposal_model
from email_log.models import Email
from pycon.models import PyConProposal
from pycon.finaid.models import FinancialAidApplication, APPLICATION_TYPE_SPEAKER
from pycon.finaid.utils import has_application, send_email_message
SLUGS = get_kind_slugs()
DOMAIN = Site.objects.get_current().domain
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('--kind', action='store', dest='kind', required=True,
help='Proposal Kind to notify: {}'.format(', '.join(SLUGS)))
def handle(self, *args, **options):
if options['kind'] not in SLUGS:
print('ERROR: Unknown Proposal Kind: {}\n Must be one of: {}'.format(options['kind'], ', '.join(SLUGS)))
return False
to_apply = defaultdict(list)
to_confirm = defaultdict(list)
accepted = get_proposal_model(options['kind']).objects.filter(overall_status=PyConProposal.STATUS_ACCEPTED)
for proposal in accepted:
if proposal.speaker.financial_support and has_application(proposal.speaker.user):
application = FinancialAidApplication.objects.get(user=proposal.speaker.user)
application.application_type = APPLICATION_TYPE_SPEAKER
application.presenting = True
application.save()
path = reverse('speaker_grant_edit')
url = 'https://{domain}{path}'.format(domain=DOMAIN, path=path)
to_confirm[proposal.speaker.email].append(proposal)
if proposal.speaker.financial_support and not has_application(proposal.speaker.user):
path = reverse('speaker_grant_apply')
url = 'https://{domain}{path}'.format(domain=DOMAIN, path=path)
to_apply[proposal.speaker.email].append(proposal)
for email, proposals in to_apply.items():
notified = Email.objects.filter(
recipients='; '.join(['<EMAIL>', email]),
subject='Speaker assistance for your {}.'.format(options['kind'].title())
).exists()
if notified:
continue
send_email_message(
'speaker_grant_apply',
from_='<EMAIL>',
to=['<EMAIL>', email],
context={
'proposal_kind': options['kind'],
'user': proposals[0].speaker.user,
'domain': DOMAIN,
'proposal': proposals[0],
},
)
for email, proposals in to_confirm.items():
notified = Email.objects.filter(
recipients='; '.join(['<EMAIL>', email]),
subject='Speaker assistance for your {}.'.format(options['kind'].title())
).exists()
if notified:
continue
send_email_message(
'speaker_grant_confirm',
from_='<EMAIL>',
to=['<EMAIL>', email],
context={
'proposal_kind': options['kind'],
'user': proposals[0].speaker.user,
'domain': DOMAIN,
'proposal': proposals[0],
},
)
|
1643395
|
from django.db import models
from pyplan.pyplan.dashboard.models import Dashboard
from pyplan.pyplan.usercompanies.models import UserCompany
class DashboardComment(models.Model):
comment = models.TextField()
extra_data = models.TextField()
created_at = models.DateTimeField(auto_now_add=True)
dashboard = models.ForeignKey(Dashboard, on_delete=models.CASCADE, null=True)
owner = models.ForeignKey(UserCompany, on_delete=models.DO_NOTHING)
class Meta:
permissions = (
# "CREATEDASHBOARDCOMMENT" >> add_dashboardcomment
# "LISTDASHBOARDCOMMENTS"
("list_dashboardcomments", "Can list dashboard comments"),
)
|
1643446
|
def extractYuikotranslationWordpressCom(item):
'''
Parser for 'yuikotranslation.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
chp_prefixes = [
('The Daily Life of Being the Campus Idol’s Fake Boyfriend Chapter ', 'The Daily Life of Being the Campus Idol’s Fake Boyfriend', 'translated'),
('The Daily Life of Being the Campus Idol’s Fake Boyfriend’s Chapter ', 'The Daily Life of Being the Campus Idol’s Fake Boyfriend', 'translated'),
]
for prefix, series, tl_type in chp_prefixes:
if item['title'].lower().startswith(prefix.lower()):
return buildReleaseMessageWithType(item, series, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
1643472
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from typing import Any
from typing import List
from typing import Optional
from ....types import tensor_dict_type
from ....protocol import TrainerState
from ....protocol import ModelProtocol
from ....constants import INPUT_KEY
from ....constants import PREDICTIONS_KEY
from ....constants import ORIGINAL_LABEL_KEY
from ....modules.blocks import get_conv_blocks
from ....modules.blocks import Conv2d
from ....modules.blocks import Lambda
from ....modules.blocks import ChannelPadding
class MaskedConv2d(Conv2d):
mask: Tensor
def __init__(
self,
mask_type: str,
in_channels: int,
out_channels: int,
*,
kernel_size: int,
groups: int = 1,
stride: int = 1,
dilation: int = 1,
padding: Any = "same",
transform_kernel: bool = False,
bias: bool = True,
demodulate: bool = False,
):
assert mask_type in {"A", "B"}
super().__init__(
in_channels,
out_channels,
kernel_size=kernel_size,
groups=groups,
stride=stride,
dilation=dilation,
padding=padding,
transform_kernel=transform_kernel,
bias=bias,
demodulate=demodulate,
)
self.register_buffer("mask", self.weight.data.clone())
_, _, h, w = self.weight.shape
self.mask.fill_(1.0)
self.mask[:, :, h // 2, w // 2 + (mask_type == "B") :] = 0.0
self.mask[:, :, h // 2 + 1 :] = 0.0
def forward(
self,
net: Tensor,
style: Optional[Tensor] = None,
*,
transpose: bool = False,
) -> Tensor:
self.weight.data *= self.mask
return super().forward(net, style, transpose=transpose)
@ModelProtocol.register("pixel_cnn")
class PixelCNN(ModelProtocol):
def __init__(
self,
in_channels: int,
num_classes: int,
need_embedding: bool = False,
latent_channels: int = 128,
*,
norm_type: Optional[str] = "batch",
num_layers: int = 6,
channel_padding: Optional[int] = 16,
num_conditional_classes: Optional[int] = None,
):
super().__init__()
if in_channels != 1:
raise ValueError("`PixelCNN` requires `in_channels` to be 1")
self.in_channels = in_channels
self.num_classes = num_classes
self.latent_channels = latent_channels
self.num_conditional_classes = num_conditional_classes
def _get_block(in_nc: int, out_nc: int, mask_type: str) -> nn.Sequential:
return nn.Sequential(
*get_conv_blocks(
in_nc,
out_nc,
7,
1,
bias=False,
norm_type=norm_type,
activation=nn.LeakyReLU(0.2, inplace=True),
conv_base=MaskedConv2d,
padding=3,
mask_type=mask_type,
)
)
# to blocks
if not need_embedding:
start_channels = in_channels
normalize = lambda t: t.float() / (num_classes - 1)
self.to_blocks = nn.Sequential(Lambda(normalize, name="normalize"))
else:
start_channels = latent_channels
self.to_blocks = nn.Sequential(
Lambda(lambda t: t.squeeze(1), name="squeeze"),
nn.Embedding(num_classes, latent_channels),
Lambda(lambda t: t.permute(0, 3, 1, 2), name="permute"),
)
# channel padding
self.channel_padding = None
if channel_padding is not None:
self.channel_padding = ChannelPadding(
start_channels,
channel_padding,
num_classes=num_conditional_classes,
)
elif num_conditional_classes is not None:
raise ValueError(
"`channel_padding` should be provided "
"when `num_conditional_classes` is provided"
)
# blocks
blocks: List[nn.Module] = [_get_block(start_channels, latent_channels, "A")]
for i in range(num_layers - 1):
blocks.append(_get_block(latent_channels, latent_channels, "B"))
blocks.append(Conv2d(latent_channels, num_classes, kernel_size=1))
self.net = nn.Sequential(*blocks)
def forward(
self,
batch_idx: int,
batch: tensor_dict_type,
state: Optional[TrainerState] = None,
**kwargs: Any,
) -> tensor_dict_type:
net = batch[INPUT_KEY]
net = self.to_blocks(net)
if self.channel_padding is not None:
net = self.channel_padding(net, batch[ORIGINAL_LABEL_KEY])
return {PREDICTIONS_KEY: self.net(net)}
def sample(
self,
num_sample: int,
img_size: int,
class_idx: Optional[int] = None,
**kwargs: Any,
) -> Tensor:
shape = num_sample, self.in_channels, img_size, img_size
sampled = torch.zeros(shape, dtype=torch.long, device=self.device)
if self.num_conditional_classes is None:
labels = None
else:
if class_idx is not None:
labels = torch.full([num_sample], class_idx, device=self.device)
else:
labels = torch.randint(
self.num_conditional_classes,
[num_sample],
device=self.device,
)
for i in range(img_size):
for j in range(img_size):
batch = {INPUT_KEY: sampled, ORIGINAL_LABEL_KEY: labels}
out = self.forward(0, batch, **kwargs)[PREDICTIONS_KEY]
probabilities = F.softmax(out[:, :, i, j], dim=1).data
sampled[:, :, i, j] = torch.multinomial(probabilities, 1)
return sampled
__all__ = ["PixelCNN"]
|
1643493
|
import requests
import urllib3
from loguru import logger
from requests_toolbelt import MultipartEncoder
from PicImageSearch.Utils.iqdb import IqdbResponse
class Iqdb:
"""
Iqdb and Iqdb 3d
-----------
Reverse image from http://www.iqdb.org\n
Params Keys
-----------
:param **requests_kwargs: proxy settings
"""
def __init__(self, **requests_kwargs):
self.url = 'http://www.iqdb.org/'
self.url_3d = 'http://3d.iqdb.org/'
self.requests_kwargs = requests_kwargs
@staticmethod
def _errors(code):
if code == 404:
return "Source down"
elif code == 302:
return "Moved temporarily, or blocked by captcha"
elif code == 413 or code == 430:
return "Image too large"
elif code == 400:
return "Did you upload the image? Or the request syntax is wrong"
elif code == 403:
return "Forbidden, or token invalid"
elif code == 429:
return "Too many requests"
elif code == 500 or code == 503:
return "Server error, or wrong picture format"
else:
return "Unknown error, please report it to the project maintainer"
def search(self, url):
"""
Iqdb
-----------
Reverse image from http://www.iqdb.org\n
Return Attributes
-----------
• .origin = Raw data from the scraper\n
• .raw = Simplified data from the scraper\n
• .raw[0].content = Content of the first match ('Best match' at index 0, 'Additional match' at index 1 and later)\n
• .raw[0].title = First index of title that was found\n
• .raw[0].url = First index of url source that was found\n
• .raw[0].thumbnail = First index of url image that was found\n
• .raw[0].similarity = First index of similarity image that was found\n
• .raw[0].size = First index detail of image size that was found
"""
try:
if url[:4] == 'http':  # remote URL
datas = {
"url": url
}
res = requests.post(self.url, data=datas, **self.requests_kwargs)
else:  # local file
m = MultipartEncoder(
fields={
'file': ('filename', open(url, 'rb'), "type=multipart/form-data")
}
)
headers = {'Content-Type': m.content_type}
urllib3.disable_warnings()
res = requests.post(self.url, data=m, headers=headers, **self.requests_kwargs)
if res.status_code == 200:
return IqdbResponse(res.content)
else:
logger.error(self._errors(res.status_code))
except Exception as e:
logger.error(e)
def search_3d(self, url):
"""
Iqdb 3D
-----------
Reverse image from http://3d.iqdb.org\n
Return Attributes
-----------
• .origin = Raw data from the scraper\n
• .raw = Simplified data from the scraper\n
• .raw[0].content = Content of the first match ('Best match' at index 0, 'Additional match' at index 1 and later)\n
• .raw[0].title = First index of title that was found\n
• .raw[0].url = First index of url source that was found\n
• .raw[0].thumbnail = First index of url image that was found\n
• .raw[0].similarity = First index of similarity image that was found\n
• .raw[0].size = First index detail of image size that was found
"""
try:
if url[:4] == 'http':  # remote URL
datas = {
"url": url
}
res = requests.post(self.url_3d, data=datas, **self.requests_kwargs)
else:  # local file
m = MultipartEncoder(
fields={
'file': ('filename', open(url, 'rb'), "type=multipart/form-data")
}
)
headers = {'Content-Type': m.content_type}
urllib3.disable_warnings()
res = requests.post(self.url_3d, data=m, headers=headers, **self.requests_kwargs)
if res.status_code == 200:
return IqdbResponse(res.content)
else:
logger.error(self._errors(res.status_code))
except Exception as e:
logger.error(e)
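# --- Usage sketch (added for illustration; the image URL below is a placeholder):
if __name__ == "__main__":
    iqdb = Iqdb()
    result = iqdb.search("https://example.com/some_image.jpg")
    if result is not None and result.raw:
        best = result.raw[0]
        print(best.similarity, best.url)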
|
1643506
|
import sut
import random
t = sut.t()
LEN = 2000
N = 1000
for n in xrange(0,N):
t.restart()
for s in xrange(0,LEN):
oldstate = t.state()
next = []
for action in t.enabled():
action[2]()
assert(t.check())
fitness = 0
avl1 = t.p_AVL[0]
avl2 = t.p_AVL[1]
if avl1 != None:
fitness = len(avl1.inorder_traverse())
if avl2 != None:
fitness = max(fitness, len(avl2.inorder_traverse()))
next.append((action, fitness))
t.backtrack(oldstate)
sortedacts = sorted(next, key=lambda x : x[1], reverse = True)
print sortedacts[0]
sortedacts[0][0][2]()
|
1643523
|
from django.urls import reverse
from rest_framework.test import APIClient
from apis.betterself.v1.events.tests.views.test_views import User
from apis.betterself.v1.signup.fixtures.builders import DemoHistoricalDataBuilder
from apis.betterself.v1.tests.mixins.test_post_requests import PostRequestsTestsMixin
from apis.betterself.v1.tests.test_base import BaseAPIv1Tests
from events.models import SupplementReminder
from supplements.models import Supplement
class TestSupplementReminderViews(BaseAPIv1Tests, PostRequestsTestsMixin):
TEST_MODEL = SupplementReminder
PAGINATION = False
@classmethod
def setUpTestData(cls):
cls.user_1, _ = User.objects.get_or_create(username='default')
builder = DemoHistoricalDataBuilder(cls.user_1)
builder.create_historical_fixtures()
builder.create_supplement_reminders(limit=4)
cls.url = reverse(SupplementReminder.RESOURCE_NAME)
super().setUpTestData()
def setUp(self):
supplement = Supplement.objects.filter(user=self.user_1).first()
supplement_uuid = str(supplement.uuid)
self.DEFAULT_POST_PARAMS = {
'reminder_time': '15:20',
'quantity': 5,
'supplement_uuid': supplement_uuid
}
self.client_1 = self.create_authenticated_user_on_client(APIClient(), self.user_1)
self.client_2 = self.create_authenticated_user_on_client(APIClient(), self.user_2)
def test_post_when_over_limit(self):
# hardcoded value of 5 to prevent spam
supplements = Supplement.objects.filter(user=self.user_1)
for supplement in supplements:
params = {
'reminder_time': '10:20',
'quantity': 5,
'supplement_uuid': str(supplement.uuid)
}
self.client_1.post(self.url, data=params)
cutoff_limit = 5
user_supplement_reminders = SupplementReminder.objects.filter(user=self.user_1).count()
self.assertEqual(cutoff_limit, user_supplement_reminders)
def test_view_no_auth(self):
client = APIClient()
response = client.get(self.url)
self.assertEqual(response.status_code, 401)
def test_view_no_data(self):
new_user, _ = User.objects.get_or_create(username='no-data')
client = APIClient()
client.force_login(new_user)
response = client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 0)
def test_view(self):
response = self.client_1.get(self.url)
self.assertEqual(response.status_code, 200)
supplement_reminder_count = SupplementReminder.objects.filter(user=self.user_1).count()
self.assertEqual(supplement_reminder_count, len(response.data))
|
1643552
|
from .random_sampling import RandomSampling
from .least_confidence import LeastConfidence
from .margin_sampling import MarginSampling
from .entropy_sampling import EntropySampling
from .least_confidence_dropout import LeastConfidenceDropout
from .margin_sampling_dropout import MarginSamplingDropout
from .entropy_sampling_dropout import EntropySamplingDropout
from .kmeans_sampling import KMeansSampling
from .kcenter_greedy import KCenterGreedy
from .bayesian_active_learning_disagreement_dropout import BALDDropout
from .adversarial_bim import AdversarialBIM
from .adversarial_deepfool import AdversarialDeepFool
|
1643553
|
import argparse
from celery_app.celery_tasks import *
# Create the parser
arg_parser = argparse.ArgumentParser(
prog="python -m scripts.fetchers.rest",
description="Starts a REST fetcher for an exchange using Celery"
)
# Add the arguments
arg_parser.add_argument(
'action',
metavar='action',
type=str,
choices=["fetch", "resume"],
help='fetch or resume'
)
arg_parser.add_argument(
'--exchange',
metavar='exchange',
type=str,
required=True,
help='name of the exchange'
)
arg_parser.add_argument(
'--start',
metavar='start',
type=str,
help='Start date; Must comply to this format: %%Y-%%m-%%dT%%H:%%M:%%S; \nMust be entered if action is fetch',
# required=True,
)
arg_parser.add_argument(
'--end',
metavar='end',
type=str,
help='End date; Must comply to this format: %%Y-%%m-%%dT%%H:%%M:%%S; \nMust be entered if action is fetch',
# required=True,
)
# Execute the parse_args() method
args = arg_parser.parse_args()
action = args.action
exchange = args.exchange
start = args.start
end = args.end
if action == "fetch":
if exchange == "bitfinex":
bitfinex_fetch_ohlcvs_all_symbols.delay(start, end)
elif exchange == "binance":
binance_fetch_ohlcvs_all_symbols.delay(start, end)
elif exchange == "bittrex":
bittrex_fetch_ohlcvs_all_symbols.delay(start, end)
elif action == "resume":
if exchange == "bitfinex":
bitfinex_resume_fetch.delay()
elif exchange == "binance":
binance_resume_fetch.delay()
elif exchange == "bittrex":
bittrex_resume_fetch.delay()
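# Example invocations (illustrative, derived from the argument definitions above):
#   python -m scripts.fetchers.rest fetch --exchange binance --start 2021-01-01T00:00:00 --end 2021-01-02T00:00:00
#   python -m scripts.fetchers.rest resume --exchange bitfinex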
|
1643565
|
import sys
import binascii
import re
class StdinController:
def __init__(self, model, informat):
if informat not in ['utf8', 'hex']:
raise NotImplementedError('"{}" is not a valid input format for this controller'.format(informat))
self.informat = informat
self.model = model
self.spaceregex = re.compile(r'\s+')
def read_lines(self):
for num, line in enumerate(sys.stdin):
if self.informat == 'utf8':
data = line[:-1]
elif self.informat == 'hex':
hexs = re.sub(self.spaceregex, '', line)
data = binascii.unhexlify(hexs)
self.model.add(data, info='{}'.format(num))
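if __name__ == '__main__':
    # Minimal usage sketch (added example, not part of the original module):
    # the real model object is provided elsewhere in this project; here a tiny
    # stand-in that simply prints whatever is added is used instead.
    class _PrintModel:
        def add(self, data, info=None):
            print(info, data)

    StdinController(_PrintModel(), 'utf8').read_lines()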
|
1643586
|
from typing import List
from app.models.domain.comments import Comment
from app.models.schemas.rwschema import RWSchema
class ListOfCommentsInResponse(RWSchema):
comments: List[Comment]
class CommentInResponse(RWSchema):
comment: Comment
class CommentInCreate(RWSchema):
body: str
|
1643602
|
from typing import Dict
import torch
import numpy as np
from catalyst.dl.core import Callback, RunnerState, CallbackOrder
import cv2
from collections import OrderedDict
def calculate_confusion_matrix_from_arrays(
prediction: np.array, ground_truth: np.array, num_classes: int
) -> np.array:
"""Calculate confusion matrix for a given set of classes.
if GT value is outside of the [0, num_classes) it is excluded.
Args:
prediction:
ground_truth:
num_classes:
Returns:
"""
# a long 2xn array with each column being a pixel pair
replace_indices = np.vstack((ground_truth.flatten(), prediction.flatten()))
valid_index = replace_indices[0, :] < num_classes
replace_indices = replace_indices[:, valid_index].T
# add up confusion matrix
confusion_matrix, _ = np.histogramdd(
replace_indices,
bins=(num_classes, num_classes),
range=[(0, num_classes), (0, num_classes)],
)
return confusion_matrix.astype(np.uint64)
def get_confusion_matrix(y_pred_logits: torch.Tensor, y_true: torch.Tensor):
num_classes = y_pred_logits.shape[1]
y_pred = torch.argmax(y_pred_logits, dim=1)
ground_truth = y_true.cpu().numpy()
prediction = y_pred.cpu().numpy()
    return calculate_confusion_matrix_from_arrays(prediction, ground_truth, num_classes)
def calculate_tp_fp_fn(confusion_matrix):
true_positives = {}
false_positives = {}
false_negatives = {}
for index in range(confusion_matrix.shape[0]):
true_positives[index] = confusion_matrix[index, index]
false_positives[index] = (
confusion_matrix[:, index].sum() - true_positives[index]
)
false_negatives[index] = (
confusion_matrix[index, :].sum() - true_positives[index]
)
return {
"true_positives": true_positives,
"false_positives": false_positives,
"false_negatives": false_negatives,
}
def calculate_dice(tp_fp_fn_dict):
epsilon = 1e-7
dice = {}
for i in range(len(tp_fp_fn_dict["true_positives"])):
tp = tp_fp_fn_dict["true_positives"][i]
fp = tp_fp_fn_dict["false_positives"][i]
        fn = tp_fp_fn_dict["false_negatives"][i]
dice[i] = (2 * tp + epsilon) / (2 * tp + fp + fn + epsilon)
if not 0 <= dice[i] <= 1:
raise ValueError()
return dice
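# Worked example (illustrative, not part of the original module): for a 2-class
# problem where every pixel is predicted correctly, the confusion matrix is
# diagonal, all false positives/negatives are zero, and each class gets a Dice
# score of 1.0:
#
#   cm = np.array([[3, 0], [0, 2]], dtype=np.uint64)
#   calculate_dice(calculate_tp_fp_fn(cm))  # -> {0: 1.0, 1: 1.0}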
class MulticlassDiceMetricCallback(Callback):
def __init__(
self,
prefix: str = "dice",
input_key: str = "targets",
output_key: str = "logits",
**metric_params,
):
super().__init__(CallbackOrder.Metric)
self.prefix = prefix
self.input_key = input_key
self.output_key = output_key
self.metric_params = metric_params
self.confusion_matrix = None
self.class_names = metric_params[
"class_names"
] # dictionary {class_id: class_name}
self.class_prefix = metric_params["class_prefix"]
def _reset_stats(self):
self.confusion_matrix = None
def on_batch_end(self, state: RunnerState):
outputs = state.output[self.output_key]
targets = state.input[self.input_key]
confusion_matrix = get_confusion_matrix(outputs, targets)
if self.confusion_matrix is None:
self.confusion_matrix = confusion_matrix
else:
self.confusion_matrix += confusion_matrix
def on_loader_end(self, state: RunnerState):
tp_fp_fn_dict = calculate_tp_fp_fn(self.confusion_matrix)
batch_metrics: Dict = calculate_dice(tp_fp_fn_dict)
for metric_id, dice_value in batch_metrics.items():
if metric_id not in self.class_names:
continue
metric_name = self.class_names[metric_id]
state.metrics.epoch_values[state.loader_name][
f"{self.class_prefix}_{metric_name}"
] = dice_value
state.metrics.epoch_values[state.loader_name]["mean"] = np.mean(
[x for x in batch_metrics.values()]
)
self._reset_stats()
class CustomSegmentationInferCallback(Callback):
def __init__(self, return_valid: bool = False):
super().__init__(CallbackOrder.Internal)
self.valid_masks = []
self.probabilities = np.zeros((2220, 350, 525))
self.return_valid = return_valid
def on_batch_end(self, state: RunnerState):
image, mask = state.input
output = state.output["logits"]
if self.return_valid:
for m in mask:
if m.shape != (350, 525):
m = cv2.resize(m, dsize=(525, 350), interpolation=cv2.INTER_LINEAR)
self.valid_masks.append(m)
for j, probability in enumerate(output):
if probability.shape != (350, 525):
probability = cv2.resize(
probability, dsize=(525, 350), interpolation=cv2.INTER_LINEAR
)
self.probabilities[j, :, :] = probability
|
1643608
|
import torch
from torch import nn
import gym
from gym.spaces import Box, Discrete, Space
from copy import copy, deepcopy
import numpy as np
from typing import Optional, Union, Iterable, List, Dict, Tuple, Any
from numbers import Real, Integral
from .runningstat import RunningStat
from .misc import (
fill_parameters,
get_parameter_vector,
positive_int_or_none,
positive_int,
positive_float,
get_env_spaces,
get_1D_box_length,
get_action_space_length
)
ParamVector = Union[List[Real], np.ndarray]
Action = Union[List[Real], np.ndarray, Integral]
class Policy:
"""Base class for a policy."""
def __init__(self,
*,
env_name: str,
env_config: Optional[dict]=None,
observation_normalization: bool=True,
seed: Optional[Integral]=None):
"""``__init__(...)``: Initialize the policy object.
The initializer must be called from the initializer
of the inheriting classes.
Args:
env_name: Expected as a string specifying the gym
environment ID (e.g. 'Humanoid-v2').
env_config: Expected as None, or as a dictionary
containing the keyword arguments to be passed
to ``gym.make`` when creating the environment.
observation_normalization: Expected as boolean,
specifying whether or not the observations
are to be normalized.
seed: Expected as None or as an integer.
Pass here an integer for explicitly setting a
random seed for the stochastic operations of
the gym environment.
"""
self._policy: nn.Module
if bool(observation_normalization):
self._main_obs_stats = RunningStat()
self._collected_obs_stats = RunningStat()
else:
self._main_obs_stats = None
self._collected_obs_stats = None
if not isinstance(env_name, str):
raise TypeError(
"Environment name was expected as an str,"
+ " but it was received as: "
+ repr(env_name)
)
self._env_name = env_name
if env_config is None:
self._env_config = {}
else:
self._env_config = env_config
self._env: Optional[gym.Env] = None
self._observation_space, self._action_space = (
get_env_spaces(self._env_name, self._env_config)
)
self._seed = seed
self._collect_obs_stats = True
self.notes: Any = None
def _get_env(self) -> gym.Env:
if self._env is None:
self._env = gym.make(self._env_name, **(self._env_config))
if self._seed is not None:
self._env.seed(self._seed)
return self._env
def __getstate__(self):
state = {"_env": None}
for k, v in self.__dict__.items():
if k != "_env":
state[k] = v
return state
def __setstate__(self, state):
state: dict
for k, v in state.items():
self.__dict__[k] = v
def _use_policy(self, observation: Iterable[Real]) -> Action:
x = torch.as_tensor(observation, dtype=torch.float32)
with torch.no_grad():
action = self._policy(x).numpy()
if isinstance(self._action_space, Box):
action = np.clip(
action,
self._action_space.low,
self._action_space.high
)
elif isinstance(self._action_space, Discrete):
action = np.argmax(action)
else:
raise TypeError(
"Cannot work with this action space: "
+ repr(self._action_space)
)
return action
def run(self,
*,
decrease_rewards_by: Real=0.0,
max_episode_length: Optional[Integral]=None) -> Tuple[float, int]:
"""Run an episode.
Args:
decrease_rewards_by: The reward at each timestep will be
decreased by this given amount.
max_episode_length: The maximum number of interactions
allowed in an episode.
Returns:
A tuple (cumulative_reward, number_of_interactions).
"""
max_episode_length = positive_int_or_none(max_episode_length)
def normalized(obs):
if self._main_obs_stats is not None:
if self._collect_obs_stats:
self._main_obs_stats.update(obs)
self._collected_obs_stats.update(obs)
return self._main_obs_stats.normalize(obs)
else:
return obs
t = 0
cumulative_reward = 0.0
env = self._get_env()
observation = env.reset()
observation = normalized(observation)
while True:
action = self._use_policy(observation)
observation, reward, done, info = env.step(action)
observation = normalized(observation)
t += 1
reward -= decrease_rewards_by
cumulative_reward += reward
if max_episode_length is not None and t > max_episode_length:
break
if done:
break
return cumulative_reward, t
def set_params_and_run(self,
policy_parameters: ParamVector,
*,
decrease_rewards_by: Real=0.0,
max_episode_length: Optional[Integral]=None) -> (
Tuple[float, int]):
"""Set the the parameters of the policy by copying them
from the given parameter vector, then run an episode.
Args:
policy_parameters: The policy parameters to be used.
decrease_rewards_by: The reward at each timestep will be
decreased by this given amount.
max_episode_length: The maximum number of interactions
allowed in an episode.
Returns:
A tuple (cumulative_reward, number_of_interactions).
"""
self.set_parameters(policy_parameters)
return self.run(
decrease_rewards_by=decrease_rewards_by,
max_episode_length=max_episode_length
)
def _run_from_list(self,
policy_param_list: List[ParamVector],
*,
decrease_rewards_by: Real=0.0,
max_episode_length: Optional[Integral]=None) -> (
List[Tuple[float, int]]):
results = []
for policy_params in policy_param_list:
results.append(
self.set_params_and_run(
policy_params,
decrease_rewards_by=decrease_rewards_by,
max_episode_length=max_episode_length
)
)
return results
def _run_from_dict(self,
policy_param_dict: Dict[Any, ParamVector],
*,
decrease_rewards_by: Real=0.0,
max_episode_length: Optional[Integral]=None) -> (
Dict[Any, Tuple[float, int]]):
results = {}
for policy_key, policy_params in policy_param_dict.items():
results[policy_key] = (
self.set_params_and_run(
policy_params,
decrease_rewards_by=decrease_rewards_by,
max_episode_length=max_episode_length
)
)
return results
def set_params_and_run_all(self,
policy_params_all: Union[
List[ParamVector],
Dict[Any, ParamVector]
],
*,
decrease_rewards_by: Real=0.0,
max_episode_length: Optional[Integral]=None) -> (
Union[
List[Tuple[float, int]],
Dict[Any, Tuple[float, int]]
]
):
"""For each of the items in the given parameters dictionary,
set the the parameters of the policy by copying them
from the given parameter vector, then run an episode.
Args:
policy_params_all: A dictionary, mapping a policy identifier
to a policy parameter vector.
For example, the policy identifier here could possibly
be an integer specifying the index of the
parameter vector within a batch of parameter vectors.
decrease_rewards_by: The reward at each timestep will be
decreased by this given amount.
max_episode_length: The maximum number of interactions
allowed in an episode.
Returns:
A dictionary where each item maps the policy identifier key
to a tuple (cumulative_reward, number_of_interactions).
"""
kwargs = dict(
decrease_rewards_by=decrease_rewards_by,
max_episode_length=max_episode_length
)
received_dict = (
hasattr(policy_params_all, "keys")
and hasattr(policy_params_all, "values")
)
if received_dict:
return self._run_from_dict(policy_params_all, **kwargs)
else:
return self._run_from_list(policy_params_all, **kwargs)
def set_parameters(self, parameters: ParamVector):
"""Set the parameters of the policy by copying the values
from the given parameter vector.
Args:
parameters: The parameter vector.
"""
if isinstance(parameters, np.ndarray):
parameters = parameters.copy()
x = torch.as_tensor(parameters, dtype=torch.float32)
fill_parameters(self._policy, x)
def get_parameters(self) -> np.ndarray:
"""Get the parameters of the policy as a 1-D numpy array.
Returns:
The parameter vector.
"""
return get_parameter_vector(self._policy).numpy()
def pop_collected_obs_stats(self) -> RunningStat:
"""Get the collected observation statistics.
When this method is called, the contained collected
statistics are removed.
Returns:
The collected observation statistics.
"""
if self._collected_obs_stats is None:
raise ValueError(
"Observation stats are not configured to be collected,"
" therefore, they cannot be popped."
)
result = self._collected_obs_stats
self._collected_obs_stats = RunningStat()
return result
def set_main_obs_stats(self, obs_stats: RunningStat):
"""Set the observation statistics to be used for
observation normalization.
Args:
obs_stats: A RunningStat object containing the statistics.
"""
if obs_stats is None:
raise ValueError(
"The main observation stats cannot be given as None."
)
self._main_obs_stats = deepcopy(obs_stats)
def get_main_obs_stats(self) -> Optional[RunningStat]:
"""Get the observation statistics used for
observation normalization.
Returns:
A RunningStat object containing the statistics.
"""
return self._main_obs_stats
def update_main_obs_stats(self, obs_stats: Union[RunningStat, np.ndarray]):
"""Update the observation statistics used for
observation normalization.
Args:
obs_stats: A RunningStat object or a numpy array
(a numpy array representing a single observation vector).
"""
if self._main_obs_stats is None:
raise ValueError(
"There is no observation stats to update."
+ " Was "
+ repr(self)
+ " initialized with observation_normalization=False?"
)
self._main_obs_stats.update(obs_stats)
def get_parameters_count(self) -> int:
"""Get the number of parameters of the policy
(also corresponds to the length of parameter vector).
"""
return len(self.get_parameters())
def get_collect_obs_stats(self) -> bool:
"""Get, as boolean, whether or not the policy is configured
to collect observation statistics when running episodes.
Returns:
A boolean.
"""
return self._collect_obs_stats
def set_collect_obs_stats(self, b: bool):
"""Set, as boolean, whether or not the policy is to collect
observation statistics when running episodes.
Args:
b: A boolean.
"""
self._collect_obs_stats = bool(b)
class LinearPolicy(Policy):
"""A linear policy."""
def __init__(self,
*,
env_name: str,
env_config: Optional[dict]=None,
observation_normalization: bool=True,
seed: Optional[Integral]=None,
bias: bool=True):
"""``__init__(...)``: Initialize the linear policy.
Args:
env_name: Expected as a string specifying the gym
environment ID (e.g. 'Humanoid-v2').
env_config: Expected as None, or as a dictionary
containing the keyword arguments to be passed
to ``gym.make`` when creating the environment.
observation_normalization: Expected as boolean,
specifying whether or not the observations
are to be normalized.
seed: Expected as None or as an integer.
Pass here an integer for explicitly setting a
random seed for the stochastic operations of
the gym environment.
bias: Expected as a boolean, specifying whether or
not the linear policy will have bias parameters.
"""
Policy.__init__(
self,
env_name=env_name,
env_config=env_config,
observation_normalization=observation_normalization,
seed=seed
)
obs_length = get_1D_box_length(self._observation_space)
act_length = get_action_space_length(self._action_space)
self._policy = nn.Linear(obs_length, act_length, bias=bias)
class MLPPolicy(Policy):
"""A multi-layer perceptron policy."""
ACTIVATION_CLS = {
"tanh": nn.Tanh,
"relu": nn.ReLU
}
def __init__(self,
*,
env_name: str,
env_config: Optional[dict]=None,
observation_normalization: bool=True,
seed: Optional[Integral]=None,
hidden_size: Integral=64,
num_hidden: Integral=1,
hidden_activation: str="tanh",
output_activation: Optional[str]=None):
"""
Args:
env_name: Expected as a string specifying the gym
environment ID (e.g. 'Humanoid-v2').
env_config: Expected as None, or as a dictionary
containing the keyword arguments to be passed
to ``gym.make`` when creating the environment.
observation_normalization: Expected as boolean,
specifying whether or not the observations
are to be normalized.
seed: Expected as None or as an integer.
Pass here an integer for explicitly setting a
random seed for the stochastic operations of
the gym environment.
hidden_size: Expected as an integer, specifying
the number of neurons in a hidden layer.
num_hidden: Expected as an integer, specifying
the number of hidden layers.
hidden_activation: The activation function to be
used by the hidden layer(s).
Expected as 'tanh' or 'relu'.
output_activation: Optional. The activation function
to be used by the output layer.
Can be given as 'tanh' or 'relu', or can be left
as None.
"""
Policy.__init__(
self,
env_name=env_name,
env_config=env_config,
observation_normalization=observation_normalization,
seed=seed
)
obs_length = get_1D_box_length(self._observation_space)
act_length = get_action_space_length(self._action_space)
hidden_size = positive_int(hidden_size)
num_hidden = positive_int(num_hidden)
if hidden_activation is None:
hidden_act_cls = None
else:
hidden_act_cls = self.ACTIVATION_CLS[hidden_activation]
if output_activation is None:
output_act_cls = None
else:
output_act_cls = self.ACTIVATION_CLS[output_activation]
layers = []
# first hidden layer
layers.append(nn.Linear(obs_length, hidden_size))
if hidden_act_cls is not None:
layers.append(hidden_act_cls())
# rest of the hidden layers (if any)
for _ in range(1, num_hidden):
layers.append(nn.Linear(hidden_size, hidden_size))
if hidden_act_cls is not None:
layers.append(hidden_act_cls())
# output layer
layers.append(nn.Linear(hidden_size, act_length))
if output_act_cls is not None:
layers.append(output_act_cls())
self._policy = nn.Sequential(*layers)
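# Usage sketch (illustrative; assumes a classic-API gym environment such as
# 'CartPole-v1' is available, matching the env.reset()/env.step() conventions
# this module is written against):
#
#   policy = LinearPolicy(env_name='CartPole-v1', observation_normalization=False)
#   params = np.random.randn(policy.get_parameters_count())
#   reward, steps = policy.set_params_and_run(params, max_episode_length=200)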
|
1643631
|
import tensorflow as tf
#import tensorlayer as tl
import numpy as np
class DQN(object):
def __init__(self, hps, name_variable):
self._hps = hps
self._name_variable = name_variable
def variable_summaries(self, var_name, var):
"""Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
with tf.name_scope('summaries_{}'.format(var_name)):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.histogram('histogram', var)
def _add_placeholders(self):
"""Add placeholders to the graph. These are entry points for any input data."""
self._x = tf.placeholder(tf.float32, [None, self._hps.dqn_input_feature_len], name='x') # size (dataset_len, input_feature_len)
self._y = tf.placeholder(tf.float32, [None, self._hps.vocab_size], name='y') # size (dataset_len, 1)
self._train_step = tf.placeholder(tf.int32, None,name='train_step')
def _make_feed_dict(self, batch):
feed_dict = {}
feed_dict[self._x] = batch._x
feed_dict[self._y] = batch._y
return feed_dict
def _add_tf_layers(self):
""" Based on the dqn_layers flag, it creates multiple dense layers to do the regression. """
h = tf.layers.dense(self._x, units = self._hps.dqn_input_feature_len, activation=tf.nn.relu, name='{}_input_layer'.format(self._name_variable))
for i, layer in enumerate(self._hps.dqn_layers.split(',')):
h = tf.layers.dense(h, units = int(layer), activation = tf.nn.relu, name='{}_h_{}'.format(self._name_variable, i))
self.advantage_layer = tf.layers.dense(h, units = self._hps.vocab_size, activation = tf.nn.softmax, name='{}_advantage'.format(self._name_variable))
if self._hps.dueling_net:
# in dueling net, we have two extra output layers; one for value function estimation
# and the other for advantage estimation, we then use the difference between these two layers
# to calculate the q-estimation
            value_layer = tf.layers.dense(h, units = 1, activation = tf.identity, name='{}_value'.format(self._name_variable))
            normalized_al = self.advantage_layer-tf.reshape(tf.reduce_mean(self.advantage_layer,axis=1),[-1,1]) # equation 9 in https://arxiv.org/pdf/1511.06581.pdf
            value_extended = tf.concat([value_layer] * self._hps.vocab_size, axis=1)
self.output = value_extended + normalized_al
else:
self.output = self.advantage_layer
def _add_train_op(self):
# In regression, the objective loss is Mean Squared Error (MSE).
self.loss = tf.losses.mean_squared_error(labels = self._y, predictions = self.output)
tvars = tf.trainable_variables()
gradients = tf.gradients(self.loss, tvars, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_TREE)
# Clip the gradients
with tf.device("/gpu:{}".format(self._hps.dqn_gpu_num)):
grads, global_norm = tf.clip_by_global_norm(gradients, self._hps.max_grad_norm)
# Add a summary
tf.summary.scalar('global_norm', global_norm)
# Apply adagrad optimizer
optimizer = tf.train.AdamOptimizer(self._hps.lr)
with tf.device("/gpu:{}".format(self._hps.dqn_gpu_num)):
self.train_op = optimizer.apply_gradients(zip(grads, tvars), global_step=self.global_step, name='train_step')
self.variable_summaries('dqn_loss',self.loss)
def _add_update_weights_op(self):
""" Updates the weight of the target network based on the current network. """
self.model_trainables = tf.trainable_variables(scope='{}_relay_network'.format(self._name_variable)) # target variables
self._new_trainables = [tf.placeholder(tf.float32, None,name='trainables_{}'.format(i)) for i in range(len(self.model_trainables))]
self.assign_ops = []
if self._hps.dqn_polyak_averaging: # target parameters are slowly updating using: \phi_target = \tau * \phi_target + (1-\tau) * \phi_target
tau = (tf.cast(self._train_step,tf.float32) % self._hps.dqn_target_update)/float(self._hps.dqn_target_update)
for i, mt in enumerate(self.model_trainables):
nt = self._new_trainables[i]
self.assign_ops.append(mt.assign(tau * mt + (1-tau) * nt))
else:
if self._train_step % self._hps.dqn_target_update == 0:
for i, mt in enumerate(self.model_trainables):
nt = self._new_trainables[i]
self.assign_ops.append(mt.assign(nt))
def build_graph(self):
with tf.variable_scope('{}_relay_network'.format(self._name_variable)), tf.device("/gpu:{}".format(self._hps.dqn_gpu_num)):
self.global_step = tf.Variable(0, name='global_step', trainable=False)
self._add_placeholders()
self._add_tf_layers()
self._add_train_op()
self._add_update_weights_op()
self._summaries = tf.summary.merge_all()
def run_train_steps(self, sess, batch):
feed_dict = self._make_feed_dict(batch)
to_return = {'train_op': self.train_op,
'summaries': self._summaries,
'loss': self.loss,
'global_step': self.global_step}
return sess.run(to_return, feed_dict)
def run_test_steps(self, sess, x, y=None, return_loss=False, return_best_action=False):
# when return_loss is True, the model will return the loss of the prediction
# return_loss should be False, during estimation (decoding)
feed_dict = {self._x:x}
to_return = {'estimates': self.output}
if return_loss:
feed_dict.update({self._y:y})
to_return.update({'loss': self.loss})
output = sess.run(to_return, feed_dict)
if return_best_action:
output['best_action']=np.argmax(output['estimates'],axis=1)
return output
def run_update_weights(self, sess, train_step, weights):
feed_dict = {self._train_step:train_step}
for i, w in enumerate(weights):
feed_dict.update({self._new_trainables[i]:w})
_ = sess.run(self.assign_ops, feed_dict)
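# Usage sketch (illustrative): `hps` is expected to be a hyper-parameter object
# exposing at least dqn_input_feature_len, vocab_size, dqn_layers, dueling_net,
# dqn_gpu_num, max_grad_norm, lr, dqn_polyak_averaging and dqn_target_update.
#
#   dqn = DQN(hps, 'current')
#   dqn.build_graph()
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       result = dqn.run_train_steps(sess, batch)  # batch exposes ._x and ._y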
|
1643650
|
class Solution(object):
def two_sum(self, nums, val):
if nums is None or val is None:
raise TypeError('nums or target cannot be None')
if not nums:
raise ValueError('nums cannot be empty')
cache = {}
for index, num in enumerate(nums):
cache_val = val - num
if num in cache:
return [cache[num], index]
else:
cache[cache_val] = index
return None
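if __name__ == '__main__':
    # Illustrative usage (added example): for nums=[2, 7, 11, 15] and val=9,
    # the pair 2 + 7 hits the target, so the indices [0, 1] are returned.
    solution = Solution()
    print(solution.two_sum([2, 7, 11, 15], 9))  # -> [0, 1]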
|
1643724
|
from yo_ds__tests.common import *
import seaborn
import math
class TestFluentPlot(TestCase):
def test_1_simple(self):
df = Query.en(range(10)).select(lambda z: Obj(x=z,y=z*z)).to_dataframe()
return df.feed(FluentPlot().call(lambda item,ax: ax.plot(item.x,item.y)).labels('Simplest fluent plot'))
def test_2_3rdparty(self):
df = Query.combinatorics.grid(x=range(5),y=range(5)).select(lambda z: z.update(z=z.x+z.y)).to_dataframe().pivot_table(columns='x',index='y',values='z')
return df.feed(FluentPlot().call(lambda item, ax: seaborn.heatmap(data=item,ax=ax)).labels('Fluent plot over 3rd party member from seaborn'))
def test_3_tuning(self):
df = Query.en(range(10)).select(lambda z: Obj(x=z, y=z * z)).to_dataframe()
return df.feed(FluentPlot()
.call(lambda item,ax: ax.plot(item.x,item.y,label='label'))
.labels('Plot with cosmetic tuning','X axis','Y axis')
.with_legend()
.tune(lambda ax: ax.tick_params(axis='x', rotation=45))
)
def test_4_groupby_plot(self):
df = Query.combinatorics.grid(a=range(5),x=range(10)).select(lambda z: z.update(y=z.x*z.a)).to_dataframe()
return df.groupby('a').feed(FluentPlot()
.call(lambda gr, ax: ax.plot(gr[1].x, gr[1].y, label = gr[0]))
.labels('Plot built for groupby')
.iterate()
.with_legend())
def test_5_df_columns_plot(self):
df = Query.combinatorics.grid(x=range(5), y=range(5)).select(lambda z: z.update(z=z.x + z.y)).to_dataframe().pivot_table(columns='x', index='y', values='z')
return df.feed(FluentPlot()
.call(lambda item,ax: ax.plot(item.index, item, label = item.name))
.iterate_df_columns()
.labels('Plot build for dataframe columns')
.with_legend())
def test_6_plots_on_different_axes(self):
df = Query.en(range(10)).select(lambda z: Obj(x=z, pow=z*z, sqrt=math.sqrt(z))).to_dataframe().set_index('x')
(Query
.en(df.columns)
.feed(fluq.with_plots(columns=2))
.foreach(lambda p: df[p.item].feed(FluentPlot()
.call(lambda item,ax: ax.plot(item.index, item))
.on(p.ax)
)))
def test_7_plot_from_series(self):
series = pd.Series([5,4,3,2,1])
series.feed(FluentPlot().call(lambda item, ax: item.plot(ax=ax)).size(20,10))
def test_8_plot_from_df(self):
df = Query.en(range(10)).select(lambda z: Obj(x=z, pow=z * z, sqrt=math.sqrt(z))).to_dataframe().set_index('x')
df.feed(FluentPlot().call(lambda item,ax: item.plot(ax=ax)).iterate_df_columns())
|
1643769
|
import json
import base64
import typing
import tempfile
from time import sleep
import re
from datetime import datetime
from datetime import timezone
import falcon
from mitmproxy import ctx
from mitmproxy import connections
from mitmproxy import version
from mitmproxy.utils import strutils
from mitmproxy.net.http import cookies
from mitmproxy import http
class LatencyResource:
def addon_path(self):
return "latency"
def __init__(self, latency_addon):
self.latency_addon = latency_addon
def on_get(self, req, resp, method_name):
getattr(self, "on_" + method_name)(req, resp)
def on_set_latency(self, req, resp):
self.latency_addon.latency_ms = int(req.get_param('latency'))
class LatencyAddOn:
def __init__(self):
self.num = 0
self.latency_ms = 0
def get_resource(self):
return LatencyResource(self)
def response(self, flow):
if self.latency_ms != 0:
sleep(self.latency_ms / 1000)
addons = [
LatencyAddOn()
]
|
1643771
|
from typing import Tuple
import numpy as np
from keras.callbacks import ReduceLROnPlateau
from sklearn.utils import class_weight
from vivid.estimators.base import MetaBlock
from vivid.sklearn_extend.neural_network import ScikitKerasClassifier, SKerasRegressor, ROCAucCallback
class BaseSkerasBlock(MetaBlock):
initial_params = {
'input_scaling': True,
'epochs': 30,
'batch_size': 128,
'workers': -1
}
def get_keras_callbacks(self, training_set, validation_set):
return [
ReduceLROnPlateau(patience=5, verbose=1)
]
def get_fit_params_on_each_fold(self,
model_params: dict,
training_set: Tuple[np.ndarray, np.ndarray],
validation_set: Tuple[np.ndarray, np.ndarray],
indexes_set: Tuple[np.ndarray, np.ndarray],
experiment) -> dict:
params = super(BaseSkerasBlock, self).get_fit_params_on_each_fold(
model_params=model_params,
training_set=training_set,
validation_set=validation_set,
indexes_set=indexes_set,
experiment=experiment)
add_params = {
'callbacks': self.get_keras_callbacks(training_set, validation_set),
'validation_data': validation_set,
}
params.update(add_params)
return params
class KerasClassifierBlock(BaseSkerasBlock):
model_class = ScikitKerasClassifier
def get_keras_callbacks(self, training_set, validation_set):
return [
*super(KerasClassifierBlock, self).get_keras_callbacks(training_set, validation_set),
ROCAucCallback(training_data=training_set, validation_data=validation_set),
]
def get_fit_params_on_each_fold(self,
model_params: dict,
training_set: Tuple[np.ndarray, np.ndarray],
validation_set: Tuple[np.ndarray, np.ndarray],
indexes_set: Tuple[np.ndarray, np.ndarray],
experiment) -> dict:
params = super(KerasClassifierBlock, self) \
.get_fit_params_on_each_fold(model_params, training_set, validation_set, indexes_set, experiment)
y = training_set[1]
weight = class_weight.compute_class_weight('balanced', np.unique(y), y)
params['class_weight'] = weight
return params
class KerasRegressorBlock(BaseSkerasBlock):
model_class = SKerasRegressor
|
1643785
|
import re
import os.path
from csv import DictWriter
from collections import defaultdict
from datetime import datetime
from django.core.management.base import BaseCommand
from elasticsearch_dsl.query import Q
from catalog.elastic_models import NACPDeclaration
from catalog.utils import title
AGGREGATED_FIELD_NAME = 'aggregated'
ORDER_BY = ['assets.total', 'incomes.total']
CATEGORY_MAP = {
"Поліція": 'a',
'Місцеві адміністрації та ради': 'b',
'Фонд державного майна': 'c',
'Кабмін, міністерства та підлеглі органи': 'd',
"Інспектори": 'e',
'Інші державні служби, комісії, і т.п.': 'f',
'Без категорії': 'g',
'Прокуратура': 'h',
'Парламент': 'j',
"Слідчі": 'k',
'Пенсійний фонд': 'l',
"Тюрми": 'm',
"Лікарі": 'n',
'Суд': 'p',
'НБУ': 'q',
'Адміністрація / Секретаріат Президента': 'r',
'Державний комітет телебачення і радіомовлення': 's',
'Мер': 't',
'НАБУ': 'u',
'Ректор': 'v',
'Антимонопольний комітет': 'w',
'Рахункова палата': 'x',
'ЦВК': 'y',
'Вища рада юстиції': 'z',
'СБУ': 'o',
'НАЗК': 'z2'
}
GROUPS_TO_EXPORT = [
"p", # 'Suddi'
"h", # 'Prokurory'
"b", # 'miscevaVlada'
"j", # 'centralnaVlada'
"n", # 'likari'
"e", # 'inspectory'
]
POSITION_MAP = {
"Ректор": [re.compile("\sректор"), " ректора"],
"Лікарі": ['лікар'],
"Тюрми": ['колоні', 'тюрм', "тюрьм", "колони"],
'Прокуратура': ["прокурор", "прокурат", "пркур", "прокрор", "прокруор"],
"Поліція": ["поліц"],
"Слідчі": ["следователь", "слідчий", "детектив", "слідчого", "поліці", "оперуповноважений"],
'Парламент': [
"апарат верховної ради україни", "верховна рада україни", "верховна рада", "народний депутат",
"народный депутат"
],
"Мер": ["м<NAME>"],
"Суд": [
"судя", "суду", "судя", "судья", "голова суду",
re.compile("суд$"), re.compile("суд,\s"), re.compile("суд\s"),
re.compile("голова\s.*суду"), re.compile("голова\s.*суда")
],
'Кабмін, міністерства та підлеглі органи': ["міністерств", re.compile("міністр(?!(ац|ат))"), ],
"Інспектори": ["інспектор", "інспекц"],
"Місцеві адміністрації та ради": [
'сільськ', "селищ", 'районної ради', "сільський голова", "обласний голова", "районний голова"
],
}
REGIONS_MAP = {
'Харківська область': 1,
'Львівська область': 2,
'Чернівецька область': 3,
'Донецька область': 4,
'!не визначено': 5,
'м. Київ': 6,
'Миколаївська область': 7,
'Дніпропетровська область': 8,
'Житомирська область': 9,
'Рівненська область': 10,
'Одеська область': 11,
'Київська область': 12,
'Закарпатська область': 13,
'Запорізька область': 14,
'Черкаська область': 15,
'Чернігівська область': 16,
'Сумська область': 17,
'Волинська область': 18,
'Івано-Франківська область': 19,
'Херсонська область': 20,
'Хмельницька область': 21,
'Тернопільська область': 22,
'Полтавська область': 23,
'Кіровоградська область': 24,
'Луганська область': 25,
'Вінницька область': 26,
'Кримська Автономна Республіка': 27
}
class Command(BaseCommand):
    help = ('Export aggregated values from NACP declarations '
            'into CSV format for visualisation made by <NAME>')
def __init__(self, *args, **kwargs):
super(Command, self).__init__(*args, **kwargs)
def add_arguments(self, parser):
parser.add_argument(
'destination',
help='Data dir',
)
parser.add_argument(
'--year', nargs='*', type=int,
choices=range(2015, datetime.now().year),
default=range(2015, datetime.now().year),
)
def get_raw_data(self, year, order_by, limit=10000):
to_export = NACPDeclaration.search().source(
include=[AGGREGATED_FIELD_NAME]).query("exists", field=AGGREGATED_FIELD_NAME)
to_export = to_export.query(
"bool",
must=[
Q("term", intro__doc_type="Щорічна"),
Q("term", intro__declaration_year=year)
],
must_not=[
Q("exists", field="corrected_declarations"),
Q("term", _id="nacp_e46bba0c-32d5-4b0d-a290-9fdc4afcc278"), # Fucking Melnytchuk
Q("term", _id="nacp_c67549d0-abc0-48fe-b529-9185efe1a3ce"), # Fucking idiots
Q("term", _id="nacp_2e07bb01-5ca8-4188-97c6-6297f7a4d2ad"), # Fucking idiots
Q("term", _id="nacp_f1b25e4d-e691-48d6-99b1-758e94764b91"), # Fucking Motsyor
Q("term", **{"{}__outlier".format(AGGREGATED_FIELD_NAME): True})
]
).sort(
{'{}.{}'.format(AGGREGATED_FIELD_NAME, order_by): {"order": "desc"}}
)[:limit]
res = []
for d in to_export.execute():
row = d[AGGREGATED_FIELD_NAME].to_dict()
if row[order_by] > 10000000000:
continue
row["id"] = d._id
res.append(row)
return res
def define_profession_group(self, group):
group = group.lower()
for k, v in POSITION_MAP.items():
for chunk in v:
if isinstance(chunk, str) and chunk.lower() in group:
return k
else:
if re.search(chunk, group):
return k
def categorize(self, data):
name_post = data["name_post"].replace(data["name"], "").strip(", ")
organization_group = data.get("organization_group", "")
if not organization_group or organization_group in [
'Без категорії',
'!не визначено',
'Кабмін, міністерства та підлеглі органи',
'Інші державні служби, комісії, і т.п.',
'Місцеві адміністрації та ради']:
organization_group = self.define_profession_group(name_post) or organization_group
organization_group = CATEGORY_MAP[organization_group]
return {
"incomes.total": round(float(data['incomes.total'] or 0)),
"estate.total_land": round(float(data['estate.total_land'] or 0)),
"estate.total_other": round(float(data['estate.total_other'] or 0)),
"assets.total": round(float(data['assets.total'] or 0)),
"assets.cash.total": round(float(data['assets.cash.total'] or 0)),
"vehicles_names": "/".join(data['vehicles.all_names'].split(';')),
"organization_group": organization_group,
"name": title(data["name"]),
"region": REGIONS_MAP[data["region"]],
"name_post": name_post,
"id": data["id"],
}
def trim(self, data, data_in_each_group=100, data_in_all_groups=300):
res = []
counts = defaultdict(int)
for d in data:
if d["organization_group"] in GROUPS_TO_EXPORT:
if (counts[d["organization_group"]] < data_in_each_group or
len(res) < data_in_all_groups):
res.append(d)
counts[d["organization_group"]] += 1
elif len(res) < data_in_all_groups:
res.append(d)
if (counts and min(counts.values()) >= data_in_each_group and
len(res) >= data_in_all_groups):
break
self.stdout.write("Exported {}".format(counts))
return res
def handle(self, *args, **options):
for year in options["year"]:
for order in ORDER_BY:
self.stdout.write("Exporting {} for {}".format(order, year))
data = self.get_raw_data(year, order, 100000)
categorized = [self.categorize(d) for d in data]
trimmed = self.trim(categorized)
with open(
os.path.join(options["destination"], "viz_{}.{}.csv".format(order, year)),
"w") as fp:
w = DictWriter(fp, fieldnames=trimmed[0].keys())
w.writeheader()
w.writerows(trimmed)
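# Example invocation (illustrative; the command name is the name of this file
# under the app's management/commands directory):
#   python manage.py <command_name> /path/to/output_dir --year 2016 2017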
|
1643789
|
class Solution:
def balancedStringSplit(self, s: str) -> int:
count = 0
j = 0
for i in range(2, len(s)+1, 2):
if s[j:i].count('L') == s[j:i].count('R'):
count += 1
j = i
return count
def balancedStringSplit_easy(self, s: str) -> int:
balancedCount = count = 0
for char in s:
if char == 'L':
count += 1
elif char == 'R':
count -= 1
if count == 0:
balancedCount += 1
return balancedCount
if __name__ == '__main__':
input = "RLRRRLLRLL"
instance = Solution()
solution = instance.balancedStringSplit(input)
print(solution)
|
1643798
|
if window.get_active_class() != 'gnome-terminal-server.Gnome-terminal':
keyboard.send_keys("<ctrl>+x")
else:
keyboard.send_keys("<ctrl>+<shift>+c")
|
1643799
|
import numpy as np
def euclidean_distances(X, Y=None, Y_norm_squared=None, X_norm_squared=None):
    '''
    Treat each row of the inputs as a sample and compute the pairwise
    Euclidean distance between the samples of the two matrices.
    :param X: matrix one
    :param Y: matrix two; defaults to X when not given
    :param Y_norm_squared: unused, kept for interface compatibility
    :param X_norm_squared: unused, kept for interface compatibility
    :return: pairwise distance matrix
    '''
X = np.array(X)
    Y = np.array(Y) if Y is not None else X  # fall back to X when Y is not given
dist_mat = np.dot(X, Y.T)
X_squared = np.sum(np.square(X), axis=1).reshape((dist_mat.shape[0], -1))
Y_squared = np.sum(np.square(Y), axis=1).reshape((-1, dist_mat.shape[1]))
squared_dist = X_squared - 2 * dist_mat + Y_squared
    squared_dist[squared_dist < 0] = 0  # clip small negatives caused by floating point error
return np.sqrt(squared_dist)
if __name__ == '__main__':
X = [[0, 1], [1, 1]]
Y = [[0, 0]]
print(euclidean_distances(X))
print(euclidean_distances(X, Y))
|
1643844
|
import mxnet as mx
import logging
data = mx.sym.Variable('data')
conv = mx.sym.Convolution(data=data, num_filter=128, kernel=(3,3), pad=(1,1),
name='conv1')
bn = mx.sym.BatchNorm(data=conv, name='bn1')
relu = mx.sym.Activation(data=bn, act_type='relu', name='relu1')
pool = mx.sym.Pooling(data=relu, kernel=(2,2), stride=(2,2), pool_type='max',
name='pool1')
fc = mx.sym.FullyConnected(data=pool, num_hidden=2, name='fc1')
sym = mx.sym.SoftmaxOutput(data=fc, name='softmax')
data = mx.nd.random.uniform(0,1,shape=(1000,3,224,224))
label = mx.nd.round(mx.nd.random.uniform(0,1,shape=(1000)))
train_data = mx.io.NDArrayIter(data={'data':data},
label={'softmax_label':label},
batch_size=8,
shuffle=True)
print(train_data.provide_data)
print(train_data.provide_label)
mod = mx.mod.Module(symbol=sym,context=mx.gpu(0))
logger = logging.getLogger()
logger.setLevel(logging.INFO)
mod.fit(train_data=train_data, num_epoch=5)
|
1643848
|
import pytest
import stk
from ...case_data import CaseData
@pytest.fixture(
scope='session',
params=(
lambda name: CaseData(
molecule=stk.ConstructedMolecule(
topology_graph=stk.cof.PeriodicHexagonal(
building_blocks={
stk.BuildingBlock(
smiles='BrC1=C(Br)[C+]=N1',
functional_groups=[stk.BromoFactory()],
): (
4, 5, 6, 7, 8, 9, 20, 21, 23, 24, 30, 36,
38, 40, 41, 42, 43, 46, 47, 52, 53, 60, 61,
),
stk.BuildingBlock(
smiles='BrN1N(Br)[C+]=N1',
functional_groups=[stk.BromoFactory()],
): (
10, 11, 12, 13, 14, 15, 22, 25, 26, 27, 28,
29, 37, 39, 44, 45, 54, 55, 56, 57, 58, 59,
31, 62, 63,
),
stk.BuildingBlock(
smiles=(
'Br[C+]1[C+]2[N+][C+2]C2(Br)[C+](I)[C+'
'](I)[C+](Br)[C+]1Br'
),
functional_groups=[
stk.BromoFactory(),
stk.IodoFactory(),
stk.FluoroFactory(),
],
): (0, 1, 18, 50, 51),
stk.BuildingBlock(
smiles=(
'Br[C+]1[C+]2[S][C+2]C2(Br)[C+](I)[C+]'
'(I)[C+](Br)[C+]1Br'
),
functional_groups=[
stk.BromoFactory(),
stk.IodoFactory(),
stk.FluoroFactory(),
],
): (2, 16, 34, 49),
stk.BuildingBlock(
smiles=(
'Br[C+]1[C+]2[S][O]C2(Br)[C+](I)[C+](I'
')[C+](Br)[C+]1Br'
),
functional_groups=[
stk.BromoFactory(),
stk.IodoFactory(),
stk.FluoroFactory(),
],
): (3, 17, 19, 32, 33, 35, 48),
},
lattice_size=(2, 2, 1),
vertex_alignments={0: 5},
),
),
smiles=(
'[C+]1=NC2=C1[C+]1[C+]3[C+]4C5=C(N=[C+]5)[C+]5[C+]6[C+'
']7[C+]8[C+]9C%10=C(N=[C+]%10)[C+]%10[C+]%11C%12=C([C+'
']=N%12)[C+]%12[C+]%13[C+]%14[C+]%15C%16=C(N=[C+]%16)['
'C+]%16[C+]%17C%18=C([C+]=N%18)[C+]%18[C+]([C+]%19[NH2'
'+][C+2]C%19([C+]%19[C+]%20C%21=C(N=[C+]%21)[C+]%21[C+'
']%22C%23=C(N=[C+]%23)[C+]%23[C+]%24C%25=C([C+]=N%25)['
'C+]%25[C+]%26[C+]%27C%28=C([C+]=N%28)C%28%29OS[C+]%28'
'[C+]%28C%30=C(N=[C+]%30)[C+]([C+]%30[C+]%21N%21[C+]=N'
'N%21[C+]%21[C+]%31C%32=C(N=[C+]%32)C%32%33[C+2][NH2+]'
'[C+]%32[C+]%32C%34=C(N=[C+]%34)[C+]%34[C+]([C+](C%35='
'C(N=[C+]%35)C5%35[C+2][NH2+][C+]9%35)[C+]5[C+]9[C+]%3'
'5SOC%35%34C%34=C(N=[C+]%34)[C+]%31[C+]%31C%34=C(N=[C+'
']%34)C%34%35OS[C+]%34[C+]%34C%36=C(N=[C+]%36)[C+]([C+'
']%17N%17N=[C+]N%17[C+]%31[C+]%17S[C+2]C%17%21N%17N=[C'
'+]N%17[C+]%18%20)[C+]%17SOC%17%18[C+]%16C%16=C([C+]=N'
'%16)[C+]%16[C+]%17C%20=C([C+]=N%20)C%20%21OS[C+]%20[C'
'+]%20C%31=C(N=[C+]%31)[C+]%31[C+]4N4[C+]=NN4[C+]([C+]'
'%24N4N=[C+]N4[C+]%20[C+]4[C+](C%20=C(N=[C+]%20)[C+]%3'
'4[C+]%20[C+]([C+]%35N%24N=[C+]N9%24)N9[C+]=NN9[C+]([C'
'+]%10N9N=[C+]N59)[C+](N5N=[C+]N5[C+]%27[C+](N5N=[C+]N'
'%205)C5([C+2][NH2+][C+]%255)N5N=[C+]N45)C4(OS[C+]%114'
')N4[C+]=NN4[C+]%29[C+]([C+]4[C+]%28N5[C+]=NN5[C+]([C+'
'](C5=C([C+]=N5)[C+]([C+]%17C5=C(N=[C+]5)C%315[C+2][NH'
'2+][C+]15)[C+]1S[C+2]C1([C+]%16N1N=[C+]N%141)N1N=[C+]'
'N41)[C+]2%32)[C+]%33N1N=[C+]N%301)N1[C+]=NN%131)[C+]%'
'21N1N=[C+]N1%18)[C+](N1N=[C+]N61)C1([C+2]S[C+]%231)N1'
'[C+]=NN%191)N1[C+]=NN31)C1(OS[C+]%221)N1N=[C+]N%261)N'
'1[C+]=NN71)N1[C+]=NN1[C+]%15C1([C+2]S[C+]%121)N1N=[C+'
']N81'
),
name=name,
),
lambda name: CaseData(
molecule=stk.ConstructedMolecule(
topology_graph=stk.cof.PeriodicHexagonal(
building_blocks={
stk.BuildingBlock(
smiles='BrC1=C(Br)[C+]=N1',
functional_groups=[stk.BromoFactory()],
): (
4, 5, 6, 7, 8, 9, 20, 21, 23, 24, 30, 36,
38, 40, 41, 42, 43, 46, 47, 52, 53, 60, 61,
),
stk.BuildingBlock(
smiles='BrN1N(Br)[C+]=N1',
functional_groups=[stk.BromoFactory()],
): (
10, 11, 12, 13, 14, 15, 22, 25, 26, 27, 28,
29, 37, 39, 44, 45, 54, 55, 56, 57, 58, 59,
31, 62, 63,
),
stk.BuildingBlock(
smiles=(
'Br[C+]1[C+]2[N+][C+2]C2(Br)[C+](I)[C+'
'](I)[C+](Br)[C+]1Br'
),
functional_groups=[
stk.BromoFactory(),
stk.IodoFactory(),
stk.FluoroFactory(),
],
): (0, 1, 18, 50, 51),
stk.BuildingBlock(
smiles=(
'Br[C+]1[C+]2[S][C+2]C2(Br)[C+](I)[C+]'
'(I)[C+](Br)[C+]1Br'
),
functional_groups=[
stk.BromoFactory(),
stk.IodoFactory(),
stk.FluoroFactory(),
],
): (2, 16, 34, 49),
stk.BuildingBlock(
smiles=(
'Br[C+]1[C+]2[S][O]C2(Br)[C+](I)[C+](I'
')[C+](Br)[C+]1Br'
),
functional_groups=[
stk.BromoFactory(),
stk.IodoFactory(),
stk.FluoroFactory(),
],
): (3, 17, 19, 32, 33, 35, 48),
},
lattice_size=(2, 2, 1),
vertex_alignments={0: 5},
optimizer=stk.PeriodicCollapser(),
),
),
smiles=(
'[C+]1=NC2=C1[C+]1[C+]3[C+]4C5=C(N=[C+]5)[C+]5[C+]6[C+'
']7[C+]8[C+]9C%10=C(N=[C+]%10)[C+]%10[C+]%11C%12=C([C+'
']=N%12)[C+]%12[C+]%13[C+]%14[C+]%15C%16=C(N=[C+]%16)['
'C+]%16[C+]%17C%18=C([C+]=N%18)[C+]%18[C+]([C+]%19[NH2'
'+][C+2]C%19([C+]%19[C+]%20C%21=C(N=[C+]%21)[C+]%21[C+'
']%22C%23=C(N=[C+]%23)[C+]%23[C+]%24C%25=C([C+]=N%25)['
'C+]%25[C+]%26[C+]%27C%28=C([C+]=N%28)C%28%29OS[C+]%28'
'[C+]%28C%30=C(N=[C+]%30)[C+]([C+]%30[C+]%21N%21[C+]=N'
'N%21[C+]%21[C+]%31C%32=C(N=[C+]%32)C%32%33[C+2][NH2+]'
'[C+]%32[C+]%32C%34=C(N=[C+]%34)[C+]%34[C+]([C+](C%35='
'C(N=[C+]%35)C5%35[C+2][NH2+][C+]9%35)[C+]5[C+]9[C+]%3'
'5SOC%35%34C%34=C(N=[C+]%34)[C+]%31[C+]%31C%34=C(N=[C+'
']%34)C%34%35OS[C+]%34[C+]%34C%36=C(N=[C+]%36)[C+]([C+'
']%17N%17N=[C+]N%17[C+]%31[C+]%17S[C+2]C%17%21N%17N=[C'
'+]N%17[C+]%18%20)[C+]%17SOC%17%18[C+]%16C%16=C([C+]=N'
'%16)[C+]%16[C+]%17C%20=C([C+]=N%20)C%20%21OS[C+]%20[C'
'+]%20C%31=C(N=[C+]%31)[C+]%31[C+]4N4[C+]=NN4[C+]([C+]'
'%24N4N=[C+]N4[C+]%20[C+]4[C+](C%20=C(N=[C+]%20)[C+]%3'
'4[C+]%20[C+]([C+]%35N%24N=[C+]N9%24)N9[C+]=NN9[C+]([C'
'+]%10N9N=[C+]N59)[C+](N5N=[C+]N5[C+]%27[C+](N5N=[C+]N'
'%205)C5([C+2][NH2+][C+]%255)N5N=[C+]N45)C4(OS[C+]%114'
')N4[C+]=NN4[C+]%29[C+]([C+]4[C+]%28N5[C+]=NN5[C+]([C+'
'](C5=C([C+]=N5)[C+]([C+]%17C5=C(N=[C+]5)C%315[C+2][NH'
'2+][C+]15)[C+]1S[C+2]C1([C+]%16N1N=[C+]N%141)N1N=[C+]'
'N41)[C+]2%32)[C+]%33N1N=[C+]N%301)N1[C+]=NN%131)[C+]%'
'21N1N=[C+]N1%18)[C+](N1N=[C+]N61)C1([C+2]S[C+]%231)N1'
'[C+]=NN%191)N1[C+]=NN31)C1(OS[C+]%221)N1N=[C+]N%261)N'
'1[C+]=NN71)N1[C+]=NN1[C+]%15C1([C+2]S[C+]%121)N1N=[C+'
']N81'
),
name=name,
),
),
)
def cof_periodic_hexagonal(request) -> CaseData:
return request.param(
f'{request.fixturename}{request.param_index}',
)
|
1643878
|
def caseUnification(s):
u = sum(1 for x in s if x.isupper())
if u > (len(s) / 2):
return s.upper()
else:
return s.lower()
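if __name__ == '__main__':
    # Illustrative usage (added example): strings with a majority of uppercase
    # characters are upper-cased, everything else is lower-cased.
    print(caseUnification('HoUSe'))  # -> 'HOUSE' (3 of 5 characters are uppercase)
    print(caseUnification('Done'))   # -> 'done'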
|
1643881
|
from setuptools import setup, find_packages
with open('README.md') as fp:
long_description = fp.read()
setup(
name='typeform',
version='1.1.0',
description='Python Client wrapper for Typeform API',
long_description=long_description,
long_description_content_type='text/markdown',
keywords=[
'type',
'form',
'typeform',
'api',
],
author='Typeform',
author_email='<EMAIL>',
url='https://github.com/MichaelSolati/typeform-python-sdk',
packages=find_packages(),
install_requires=['requests'],
test_suite='typeform.test.suite.test_suite',
license='MIT',
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python',
]
)
|
1643896
|
from justgood import imjustgood
api = imjustgood("YOUR_APIKEY_HERE")
data = api.lineapp()
print(data)
# EXAMPLE GET CERTAIN ATTRIBUTES
result = "LINE APP VERSION"
for a in data["result"]:
result += "\n{} : {}".format(a,data["result"][a])
print(result)
|
1643901
|
import numpy as np
import pytest
from chainer_chemistry.dataset.preprocessors import wle as WLE # NOQA
from chainer_chemistry.datasets.numpy_tuple_dataset import NumpyTupleDataset
@pytest.fixture
def small_datasets():
N_1 = 3
N_2 = 5
    # one-hot atom labels: 1 to N
atom_array_1 = np.arange(N_1)
atom_array_2 = np.arange(N_2)
# adj-array, manually
    # fully connected; the expanded labels are a permutation of 0, 1, 2
adj_array_1 = np.array([[1, 1, 1],
[1, 1, 1],
[1, 1, 1]]).astype(np.int32)
# node 0 --> 0-1.2
# node 1 --> 1-0.2
# node 2 --> 2-0.1
adj_array_2 = np.array([[1, 1, 0, 0, 1],
[1, 1, 0, 0, 1],
[0, 0, 1, 1, 0],
[0, 0, 1, 1, 0],
[1, 1, 0, 0, 1]]).astype(np.float32)
# node 0 --> 0-1.4
# node 1 --> 1-0.4
# node 2 --> 2-3
# node 3 --> 3-2
# node 4 --> 4-0.1
# supervised labels, dummy
teach_signal_1 = np.array(1).astype(np.int)
teach_signal_2 = np.array(0).astype(np.int)
# concat in a one numpy array!
atom_arrays = np.array([atom_array_1, atom_array_2])
adj_arrays = np.array([adj_array_1, adj_array_2])
teach_signals = np.array([teach_signal_1, teach_signal_2])
# train/val/test dataset, respectively
datasets = [NumpyTupleDataset(atom_arrays, adj_arrays, teach_signals),
NumpyTupleDataset(atom_arrays, adj_arrays, teach_signals),
NumpyTupleDataset(atom_arrays, adj_arrays, teach_signals)]
return datasets
def _get_elements(datasets, idx):
return [[mol[1] for mol in d] for d in datasets]
def _get_atom_arrays(datasets):
return _get_elements(datasets, 0)
def _get_adj_arrays(datasets):
return _get_elements(datasets, 1)
def _get_wle_arrays(datasets):
return _get_elements(datasets, 2)
def _get_teach_signals(datasets, is_cwle=False):
if is_cwle:
return _get_elements(datasets, 2)
else:
return _get_elements(datasets, 3)
def _check_np_array(actuals, expects):
assert len(actuals) == len(expects) == 3 # train/test/val
for actual_adjs, expect_adjs in zip(actuals, expects):
assert len(actual_adjs) == len(expect_adjs)
[np.testing.assert_array_equal(a, e)
for a, e in zip(actual_adjs, expect_adjs)]
def test_wle(small_datasets):
ret_value = WLE.apply_wle_for_datasets(small_datasets, 0)
actual_datasets, actual_labels, actual_frequency = ret_value
expected_frequency = {'0-1.2': 3,
'1-0.2': 3,
'2-0.1': 3,
'0-1.4': 3,
'1-0.4': 3,
'2-3': 3,
'3-2': 3,
'4-0.1': 3}
assert expected_frequency == actual_frequency
expected_labels = set(expected_frequency.keys())
assert expected_labels == set(actual_labels)
actual_adj_arrays = _get_adj_arrays(actual_datasets)
expect_adj_arrays = _get_adj_arrays(small_datasets)
_check_np_array(actual_adj_arrays, expect_adj_arrays)
actual_signal_arrays = _get_teach_signals(actual_datasets)
expect_signal_arrays = _get_teach_signals(small_datasets)
_check_np_array(actual_signal_arrays, expect_signal_arrays)
# Check atom_arrays of train/val/test datasets are identical.
# 2 is the number of samples in each (train/val/test) dataset.
atom_arrays = _get_atom_arrays(actual_datasets)
first_mols = [d[0] for d in atom_arrays]
second_mols = [d[1] for d in atom_arrays]
for mols in (first_mols, second_mols):
assert len(mols) == 3
np.testing.assert_array_equal(mols[0], mols[1])
np.testing.assert_array_equal(mols[1], mols[2])
def test_2_hop_wle(small_datasets):
k = 2
ret_value = WLE.apply_wle_for_datasets(small_datasets, 0, k)
actual_datasets, actual_labels, actual_frequency = ret_value
expected_frequency = {'0-1.2': 3,
'1-0.2': 3,
'2-0.1': 3,
'3-4.7': 3,
'4-3.7': 3,
'5-6': 3,
'6-5': 3,
'7-3.4': 3}
# <NAME> (<EMAIL>)
# The following assertion checks too strong condition.
# Specifically it assumes that the WLE algorithm assigns
# the extended atom labels appeared in the first iteration
# in a certain order and runs the second iteration.
# Strictly speaking, this is not required in the algorithm.
assert expected_frequency == actual_frequency
expected_labels = set(expected_frequency.keys())
assert expected_labels == set(actual_labels)
actual_adj_arrays = _get_adj_arrays(actual_datasets)
expect_adj_arrays = _get_adj_arrays(small_datasets)
_check_np_array(actual_adj_arrays, expect_adj_arrays)
actual_signal_arrays = _get_teach_signals(actual_datasets)
expect_signal_arrays = _get_teach_signals(small_datasets)
_check_np_array(actual_signal_arrays, expect_signal_arrays)
# Check atom_arrays of train/val/test datasets are identical.
# 2 is the number of samples in each (train/val/test) dataset.
atom_arrays = _get_atom_arrays(actual_datasets)
first_mols = [d[0] for d in atom_arrays]
second_mols = [d[1] for d in atom_arrays]
for mols in (first_mols, second_mols):
assert len(mols) == 3
np.testing.assert_array_equal(mols[0], mols[1])
np.testing.assert_array_equal(mols[1], mols[2])
def test_cwle(small_datasets):
ret_value = WLE.apply_cwle_for_datasets(small_datasets)
actual_datasets, actual_labels, actual_frequency = ret_value
expected_frequency = {'1.2': 3,
'0.2': 3,
'0.1': 6,
'1.4': 3,
'0.4': 3,
'3': 3,
'2': 3}
assert expected_frequency == actual_frequency
expected_labels = set(expected_frequency.keys())
assert expected_labels == set(actual_labels)
actual_adj_arrays = _get_adj_arrays(actual_datasets)
expect_adj_arrays = _get_adj_arrays(small_datasets)
_check_np_array(actual_adj_arrays, expect_adj_arrays)
actual_signal_arrays = _get_teach_signals(actual_datasets, True)
expect_signal_arrays = _get_teach_signals(small_datasets)
_check_np_array(actual_signal_arrays, expect_signal_arrays)
# Check atom_arrays of train/val/test datasets are identical.
atom_arrays = _get_atom_arrays(actual_datasets)
first_mols = [d[0] for d in atom_arrays]
second_mols = [d[1] for d in atom_arrays]
for mols in (first_mols, second_mols):
assert len(mols) == 3
np.testing.assert_array_equal(mols[0], mols[1])
np.testing.assert_array_equal(mols[1], mols[2])
# Check wle_arrays of train/val/test datasets are identical.
wle_arrays = _get_wle_arrays(actual_datasets)
first_mols = [d[0] for d in wle_arrays]
second_mols = [d[1] for d in wle_arrays]
for mols in [first_mols, second_mols]:
assert len(mols) == 3
np.testing.assert_array_equal(mols[0], mols[1])
np.testing.assert_array_equal(mols[1], mols[2])
def test_findmaxidx_atom_label(small_datasets):
actual = WLE.findmaxidx(small_datasets, 'atom_label')
expect = 5
assert actual == expect
@pytest.fixture
def cwle_datasets():
B = 10
D_atom = 5
D_wle = 50
K_large = 10000
atom_arrays = [np.full((B, D_atom), K_large) for _ in range(3)]
adj_arrays = [np.eye(B, dtype=np.int32) for _ in range(3)]
wle_arrays = [np.arange(B * D_wle, dtype=np.int32).reshape(B, -1)
for _ in range(3)]
signal_arrays = [np.full(B, K_large) for _ in range(3)]
print(wle_arrays[0].shape)
datasets = [NumpyTupleDataset(atom_arrays[i],
adj_arrays[i],
wle_arrays[i],
signal_arrays[i])
for i in range(3)]
return datasets
def test_findmaxidx_wle(cwle_datasets):
actual = WLE.findmaxidx(cwle_datasets, 'wle_label')
expect = 10 * 50
assert actual == expect
|
1643949
|
from itertools import chain
from collections import Counter
from tqdm import tqdm
import requests
from bs4 import BeautifulSoup
import pandas as pd
def flatten(ls):
"""
    Flatten a list of lists
"""
return list(chain.from_iterable(ls))
def clean_lyrics(lyric):
"""
Clean lines that do not contain lyrics
"""
lines = lyric.split('\n')
lyrics_clean = []
for line in lines:
# remove headers from the file
headers = [
'เพลง ', 'คำร้อง ', 'คำร้อง/ทำนอง ', 'ศิลปิน ', 'ทำนอง ',
'เรียบเรียง ', 'เพลงประกอบละคร ', 'อัลบัม ', 'ร่วมร้องโดย ',
'เนื้อร้อง/ทำนอง', 'ทำนอง/เรียบเรียง ', 'เพลงประกอบภาพยนตร์ ',
'เพลงประกอบละครซิทคอม ', 'คำร้อง/ทำนอง/เรียบเรียง ',
'คำร้อง/เรียบเรียง ', 'เพลงประกอบ ', 'ร้องโดย ',
'ทำนอง / เรียบเรียง :', ' สังกัด'
]
if any(line.startswith(s) for s in headers):
pass
else:
line = ' '.join(line.replace('(', ' ').replace(')', ' ').replace('-', ' ').split())
lyrics_clean.append(line)
return '\n'.join(lyrics_clean).strip()
def create_lookup_dict(tokenized_lyrics, n_min=None):
"""
Create lookup dictionary from list of words (lyrics)
"""
word_counts = Counter(tokenized_lyrics)
sorted_vocab = sorted(word_counts, key=word_counts.get, reverse=True)
if n_min is not None:
        sorted_vocab = [word for word in sorted_vocab if word_counts[word] >= n_min]
vocab_to_int = {word: i for i, word in enumerate(sorted_vocab, 0)}
int_to_vocab = {i: word for word, i in vocab_to_int.items()}
return (vocab_to_int, int_to_vocab)
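# Example (illustrative): building the lookup tables from a small token list.
#
#   vocab_to_int, int_to_vocab = create_lookup_dict(['กัน', 'เธอ', 'กัน'])
#   vocab_to_int  # -> {'กัน': 0, 'เธอ': 1}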
def scrape_siamzone_url(d):
"""
Script to scrape Siamzone lyrics from a given song_id (integer)
"""
soup = BeautifulSoup(requests.get('https://www.siamzone.com/music/thailyric/{}'.format(d)).content, 'html.parser')
song_title, artist_name = soup.find('title').text.split('|')
song_title, artist_name = song_title.replace("เนื้อเพลง ", "").strip(), artist_name.strip()
try:
n_views = ' '.join(soup.find('div', attrs={'class': 'has-text-info'}).text.strip().split())
except:
n_views = ''
try:
full_lyrics = soup.find_all('div', attrs={'class': 'column is-6-desktop'})[1]
lyrics = full_lyrics.find("div", attrs={'style': "margin-bottom: 1rem;"}).text.strip()
except:
lyrics = ""
return {
'url': 'https://www.siamzone.com/music/thailyric/%d' % d,
'soup': soup,
'song_title': song_title,
'artist_name': artist_name,
'n_views': n_views,
'lyrics': lyrics
}
def scrape_siamzone(start=1, end=20200):
"""
    Scrape Siamzone lyric pages and return a DataFrame of results
Usage
=====
>>> scraped_siamzone_df = scrape_siamzone(start=1, end=20200)
>>> scraped_siamzone_df['html'] = scraped_siamzone_df.soup.map(lambda x: x.prettify())
"""
scraped_siamzone = []
for i in tqdm(range(start, end)):
try:
scraped_siamzone.append(scrape_siamzone_url(i))
        except Exception:
pass
scraped_siamzone_df = pd.DataFrame(scraped_siamzone)
return scraped_siamzone_df
|
1643958
|
from app.api import callback_api, operator_api, task_api, response_api, crypto_api
from app.api import (
payloads_api,
analytics_api,
c2profiles_api,
file_api,
operation_api,
payloadtype_api,
)
from app.api import (
command_api,
reporting_api,
credential_api,
keylog_api,
mitre_api,
artifacts_api,
)
from app.api import (
rabbitmq_api,
apitokens_api,
browserscript_api,
processlist_api,
event_message_api,
proxies_api,
siem_logger
)
from app.api import payloadonhost_api, file_browser_api
|
1643975
|
import ConfigParser, sys
from py.fm_ops import fm_ops
from py.fm_train import local_train, dist_train
from py.fm_predict import local_predict, dist_predict
import tensorflow as tf
cmd_instruction = '''Usage:
1. Local training.
python fast_tffm.py train <cfg_file>
2. Distributed training.
python fast_tffm.py dist_train <cfg_file> <job_name> <task_idx>
3. Local predicting.
python fast_tffm.py predict <cfg_file>
4. Distributed predicting.
python fast_tffm.py dist_predict <cfg_file> <job_name> <task_idx>
Arguments:
  <cfg_file>: configuration file path. See sample.cfg for example.
<job_name>: 'worker' or 'ps'. Launch as a worker or a parameter server
<task_idx>: Task index.
'''
def check_argument_error(condition):
if not condition:
sys.stderr.write('''Invalid arguments\n''')
sys.stderr.write(cmd_instruction)
exit()
argc = len(sys.argv)
if argc == 1:
print cmd_instruction,
exit()
check_argument_error(argc >= 3)
mode = sys.argv[1]
cfg_file = sys.argv[2]
if mode == 'train' or mode == 'predict':
check_argument_error(argc == 3)
elif mode == 'dist_train' or mode == 'dist_predict':
check_argument_error(argc == 5)
job_name = sys.argv[3]
task_idx = int(sys.argv[4])
else:
check_argument_error(False)
GENERAL_SECTION = 'General'
TRAIN_SECTION = 'Train'
PREDICT_SECTION = 'Predict'
CLUSTER_SPEC_SECTION = 'ClusterSpec'
STR_DELIMITER = ','
config = ConfigParser.ConfigParser()
config.read(cfg_file)
print 'Config: '
def read_config(section, option, not_null = True):
if not config.has_option(section, option):
if not_null:
raise ValueError("%s is undefined."%option)
else:
return None
else:
value = config.get(section, option)
print ' {0} = {1}'.format(option, value)
return value
def read_strs_config(section, option, not_null = True):
val = read_config(section, option, not_null)
if val != None:
return [s.strip() for s in val.split(STR_DELIMITER)]
return None
factor_num = int(read_config(GENERAL_SECTION, 'factor_num'))
vocabulary_size = int(read_config(GENERAL_SECTION, 'vocabulary_size'))
vocabulary_block_num = int(read_config(GENERAL_SECTION, 'vocabulary_block_num'))
model_file = read_config(GENERAL_SECTION, 'model_file')
hash_feature_id = read_config(GENERAL_SECTION, 'hash_feature_id').strip().lower() == 'true'
if mode == 'dist_train' or mode == 'dist_predict':
ps_hosts = read_strs_config(CLUSTER_SPEC_SECTION, 'ps_hosts')
worker_hosts = read_strs_config(CLUSTER_SPEC_SECTION, 'worker_hosts')
if mode == 'train' or mode == 'dist_train':
batch_size = int(read_config(TRAIN_SECTION, 'batch_size'))
init_value_range = float(read_config(TRAIN_SECTION, 'init_value_range'))
factor_lambda = float(read_config(TRAIN_SECTION, 'factor_lambda'))
bias_lambda = float(read_config(TRAIN_SECTION, 'bias_lambda'))
thread_num = int(read_config(TRAIN_SECTION, 'thread_num'))
epoch_num = int(read_config(TRAIN_SECTION, 'epoch_num'))
train_files = read_strs_config(TRAIN_SECTION, 'train_files')
weight_files = read_strs_config(TRAIN_SECTION, 'weight_files', False)
if weight_files != None and len(train_files) != len(weight_files):
raise ValueError('The numbers of train files and weight files do not match.')
validation_files = read_strs_config(TRAIN_SECTION, 'validation_files', False)
learning_rate = float(read_config(TRAIN_SECTION, 'learning_rate'))
adagrad_init_accumulator = float(read_config(TRAIN_SECTION, 'adagrad.initial_accumulator'))
loss_type = read_config(TRAIN_SECTION, 'loss_type').strip().lower()
if not loss_type in ['logistic', 'mse']:
raise ValueError('Unsupported loss type: %s'%loss_type)
optimizer = tf.train.AdagradOptimizer(learning_rate, adagrad_init_accumulator)
if mode == 'train':
local_train(train_files, weight_files, validation_files, epoch_num, vocabulary_size, vocabulary_block_num, hash_feature_id, factor_num, init_value_range, loss_type, optimizer, batch_size, factor_lambda, bias_lambda, thread_num, model_file)
else:
dist_train(ps_hosts, worker_hosts, job_name, task_idx, train_files, weight_files, validation_files, epoch_num, vocabulary_size, vocabulary_block_num, hash_feature_id, factor_num, init_value_range, loss_type, optimizer, batch_size, factor_lambda, bias_lambda, thread_num, model_file)
elif mode == 'predict' or mode == 'dist_predict':
predict_files = read_config(PREDICT_SECTION, 'predict_files').split(',')
score_path = read_config(PREDICT_SECTION, 'score_path')
if mode == 'predict':
local_predict(predict_files, vocabulary_size, vocabulary_block_num, hash_feature_id, factor_num, model_file, score_path)
else:
dist_predict(ps_hosts, worker_hosts, job_name, task_idx, predict_files, vocabulary_size, vocabulary_block_num, hash_feature_id, factor_num, model_file, score_path)
|
1643983
|
from time import sleep
import streamlit as st
from stqdm import stqdm
columns = st.beta_columns(3)
with columns[1]:
for i in stqdm(range(50)):
sleep(0.5)
if i == 20:
break
with columns[2]:
for i in stqdm(range(50)):
sleep(0.5)
if i == 30:
break
with columns[0]:
for i in stqdm(range(50)):
sleep(0.5)
if i == 15:
break
|
1644040
|
import ncvis
vis = ncvis.NCVis(n_neighbors=15, M=16, ef_construction=200, n_init_epochs=20, n_epochs=50, min_dist=0.4, n_threads=-1, distance='euclidean')
|
1644042
|
from formaloo import constants, helper
class RowVote(helper.RequestHandler):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.actions = {
"get_list": {
"url": constants.V_1_0_ROW_VOTE_LIST_CREATE_ENDPOINT,
"has_url_params": True,
"body": None,
"method": self.client.get
},
"create": {
"url": constants.V_1_0_ROW_VOTE_LIST_CREATE_ENDPOINT,
"has_url_params": True,
"body": self.get_body(),
"method": self.client.post
},
"patch": {
"url": constants.V_1_0_ROW_VOTE_ITEM_ENDPOINT,
"has_url_params": True,
"body": self.get_body(),
"method": self.client.patch
},
"delete": {
"url": constants.V_1_0_ROW_VOTE_ITEM_ENDPOINT,
"has_url_params": True,
"body": None,
"method": self.client.delete
}
}
|
1644082
|
from ..css_matcher import scan, split_value, TokenType
from .utils import push_range, SelectItemModel
class CSSSection:
__slots__ = ('start', 'end', 'body_start', 'body_end', 'properties')
def __init__(self, start: int, end: int, body_start: int, body_end: int, properties: list=None):
self.start = start
self.end = end
self.body_start = body_start
self.body_end = body_end
self.properties = properties
def to_json(self):
result = {
'start': self.start,
'end': self.end,
'body_start': self.body_start,
'body_end': self.body_end
}
if self.properties:
result['properties'] = [prop.to_json() for prop in self.properties]
return result
class CSSProperty:
__slots__ = ('name', 'value', 'value_tokens', 'before', 'after')
def __init__(self, code: str, name: list, before: int, start: int, end: int, delimiter: int, offset=0):
self.name = (offset + name[0], offset + name[1])
self.value = (offset + start, offset + end)
self.value_tokens = split_value(code[start:end], offset + start)
self.before = before
self.after = offset + delimiter + 1
def to_json(self):
return {
'name': self.name,
'value': self.value,
'value_tokens': self.value_tokens,
'before': self.before,
'after': self.after
}
class ParseState:
__slots__ = ('type', 'start', 'end', 'value_start', 'value_end', 'value_delimiter')
def __init__(self):
self.type = None
self.start = -1
self.end = -1
self.value_start = -1
self.value_end = -1
self.value_delimiter = -1
def get_css_section(code: str, pos: int, properties=False) -> CSSSection:
"""
Returns context CSS section for given location in source code
    :param properties: Parse inner properties
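    Usage (a sketch; offsets depend on the input):
        section = get_css_section(code, pos, properties=True)
        # section.start/end span the whole rule; body_start/body_end span
        # the content between '{' and '}'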
"""
stack = []
pool = []
result = []
result.append(None) # Skip pylint warnings
def scan_callback(token_type: str, start: int, end: int, delimiter: int):
if start > pos and not stack:
return False
if token_type == TokenType.Selector:
stack.append(alloc_range(pool, start, end, delimiter))
elif token_type == TokenType.BlockEnd:
sel = stack and stack.pop()
if sel and sel[0] <= pos <= end:
result[0] = CSSSection(sel[0], end, sel[2] + 1, start)
return False
release_range(pool, sel)
scan(code, scan_callback)
section = result[0]
if section and properties:
section.properties = parse_properties(code, section.body_start, section.body_end)
return section
def select_item_css(code: str, pos: int, is_prev=False) -> SelectItemModel:
"Returns list of ranges for Select Next/Previous CSS Item action"
if is_prev:
return select_previous_item(code, pos)
return select_next_item(code, pos)
def select_next_item(code: str, pos: int) -> SelectItemModel:
"Returns regions for selecting next item in CSS"
result = []
result.append(None)
pending_property = []
pending_property.append(None)
def scan_callback(token_type: str, start: int, end: int, delimiter: int):
if start < pos:
return
if token_type == TokenType.Selector:
result[0] = SelectItemModel(start, end, [(start, end)])
return False
elif token_type == TokenType.PropertyName:
pending_property[0] = (start, end, delimiter)
elif token_type == TokenType.PropertyValue:
section = SelectItemModel(start, delimiter + 1 if delimiter != -1 else end, [])
result[0] = section
if pending_property[0]:
# Full property range
prop = pending_property[0]
section.start = prop[0]
push_range(section.ranges, (prop[0], section.end))
# Full value range
push_range(section.ranges, (start, end))
# Value fragments
for r in split_value(code[start:end]):
push_range(section.ranges, (r[0] + start, r[1] + start))
return False
elif pending_property[0]:
prop = pending_property[0]
result[0] = SelectItemModel(prop[0], prop[1], [(prop[0], prop[1])])
return False
scan(code, scan_callback)
return result[0]
def select_previous_item(code: str, pos: int) -> SelectItemModel:
"Returns regions for selecting previous item in CSS"
state = ParseState()
def scan_callback(token_type, start, end, delimiter):
# Accumulate context until we reach given position
if start >= pos and token_type != TokenType.PropertyValue:
return False
if token_type in (TokenType.Selector, TokenType.PropertyName):
state.start = start
state.end = end
state.type = token_type
state.value_start = state.value_end = state.value_delimiter = -1
elif token_type == TokenType.PropertyValue:
state.value_start = start
state.value_end = end
state.value_delimiter = delimiter
scan(code, scan_callback)
if state.type == TokenType.Selector:
return SelectItemModel(state.start, state.end, [(state.start, state.end)])
if state.type == TokenType.PropertyName:
result = SelectItemModel(state.start, state.end, [])
if state.value_start != -1:
result.end = state.value_delimiter + 1 if state.value_delimiter != -1 else state.value_end
# Full property range
push_range(result.ranges, (state.start, result.end))
# Full value range
push_range(result.ranges, (state.value_start, state.value_end))
# Value fragments
for r in split_value(code[state.value_start:state.value_end]):
push_range(result.ranges, (r[0] + state.value_start, r[1] + state.value_start))
else:
push_range(result.ranges, (state.start, state.end))
return result
class ParsePropertiesState:
__slots__ = ('pending_name', 'nested', 'before')
def __init__(self, before: int):
self.pending_name = None
self.nested = 0
        self.before = before
def parse_properties(code: str, parse_from=0, parse_to=None) -> list:
"""
    Parses properties in the `from:to` fragment of `code`. Note that `from:to` must
    point to CSS section content, i.e. *inside* `{` and `}` (or a top-level code context);
    all properties found in nested sections are ignored.
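    A typical call (a sketch) parses the body of a section found by
    ``get_css_section``:
        props = parse_properties(code, section.body_start, section.body_end)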
"""
if parse_to is None:
parse_to = len(code)
fragment = code[parse_from:parse_to]
result = []
pool = []
state = ParsePropertiesState(parse_from)
def scan_callback(token_type, start: int, end: int, delimiter: int):
if token_type == TokenType.Selector:
state.nested += 1
elif token_type == TokenType.BlockEnd:
state.nested -= 1
state.before = parse_from + end
elif not state.nested:
if token_type == TokenType.PropertyName:
if state.pending_name:
# Create property with empty value
value_pos = state.pending_name[2]
result.append(
CSSProperty(fragment, state.pending_name, state.before,
value_pos, value_pos, value_pos,
parse_from))
release_range(pool, state.pending_name)
state.before = parse_from + start
state.pending_name = alloc_range(pool, start, end, delimiter)
elif token_type == TokenType.PropertyValue:
if state.pending_name:
result.append(
CSSProperty(fragment, state.pending_name, state.before,
start, end, delimiter, parse_from))
release_range(pool, state.pending_name)
state.pending_name = None
state.before = parse_from + delimiter + 1
scan(fragment, scan_callback)
return result
def alloc_range(pool: list, start: int, end: int, delimiter: int) -> list:
"Allocates new token range from pool"
if pool:
rng = pool.pop()
rng[0] = start
rng[1] = end
rng[2] = delimiter
return rng
return [start, end, delimiter]
def release_range(pool: list, rng: list):
"Releases given token range and pushes it back into the pool"
if rng:
pool.append(rng)
|
1644118
|
import logging
from botocore.exceptions import ClientError
from library.aws.utility import convert_tags
class EBSOperations:
@staticmethod
def snapshot_make_private(ec2_client, snapshot_id):
"""
Remove public permissions on EBS snapshot
:param ec2_client: EC2 boto3 client
:param snapshot_id: the ID of the snapshot
:return: nothing
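        Usage (a sketch; the snapshot id is illustrative):
            ec2 = boto3.client("ec2")
            EBSOperations.snapshot_make_private(ec2, "snap-0123456789abcdef0")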
"""
ec2_client.modify_snapshot_attribute(
Attribute="createVolumePermission",
CreateVolumePermission={
"Remove": [
{
"Group": "all"
},
]
},
GroupNames=["all"],
OperationType="remove",
SnapshotId=snapshot_id
)
class EBSVolume(object):
"""
Basic class for EBS volume.
Encapsulates `VolumeId`/`State`/`Encrypted` and list of `Attachments`.
"""
def __init__(self, account, source):
"""
:param account: `Account` instance where EBS volume is present
:param source: single `Volumes` element as AWS returns
"""
self.source = source
self.account = account
self.id = source["VolumeId"]
self.state = source["State"]
self.encrypted = source["Encrypted"]
attachments = source.get('Attachments', [])
self.attachments = { attach['InstanceId']: attach['State'] for attach in attachments } if attachments else {}
self.tags = convert_tags(source.get('Tags', []))
@property
def name(self):
""" :return: EBS volume name from tags """
return self.tags.get("Name", None) if self.tags else None
def __str__(self):
name = "" if self.name is None else f"Name={self.name}, "
return f"{self.__class__.__name__}({name}Id={self.id}, Encrypted={self.encrypted}, State={self.state}, Attachments={len(self.attachments)})"
class EBSUnencryptedVolumesChecker(object):
"""
Basic class for checking EBS volumes in account/region.
Encapsulates discovered EBS volumes.
"""
def __init__(self, account):
"""
:param account: `Account` instance with EBS volumes to check
"""
self.account = account
self.volumes = []
def get_volume(self, id):
"""
:return: `EBSVolume` by id
"""
for volume in self.volumes:
if volume.id == id:
return volume
return None
def check(self, ids=None, tags=None):
"""
        Walk through unencrypted EBS volumes in the account/region and put them into `self.volumes`.
:param ids: list with EBS volume ids to check, if it is not supplied - all EBS volumes must be checked
:return: boolean. True - if check was successful,
False - otherwise
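        Usage (a sketch; the tag filter is illustrative):
            checker = EBSUnencryptedVolumesChecker(account)
            if checker.check(tags={"env": "prod"}):
                unencrypted = checker.volumes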
"""
args = {'DryRun': False}
if ids:
# if ids is set - check given ids regardless of encrypted status
args['VolumeIds'] = ids
else:
# else get only unencrypted volumes
args['Filters'] = [{
'Name': 'encrypted',
'Values': ["false"]
}]
if tags:
for key, value in tags.items():
args['Filters'].append(
{'Name': f"tag:{key}", 'Values': value if isinstance(value, list) else [value]},
)
try:
volume_details = self.account.client("ec2").describe_volumes(**args)["Volumes"]
except ClientError as err:
if err.response['Error']['Code'] in ["AccessDenied", "UnauthorizedOperation"]:
logging.error(f"Access denied in {self.account} "
f"(ec2:{err.operation_name})")
else:
logging.exception(f"Failed to describe volumes in {self.account}")
return False
for volume_detail in volume_details:
volume = EBSVolume(self.account, volume_detail)
self.volumes.append(volume)
return True
class EBSSnapshot(object):
"""
Basic class for EBS snapshot.
Encapsulates `SnapshotId`/`VolumeId`/`Encrypted` and list of permissions.
"""
def __init__(self, account, source, permissions):
"""
:param account: `Account` instance where EBS snapshot is present
:param source: single `Snapshots` element as AWS returns
:param permissions: result of `describe_snapshot_attribute` API call for snapshot
"""
self.source = source
self.permissions = permissions
self.account = account
self.id = source["SnapshotId"]
self.volume_id = source["VolumeId"]
self.tags = convert_tags(source.get('Tags', []))
def __str__(self):
return f"{self.__class__.__name__}(Id={self.id}, VolumeId={self.volume_id}, Public={self.public})"
@property
def public(self):
"""
:return: boolean, True - if snapshot has `all` group permissions for `CreateVolumePermissions`
False - otherwise
"""
for permission in self.permissions["CreateVolumePermissions"]:
if "Group" in permission and permission["Group"] == "all":
return True
return False
def make_private(self):
"""
Remove public permissions on snapshot
:return: nothing
"""
EBSOperations.snapshot_make_private(self.account.client("ec2"), self.id)
class EBSPublicSnapshotsChecker(object):
"""
Basic class for checking EBS snapshots in account/region.
Encapsulates discovered EBS snapshots.
"""
def __init__(self, account):
"""
:param account: `Account` instance with EBS snapshots to check
"""
self.account = account
self.snapshots = []
def get_snapshot(self, id):
"""
:return: `EBSSnapshot` by id
"""
for snapshot in self.snapshots:
if snapshot.id == id:
return snapshot
return None
def check(self, ids=None, tags=None):
"""
        Walk through public EBS snapshots in the account/region and put them into `self.snapshots`.
:param ids: list with EBS snapshot ids to check, if it is not supplied - all EBS snapshots must be checked
:return: boolean. True - if check was successful,
False - otherwise
"""
args = {
'DryRun': False,
# You can specify AWS account IDs (if you own the snapshots),
# 'self' for snapshots for which you own or have explicit permissions,
# or 'all' for public snapshots.
'RestorableByUserIds': ['all'],
# The results can include the AWS account IDs of the specified owners,
# 'amazon' for snapshots owned by Amazon,
# or 'self' for snapshots that you own.
'OwnerIds': ['self']
}
if ids:
# if ids is set - check given ids regardless of encrypted status
args['SnapshotIds'] = ids
del args['RestorableByUserIds']
if tags:
args['Filters'] = []
for key, value in tags.items():
args['Filters'].append(
{'Name': f"tag:{key}", 'Values': value if isinstance(value, list) else [value]},
)
try:
snapshot_details = self.account.client("ec2").describe_snapshots(**args)["Snapshots"]
except ClientError as err:
if err.response['Error']['Code'] in ["AccessDenied", "UnauthorizedOperation"]:
logging.error(f"Access denied in {self.account} "
f"(ec2:{err.operation_name})")
else:
logging.exception(f"Failed to describe snapshots in {self.account}")
return False
for snapshot_detail in snapshot_details:
try:
                # Need to check each snapshot's attributes despite the fact
                # that we ask AWS to return only restorable-by-all snapshots, as:
                # * if 'ids' is set - we remove RestorableByUserIds and AWS returns both public and private snapshots
                # * moto does not support RestorableByUserIds and returns all snapshots
snapshot_permissions = self.account.client("ec2").describe_snapshot_attribute(
Attribute="createVolumePermission",
SnapshotId=snapshot_detail['SnapshotId']
)
except ClientError as err:
if err.response['Error']['Code'] in ["AccessDenied", "UnauthorizedOperation"]:
logging.error(f"Access denied in {self.account} "
f"(ec2:{err.operation_name})")
else:
logging.exception(f"Failed to describe '{snapshot_detail['SnapshotId']}' snapshot attribute "
f"in {self.account}")
return False
snapshot = EBSSnapshot(self.account, snapshot_detail, snapshot_permissions)
self.snapshots.append(snapshot)
return True
|
1644133
|
import os
import cv2
import imutils
class ImageAligner:
def __init__(self, frame_path, angle, extension="jpg"):
self.frame_path = os.path.splitext(frame_path)[0] + "." + extension
self.angle = angle
self.extension = extension
def align(self):
img = cv2.imread(self.frame_path)
img_rotated = imutils.rotate(img, self.angle)
return img_rotated
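# Usage (a sketch, assuming "frame.jpg" exists on disk):
# rotated = ImageAligner("frame.jpg", angle=90).align()
# cv2.imwrite("frame_rotated.jpg", rotated)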
|
1644219
|
import os
import sys
import twitter
from footbot import footbot
from config import APP_NAME, API_KEY, API_SECRET
AUTH_FILENAME = 'auth.token'
def get_auth_token(fn=AUTH_FILENAME):
return twitter.oauth_dance(APP_NAME,
API_KEY,
API_SECRET,
token_filename=fn)
def read_auth_token_file(fn=AUTH_FILENAME):
token, secret = None, None
with open(fn) as fp:
token = fp.readline().strip()
secret = fp.readline().strip()
return token, secret
def tweet_game(fn=AUTH_FILENAME):
text = footbot()[0]
    if len(text) >= 140:
sys.stderr.write('Tweet is too long: %s\n' % text)
return False
token, secret = read_auth_token_file(fn)
auth = twitter.OAuth(token, secret, API_KEY, API_SECRET)
t = twitter.Twitter(auth=auth)
result = t.statuses.update(status=text)
#print result
return result
def main():
if os.path.exists(AUTH_FILENAME):
return 0 if tweet_game() else 1
else:
        return 0 if get_auth_token() else 1
if __name__ == '__main__':
sys.exit(main())
|
1644236
|
from tzwhere import tzwhere
import datetime
import unittest
class LocationTestCase(unittest.TestCase):
TEST_LOCATIONS = (
( 35.295953, -89.662186, 'Arlington, TN', 'America/Chicago'),
( 33.58, -85.85, 'Memphis, TN', 'America/Chicago'),
( 61.17, -150.02, 'Anchorage, AK', 'America/Anchorage'),
( 44.12, -123.22, 'Eugene, OR', 'America/Los_Angeles'),
( 42.652647, -73.756371, 'Albany, NY', 'America/New_York'),
( 55.743749, 37.6207923, 'Moscow', 'Europe/Moscow'),
( 34.104255, -118.4055591, 'Los Angeles', 'America/Los_Angeles'),
( 55.743749, 37.6207923, 'Moscow', 'Europe/Moscow'),
( 39.194991, -106.8294024, 'Aspen, Colorado', 'America/Denver'),
( 50.438114, 30.5179595, 'Kiev', 'Europe/Kiev'),
( 12.936873, 77.6909136, 'Jogupalya', 'Asia/Kolkata'),
( 38.889144, -77.0398235, 'Washington DC', 'America/New_York'),
( 59.932490, 30.3164291, '<NAME>', 'Europe/Moscow'),
( 50.300624, 127.559166, 'Blagoveshchensk', 'Asia/Yakutsk'),
( 42.439370, -71.0700416, 'Boston', 'America/New_York'),
( 41.84937, -87.6611995, 'Chicago', 'America/Chicago'),
( 28.626873, -81.7584514, 'Orlando', 'America/New_York'),
( 47.610615, -122.3324847, 'Seattle', 'America/Los_Angeles'),
( 51.499990, -0.1353549, 'London', 'Europe/London'),
( 51.256241, -0.8186531, '<NAME>', 'Europe/London'),
( 51.292215, -0.8002638, 'Fleet', 'Europe/London'),
( 48.868743, 2.3237586, 'Paris', 'Europe/Paris'),
( 22.158114, 113.5504603, 'Macau', 'Asia/Macau'),
( 56.833123, 60.6097054, 'Russia', 'Asia/Yekaterinburg'),
( 60.887496, 26.6375756, 'Salo', 'Europe/Helsinki'),
( 52.799992, -1.8524408, 'Staffordshire', 'Europe/London'),
( 5.016666, 115.0666667, 'Muara', 'Asia/Brunei'),
(-41.466666, -72.95, 'Puerto Montt seaport', 'America/Santiago'),
( 34.566666, 33.0333333, 'Akrotiri seaport', 'Asia/Nicosia'),
( 37.466666, 126.6166667, 'Inchon seaport', 'Asia/Seoul'),
( 42.8, 132.8833333, 'Nakhodka seaport', 'Asia/Vladivostok'),
( 50.26, -5.051, 'Truro', 'Europe/London'),
( 50.26, -9.051, 'Sea off Cornwall', None),
( 35.82373, -110.72144, 'Hopi Nation', 'America/Phoenix'),
( 35.751956, -110.169460, 'Deni inside Hopi Nation', 'America/Denver'),
( 68.38068073677294, -133.73396065378114, 'Upper hole in America/Yellowknife', 'America/Inuvik')
)
TEST_LOCATIONS_FORCETZ = (
( 35.295953, -89.662186, 'Arlington, TN', 'America/Chicago'),
( 33.58, -85.85, 'Memphis, TN', 'America/Chicago'),
( 61.17, -150.02, 'Anchorage, AK', 'America/Anchorage'),
( 40.7271, -73.98, 'Shore Lake Michigan', 'America/New_York'),
( 50.1536, -8.051, 'Off Cornwall', 'Europe/London'),
( 49.2698, -123.1302, 'Vancouver', 'America/Vancouver'),
( 50.26, -9.051, 'Far off Cornwall', None)
)
def _test_tzwhere(self, locations, forceTZ):
start = datetime.datetime.now()
w = tzwhere.tzwhere(forceTZ=forceTZ)
end = datetime.datetime.now()
        print('Initialized in: ', end='')
print(end - start)
template = '{0:20s} | {1:20s} | {2:20s} | {3:2s}'
print(template.format('LOCATION', 'EXPECTED', 'COMPUTED', '=='))
for (lat, lon, loc, expected) in locations:
computed = w.tzNameAt(float(lat), float(lon), forceTZ=forceTZ)
ok = 'OK' if computed == expected else 'XX'
print(template.format(loc, str(expected), str(computed), ok))
assert computed == expected
def test_lookup(self):
self._test_tzwhere(self.TEST_LOCATIONS,forceTZ=False)
def test_forceTZ(self):
self._test_tzwhere(self.TEST_LOCATIONS_FORCETZ,forceTZ=True)
|
1644248
|
from django.contrib.auth.decorators import login_required
from django.http import HttpRequest, HttpResponse
from django.shortcuts import render
from accounts.api.views import AccountsUsersViewSet
@login_required
def update(request: HttpRequest, *args, **kwargs) -> HttpResponse:
"""Update an account."""
viewset = AccountsUsersViewSet.init("list", request, args, kwargs)
context = viewset.get_response_context()
return render(request, "accounts/users/update.html", context)
|
1644251
|
from dataclasses import dataclass
from test.util import TestCase
from threading import RLock
from marshmallow import Schema, fields
from tinydb import TinyDB
from tinydb.storages import MemoryStorage
from OpenCast.domain.model import Id
from OpenCast.domain.model.entity import Entity
from OpenCast.domain.service.identity import IdentityService
from OpenCast.infra.data.repo.error import RepoError
from OpenCast.infra.data.repo.repository import Repository
class TestEntitySchema(Schema):
id = fields.UUID()
name = fields.String()
class TestEntity(Entity):
Schema = TestEntitySchema
@dataclass
class Data:
id: Id
name: str
def __init__(self, *attrs, **kattrs):
super().__init__(self.Data, *attrs, **kattrs)
@property
def name(self):
return self._data.name
@name.setter
def name(self, value):
self._data.name = value
class RepositoryTest(TestCase):
def setUp(self):
database = TinyDB(storage=MemoryStorage)
self.repo = Repository(database, RLock(), TestEntity)
self.entity = TestEntity(IdentityService.random(), "test")
def test_create(self):
self.repo.create(self.entity)
self.assertListEqual([self.entity], self.repo.list())
def test_create_existing(self):
self.repo.create(self.entity)
with self.assertRaises(RepoError) as ctx:
self.repo.create(self.entity)
self.assertEqual(
f"cannot create: '{self.entity}' already exists", str(ctx.exception)
)
def test_update(self):
self.repo.create(self.entity)
self.entity.name = "UPDATED"
self.repo.update(self.entity)
entity = self.repo.get(self.entity.id)
self.assertEqual("UPDATED", entity.name)
def test_update_nonexistent(self):
with self.assertRaises(RepoError) as ctx:
self.repo.update(self.entity)
self.assertEqual(
f"cannot update: '{self.entity}' doesn't exist", str(ctx.exception)
)
def test_delete(self):
self.repo.create(self.entity)
self.repo.delete(self.entity)
self.assertListEqual([], self.repo.list())
def test_list(self):
self.repo.create(self.entity)
entity_list = self.repo.list()
self.assertEqual([self.entity], entity_list)
self.assertNotEqual(id(self.entity), id(entity_list[0]))
def test_list_filtered(self):
entities = [TestEntity(IdentityService.random(), f"{i}") for i in range(5)]
for entity in entities:
self.repo.create(entity)
entity_list = self.repo.list([entities[0].id, entities[2].id])
self.assertEqual([entities[0], entities[2]], entity_list)
def test_list_filtered_ordered(self):
entities = [TestEntity(IdentityService.random(), f"{i}") for i in range(5)]
for entity in entities:
self.repo.create(entity)
entity_list = self.repo.list([entities[2].id, entities[0].id])
self.assertEqual([entities[2], entities[0]], entity_list)
def test_get(self):
self.repo.create(self.entity)
repo_entity = self.repo.get(self.entity.id)
self.assertEqual(self.entity, repo_entity)
self.assertNotEqual(id(self.entity), id(repo_entity))
def test_get_nonexistent(self):
self.assertEqual(None, self.repo.get(self.entity.id))
def test_exists(self):
self.assertFalse(self.repo.exists(self.entity.id))
self.repo.create(self.entity)
self.assertTrue(self.repo.exists(self.entity.id))
|
1644270
|
import unittest
import numpy as np
import torch
from torchimage.utils import NdSpec
from torchimage.padding import Padder
from torchimage.pooling import AvgPoolNd
from torchimage.padding.utils import same_padding_width
from torchimage.shapes.conv_like import n_original_elements_1d, n_original_elements_nd
class MyTestCase(unittest.TestCase):
@staticmethod
    def n_original_elements_gt(in_size, pad_width, kernel_size, stride):
x = torch.ones(in_size, dtype=torch.int32)
padder = Padder(pad_width=pad_width, mode="constant", constant_values=0)
x = padder.forward(x, axes=None)
return x.unfold(0, size=kernel_size, step=stride).sum(dim=-1).tolist()
def test_n_original_elements_1d(self):
for i in range(20):
in_size = np.random.randint(1, 7)
pad_width = np.random.randint(0, 7, size=2).tolist()
kernel_size = np.random.randint(1, 7)
stride = np.random.randint(1, 7)
            if sum(pad_width) + in_size < kernel_size:
                # skip configurations where the kernel cannot fit,
                # instead of ending the whole test early
                continue
with self.subTest(in_size=in_size, pad_width=pad_width, kernel_size=kernel_size, stride=stride):
# print(f"{in_size=}; {pad_width=}; {kernel_size=}; {stride=}")
                expected = self.n_original_elements_gt(in_size=in_size, pad_width=pad_width, kernel_size=kernel_size, stride=stride)
# print(f"{expected=}")
actual = n_original_elements_1d(in_size=in_size, pad_width=pad_width, kernel_size=kernel_size, stride=stride)
# print(f"{actual=}")
self.assertEqual(expected, actual)
def test_n_original_elements_nd(self):
# average pooling, such that border cases (for instance) has smaller re-normalization weight
for i in range(10):
ndim = np.random.randint(1, 6)
shape = np.random.randint(10, 30, size=ndim).tolist()
kernel_size = np.random.randint(2, 8, size=ndim)
stride = np.random.randint(2, 8, size=ndim)
pad_width = NdSpec.apply(
same_padding_width,
NdSpec(kernel_size), NdSpec(stride), NdSpec(shape)
)
old_layer = AvgPoolNd(kernel_size, stride=stride, same_padder=Padder(mode="constant", constant_values=0), count_include_pad=True)
expected = torch.round(old_layer.forward(torch.ones(tuple(shape)), axes=None) * np.prod(kernel_size)).to(dtype=torch.int32)
actual = n_original_elements_nd(in_size=shape, pad_width=pad_width,
kernel_size=kernel_size, stride=stride)
with self.subTest(i=i):
self.assertTrue(torch.equal(actual, expected))
if __name__ == '__main__':
unittest.main()
|
1644289
|
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from collections import defaultdict
import numpy as np
# takes DNA sequence, outputs one-hot-encoded matrix with rows A, T, G, C
def one_hot_encoder(sequence):
l = len(sequence)
x = np.zeros((4,l),dtype = 'int8')
for j, i in enumerate(sequence):
if i == "A" or i == "a":
x[0][j] = 1
elif i == "T" or i == "t":
x[1][j] = 1
elif i == "G" or i == "g":
x[2][j] = 1
elif i == "C" or i == "c":
x[3][j] = 1
else:
return "contains_N"
return x
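# Example: one_hot_encoder("AT") gives rows A, T, G, C of
# [[1, 0], [0, 1], [0, 0], [0, 0]]; sequences containing any other
# character return the string "contains_N".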
# read names and positions from bed file
def read_bed(filename):
positions = defaultdict(list)
with open(filename) as f:
for line in f:
name, chr, start, stop = line.split()
positions[name].append((chr, int(start), int(stop)))
return positions
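# Expected bed line format (a sketch): "<name> <chr> <start> <stop>", e.g. the line
# "peak1 chr1 100 200" yields positions["peak1"] == [("chr1", 100, 200)]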
# parse fasta file and turn into dictionary
def read_fasta(genome_dir, num_chr):
chr_dict = dict()
for chr in range(1, num_chr):
chr_file_path = genome_dir + "chr{}.fa".format(chr)
chr_dict.update(SeqIO.to_dict(SeqIO.parse(open(chr_file_path), 'fasta')))
return chr_dict
#get sequences for peaks from reference genome
def get_sequences(positions, chr_dict, num_chr):
one_hot_seqs = []
peak_seqs = []
invalid_ids = []
peak_names = []
target_chr = ['chr{}'.format(i) for i in range(1, num_chr)]
for name in positions:
for (chr, start, stop) in positions[name]:
if chr in target_chr:
chr_seq = chr_dict[chr].seq
peak_seq = str(chr_seq)[start - 1:stop].lower()
one_hot_seq = one_hot_encoder(peak_seq)
if isinstance(one_hot_seq, np.ndarray): # it is valid sequence
peak_names.append(name)
peak_seqs.append(peak_seq)
one_hot_seqs.append(one_hot_seq)
else:
invalid_ids.append(name[20:])
else:
invalid_ids.append(name[20:])
one_hot_seqs = np.stack(one_hot_seqs)
peak_seqs = np.stack(peak_seqs)
peak_names = np.stack(peak_names)
return one_hot_seqs, peak_seqs, invalid_ids, peak_names
def format_intensities(intensity_file, invalid_ids):
cell_type_array = []
peak_names = []
with open(intensity_file) as f:
for i, line in enumerate(f):
if i == 0: continue
columns = line.split()
peak_name = columns[0]
if '\x1a' not in columns:
cell_act = columns[1:]
cell_type_array.append(cell_act)
peak_names.append(peak_name)
cell_type_array = np.stack(cell_type_array)
peak_names = np.stack(peak_names)
return cell_type_array, peak_names
|
1644290
|
from aloe import before, step, world
from aloe.tools import guess_types
from aloe_django.steps.models import get_model
from rest_framework.test import APIClient
from django.contrib.auth.models import User
from ..models import Friendship
@before.each_feature
def before_each_feature(feature):
world.client = APIClient()
@step('I empty the "([^"]+)" table')
def step_empty_table(self, model_name):
get_model(model_name).objects.all().delete()
@step('I create the following users:')
def step_create_users(self):
for user in guess_types(self.hashes):
User.objects.create_user(**user)
@step('I log in with username "([^"]+)" and password "([^"]+)"')
def step_log_in(self, username, password):
world.is_logged_in = world.client.login(username=username, password=password)
@step('I am logged in')
def step_confirm_log_in(self):
assert world.is_logged_in
@step('I create the following friendships:')
def step_create_friendships(self):
Friendship.objects.bulk_create([
Friendship(
id=data['id'],
user1=User.objects.get(id=data['user1']),
user2=User.objects.get(id=data['user2']),
status=data['status']
) for data in guess_types(self.hashes)
])
@step('I get a list of friends')
def step_get_friends(self):
world.response = world.client.get('/friends/')
@step('I see the following response data:')
def step_confirm_response_data(self):
response = world.response.json()
if isinstance(response, list):
assert guess_types(self.hashes) == response
else:
assert guess_types(self.hashes)[0] == response
@step('I request the following friendship:')
def step_request_friendship(self):
world.response = world.client.post('/friendship-requests/', data=guess_types(self.hashes[0]))
@step('I see the following rows in the "([^"]+)" table:')
def step_confirm_table(self, model_name):
model_class = get_model(model_name)
for data in guess_types(self.hashes):
has_row = model_class.objects.filter(**data).exists()
assert has_row
@step('I accept the friendship request with ID "([^"]+)"')
def step_accept_friendship_request(self, pk):
world.response = world.client.put(f'/friendship-requests/{pk}/', data={
'status': Friendship.ACCEPTED
})
@step('I reject the friendship request with ID "([^"]+)"')
def step_reject_friendship_request(self, pk):
world.response = world.client.put(f'/friendship-requests/{pk}/', data={
'status': Friendship.REJECTED
})
|
1644304
|
import unittest
import sys, os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
'..', '..', 'tools')))
from js2c import NormalizeFileName
class Js2ctest(unittest.TestCase):
def testNormalizeFileName(self):
self.assertEqual(NormalizeFileName('dir/mod.js'), 'mod')
self.assertEqual(NormalizeFileName('deps/mod.js'), 'internal/deps/mod')
self.assertEqual(NormalizeFileName('mod.js'), 'mod')
if __name__ == '__main__':
unittest.main()
|
1644315
|
import argparse
import hashlib
import os
import pprint
import re
import shutil
import k3down2
import k3git
import yaml
from k3color import darkyellow
from k3color import green
from k3handy import cmdpass
from k3handy import pjoin
from k3handy import to_bytes
from k3fs import fread
from .. import mistune
def sj(*args):
return ''.join([str(x) for x in args])
def msg(*args):
print('>', ''.join([str(x) for x in args]))
def indent(line):
if line == '':
return ''
return ' ' + line
def escape(s, quote=True):
s = s.replace("&", "&")
s = s.replace("<", "<")
s = s.replace(">", ">")
if quote:
s = s.replace('"', """)
return s
def add_paragraph_end(lines):
# add blank line to a paragraph block
if lines[-1] == '':
return lines
lines.append('')
return lines
def strip_paragraph_end(lines):
# remove last blank lines
if lines[-1] == '':
return strip_paragraph_end(lines[:-1])
return lines
def code_join(n):
lang = n['info'] or ''
txt = '\n'.join(['```' + lang]
+ n['text'][:-1].split('\n')
+ ['```', ''])
return txt
def block_code_to_jpg(mdrender, n, width=None, ctx=None):
txt = code_join(n)
w = width
if w is None:
w = mdrender.conf.code_width
return typ_text_to_jpg(mdrender, 'code', txt, opt={'html': {'width': w}})
def block_code_to_fixwidth_jpg(mdrender, n, ctx=None):
return block_code_to_jpg(mdrender, n, width=600, ctx=ctx)
def block_code_mermaid_to_jpg(mdrender, n, ctx=None):
return typ_text_to_jpg(mdrender, 'mermaid', n['text'])
def block_code_graphviz_to_jpg(mdrender, n, ctx=None):
return typ_text_to_jpg(mdrender, 'graphviz', n['text'])
def typ_text_to_jpg(mdrender, typ, txt, opt=None):
d = k3down2.convert(typ, txt, 'jpg', opt=opt)
fn = asset_fn(txt, 'jpg')
fwrite(mdrender.conf.asset_output_dir, fn, d)
return [r''.format(mdrender.conf.img_url(fn)), '']
def math_block_to_imgtag(mdrender, n, ctx=None):
return [k3down2.convert('tex_block', n['text'], 'imgtag')]
def math_inline_to_imgtag(mdrender, n, ctx=None):
return [k3down2.convert('tex_inline', n['text'], 'imgtag')]
def math_block_to_jpg(mdrender, n, ctx=None):
return typ_text_to_jpg(mdrender, 'tex_block', n['text'])
def math_inline_to_jpg(mdrender, n, ctx=None):
return typ_text_to_jpg(mdrender, 'tex_inline', n['text'])
def math_inline_to_plaintext(mdrender, n, ctx=None):
return [escape(k3down2.convert('tex_inline', n['text'], 'plain'))]
def to_plaintext(mdrender, n, ctx=None):
return [escape(n['text'])]
def table_to_barehtml(mdrender, n, ctx=None):
# create a markdown render to recursively deal with images etc.
mdr = MDRender(mdrender.conf, platform=importer)
md = mdr.render_node(n)
md = '\n'.join(md)
tablehtml = k3down2.convert('table', md, 'html')
return [tablehtml, '']
def table_to_jpg(mdrender, n, ctx=None):
mdr = MDRender(mdrender.conf, platform='')
md = mdr.render_node(n)
md = '\n'.join(md)
md_base_path = os.path.split(mdrender.conf.src_path)[0]
return typ_text_to_jpg(mdrender, 'md', md, opt={'html': {
'asset_base': os.path.abspath(md_base_path),
}})
def importer(mdrender, n, ctx=None):
'''
    Importer is only used to copy local images to the output dir and update image urls.
    This is used to deal with partial renderers, e.g., table_to_barehtml,
    which is not handled by the universal image importer but needs to import images when rendering a table with images.
'''
typ = n['type']
if typ == 'image':
return image_local_to_remote(mdrender, n, ctx=ctx)
return None
def zhihu_specific(mdrender, n, ctx=None):
return render_with_features(mdrender, n, ctx=ctx, features=zhihu_features)
def minimal_mistake_specific(mdrender, n, ctx=None):
return render_with_features(mdrender, n, ctx=ctx, features=minimal_mistake_features)
def wechat_specific(mdrender, n, ctx=None):
return render_with_features(mdrender, n, ctx=ctx, features=wechat_features)
def weibo_specific(mdrender, n, ctx=None):
typ = n['type']
if typ == 'image':
return image_local_to_remote(mdrender, n, ctx=ctx)
if typ == 'math_block':
return math_block_to_imgtag(mdrender, n, ctx=ctx)
if typ == 'math_inline':
return math_inline_to_plaintext(mdrender, n, ctx=ctx)
if typ == 'table':
return table_to_jpg(mdrender, n, ctx=ctx)
if typ == 'codespan':
return [escape(n['text'])]
# weibo does not support pasting <p> in <li>
if typ == 'list':
lines = []
lines.extend(mdrender.render(n['children']))
lines.append('')
return lines
if typ == 'list_item':
lines = []
lines.extend(mdrender.render(n['children']))
lines.append('')
return lines
if typ == 'block_quote':
lines = mdrender.render(n['children'])
lines = strip_paragraph_end(lines)
return lines
if typ == 'block_code':
lang = n['info'] or ''
if lang == 'mermaid':
return block_code_mermaid_to_jpg(mdrender, n, ctx=ctx)
if lang == 'graphviz':
return block_code_graphviz_to_jpg(mdrender, n, ctx=ctx)
if lang == '':
return block_code_to_jpg(mdrender, n, ctx=ctx)
else:
return block_code_to_jpg(mdrender, n, width=600, ctx=ctx)
return None
def simple_specific(mdrender, n, ctx=None):
return render_with_features(mdrender, n, ctx=ctx, features=simple_features)
class MDRender(object):
# platform specific renderer
platforms = {
'zhihu': zhihu_specific,
'wechat': wechat_specific,
'weibo': weibo_specific,
'minimal_mistake': minimal_mistake_specific,
'simple': simple_specific,
}
def __init__(self, conf, platform='zhihu'):
self.conf = conf
if isinstance(platform, str):
self.handlers = self.platforms.get(platform, lambda *x, **y: None)
else:
self.handlers = platform
def render_node(self, n, ctx=None):
"""
Render a AST node into lines of text
"""
typ = n['type']
# customized renderers:
lines = self.handlers(self, n, ctx=ctx)
if lines is not None:
return lines
else:
# can not render, continue with default handler
pass
# default renderers:
if typ == 'thematic_break':
return ['---', '']
if typ == 'paragraph':
lines = self.render(n['children'])
return ''.join(lines).split('\n') + ['']
if typ == 'text':
return [n['text']]
if typ == 'strong':
lines = self.render(n['children'])
lines[0] = '**' + lines[0]
lines[-1] = lines[-1] + '**'
return lines
if typ == 'math_block':
return ['$$', n['text'], '$$']
if typ == 'math_inline':
return ['$$ ' + n['text'].strip() + ' $$']
if typ == 'table':
return self.render(n['children']) + ['']
if typ == 'table_head':
alignmap = {
'left': ':--',
'right': '--:',
'center': ':-:',
None: '---',
}
lines = self.render(n['children'])
aligns = [alignmap[x['align']] for x in n['children']]
aligns = '| ' + ' | '.join(aligns) + ' |'
return ['| ' + ' | '.join(lines) + ' |', aligns]
if typ == 'table_cell':
lines = self.render(n['children'])
return [''.join(lines)]
if typ == 'table_body':
return self.render(n['children'])
if typ == 'table_row':
lines = self.render(n['children'])
return ['| ' + ' | '.join(lines) + ' |']
if typ == 'block_code':
# remove the last \n
return ['```' + (n['info'] or '')] + n['text'][:-1].split('\n') + ['```', '']
if typ == 'codespan':
return [('`' + n['text'] + '`')]
if typ == 'image':
if n['title'] is None:
                return ['![{alt}]({src})'.format(**n)]
else:
                return ['![{alt}]({src} "{title}")'.format(**n)]
if typ == 'list':
head = '- '
if n['ordered']:
head = '1. '
lines = self.render(n['children'], head)
return add_paragraph_end(lines)
if typ == 'list_item':
lines = self.render(n['children'])
# ctx is head passed from list
lines[0] = ctx + lines[0]
lines = lines[0:1] + [indent(x) for x in lines[1:]]
return lines
if typ == 'block_text':
lines = self.render(n['children'])
return ''.join(lines).split('\n')
if typ == 'block_quote':
lines = self.render(n['children'])
lines = strip_paragraph_end(lines)
lines = ['> ' + x for x in lines]
return lines + ['']
if typ == 'newline':
return ['']
if typ == 'block_html':
return add_paragraph_end([n['text']])
if typ == 'link':
# TODO title
lines = self.render(n['children'])
lines[0] = '[' + lines[0]
lines[-1] = lines[-1] + '](' + n['link'] + ')'
return lines
if typ == 'heading':
lines = self.render(n['children'])
lines[0] = '#' * n['level'] + ' ' + lines[0]
return lines + ['']
if typ == 'strikethrough':
lines = self.render(n['children'])
lines[0] = '~~' + lines[0]
lines[-1] = lines[-1] + '~~'
return lines
if typ == 'emphasis':
lines = self.render(n['children'])
lines[0] = '*' + lines[0]
lines[-1] = lines[-1] + '*'
return lines
if typ == 'inline_html':
return [n['text']]
if typ == 'linebreak':
return [" \n"]
print(typ, n.keys())
pprint.pprint(n)
return ['***:' + typ]
def render(self, nodes, ctx=None):
rst = []
for n in nodes:
rst.extend(self.render_node(n, ctx))
return rst
def msg(self, *args):
msg(*args)
def fix_tables(nodes):
"""
    mistune does not parse tables inside list items.
We need to recursively fix it.
"""
for n in nodes:
if 'children' in n:
fix_tables(n['children'])
if n['type'] == 'paragraph':
children = n['children']
if len(children) == 0:
continue
c0 = children[0]
if c0['type'] != 'text':
continue
txt = c0['text']
table_reg = r' {0,3}\|(.+)\n *\|( *[-:]+[-| :]*)\n((?: *\|.*(?:\n|$))*)\n*'
match = re.match(table_reg, txt)
if match:
mdr = MDRender(None, platform='')
partialmd = mdr.render(children)
partialmd = ''.join(partialmd)
parser = new_parser()
new_children = parser(partialmd)
n['children'] = new_children
def join_math_block(nodes):
"""
    A tex segment may span several paragraphs:
$$ // paragraph 1
x = 5 //
y = 3 // paragraph 2
$$ //
    This function finds all such paragraphs and merges them into a single one.
"""
for n in nodes:
if 'children' in n:
join_math_block(n['children'])
join_math_text(nodes)
def parse_math(nodes):
"""
    Extract all math segments such as ``$$ ... $$`` from a text and build
    math_block or math_inline nodes.
"""
children = []
for n in nodes:
if 'children' in n:
n['children'] = parse_math(n['children'])
if n['type'] == 'text':
new_children = extract_math(n)
children.extend(new_children)
else:
children.append(n)
return children
def join_math_text(nodes):
i = 0
while i < len(nodes) - 1:
n1 = nodes[i]
n2 = nodes[i + 1]
if ('children' in n1
and 'children' in n2
and len(n1['children']) > 0
and len(n2['children']) > 0
and n1['children'][-1]['type'] == 'text'
and n2['children'][0]['type'] == 'text'
and '$$' in n1['children'][-1]['text']):
has_dd = '$$' in n2['children'][0]['text']
n1['children'][-1]['text'] += '\n\n' + n2['children'][0]['text']
n1['children'].extend(n2['children'][1:])
nodes.pop(i + 1)
if has_dd:
i += 1
else:
i += 1
inline_math = r'\$\$(.*?)\$\$'
def extract_math(n):
"""
Extract ``$$ ... $$`` from a text node and build a new node.
The original text node is split into multiple segments.
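    For example, a text node ``'a $$x$$ b'`` is split into three nodes:
    text ``'a '``, math_inline ``'x'`` and text ``' b'``.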
"""
children = []
t = n['text']
while True:
match = re.search(inline_math, t, flags=re.DOTALL)
if match:
children.append({'type': 'text', 'text': t[:match.start()]})
children.append({'type': 'math_inline', 'text': match.groups()[0]})
t = t[match.end():]
left = children[-2]['text']
right = t
if (left == '' or left.endswith('\n\n')) and (right == '' or right.startswith('\n')):
children[-1]['type'] = 'math_block'
continue
break
children.append({'type': 'text', 'text': t})
return children
def asset_fn(text, suffix):
textmd5 = hashlib.md5(to_bytes(text)).hexdigest()
escaped = re.sub(r'[^a-zA-Z0-9_\-=]+', '', text)
fn = escaped[:32] + '-' + textmd5[:16] + '.' + suffix
return fn
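# Example: asset_fn('x=1', 'jpg') -> 'x=1-' + md5('x=1')[:16] + '.jpg';
# characters other than [a-zA-Z0-9_-=] are stripped from the name prefix.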
def image_local_to_remote(mdrender, n, ctx=None):
# {'alt': 'openacid',
# 'src': 'https://...',
# 'title': None,
# 'type': 'image'},
src = n['src']
if re.match(r'https?://', src):
return None
if src.startswith('/'):
# absolute path from CWD.
src = src[1:]
else:
# relative path from markdown containing dir.
src = os.path.join(os.path.split(mdrender.conf.src_path)[0], src)
fn = os.path.split(src)[1]
shutil.copyfile(src, pjoin(mdrender.conf.asset_output_dir, fn))
n['src'] = mdrender.conf.img_url(fn)
# Transform ast node but does not render, leave the task to default image
# renderer.
return None
def build_refs(meta):
dic = {}
if meta is None:
return dic
if 'refs' in meta:
refs = meta['refs']
for r in refs:
dic.update(r)
platform = 'zhihu'
if 'platform_refs' in meta:
refs = meta['platform_refs']
if platform in refs:
refs = refs[platform]
for r in refs:
dic.update(r)
return dic
def replace_ref_with_def(nodes, refs):
"""
Convert ``[text][link-def]`` to ``[text](link-url)``
Convert ``[link-def][]`` to ``[link-def](link-url)``
Convert ``[link-def]`` to ``[link-def](link-url)``
"""
used_defs={}
for n in nodes:
if 'children' in n:
used = replace_ref_with_def(n['children'], refs)
used_defs.update(used)
if n['type'] == 'text':
t = n['text']
link = re.match(r'^\[(.*?)\](\[([0-9a-zA-Z_\-]*?)\])?$', t)
if not link:
continue
gs = link.groups()
txt = gs[0]
if len(gs) >= 3:
definition = gs[2]
if definition is None or definition == '':
definition = txt
if definition in refs:
n['type'] = 'link'
r = refs[definition]
# TODO title
n['link'] = r.split()[0]
n['children'] = [{'type': 'text', 'text': txt}]
used_defs[definition] = r
return used_defs
def new_parser():
rdr = mistune.create_markdown(
escape=False,
renderer='ast',
plugins=['strikethrough', 'footnotes', 'table'],
)
return rdr
def extract_ref_definitions(cont):
lines = cont.split('\n')
rst = []
refs = {}
for l in lines:
r = re.match(r'\[(.*?)\]:(.*?)$', l, flags=re.UNICODE)
if r:
gs = r.groups()
refs[gs[0]] = gs[1]
else:
rst.append(l)
return '\n'.join(rst), refs
def extract_jekyll_meta(cont):
meta = None
meta_text = None
m = re.match(r'^ *--- *\n(.*?)\n---\n', cont,
flags=re.DOTALL | re.UNICODE)
if m:
cont = cont[m.end():]
meta_text = m.groups()[0].strip()
meta = yaml.safe_load(meta_text)
return cont, meta, meta_text
def render_ref_list(refs, platform):
ref_lines = ["", "Reference:", ""]
for _id, d in refs.items():
# d is in form "<url> <alt>"
url_alt = d.split()
url = url_alt[0]
if len(url_alt) == 1:
txt = _id
else:
txt = ' '.join(url_alt[1:])
txt = txt.strip('"')
txt = txt.strip("'")
ref_lines.append(
'- {id} : [{url}]({url})'.format(
id=txt, url=url
)
)
# disable paragraph list in weibo
if platform != 'weibo':
ref_lines.append('')
return ref_lines
def fwrite(*p):
cont = p[-1]
p = p[:-1]
with open(os.path.join(*p), 'wb') as f:
f.write(cont)
class LocalRepo(object):
    """
    Create relative paths for urls in ``md_path`` pointing to ``asset_dir_path``.
    """
    is_local = True
def __init__(self, md_path, asset_dir_path):
md_base = os.path.split(md_path)[0]
rel = os.path.relpath(asset_dir_path, start=md_base, )
if rel == '.':
rel = ''
self.path_pattern = pjoin(rel, '{path}')
class AssetRepo(object):
is_local = False
def __init__(self, repo_url, cdn=True):
        # TODO: test md rendering with pushed assets
self.cdn = cdn
repo_url = self.parse_shortcut_repo_url(repo_url)
gu = k3git.GitUrl.parse(repo_url)
f = gu.fields
if (f['scheme'] == 'https'
and 'committer' in f
and 'token' in f):
url = gu.fmt(scheme='https')
else:
url = gu.fmt(scheme='ssh')
host, user, repo, branch = (
f.get('host'),
f.get('user'),
f.get('repo'),
f.get('branch'),
)
print("branch:", branch)
print(f)
self.url = url
url_patterns = {
'github.com': 'https://raw.githubusercontent.com/{user}/{repo}/{branch}/{path}',
'gitee.com': 'https://gitee.com/{user}/{repo}/raw/{branch}/{path}',
}
cdn_patterns = {
'github.com': 'https://cdn.jsdelivr.net/gh/{user}/{repo}@{branch}/{path}',
}
if branch is None:
branch = self.make_default_branch()
else:
# strip '@'
branch = branch[1:]
self.host = host
self.user = user
self.repo = repo
self.branch = branch
ptn = url_patterns[host]
if self.cdn and host == 'github.com':
ptn = cdn_patterns[host]
self.path_pattern = ptn.format(
user=user,
repo=repo,
branch=branch,
path='{path}')
def parse_shortcut_repo_url(self, repo_url):
"""
        If repo_url is a shortcut referring to a local git repo remote,
        convert the shortcut to a full url.
        md2zhihu --repo .              # default remote, default branch
        md2zhihu --repo .@branch       # default remote
        md2zhihu --repo remote@branch
"""
elts = repo_url.split('@', 1)
first = elts.pop(0)
g = k3git.Git(k3git.GitOpt(), cwd='.')
is_shortcut = False
# ".": use cwd git
# ".@foo_branch": use cwd git and specified branch
if first == '.':
msg("Using current git to store assets...")
u = self.get_remote_url()
is_shortcut = True
elif g.remote_get(first) is not None:
msg("Using current git remote: {} to store assets...".format(first))
u = self.get_remote_url(first)
is_shortcut = True
if is_shortcut:
if len(elts) > 0:
u += '@' + elts[0]
msg("Parsed shortcut {} to {}".format(repo_url, u))
repo_url = u
return repo_url
def get_remote_url(self, remote=None):
g = k3git.Git(k3git.GitOpt(), cwd='.')
if remote is None:
branch = g.head_branch(flag='x')
remote = g.branch_default_remote(branch, flag='x')
remote_url = g.remote_get(remote, flag='x')
return remote_url
def make_default_branch(self):
cwd = os.getcwd().split(os.path.sep)
cwdmd5 = hashlib.md5(to_bytes(os.getcwd())).hexdigest()
branch = '_md2zhihu_{tail}_{md5}'.format(
tail=cwd[-1],
md5=cwdmd5[:8],
)
# escape special chars
branch = re.sub(r'[^a-zA-Z0-9_\-=]+', '', branch)
return branch
simple_features = dict(
image=image_local_to_remote,
math_block=math_block_to_jpg,
math_inline=math_inline_to_jpg,
table=table_to_jpg,
codespan=to_plaintext,
block_code=dict(
mermaid=block_code_mermaid_to_jpg,
graphviz=block_code_graphviz_to_jpg,
**{"": block_code_to_jpg,
"*": block_code_to_fixwidth_jpg,
},
)
)
wechat_features = dict(
image=image_local_to_remote,
math_block=math_block_to_imgtag,
math_inline=math_inline_to_imgtag,
table=table_to_barehtml,
block_code=dict(
mermaid=block_code_mermaid_to_jpg,
graphviz=block_code_graphviz_to_jpg,
**{"": block_code_to_jpg,
"*": block_code_to_fixwidth_jpg,
},
)
)
zhihu_features = dict(
image=image_local_to_remote,
math_block=math_block_to_imgtag,
math_inline=math_inline_to_imgtag,
table=table_to_barehtml,
block_code=dict(
mermaid=block_code_mermaid_to_jpg,
graphviz=block_code_graphviz_to_jpg,
)
)
# jekyll theme: minimal mistake
minimal_mistake_features = dict(
image=image_local_to_remote,
block_code=dict(
mermaid=block_code_mermaid_to_jpg,
graphviz=block_code_graphviz_to_jpg,
)
)
# type, subtype... action
#
all_features = dict(
image=dict(local_to_remote=image_local_to_remote, ),
math_block=dict(
to_imgtag=math_block_to_imgtag,
to_jpg=math_block_to_jpg,
),
math_inline=dict(
to_imgtag=math_inline_to_imgtag,
to_jpg=math_inline_to_jpg,
to_plaintext=math_inline_to_plaintext,
),
table=dict(
to_barehtml=table_to_barehtml,
to_jpg=table_to_jpg,
),
codespan=dict(to_text=to_plaintext),
block_code=dict(
graphviz=dict(
to_jpg=block_code_graphviz_to_jpg,
),
mermaid=dict(
to_jpg=block_code_mermaid_to_jpg,
),
**{"": dict(to_jpg=block_code_to_jpg),
"*": dict(to_jpg=block_code_to_fixwidth_jpg),
},
)
)
def rules_to_features(rules):
features = {}
for r in rules:
rs, act = r.split(":")
rs = rs.split("/")
f = all_features
rst = features
for typ in rs[:-1]:
f = f[typ]
if typ not in rst:
rst[typ] = {}
rst = rst[typ]
typ = rs[-1]
rst[typ] = f[typ][act]
return features
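# Example (a sketch): rules_to_features(["image:local_to_remote",
# "block_code/mermaid:to_jpg"]) builds
# {'image': image_local_to_remote,
#  'block_code': {'mermaid': block_code_mermaid_to_jpg}}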
# features: {typ:action(), typ2:{subtyp:action()}}
def render_with_features(mdrender, n, ctx=None, features=None):
typ = n['type']
f = features
if typ not in f:
return None
f = f[typ]
if callable(f):
return f(mdrender, n, ctx=ctx)
# subtype is info
lang = n['info'] or ''
if lang in f:
return f[lang](mdrender, n, ctx=ctx)
if '*' in f:
return f['*'](mdrender, n, ctx=ctx)
return None
class Config(object):
# TODO refactor var names
def __init__(self,
src_path,
platform,
output_dir,
asset_output_dir,
asset_repo_url=None,
md_output_path=None,
code_width=1000,
keep_meta=None,
ref_files=None,
jekyll=False,
rewrite=None,
):
"""
Config of markdown rendering
Args:
            src_path(str): path to the markdown to convert.
            platform(str): target platform the converted markdown must be compatible with.
            output_dir(str): the output dir path where converted/generated files are saved.
            asset_repo_url(str): url of a git repo to which output files are uploaded, i.e.
                the result markdown, copied images and generated images.
            md_output_path(str): when present, specifies the path of the result markdown or result dir.
            code_width(int): the width of images rendered from code blocks.
            keep_meta(bool): whether to keep the jekyll meta header.
"""
self.output_dir = output_dir
self.md_output_path = md_output_path
self.platform = platform
self.src_path = src_path
self.code_width = code_width
if keep_meta is None:
keep_meta = False
self.keep_meta = keep_meta
if ref_files is None:
ref_files = []
self.ref_files = ref_files
self.jekyll = jekyll
if rewrite is None:
rewrite = []
self.rewrite = rewrite
fn = os.path.split(self.src_path)[-1]
trim_fn = re.match(r'\d\d\d\d-\d\d-\d\d-(.*)', fn)
if trim_fn:
trim_fn = trim_fn.groups()[0]
else:
trim_fn = fn
if not self.jekyll:
fn = trim_fn
self.article_name = trim_fn.rsplit('.', 1)[0]
self.asset_output_dir = pjoin(asset_output_dir, self.article_name)
self.rel_dir = os.path.relpath(self.asset_output_dir, self.output_dir)
assert(self.md_output_path is not None)
if self.md_output_path.endswith('/'):
self.md_output_base = self.md_output_path
self.md_output_path = pjoin(self.md_output_path, fn)
else:
self.md_output_base = os.path.split(
os.path.abspath(self.md_output_path))[0]
if asset_repo_url is None:
self.asset_repo = LocalRepo(self.md_output_path, self.output_dir)
else:
self.asset_repo = AssetRepo(asset_repo_url)
for k in (
"src_path",
"platform",
"output_dir",
"asset_output_dir",
"md_output_base",
"md_output_path",
):
msg(darkyellow(k), ": ", getattr(self, k))
def img_url(self, fn):
url = self.asset_repo.path_pattern.format(
path=pjoin(self.rel_dir, fn))
for (pattern, repl) in self.rewrite:
url = re.sub(pattern, repl, url)
return url
def push(self):
x = dict(cwd=self.output_dir)
git_path = pjoin(self.output_dir, '.git')
has_git = os.path.exists(git_path)
cmdpass('git', 'init', **x)
cmdpass('git', 'add', '.', **x)
cmdpass('git',
'-c', "user.name='drmingdrmer'",
'-c', "user.email='<EMAIL>'",
'commit', '--allow-empty',
'-m', 'by md2zhihu by <EMAIL>',
**x)
cmdpass('git', 'push', '-f', self.asset_repo.url,
'HEAD:refs/heads/' + self.asset_repo.branch, **x)
if not has_git:
msg("Removing tmp git dir: ", self.output_dir + '/.git')
shutil.rmtree(self.output_dir + '/.git')
def convert_md(conf, handler=None):
os.makedirs(conf.output_dir, exist_ok=True)
os.makedirs(conf.asset_output_dir, exist_ok=True)
os.makedirs(conf.md_output_base, exist_ok=True)
with open(conf.src_path, 'r') as f:
cont = f.read()
cont, meta, meta_text = extract_jekyll_meta(cont)
cont, article_refs = extract_ref_definitions(cont)
refs = {}
for ref_path in conf.ref_files:
fcont = fread(ref_path)
y = yaml.safe_load(fcont)
for r in y.get('universal', []):
refs.update(r)
for r in y.get(conf.platform, []):
refs.update(r)
meta_refs = build_refs(meta)
refs.update(meta_refs)
refs.update(article_refs)
parse_to_ast = new_parser()
ast = parse_to_ast(cont)
# with open('ast', 'w') as f:
# f.write(pprint.pformat(ast))
    # TODO use feature detection to decide if we need to convert table to html
if conf.platform == 'minimal_mistake':
        # jekyll output renders tables well; no table fix needed.
pass
else:
fix_tables(ast)
# with open('fixed-table', 'w') as f:
# f.write(pprint.pformat(ast))
used_refs = replace_ref_with_def(ast, refs)
# extract already inlined math
ast = parse_math(ast)
# with open('after-math-1', 'w') as f:
# f.write(pprint.pformat(ast))
# join cross paragraph math
join_math_block(ast)
ast = parse_math(ast)
# with open('after-math-2', 'w') as f:
# f.write(pprint.pformat(ast))
if handler is None:
mdr = MDRender(conf, platform=conf.platform)
else:
mdr = MDRender(conf, platform=handler)
out = mdr.render(ast)
if conf.keep_meta:
out = ['---', meta_text, '---'] + out
out.append('')
ref_list = render_ref_list(used_refs, conf.platform)
out.extend(ref_list)
out.append('')
ref_lines = [
'[{id}]: {d}'.format(
id=_id, d=d
) for _id, d in used_refs.items()
]
out.extend(ref_lines)
with open(conf.md_output_path, 'w') as f:
f.write(str('\n'.join(out)))
def main():
# TODO refine arg names
# md2zhihu a.md --output-dir res/ --platform xxx --md-output foo/
# res/fn.md
# /assets/fn/xx.jpg
#
# md2zhihu a.md --output-dir res/ --repo a@branch --platform xxx --md-output b.md
#
# TODO then test drmingdrmer.github.io with action
parser = argparse.ArgumentParser(
description='Convert markdown to zhihu compatible')
parser.add_argument('src_path', type=str,
nargs='+',
help='path to the markdown to process')
parser.add_argument('-o', '--md-output', action='store',
                        help='specify the output path for converted mds.'
                        ' If the specified path ends with "/", it is treated as an output dir, e.g. --md-output foo/ outputs the converted md to foo/<fn>.md.'
' Otherwise it should be the path to some md file such as a/b/c.md. '
' default: <output-dir>/<fn>.md')
parser.add_argument('-d', '--output-dir', action='store',
default='_md2',
help='sepcify directory path to store outputs(default: "_md2")'
' It is the root dir of the git repo for storing assets')
parser.add_argument('--asset-output-dir', action='store',
                        help='specify the directory to store assets (default: <output-dir>).'
' If <asset-output-dir> is outside <output-dir>, nothing will be uploaded.')
parser.add_argument('-r', '--repo', action='store',
required=False,
                        help='specify the git url to store assets.'
' The url should be in a SSH form such as:'
' "git@github.com:openacid/openacid.github.io.git[@branch_name]".'
' When absent, assets are referenced by relative path and it will not push assets to remote.'
' If no branch is specified, a branch "_md2zhihu_{cwd_tail}_{md5(cwd)[:8]}" is used,'
' in which cwd_tail is the last segment of current working dir.'
' It has to be a public repo and you have the write access.'
' "-r ." to use the git in CWD to store the assets.'
)
parser.add_argument('-p', '--platform', action='store',
required=False,
default='zhihu',
choices=["zhihu", "wechat", "weibo", "simple", "minimal_mistake"],
help='convert to a platform compatible format.'
                        ' simple is a special type that produces the simplest output: only plain text and images; there will be no tables, code blocks, math, etc.'
)
parser.add_argument('--keep-meta', action='store_true',
required=False,
default=False,
                        help='whether to keep the meta header, which is wrapped between two "---" at the beginning of the file.'
' default: False'
)
parser.add_argument('--jekyll', action='store_true',
required=False,
default=False,
help='respect jekyll syntax: 1) implies <keep-meta>: do not trim md header meta;'
' 2) keep jekyll style name: YYYY-MM-DD-TITLE.md;'
' default: False'
)
parser.add_argument('--refs', action='append',
required=False,
help='external file that contains ref definitions'
                        ' A ref file is a yaml that contains a dict of lists.'
                        ' A dict key is a platform name, only visible to the <platform> argument.'
                        ' "universal" is visible to any <platform>.'
' An example of ref file data:'
' {"universal": [{"grpc":"http:.."}, {"protobuf":"http:.."}],'
' "zhihu": [{"grpc":"http:.."}, {"protobuf":"http:.."}]'
'}.'
' default: []'
)
parser.add_argument('--rewrite', action='append',
nargs=2,
required=False,
                        help='rewrite the generated image url.'
' E.g.: --rewrite "/asset/" "/resource/"'
' will transform "/asset/banner.jpg" to "/resource/banner.jpg"'
' default: []'
)
parser.add_argument('--code-width', action='store',
required=False,
default=1000,
help='specifies code image width.'
' default: 1000'
)
args = parser.parse_args()
if args.md_output is None:
args.md_output = args.output_dir + '/'
if args.asset_output_dir is None:
args.asset_output_dir = args.output_dir
if args.jekyll:
args.keep_meta = True
msg("Build markdown: ", darkyellow(args.src_path),
" into ", darkyellow(args.md_output))
msg("Build assets to: ", darkyellow(args.asset_output_dir))
msg("Git dir: ", darkyellow(args.output_dir))
msg("Gid dir will be pushed to: ", darkyellow(args.repo))
for path in args.src_path:
# TODO Config should accept only two arguments: the path and a args
conf = Config(
path,
args.platform,
args.output_dir,
args.asset_output_dir,
asset_repo_url=args.repo,
md_output_path=args.md_output,
code_width=args.code_width,
keep_meta=args.keep_meta,
ref_files=args.refs,
jekyll=args.jekyll,
rewrite=args.rewrite,
)
convert_md(conf)
msg(sj("Done building ", darkyellow(conf.md_output_path)))
if conf.asset_repo.is_local:
msg("No git repo specified")
else:
msg("Pushing ", darkyellow(conf.output_dir), " to ", darkyellow(
conf.asset_repo.url), " branch: ", darkyellow(conf.asset_repo.branch))
conf.push()
msg(green(sj("Great job!!!")))
if __name__ == "__main__":
main()
|
1644327
|
class Solution:
def isValid(self, s):
brackets_stack, lefts, rights = [], ("(", "[", "{"), (")", "]", "}")
for char in s:
if char in lefts:
brackets_stack.append(char)
elif not brackets_stack or lefts.index(brackets_stack.pop()) != rights.index(char):
return False
return not brackets_stack
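# Hedged usage sketch (illustrative addition, not part of the original solution):
if __name__ == "__main__":
    assert Solution().isValid("([]{})") is True   # properly nested brackets
    assert Solution().isValid("([)]") is False    # interleaved brackets do not match
    assert Solution().isValid("(") is False       # unclosed bracket leaves the stack non-empty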
|
1644335
|
import bpy
from islpy import *
from islplot.support import *
def make_material(name, color_diffuse, color_specular, alpha):
"""
Create a blender material.
:param name: The name.
:param color_diffuse: The diffuse color.
:param color_specular: The specular color.
:param alpha: The alpha channel.
"""
mat = bpy.data.materials.new(name)
mat.diffuse_color = color_diffuse
mat.diffuse_shader = 'LAMBERT'
mat.diffuse_intensity = 1.0
mat.specular_color = color_specular
mat.specular_shader = 'COOKTORR'
mat.specular_intensity = 0.5
mat.alpha = alpha
mat.ambient = 1
mat.use_transparency = True
return mat
# Define a set of default colors
red = make_material('Red', (1,0.1,0.1), (1,1,1), 1)
green = make_material('Green', (0,1,0), (1,1,1), 1)
blue = make_material('Blue', (0,0.3,1), (1,1,1), 1)
white_trans = make_material('White', (1,1,1), (0.2,0.2,0.2), 0.4)
white = make_material('White', (1,1,1), (1,1,1), 1)
black = make_material('Black', (0,0,0), (1,1,1), 1)
def remove_default_cube():
"""
Remove the cube that is in the blender default scene to get an empty scene.
"""
bpy.data.objects["Cube"].select = True
bpy.ops.object.delete()
def set_lamp():
"""
Set the position of the default lamp.
"""
bpy.data.objects["Lamp"].data.type = 'HEMI'
bpy.data.objects["Lamp"].location = (20,20,20)
def set_camera(location=(34,42,24), rotation=(1.08, 0.013, 2.43)):
"""
Set the location of the default camera.
:param location: The camera's location.
:param rotation: The camera's rotation.
"""
bpy.data.objects["Camera"].location = location
bpy.data.objects["Camera"].rotation_euler = rotation
def set_horizon():
"""
Set the color of the horizon.
"""
bpy.context.scene.world.horizon_color = (1,1,1)
def set_scene():
"""
Prepare the scene for rendering.
"""
remove_default_cube()
set_lamp()
set_camera()
set_horizon()
bpy.context.scene.render.resolution_percentage = 60
def save(filename):
"""
Save the current blender project.
:param filename: The location where to save the file.
"""
bpy.ops.wm.save_as_mainfile(filepath=filename)
def render(filename):
"""
Render the scene to a file.
:param filename: The location where to store the rendered file.
"""
bpy.data.scenes['Scene'].render.filepath = filename
bpy.ops.render.render( write_still=True )
def print_plane(height0=10, height1=10, color=white_trans, dim=0, units=True):
"""
Print a plane.
:param color: The color of the plane.
    :param dim: The dimension that controls the orientation and location of the plane.
:param units: If units should be marked on the plane.
:param height0: The width of the plane along the first direction.
:param height1: The width of the plane along the second direction.
"""
dim1 = None
if dim == 0:
rotation=(0,0,0)
dim1 = 1
rotation1 = (0,1.5708,0)
dim2 = 0
rotation2 = (1.5708,0,0)
resize=(height1, height0, 1)
if dim == 1:
rotation=(0,1.5708,0)
dim1 = 2
rotation1 = (1.5708,0,0)
dim2 = 1
rotation2 = (0,0,0)
resize=(1, height1, height0)
if dim == 2:
rotation=(1.5708,0,0)
dim1 = 0
rotation1 = (0,0,0)
dim2 = 2
rotation2 = (0,1.5708,0)
resize=(height0, 1, height1)
bpy.ops.mesh.primitive_plane_add(location=(0,0,0), rotation=rotation)
# FIXME: Re-enable resize
#
# This resize operation is necessary to actually form the planes. It
# is disabled as this call currently fails with an invalid context
# error when running directly in python with blender loaded as module.
# This means we currently do not render those planes.
#
# bpy.ops.transform.resize(value=resize)
ob = bpy.context.active_object
ob.data.materials.append(color)
if not units:
return
for i in range(-height0,height0, 1):
if i == 0:
continue
location=[0,0,0]
location[dim1] = i
if i % 5 == 0:
radius = 0.01 * 2
else:
radius = 0.01
bpy.ops.mesh.primitive_cylinder_add(vertices=128,
radius=radius, depth=2*height1, rotation=rotation1,
location=location)
ob = bpy.context.active_object
ob.data.materials.append(black)
for i in range(-height1,height1, 1):
if i == 0:
continue
location=[0,0,0]
location[dim2] = i
if i % 5 == 0:
radius = 0.01 * 2
else:
radius = 0.01
bpy.ops.mesh.primitive_cylinder_add(vertices=128,
radius=radius, depth=2*height0, rotation=rotation2,
location=location)
ob = bpy.context.active_object
ob.data.materials.append(black)
def print_axis(height, color, dim, unit_markers=True, labels=False):
"""
Print the axis of a coordinate system.
:param height: The length of the axis.
:param color: The color of the axis.
:param dim: The dimension for which the axis is printed.
    :param unit_markers: If unit markers should be plotted.
    :param labels: The labels that should be printed next to the axis.
"""
if dim == 2:
rotation = (0,0,0)
if dim == 1:
rotation = (-1.5708,0,0)
if dim == 0:
rotation = (0, 1.5708, 0)
bpy.ops.mesh.primitive_cylinder_add(vertices=128, radius=0.1,
depth=2 * (height+1), rotation=rotation,
location=(0,0,0))
ob = bpy.context.active_object
ob.data.materials.append(color)
location = [0,0,0]
location[dim] = height+1
bpy.ops.mesh.primitive_cone_add(vertices=128, radius1=0.2, radius2=0, depth=1,
rotation=rotation, location=location)
ob = bpy.context.active_object
ob.data.materials.append(color)
top = ob
if labels != False and (labels == True or labels[dim] != False):
location=[0,0,0]
if dim == 0:
rotation = (1.5708,0, 2 * 1.5708)
location[2] = 0.3
location[1] = 0
location[0] = height - 0.1
if dim == 1:
location[1] = height - 0.8
location[2] = 0.3
location[0] = 0
rotation = (1.5708,0,1.5708)
if dim == 2:
rotation = (1.5708,0, 2 * 1.5708)
location[2] = height - 1.2
location[1] = 0
location[0] = -0.5
bpy.ops.object.text_add(enter_editmode=True, location = location,
rotation=rotation)
bpy.ops.font.delete()
if labels[dim] == False:
text = "i%d" % dim
else:
text = labels[dim]
bpy.ops.font.text_insert(text=text)
ob = bpy.context.active_object
ob.data.size = 2
ob.data.materials.append(color)
if dim == 2:
rotation = (-1.5708,0,0)
rotation = (0,0,0)
if dim == 1:
rotation = (-1.5708,0,0)
if dim == 0:
rotation = (0, 1.5708, 0)
if unit_markers:
for i in range(-height,height+1, 1):
location = [0, 0, 0]
location[dim] = i
if i % 5 == 0:
depth = 2 * 0.05
else:
depth = 0.05
bpy.ops.mesh.primitive_cylinder_add(vertices=128,
radius=0.105, depth=depth, rotation=rotation,
location=location)
ob = bpy.context.active_object
ob.data.materials.append(white)
return top
def add_coordinate_system(size=[10,10,10], print_planes=[True, False, False],
unit_markers=True, labels=False):
"""
Plot a coordinate system.
:param size: The size of the coordinate system along the different
dimensions.
    :param print_planes: Either a single boolean value that enables or disables
                         the printing of all planes, or a vector of booleans that
                         enables each plane individually.
    :param unit_markers: If unit markers should be printed on the planes and
                         axes.
"""
axis = []
dim = 0
a = print_axis(size[2], black, dim, unit_markers, labels)
axis.append(a)
dim = 1
a = print_axis(size[1], black, dim, unit_markers, labels)
axis.append(a)
dim = 2
a = print_axis(size[0], black, dim, unit_markers, labels)
axis.append(a)
if print_planes != False:
if (print_planes == True or print_planes[0] == True):
print_plane(size[1], size[2], dim=0)
if (print_planes == True or print_planes[1] == True):
print_plane(size[0], size[1], dim=1)
if (print_planes == True or print_planes[2] == True):
print_plane(size[2], size[0], dim=2)
return axis
def print_line(start, end):
"""
Print a line between two points.
"""
if not "islplot-tmp-line-a" in bpy.data.objects:
"""
We only construct a sphere once and then copy subsequent spheres from
this one. This speeds up blender, as we avoid the additional checking
normally performed by the bpy.ops.mesh.* functions.
"""
bpy.ops.mesh.primitive_uv_sphere_add(segments=1, ring_count=1,
size=0.01, view_align=False, enter_editmode=False,
location=(0,0,0), rotation=(0,0,0), layers=(True, False,
False, False, False, False, False, False, False,
False, False, False, False, False, False,
False, False, False, False, False))
A = bpy.context.active_object
A.name = "islplot-tmp-line-a"
else:
A = bpy.data.objects["islplot-tmp-line-a"]
if not "islplot-tmp-line-b" in bpy.data.objects:
"""
We only construct a sphere once and then copy subsequent spheres from
this one. This speeds up blender, as we avoid the additional checking
normally performed by the bpy.ops.mesh.* functions.
"""
bpy.ops.mesh.primitive_uv_sphere_add(segments=1, ring_count=1,
size=0.01, view_align=False, enter_editmode=False,
location=(0,0,0), rotation=(0,0,0), layers=(True, False,
False, False, False, False, False, False, False,
False, False, False, False, False, False,
False, False, False, False, False))
B = bpy.context.active_object
B.name = "islplot-tmp-line-b"
else:
B = bpy.data.objects["islplot-tmp-line-b"]
A.location = start
B.location = end
l = [A,B]
draw_curve = bpy.data.curves.new('draw_curve','CURVE')
draw_curve.dimensions = '3D'
spline = draw_curve.splines.new('BEZIER')
spline.bezier_points.add(len(l)-1)
curve = bpy.data.objects.new('curve',draw_curve)
bpy.context.scene.objects.link(curve)
# Curve settings for new curve
draw_curve.resolution_u = 64
draw_curve.fill_mode = 'FULL'
draw_curve.bevel_depth = 0.02
draw_curve.bevel_resolution = 0.02
# Assign bezier points to selection object locations
for i in range(len(l)):
p = spline.bezier_points[i]
p.co = l[i].location
p.handle_right_type="VECTOR"
p.handle_left_type="VECTOR"
curve.data.materials.append(black)
def print_face_borders(vertices, faces):
"""
Print lines along the edges of a set of faces.
:param faces: The faces for which to print the edges.
:param vertices: The locations of the vertices.
"""
for face in faces:
for i in range(len(face)):
a = vertices[face[i]]
b = vertices[face[(i+1)%len(face)]]
print_line(a, b)
def print_sphere(location):
"""
Print a sphere at a given location.
:param location: The location of the sphere.
"""
if not "islplot-tmp-sphere" in bpy.data.objects:
"""
We only construct a sphere once and then copy subsequent spheres from
this one. This speeds up blender, as we avoid the additional checking
normally performed by the bpy.ops.mesh.* functions.
"""
bpy.ops.mesh.primitive_uv_sphere_add(segments=8, ring_count=8,
size=0.1, view_align=False, enter_editmode=False,
location=(0,0,0), rotation=(0,0,0), layers=(True, False,
False, False, False, False, False, False, False,
False, False, False, False, False, False,
False, False, False, False, False))
sphere = bpy.context.active_object
sphere.name = "islplot-tmp-sphere"
sphere.select = False
sphere.data.materials.append(black)
else:
sphere = bpy.data.objects["islplot-tmp-sphere"]
l = location
ob = sphere.copy()
ob.name = "Sphere (%d, %d, %d)" % (l[0], l[1], l[2])
ob.location = l
ob.data = sphere.data.copy()
bpy.context.scene.objects.link(ob)
return ob
def plot_bset_shape(bset_data, name, material, borders=True):
"""
Given an basic set, plot the shape formed by the constraints that define
the basic set.
:param bset_data: The basic set to plot.
:param name: The name the resulting mesh should have.
:param material: The material to use for the faces.
    :param borders: Print borders of the shape.
"""
vertices, faces = get_vertices_and_faces(bset_data)
if borders:
print_face_borders(vertices, faces)
bpy.ops.object.add(type='MESH')
ob = bpy.context.object
ob.name = name
me = ob.data
me.from_pydata(vertices, [], faces)
me.materials.append(material)
me.update()
ob.location[0] = 0
ob.location[1] = 0
ob.location[2] = 0
bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY', center='MEDIAN')
return ob
def plot_set_points(set_data):
points = bset_get_points(set_data, only_hull=True)
for point in points:
s = print_sphere(point)
def plot_bset(bset_data, color, name, add_spheres=True, borders=True):
tile = plot_bset_shape(bset_data, name, color, borders)
if add_spheres:
plot_set_points(bset_data)
bpy.context.scene.update()
return tile
def plot_all(schedule, dimensions_to_visualize, add_spheres=False, borders=True,
get_color=None):
"""
Given a schedule, we print the individual tiles.
TODO: This is just a quick hack. This code should be shared between the
different renderers.
:param schedule: The schedule to visualize.
    :param dimensions_to_visualize: The map used to define the dimensions that
will be visualized.
:param add_spheres: Print the individual elements in the set.
    :param get_color: A function that takes a tile id and returns the
corresponding tile color.
"""
tileIDSet = schedule.range()
tileIDs = []
tileIDSet.foreach_point(tileIDs.append)
tileIDs = sort_points(tileIDs)
for tileID in tileIDs:
tileIDSet = Set.from_point(tileID)
tileSet = schedule.intersect_range(tileIDSet).domain()
tileSet = tileSet.apply(dimensions_to_visualize)
assert tileSet == tileSet.convex_hull()
tileSet = tileSet.convex_hull()
color = get_color(tileID)
name = "Tile " + str(get_point_coordinates(tileID))
plot_bset(tileSet, color, name, add_spheres, borders)
|
1644350
|
from setuptools import setup, Extension
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
setup(
name='int8mm',
ext_modules=[
CUDAExtension('int8mm_cuda',
['int8mm.cpp',
'int8mm_kernel.cu',],
library_dirs=['/usr/lib/x86_64-linux-gnu'],
include_dirs=['/niti/pytorch/Common'],
libraries=['cublas'])
],
cmdclass={
'build_ext': BuildExtension
})
|
1644367
|
import FWCore.ParameterSet.Config as cms
process = cms.Process('MERGEDQM')
process.load('Configuration.EventContent.EventContent_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
# Input source
process.source = cms.Source("PoolSource",
processingMode = cms.untracked.string('RunsAndLumis'),
fileNames = cms.untracked.vstring()
)
process.options = cms.untracked.PSet()
# Output definition
process.output = cms.OutputModule("PoolOutputModule",
splitLevel = cms.untracked.int32(0),
outputCommands = process.DQMEventContent.outputCommands,
fileName = cms.untracked.string(''),
dataset = cms.untracked.PSet(
filterName = cms.untracked.string(''),
dataTier = cms.untracked.string('')
)
)
process.DQMoutput_step = cms.EndPath(process.output)
|
1644378
|
import os
from os.path import isfile, isdir, getmtime, dirname, splitext, getsize
from tempfile import mkstemp
from shutil import copyfile
try:
from PIL import Image, ImageFilter
except:
import Image, ImageFilter
from sorl.thumbnail import defaults
from sorl.thumbnail.processors import get_valid_options, dynamic_import
class ThumbnailException(Exception):
# Stop Django templates from choking if something goes wrong.
silent_variable_failure = True
class Thumbnail(object):
imagemagick_file_types = defaults.IMAGEMAGICK_FILE_TYPES
def __init__(self, source, requested_size, opts=None, quality=85,
dest=None, convert_path=defaults.CONVERT,
wvps_path=defaults.WVPS, processors=None):
# Paths to external commands
self.convert_path = convert_path
self.wvps_path = wvps_path
# Absolute paths to files
self.source = source
self.dest = dest
# Thumbnail settings
try:
x, y = [int(v) for v in requested_size]
except (TypeError, ValueError):
raise TypeError('Thumbnail received invalid value for size '
'argument: %s' % repr(requested_size))
else:
self.requested_size = (x, y)
try:
self.quality = int(quality)
if not 0 < quality <= 100:
raise ValueError
except (TypeError, ValueError):
raise TypeError('Thumbnail received invalid value for quality '
'argument: %r' % quality)
# Processors
if processors is None:
processors = dynamic_import(defaults.PROCESSORS)
self.processors = processors
# Handle old list format for opts.
opts = opts or {}
if isinstance(opts, (list, tuple)):
opts = dict([(opt, None) for opt in opts])
# Set Thumbnail opt(ion)s
VALID_OPTIONS = get_valid_options(processors)
for opt in opts:
if not opt in VALID_OPTIONS:
raise TypeError('Thumbnail received an invalid option: %s'
% opt)
self.opts = opts
if self.dest is not None:
self.generate()
def generate(self):
"""
Generates the thumbnail if it doesn't exist or if the file date of the
source file is newer than that of the thumbnail.
"""
# Ensure dest(ination) attribute is set
if not self.dest:
raise ThumbnailException("No destination filename set.")
if not isinstance(self.dest, basestring):
# We'll assume dest is a file-like instance if it exists but isn't
# a string.
self._do_generate()
elif not isfile(self.dest) or (self.source_exists and
getmtime(self.source) > getmtime(self.dest)):
# Ensure the directory exists
directory = dirname(self.dest)
if directory and not isdir(directory):
os.makedirs(directory)
self._do_generate()
def _check_source_exists(self):
"""
Ensure the source file exists. If source is not a string then it is
assumed to be a file-like instance which "exists".
"""
if not hasattr(self, '_source_exists'):
self._source_exists = (self.source and
(not isinstance(self.source, basestring) or
isfile(self.source)))
return self._source_exists
source_exists = property(_check_source_exists)
def _get_source_filetype(self):
"""
        Set the source filetype. First it tries to use magic; if that raises
        ImportError, it falls back to the file extension.
"""
if not hasattr(self, '_source_filetype'):
if not isinstance(self.source, basestring):
# Assuming a file-like object - we won't know it's type.
return None
try:
import magic
except ImportError:
self._source_filetype = splitext(self.source)[1].lower().\
replace('.', '').replace('jpeg', 'jpg')
else:
m = magic.open(magic.MAGIC_NONE)
m.load()
ftype = m.file(self.source)
if ftype.find('Microsoft Office Document') != -1:
self._source_filetype = 'doc'
elif ftype.find('PDF document') != -1:
self._source_filetype = 'pdf'
elif ftype.find('JPEG') != -1:
self._source_filetype = 'jpg'
else:
self._source_filetype = ftype
return self._source_filetype
source_filetype = property(_get_source_filetype)
# data property is the image data of the (generated) thumbnail
def _get_data(self):
if not hasattr(self, '_data'):
try:
self._data = Image.open(self.dest)
except IOError, detail:
raise ThumbnailException(detail)
return self._data
def _set_data(self, im):
self._data = im
data = property(_get_data, _set_data)
# source_data property is the image data from the source file
def _get_source_data(self):
if not hasattr(self, '_source_data'):
if not self.source_exists:
raise ThumbnailException("Source file: '%s' does not exist." %
self.source)
if self.source_filetype == 'doc':
self._convert_wvps(self.source)
elif self.source_filetype in self.imagemagick_file_types:
self._convert_imagemagick(self.source)
else:
self.source_data = self.source
return self._source_data
def _set_source_data(self, image):
if isinstance(image, Image.Image):
self._source_data = image
else:
try:
self._source_data = Image.open(image)
except IOError, detail:
raise ThumbnailException("%s: %s" % (detail, image))
except MemoryError:
raise ThumbnailException("Memory Error: %s" % image)
source_data = property(_get_source_data, _set_source_data)
def _convert_wvps(self, filename):
try:
import subprocess
except ImportError:
raise ThumbnailException('wvps requires the Python 2.4 subprocess '
'package.')
tmp = mkstemp('.ps')[1]
try:
p = subprocess.Popen((self.wvps_path, filename, tmp),
stdout=subprocess.PIPE)
p.wait()
except OSError, detail:
os.remove(tmp)
raise ThumbnailException('wvPS error: %s' % detail)
self._convert_imagemagick(tmp)
os.remove(tmp)
def _convert_imagemagick(self, filename):
try:
import subprocess
except ImportError:
raise ThumbnailException('imagemagick requires the Python 2.4 '
'subprocess package.')
tmp = mkstemp('.png')[1]
if 'crop' in self.opts or 'autocrop' in self.opts:
x, y = [d * 3 for d in self.requested_size]
else:
x, y = self.requested_size
try:
p = subprocess.Popen((self.convert_path, '-size', '%sx%s' % (x, y),
'-antialias', '-colorspace', 'rgb', '-format', 'PNG24',
'%s[0]' % filename, tmp), stdout=subprocess.PIPE)
p.wait()
except OSError, detail:
os.remove(tmp)
raise ThumbnailException('ImageMagick error: %s' % detail)
self.source_data = tmp
os.remove(tmp)
def _do_generate(self):
"""
Generates the thumbnail image.
This a semi-private method so it isn't directly available to template
authors if this object is passed to the template context.
"""
im = self.source_data
for processor in self.processors:
im = processor(im, self.requested_size, self.opts)
self.data = im
filelike = not isinstance(self.dest, basestring)
if not filelike:
dest_extension = os.path.splitext(self.dest)[1][1:]
format = None
else:
dest_extension = None
format = 'JPEG'
if (self.source_filetype and self.source_filetype == dest_extension and
self.source_data == self.data):
copyfile(self.source, self.dest)
else:
try:
im.save(self.dest, format=format, quality=self.quality,
optimize=1)
except IOError:
# Try again, without optimization (PIL can't optimize an image
# larger than ImageFile.MAXBLOCK, which is 64k by default)
try:
im.save(self.dest, format=format, quality=self.quality)
except IOError, detail:
raise ThumbnailException(detail)
if filelike:
self.dest.seek(0)
# Some helpful methods
def _dimension(self, axis):
if self.dest is None:
return None
return self.data.size[axis]
def width(self):
return self._dimension(0)
def height(self):
return self._dimension(1)
def _get_filesize(self):
if self.dest is None:
return None
if not hasattr(self, '_filesize'):
self._filesize = getsize(self.dest)
return self._filesize
filesize = property(_get_filesize)
def _source_dimension(self, axis):
if self.source_filetype in ['pdf', 'doc']:
return None
else:
return self.source_data.size[axis]
def source_width(self):
return self._source_dimension(0)
def source_height(self):
return self._source_dimension(1)
def _get_source_filesize(self):
if not hasattr(self, '_source_filesize'):
self._source_filesize = getsize(self.source)
return self._source_filesize
source_filesize = property(_get_source_filesize)
|
1644415
|
from collections import OrderedDict
import pytest
from .model import PublishedApplication
class TestPublishedApplicationFullText:
def test_fetch_publication(self):
pub_no = 20_160_009_839
pub = PublishedApplication.objects.get(pub_no)
assert (
pub.title
== "POLYMER PRODUCTS AND MULTI-STAGE POLYMERIZATION PROCESSES FOR THE PRODUCTION THEREOF"
)
assert len(pub.abstract) == 651
def test_publication_claims(self):
pub_no = 20_160_009_839
pub = PublishedApplication.objects.get(pub_no)
assert len(pub.parsed_claims) == 25
# Test Claim 1
claim_1 = pub.parsed_claims[0]
print(claim_1.text)
assert claim_1.number == 1
assert len(claim_1.text) == 487
assert len(claim_1.limitations) == 3
assert claim_1.independent
# Test Dependent Claim 2
claim_2 = pub.parsed_claims[1]
assert claim_2.number == 2
assert claim_2.dependent
def test_us_pub_20170370151(self):
pub_no = 20_170_370_151
pub = PublishedApplication.objects.get(pub_no)
for i in range(6):
assert pub.parsed_claims[i].text == f"{i+1}. (canceled)"
assert (
"A system to control directional drilling in borehole drilling for"
in pub.parsed_claims[6].text
)
def test_search_classification(self):
query = "CCL/166/308.1 AND APD/19000101->20121005"
results = PublishedApplication.objects.filter(query=query)
assert len(results) == 493
assert results[50].publication_number == "20130199782"
assert len(list(results[:15])) == 15
counter = 0
for i in results:
counter += 1
assert counter == 493
def test_empty_search_result(self):
query = "CCL/726/22 AND APD/19000101->20000619"
results = PublishedApplication.objects.filter(query=query)
assert len(results) == 0
counter = 0
for i in results:
counter += 1
assert counter == 0
def test_nonstandard_claim_format(self):
obj = PublishedApplication.objects.get("20170260839")
assert obj.parsed_claims[0].text[:39] == "1. A method of well ranging comprising:"
def test_can_get_images(self):
pat = PublishedApplication.objects.get("20090150362")
images = pat.images
assert images.pdf_url == 'https://pdfaiw.uspto.gov/fdd/62/2009/03/015/0.pdf'
assert images.sections == {'Front Page': (1, 1), 'Drawings': (2, 12), 'Specifications': (13, 23), 'Claims': (24, 24)}
|
1644417
|
import numpy as np
import cv2
from os import path as osp
from tqdm import tqdm
import shutil
import sys
def imwrite(path, img):
img = (img[:, :, [2,1,0]] * 255.0).round().astype(np.uint8)
cv2.imwrite(path, img)
def imread(path):
img = cv2.imread(path)
return img[:, :, [2, 1, 0]]
def tmap(x):
'''
    Tone mapping algorithm. Referred to as the simple tone-mapped domain.
'''
return x / (x + 0.25)
def ccm_info_get(ccm_txt):
with open(ccm_txt) as fi:
for line in fi:
(key, val) = line.split(':')
val_list = val.split()
ccm = [np.float32(v) for v in val_list]
ccm = np.array(ccm)
ccm = ccm.reshape((3, 3))
return ccm
def ccmProcess_rgb(img, ccm):
'''
Input images are in RGB domain.
'''
new_img = img.copy()
new_img[:,:,0] = ccm[0,0] * img[:,:,0] + ccm[0,1]* img[:,:,1] + \
ccm[0,2] * img[:,:,2]
new_img[:,:,1] = ccm[1,0] * img[:,:,0] + ccm[1,1]* img[:,:,1] + \
ccm[1,2] * img[:,:,2]
new_img[:,:,2] = ccm[2,0] * img[:,:,0] + ccm[2,1]* img[:,:,1] + \
ccm[2,2] * img[:,:,2]
return new_img
def cc_img(img, ccm):
'''
Color correct an image given corresponding matrix.
Assume images in linear domain.
'''
# clip to fit ZTE sensor
img = np.clip(img, 0, 16.0)
img_cc = ccmProcess_rgb(img, ccm)
img_cc = np.clip(img_cc, 0, 16)
return img_cc
def WB(img, ref):
'''
Simple white balance algorithm to copy color from reference image.
Assume both images range [0, 1].
'''
balanced_img = np.zeros_like(img, dtype=np.float32)
for c in range(3):
balanced_img[:, :, c] = img[:, :, c] / img[:, :, c].sum() * ref[:, :, c].sum()
balanced_img = np.clip(balanced_img, 0, 1)
return balanced_img
def simple_to_linear(img, linear_max=500):
'''
From simple tone-mapped domain to linear domain.
'''
img = np.clip(img, 0, tmap(linear_max))
img = img / (4 * (1-img))
return img
def linear_to_gamma(img, linear_max=12):
A = 1 / linear_max**(1/2.8)
img = np.clip(img, 0, linear_max)
img = A*(img**(1/2.8))
return img
def contrast(img, limit=1.0):
'''
Apply contrast enhancement. Tune argument "limit" to adjust.
'''
img = (img[:, :, [2,1,0]] * 255.0).round().astype(np.uint8)
clahe = cv2.createCLAHE(clipLimit=limit, tileGridSize=(8,8))
lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB) # convert from BGR to LAB color space
l, a, b = cv2.split(lab) # split on 3 different channels
l2 = clahe.apply(l) # apply CLAHE to the L-channel
lab = cv2.merge((l2,a,b)) # merge channels
img2 = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR) # convert from LAB to BGR
return img2[:, :, [2,1,0]] / 255.0
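# Hedged usage sketch (illustrative addition, not part of the original module):
if __name__ == "__main__":
    x = np.array([0.0, 0.5, 1.0, 4.0], dtype=np.float32)
    # simple_to_linear inverts tmap for values below linear_max
    assert np.allclose(simple_to_linear(tmap(x)), x)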
|
1644462
|
from django.db import models
class Parent(models.Model):
name = models.TextField()
class Child(models.Model):
parent = models.ForeignKey(
Parent,
on_delete=models.CASCADE,
related_name='children',
)
name = models.TextField()
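# Hedged usage sketch (illustrative, not part of the original models):
#   parent = Parent.objects.create(name="p1")
#   Child.objects.create(parent=parent, name="c1")
#   parent.children.count()  # -> 1, reverse access via the FK's related_name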
|
1644491
|
from typing import List


class Solution:
def findContentChildren(self, g: List[int], s: List[int]) -> int:
g.sort()
s.sort()
satisfiedChildren, childIndex, cookieIndex = 0, 0, 0
while childIndex < len(g) and cookieIndex < len(s):
if s[cookieIndex] >= g[childIndex]:
satisfiedChildren += 1
childIndex += 1
cookieIndex += 1
return satisfiedChildren
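# Hedged usage sketch (illustrative addition, not part of the original solution):
if __name__ == "__main__":
    # greed factors [1, 2, 3] and cookie sizes [1, 1]: only the first child is satisfied
    assert Solution().findContentChildren([1, 2, 3], [1, 1]) == 1
    # cookie sizes [1, 2, 3] can satisfy both children with greed factors [1, 2]
    assert Solution().findContentChildren([1, 2], [1, 2, 3]) == 2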
|
1644561
|
from horovod.runner.driver.driver_service import (_run_probe,
HorovodRunDriverService)
def _actor_launch_task_servers(*, node_actors, num_hosts, driver_addresses,
settings):
for index, w in enumerate(node_actors):
def execute_task_fn(_):
from horovod.runner.task_fn import _task_fn
_task_fn(index, num_hosts, driver_addresses, settings)
w.execute.remote(execute_task_fn)
def _driver_fn(node_actors, all_host_names, local_host_names, settings):
"""Probes routable nics across all hostnames.
Assumes the task service on each worker has already started.
Have them register with the driver service.
Launches the driver service. Each worker probes all the
interfaces of the worker index + 1 (in a ring manner) and
only keeps the routed interfaces.
Returns the intersection of the set of all the routed interfaces
on all the workers.
:param all_host_names: list of addresses. for example,
['worker-0','worker-1']
['10.11.11.11', '10.11.11.12']
:type all_host_names: list(string)
:param local_host_names: host names that resolve into a local addresses.
:type local_host_names: set
:param settings: the object that contains the setting for running horovod
:type settings: horovod.runner.common.util.settings.Settings
:return: example: ['eth0', 'eth1']
:rtype: list[string]
"""
# Launch a TCP server called driver service on the host running horovod
num_hosts = len(all_host_names)
driver = HorovodRunDriverService(num_hosts, settings.key, settings.nics)
if settings.verbose >= 2:
print('Launched horovod server.')
    # Have all the workers register themselves with the driver service.
if len(node_actors) != num_hosts:
raise ValueError(f"Number of node actors ({len(node_actors)}) "
f"must match num_hosts ({num_hosts}).")
_actor_launch_task_servers(
node_actors=node_actors,
num_hosts=len(all_host_names),
driver_addresses=driver.addresses(),
settings=settings)
if settings.verbose >= 2:
print('Attempted to launch horovod task servers.')
try:
return _run_probe(driver, settings, num_hosts)
finally:
driver.shutdown()
|
1644647
|
from django.apps import AppConfig
from django.utils.module_loading import autodiscover_modules
from django.utils.translation import gettext_lazy as _
from .settings import MODULE_INIT_DEFAULT
class UwsgifyConfig(AppConfig):
name = 'uwsgiconf.contrib.django.uwsgify'
verbose_name = _('uWSGI Integration')
def ready(self):
try:
import uwsgi
# This will handle init modules discovery for non-embedded.
# Part for embedding is done in toolbox.SectionMutator.mutate
# via in master process import.
autodiscover_modules(MODULE_INIT_DEFAULT)
except ImportError:
pass
|
1644683
|
import unittest
import os
import numpy as np
from rastervision.core.data import RasterStats, StatsTransformerConfig
from rastervision.pipeline import rv_config
class TestRasterTransformer(unittest.TestCase):
def test_stats_transformer(self):
raster_stats = RasterStats()
raster_stats.means = list(np.ones((4, )))
raster_stats.stds = list(np.ones((4, )) * 2)
with rv_config.get_tmp_dir() as tmp_dir:
stats_uri = os.path.join(tmp_dir, 'stats.json')
raster_stats.save(stats_uri)
# All values have z-score of 1, which translates to
# uint8 value of 170.
transformer = StatsTransformerConfig(stats_uri=stats_uri).build()
chip = np.ones((2, 2, 4)) * 3
out_chip = transformer.transform(chip)
expected_out_chip = np.ones((2, 2, 4)) * 170
np.testing.assert_equal(out_chip, expected_out_chip)
if __name__ == '__main__':
unittest.main()
|
1644700
|
from typing import Tuple, Callable, Union, Any
import numpy as np
import torch
from torch.nn import Module, Parameter
from netlens.image_proc import IMAGENET_MEAN, IMAGENET_STD
class RawParam(Module):
"""
A raw 'parameterized image', that just wraps a normal tensor.
This has to be the first layer in the network. It wraps the input and is differentiable
"""
def __init__(self, input: torch.Tensor, cloned: bool = True):
super().__init__()
self.param = Parameter(input.clone().detach().requires_grad_() if cloned else input)
def forward(self):
return self.param
def __repr__(self):
return f'{self.__class__.__name__}: {self.param.shape}'
# Decorrelation code ported from Lucid: https://github.com/tensorflow/lucid
color_correlation_svd_sqrt = np.asarray([[0.26, 0.09, 0.02],
[0.27, 0.00, -0.05],
[0.27, -0.09, 0.03]]).astype("float32")
max_norm_svd_sqrt = np.max(np.linalg.norm(color_correlation_svd_sqrt, axis=0))
def _get_default_device():
return 'cuda' if torch.cuda.is_available() else 'cpu'
def _linear_decorrelate_color(t: torch.Tensor) -> torch.Tensor:
"""Multiply input by sqrt of empirical (ImageNet) color correlation matrix.
If you interpret t's innermost dimension as describing colors in a
decorrelated version of the color space (which is a very natural way to
describe colors -- see discussion in Feature Visualization article) the way
    to map back to normal colors is to multiply by the square root of your
    color correlation matrix.
"""
assert t.shape[0] == 1 # must be (N,C,W,H)
t_flat = t.squeeze(0).view((3, -1))
color_correlation_normalized = torch.tensor(color_correlation_svd_sqrt / max_norm_svd_sqrt, device=t.device)
t_flat = color_correlation_normalized @ t_flat
t = t_flat.view(t.shape)
return t
def rfft2d_freqs(h: int, w: int) -> np.ndarray:
"""Computes 2D spectrum frequencies."""
fy = np.fft.fftfreq(h)[:, None]
fx = np.fft.fftfreq(w)
return np.sqrt(fx * fx + fy * fy)
def _assert_image_param_inputs(im_initial: torch.Tensor, size: Tuple[int, int]):
assert (im_initial is not None) ^ (size is not None), "Exactly one of 'im_initial' or 'size' has to be specified."
if im_initial is not None:
assert im_initial.dim() == 4 and im_initial.shape[:2] == (1, 3), "The image must be of shape (1,3,H,W)"
size = im_initial.shape[2:]
device = im_initial.device
else:
device = _get_default_device()
return size, device
def fourier_image(im_initial: torch.Tensor = None, size: Tuple[int, int] = None, spectrum_scale: float = 0.01, decay_power: float = 1.0) \
-> Tuple[torch.Tensor, Callable]:
"""
Image initialized in the Fourier domain
"""
size, device = _assert_image_param_inputs(im_initial, size)
# this is needed to compute only once
freqs = rfft2d_freqs(*size)
scale = 1.0 / np.maximum(freqs, 1.0 / max(*size)) ** decay_power
scale *= np.sqrt(size[0] * size[1])
scale = torch.tensor(scale, dtype=torch.float32, device=device)
def _get_spectrum(_im):
scaled_spectrum_t = torch.rfft(_im.squeeze(0), signal_ndim=2, onesided=False)
return scaled_spectrum_t / scale[None, ..., None]
def _get_image(_spectrum):
scaled_spectrum_t = scale[None, ..., None] * _spectrum
return torch.irfft(scaled_spectrum_t, signal_ndim=2, onesided=False, signal_sizes=size).unsqueeze(0)
if im_initial is not None:
spectrum = _get_spectrum(im_initial.clone().detach()).detach()
else:
spectrum = (spectrum_scale * torch.randn((3, *freqs.shape, 2))).to(device) # dimensions: (C,W,H,Re/Im)
return spectrum, _get_image
def random_image(im_initial: torch.Tensor = None, size: Tuple[int, int] = None, sd: float = 0.5) -> Tuple[torch.Tensor, Callable]:
"""
Create a random 'image' from a normal distribution
"""
size, device = _assert_image_param_inputs(im_initial, size)
if im_initial is not None:
im = im_initial.clone().detach()
else:
im = torch.randn(1, 3, *size, device=device) * sd
return im, lambda x: x
class ImageParam(Module):
"""Class to create a parameterized image.
Parameters:
        size: size of image, can be a tuple or an integer. If it's an integer, the image will be square.
fft (bool): parameterize the image in the Fourier domain.
decorrelate (bool): decorrelate the colours of the image.
sigmoid (bool): apply sigmoid after decorrelation to ensure values are in range(0,1)
        kwargs: passed on to the image function fourier_image or random_image.
"""
def __init__(self, im_initial: torch.Tensor = None, size: Union[int, Tuple[int, int]] = None, fft: bool = True, decorrelate: bool = True,
sigmoid: bool = True, norm_stats: Tuple[Any, Any] = (IMAGENET_MEAN, IMAGENET_STD), **kwargs):
super().__init__()
self.fft = fft
self.decorrelate = decorrelate
self.sigmoid = sigmoid
self.norm_stats = norm_stats
im_func = fourier_image if fft else random_image
        size = (size, size) if isinstance(size, int) else size
        self.size = size  # keep for __repr__ (may be None when im_initial is given)
self.param, self.get_image = im_func(im_initial, size, **kwargs)
self.param = Parameter(self.param)
def forward(self):
im = self.get_image(self.param)
if self.decorrelate:
im = _linear_decorrelate_color(im)
im = torch.sigmoid(im) if self.sigmoid else im.clamp(min=0.0, max=1.0)
return self.normalize(im)
def normalize(self, im):
if self.norm_stats is None:
return im
mean = torch.as_tensor(self.norm_stats[0], dtype=im.dtype, device=im.device)
std = torch.as_tensor(self.norm_stats[1], dtype=im.dtype, device=im.device)
return im.sub(mean[:, None, None]).div(std[:, None, None])
def denormalize(self, im):
if self.norm_stats is None:
return im
mean = torch.as_tensor(self.norm_stats[0], dtype=im.dtype, device=im.device)
std = torch.as_tensor(self.norm_stats[1], dtype=im.dtype, device=im.device)
return im.mul(std[:, None, None]).add(mean[:, None, None]).clamp(min=0.0, max=1.0)
def __repr__(self):
return f"{self.__class__.__name__}: {self.size}px, fft={self.fft}, decorrelate={self.decorrelate}"
|
1644710
|
from django.apps import AppConfig
class Class26Config(AppConfig):
name = 'tutorials.class_26'
|
1644720
|
inp = int(input("Enter number of test cases: "))
n = []
k = []
for i in range(0 , inp):
m = input("Enter X and Y: ")
s , o = m.split()
n.append(s)
k.append(o)
n = [int(i) for i in n]
k = [int(i) for i in k]
for i in range(0 , inp):
if k[i] > n[i]:
print("Destination unreachable")
else:
print(n[i])
|
1644767
|
class Solution:
def validWordAbbreviation(self, word: str, abbr: str) -> bool:
i = num = 0
for c in abbr:
if c.isdigit():
num = num * 10 + ord(c) - ord('0')
if num == 0:
return False
else:
i += num
if i >= len(word) or word[i] != c:
return False
num = 0
i += 1
i += num
return i == len(word)
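# Hedged usage sketch (illustrative addition, not part of the original solution):
if __name__ == "__main__":
    # "i12iz4n" skips 12 then 4 characters of "internationalization"
    assert Solution().validWordAbbreviation("internationalization", "i12iz4n") is True
    # "a2e" skips to index 3 of "apple", which is 'l', not 'e'
    assert Solution().validWordAbbreviation("apple", "a2e") is False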
|
1644799
|
import sys
import json
sys.path.append('../src')
from classifier import Classifier
clf = Classifier()
f = open('nsfc_test.json', 'r', encoding='utf-8')
s = f.read()
j = json.loads(s, encoding='utf-8')
data = j['nsfc']
for level in [1, 2, 3]:
cnt = 0
top1 = 0
top5 = 0
length = level * 2 + 1
for item in data:
if len(item['sid']) < length:
continue
subject = clf.classify([item['title']], level=level, lang_zh=True)
if subject == {}:
continue
cnt += 1
if subject['level{}'.format(level)][0]['code'] == item['sid'][0:length]:
top1 += 1
for ret in subject['level{}'.format(level)]:
if ret['code'] == item['sid'][0:length]:
top5 += 1
break
print('level', level, ':', top1/cnt, ' ', top5/cnt, ' ', cnt)
|
1644854
|
from asposewords import Settings
from com.aspose.words import Document
from com.aspose.words import DocumentBuilder
class HelloWorld:
def __init__(self):
dataDir = Settings.dataDir + 'quickstart/'
doc = Document()
builder = DocumentBuilder(doc)
builder.writeln('Hello World!')
doc.save(dataDir + 'HelloWorld.docx')
print "Document saved."
if __name__ == '__main__':
HelloWorld()
|
1644880
|
import logging
import sys
from typing import List
from core.logging import InterceptHandler
from loguru import logger
from starlette.config import Config
from starlette.datastructures import CommaSeparatedStrings, Secret
config = Config(".env")
API_PREFIX = "/api"
VERSION = "{{cookiecutter.version}}"
DEBUG: bool = config("DEBUG", cast=bool, default=False)
MAX_CONNECTIONS_COUNT: int = config("MAX_CONNECTIONS_COUNT", cast=int, default=10)
MIN_CONNECTIONS_COUNT: int = config("MIN_CONNECTIONS_COUNT", cast=int, default=10)
SECRET_KEY: Secret = config("SECRET_KEY", cast=Secret, default="")
PROJECT_NAME: str = config("PROJECT_NAME", default="{{cookiecutter.project_name}}")
# logging configuration
LOGGING_LEVEL = logging.DEBUG if DEBUG else logging.INFO
logging.basicConfig(
handlers=[InterceptHandler(level=LOGGING_LEVEL)], level=LOGGING_LEVEL
)
logger.configure(handlers=[{"sink": sys.stderr, "level": LOGGING_LEVEL}])
MODEL_PATH = config("MODEL_PATH", default="{{cookiecutter.machine_learn_model_path}}")
MODEL_NAME = config("MODEL_NAME", default="{{cookiecutter.machine_learn_model_name}}")
|
1644884
|
import numpy as np
# Accessing 1-D
arr = np.array([1, 2, 3, 4])
print(arr[0])
print(arr[2]+arr[3])
# Accessing 2-D
arr2 = np.array([[1,2,3,4,5], [6,7,8,9,10]])
print(arr2[0,1])
print(arr2[1, 3])
# Accessing 3-D
arr3 = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])
print(arr3[0, 1, 0])
print(arr3[1, 0, 2])
|
1644885
|
from fastapi import APIRouter
router = APIRouter()
@router.get('/', tags=['skipper'])
def skipper():
return 'Skipper API is running'
|
1644931
|
import os
import json
from ipaddress import IPv4Network
def is_private(string):
""" Check if a network address is private or public """
result = False
result = IPv4Network(string, strict=False).is_private
return result
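# Hedged usage sketch (illustrative, not part of the original module):
#   is_private("192.168.1.0/24")  # -> True  (RFC 1918 private range)
#   is_private("8.8.8.0/24")      # -> False (public range)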
""" PLUGINS """
def get_plugin_info(plugin_path):
infos = {
"version": "Unknown"
}
try:
with open(plugin_path + '/plugin.json', 'r') as info_file:
infos = json.load(info_file)
except:
pass
return infos
def list_plugins(path):
""" list all plugins """
plugins = []
available_path = path + '/available/'
for root, dirs, files in os.walk(available_path):
for filename in dirs:
plugin_path = available_path + filename
plugin_info = get_plugin_info(plugin_path)
plugin = {
"path": plugin_path,
}
for k,v in plugin_info.items():
plugin[k] = v
plugins.append(plugin)
return plugins
|
1645000
|
from django.urls import path
from .views import HomePageView, InboxView, CreateTaskView, UpdateTask, TaskDetail, DeleteTask,TodayView, CompletedTasksView, TaskReorder
urlpatterns = [
path('', HomePageView.as_view(), name='home'),
path('inbox/', InboxView.as_view(), name='inbox'),
path('add_task/', CreateTaskView.as_view(), name='create_task'),
path('completed_tasks/', CompletedTasksView.as_view(), name='completed_tasks'),
path('update_task/<slug:slug>/', UpdateTask, name='update_task'),
path('task_detail/<slug:slug>/', TaskDetail, name='task_detail'),
path('delete_task/<slug:slug>/', DeleteTask, name='delete_task'),
path('today/', TodayView.as_view(), name='today'),
path('task-reorder/', TaskReorder.as_view(), name='task-reorder'),
]
handler404 = 'pages.views.page_not_found'
handler500 = 'pages.views.server_error'
|
1645074
|
import os
from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.events import EVENT_JOB_EXECUTED, EVENT_JOB_MISSED, EVENT_JOB_ERROR
import logging
"""时刻表的父类"""
class BaseScheduler(object):
def __init__(self, name: str):
self.sche = BlockingScheduler()
self.sche.add_listener(self.job_listener, EVENT_JOB_ERROR | EVENT_JOB_MISSED | EVENT_JOB_EXECUTED)
self.sche._logger = logging
self.job_logger = logging.getLogger(name=name)
file_handler = logging.FileHandler(filename=os.path.abspath(os.path.dirname(__file__)) + os.sep + "log" + os.sep + name + ".log", mode="a",
encoding="utf-8")
file_handler.setFormatter(
logging.Formatter(fmt="%(asctime)s - %(levelname)s: %(message)s", datefmt='%Y-%m-%d %H:%M:%S'))
file_handler.setLevel(logging.INFO)
self.job_logger.addHandler(file_handler)
logging.basicConfig(format="%(asctime)s - %(levelname)s: %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=logging.INFO,
filemode='a')
def job_listener(self, Event):
job = self.sche.get_job(Event.job_id)
if not Event.exception:
self.job_logger.info("jobname=%s|jobtrigger=%s|jobtime=%s|retval=%s", job.name, job.trigger,
Event.scheduled_run_time, Event.retval)
else:
self.job_logger.error("jobname=%s|jobtrigger=%s|errcode=%s|exception=[%s]|traceback=[%s]|scheduled_time=%s",
job.name,
job.trigger, Event.code,
Event.exception, Event.traceback, Event.scheduled_run_time)
|
1645096
|
import flee.flee as flee
import datamanager.handle_refugee_data as handle_refugee_data
import numpy as np
import outputanalysis.analysis as a
"""
Generation 1 code. Incorporates only distance, travel always takes one day.
"""
if __name__ == "__main__":
print("Testing basic data handling and simulation kernel.")
flee.SimulationSettings.MinMoveSpeed=5000.0
flee.SimulationSettings.MaxMoveSpeed=5000.0
flee.SimulationSettings.MaxWalkSpeed=5000.0
e = flee.Ecosystem()
l1 = e.addLocation("A", movechance=1.0)
l2 = e.addLocation("B", movechance=1.0)
l3 = e.addLocation("C1", movechance=1.0)
l4 = e.addLocation("C2", movechance=1.0)
l5 = e.addLocation("D1", movechance=1.0)
l6 = e.addLocation("D2", movechance=1.0)
l7 = e.addLocation("D3", movechance=1.0)
e.linkUp("A","B","10.0")
e.linkUp("A","C1","10.0")
e.linkUp("A","D1","10.0")
e.linkUp("C1","C2","10.0")
e.linkUp("D1","D2","10.0")
e.linkUp("D2","D3","10.0")
e.addAgent(location=l1)
print("Test successful!")
|
1645100
|
def _sign(number):
if number > 0.0:
return 1
elif number < 0.0:
return -1
else:
return 0
def compute_kendall_tau(a, b):
'''
Kendall Tau is a metric to measure the ordinal association between two measured quantities.
Refer to https://en.wikipedia.org/wiki/Kendall_rank_correlation_coefficient
'''
assert len(a) == len(b), "Sequence a and b should have the same length while computing kendall tau."
length = len(a)
count = 0
total = 0
for i in range(length-1):
for j in range(i+1, length):
count += _sign(a[i] - a[j]) * _sign(b[i] - b[j])
total += 1
tau = count / total
return tau
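# Hedged usage sketch (illustrative addition, not part of the original module):
if __name__ == "__main__":
    # identical orderings give tau = 1.0; fully reversed orderings give tau = -1.0
    assert compute_kendall_tau([1, 2, 3], [10, 20, 30]) == 1.0
    assert compute_kendall_tau([1, 2, 3], [30, 20, 10]) == -1.0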
|
1645106
|
from ghost_log import log
from ghost_tools import GCallException, get_provisioners_config
from .provisioner import FeaturesProvisioner
from .provisioner_ansible import FeaturesProvisionerAnsible
from .provisioner_salt import FeaturesProvisionerSalt
def get_provisioners(config, log_file, unique, job_options, app):
"""
Factory function to instantiate the right implementation Class
:param config: YAML Config object
:param log_file: Log file stream
:param unique: Unique ID
:param job_options: Job parameters
:param app: Ghost Application
:return: a FeaturesProvisioner sub-class object list
"""
ret = []
provisioners_config = get_provisioners_config(config)
    # Use skip_provisioner_bootstrap default value if job options are not set.
job_options = job_options or [config.get('skip_provisioner_bootstrap', True)]
for key, provisioner_config in provisioners_config.iteritems():
if key == 'salt':
ret.append(FeaturesProvisionerSalt(log_file, unique, job_options, provisioner_config, config))
elif key == 'ansible':
ret.append(FeaturesProvisionerAnsible(log_file, unique, app['build_infos']['ssh_username'],
provisioner_config, config))
else:
log("Invalid provisioner type. Please check your yaml 'config.yml' file", log_file)
raise GCallException("Invalid features provisioner type")
return ret
|
1645128
|
import pickle
import dnnlib
import dnnlib.tflib as tflib
# pretrained network snapshot urls
gdrive_urls = {
# will be added soon
}
def get_path_or_url(path_or_gdrive_path):
return gdrive_urls.get(path_or_gdrive_path, path_or_gdrive_path)
_cached_networks = dict()
def load_networks(path_or_gdrive_path):
path_or_url = get_path_or_url(path_or_gdrive_path)
if path_or_url in _cached_networks:
return _cached_networks[path_or_url]
if dnnlib.util.is_url(path_or_url):
stream = dnnlib.util.open_url(path_or_url, cache_dir = ".slater-cache")
else:
stream = open(path_or_url, "rb")
tflib.init_tf()
with stream:
G, D, Gs = pickle.load(stream, encoding = "latin1")[:3]
_cached_networks[path_or_url] = G, D, Gs
return G, D, Gs
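# Hedged usage sketch (illustrative, not part of the original module; the path is hypothetical):
#   G, D, Gs = load_networks("results/network-snapshot-000000.pkl")
#   # Gs is the long-term-average generator; standard StyleGAN usage would be e.g.
#   # images = Gs.run(latents, None, truncation_psi=0.7, randomize_noise=True)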
|
1645136
|
import re
def parse_revision_string(r):
# convert "$Revision: x.y.z $" => "x.y.z"
try:
        return re.search(r'Revision: (.*?)\s*\$', r).group(1)
except:
return "0"
# PickleRevisionControl class. Inherit me.
class PickleRevisionControl(object):
# Important! This line must be put into all subclasses
__revision__ = parse_revision_string("$Revision$")
def __init__(self):
self.__revision__ = self.__revision__
def is_obsolete(self):
return self.__revision__ < self.__class__.__revision__
def __setstate__(self, state):
self.__dict__.update(state)
if self.is_obsolete():
self.upgrade()
self.__revision__ = self.__class__.__revision__
def upgrade(self):
# do your upgrading here.
pass
#-----------------------------------------------------------------
# Example usage scenario
#-----------------------------------------------------------------
# Suppose you have a class "Widget" in Widget.py. One day, after a
# little misunderstanding with NASA, you get a memo declaring that
# all widget measurements will henceforth be in metric units.
#
# Changes to the widget source code go quickly, and you commit Widget.py
# version 1.5 to CVS. Unfortunately, all of your pickled version 1.4
# widgets still use imperial units for everything. But you've been
# using the PickleRevisionControl class all along, so you can fix
# things by adding a few lines to Widget.upgrade()
#
#
# class Widget(PickleRevisionControl):
# __revision__ = parse_revision_string("$Revision: 1.5 $")
# def __init__(self):
# PickleRevisionControl.__init__(self)
# ...
#
# def upgrade(self):
# if self.__revision__ < '1.5':
# self.radius *= 0.0254 # inches to meters
# self.height *= 0.0254
#
# ...
#
# Now your old widgets will be magically updated when you unpickle them!
|
1645140
|
from MS import *
from MA import *
from MSV import *
import datetime
def sweep_sv_jumps(parameter_set_manager, dataset_name, run_id, name, desc, sequencer_ids, pack,
out_file=None, silent=False):
#parameter_set_manager.by_name("Number of Threads").set(2)
#parameter_set_manager.by_name("Use all Processor Cores").set(False)
#assert parameter_set_manager.get_num_threads() == 2
AbstractFilter.silent = silent
# creates scope so that destructor of call inserter is triggered (commits insert transaction)
def graph(pool):
analyze = AnalyzeRuntimes()
if not silent:
print("\tsetting up graph...")
pack_pledge = Pledge()
pack_pledge.set(pack)
pool_pledge = Pledge()
pool_pledge.set(pool)
assert len(sequencer_ids) == 1
section_fac = GenomeSectionFactory(parameter_set_manager, pack)
lock_module = Lock(parameter_set_manager)
sweep1 = CompleteBipartiteSubgraphSweep(parameter_set_manager, run_id)
sweep2 = ExactCompleteBipartiteSubgraphSweep(parameter_set_manager)
filter1 = FilterLowSupportShortCalls(parameter_set_manager)
filter2 = FilterFuzzyCalls(parameter_set_manager)
filter5 = FilterDiagonalLineCalls(parameter_set_manager)
filter6 = FilterLowScoreCalls(parameter_set_manager)
call_ambiguity = ComputeCallAmbiguity(parameter_set_manager)
get_call_inserter = GetCallVectorInserter(parameter_set_manager, DbConn(dataset_name), name, desc, run_id)
call_inserter_module = CallVectorInserterModule(parameter_set_manager)
res = VectorPledge()
inserter_vec = []
sections_pledge = promise_me(section_fac) # @note this cannot be in the loop (synchronization!)
# graph for single reads
for _ in parallel_graph(parameter_set_manager.get_num_threads()):
section_pledge = promise_me(lock_module, sections_pledge)
sweep1_pledge = promise_me(sweep1, pool_pledge, section_pledge, pack_pledge)
#analyze.register("CompleteBipartiteSubgraphSweep", sweep1_pledge) this would cause duplicate time
analyze.register("CompleteBipartiteSubgraphSweep::init", sweep1, True,
lambda x: x.cpp_module.time_init / parameter_set_manager.get_num_threads())
analyze.register("CompleteBipartiteSubgraphSweep::outer_while", sweep1, True,
lambda x: (x.cpp_module.time_complete_while - x.cpp_module.time_inner_while) / \
parameter_set_manager.get_num_threads())
analyze.register("CompleteBipartiteSubgraphSweep::inner_while", sweep1, True,
lambda x: x.cpp_module.time_inner_while / parameter_set_manager.get_num_threads())
sweep2_pledge = promise_me(sweep2, sweep1_pledge, pack_pledge)
analyze.register("ExactCompleteBipartiteSubgraphSweep", sweep2_pledge, True)
#### FILTERS ####
#filter1_pledge = promise_me(filter1, sweep2_pledge)
#analyze.register("FilterLowSupportShortCalls", filter1_pledge, True)
#filter2_pledge = promise_me(filter2, filter1_pledge)
#analyze.register("FilterFuzzyCalls", filter2_pledge, True)
#filter3 = ConnectorPatternFilter(parameter_set_manager, sv_db)
#filter3_pledge = promise_me(filter3, filter2_pledge, pack_pledge) # this filter was off already
#analyze.register("[4] ConnectorPatternFilter", filter3_pledge)
#filter3_pledge = promise_me(filter5, filter2_pledge)
#analyze.register("FilterDiagonalLineCalls", filter3_pledge, True)
call_ambiguity_pledge = promise_me(call_ambiguity, sweep2_pledge, pack_pledge)
analyze.register("ComputeCallAmbiguity", call_ambiguity_pledge, True)
#filter6_pledge = promise_me(filter6, call_ambiguity_pledge)
#analyze.register("FilterLowScoreCalls", filter6_pledge, True)
call_inserter = promise_me(get_call_inserter, pool_pledge)
inserter_vec.append(call_inserter)
# filter6_pledge
write_to_db_pledge = promise_me(call_inserter_module, call_inserter, pool_pledge, call_ambiguity_pledge)
analyze.register("CallInserterModule", write_to_db_pledge, True)
unlock_pledge = promise_me(UnLock(parameter_set_manager, section_pledge), write_to_db_pledge)
analyze.register("UnLock", unlock_pledge, True)
res.append(unlock_pledge)
# drain all sources
if not silent:
print("\texecuting graph...")
        res.simultaneous_get(parameter_set_manager.get_num_threads())
for inserter in inserter_vec:
inserter.get().close(pool_pledge.get()) # @todo for some reason the destructor does not trigger automatically :(
if not silent:
print("\tdone executing")
analyze.analyze(out_file)
return get_call_inserter.cpp_module.id
conn = DbConn(dataset_name)
SvCallTable(conn).drop_indices(0) # number does nothing at the moment
pool = PoolContainer(parameter_set_manager.get_num_threads() + 1, dataset_name)
sv_caller_run_id = graph(pool)
if not silent:
print("done sweeping")
analyze = AnalyzeRuntimes()
call_table = SvCallTable(conn)
if not silent:
print("num calls:", call_table.num_calls(sv_caller_run_id, 0))
print("computing index...")
start = datetime.datetime.now()
call_table.gen_indices(sv_caller_run_id)
end = datetime.datetime.now()
delta = end - start
analyze.register("compute_index", delta.total_seconds(), False, lambda x: x)
if not silent:
print("done computing index")
#print("high score filter...")
#start = datetime.datetime.now()
#call_table.filter_calls_with_high_score(sv_caller_run_id, 0.1)
#end = datetime.datetime.now()
#delta = end - start
#analyze.register("high_score_filter", delta.total_seconds(), False, lambda x: x)
    #if not silent:
    #    print("done high score filter")
if not silent:
print("merging dummy calls...")
start = datetime.datetime.now()
num_merged = merge_dummy_calls(parameter_set_manager, pool, sv_caller_run_id, 110, 5.0)
end = datetime.datetime.now()
delta = end - start
analyze.register("merging_dummy_calls", delta.total_seconds(), False, lambda x: x)
    #print("overlapping...")
#start = datetime.datetime.now()
#num_combined = 0 #combine_overlapping_calls(parameter_set_manager, pool, sv_caller_run_id)
#end = datetime.datetime.now()
#delta = end - start
#analyze.register("combine_overlapping_calls", delta.total_seconds(), False, lambda x: x)
if not silent:
print("done merging dummy calls; combined", num_merged, "calls")
analyze.analyze(out_file)
    if out_file is not None:
out_file.write("run_id is " + str(sv_caller_run_id) + "\n")
return sv_caller_run_id
|
1645163
|
import random
import logging
import numpy as np
from state_buffer import StateBuffer
logger = logging.getLogger(__name__)
class Agent:
def __init__(self, environment, replay_memory, deep_q_network, args):
self.env = environment
self.mem = replay_memory
self.net = deep_q_network
self.buf = StateBuffer(args)
self.num_actions = self.env.numActions()
self.random_starts = args.random_starts
self.history_length = args.history_length
self.exploration_rate_start = args.exploration_rate_start
self.exploration_rate_end = args.exploration_rate_end
self.exploration_decay_steps = args.exploration_decay_steps
self.exploration_rate_test = args.exploration_rate_test
self.total_train_steps = args.start_epoch * args.train_steps
self.train_frequency = args.train_frequency
self.train_repeat = args.train_repeat
self.target_steps = args.target_steps
self.callback = None
def _restartRandom(self):
self.env.restart()
# perform random number of dummy actions to produce more stochastic games
        for i in range(random.randint(self.history_length, self.random_starts) + 1):
reward = self.env.act(0)
terminal = self.env.isTerminal()
if terminal:
self.env.restart()
screen = self.env.getScreen()
# add dummy states to buffer
self.buf.add(screen)
def _explorationRate(self):
# calculate decaying exploration rate
if self.total_train_steps < self.exploration_decay_steps:
return self.exploration_rate_start - self.total_train_steps * (self.exploration_rate_start - self.exploration_rate_end) / self.exploration_decay_steps
else:
return self.exploration_rate_end
def step(self, exploration_rate):
# exploration rate determines the probability of random moves
if random.random() < exploration_rate:
action = random.randrange(self.num_actions)
logger.debug("Random action = %d" % action)
else:
# otherwise choose action with highest Q-value
state = self.buf.getStateMinibatch()
# for convenience getStateMinibatch() returns minibatch
# where first item is the current state
qvalues = self.net.predict(state)
assert len(qvalues[0]) == self.num_actions
# choose highest Q-value of first state
action = np.argmax(qvalues[0])
logger.debug("Predicted action = %d" % action)
# perform the action
reward = self.env.act(action)
screen = self.env.getScreen()
terminal = self.env.isTerminal()
# print reward
        if reward != 0:
logger.debug("Reward: %d" % reward)
# add screen to buffer
self.buf.add(screen)
# restart the game if over
if terminal:
logger.debug("Terminal state, restarting")
self._restartRandom()
# call callback to record statistics
if self.callback:
self.callback.on_step(action, reward, terminal, screen, exploration_rate)
return action, reward, screen, terminal
def play_random(self, random_steps):
#call env.restart first so that env.reset is called before step.
self.env.restart()
# play given number of steps
        for i in range(random_steps):
# use exploration rate 1 = completely random
action, reward, screen, terminal = self.step(1)
self.mem.add(action, reward, screen, terminal)
    def train(self, train_steps, epoch=0):
        # do not restart here, continue from testing
        #self._restartRandom()
        # play given number of steps
        for i in range(train_steps):
# perform game step
action, reward, screen, terminal = self.step(self._explorationRate())
self.mem.add(action, reward, screen, terminal)
# Update target network every target_steps steps
if self.target_steps and i % self.target_steps == 0:
self.net.update_target_network()
# train after every train_frequency steps
if self.mem.count > self.mem.batch_size and i % self.train_frequency == 0:
# train for train_repeat times
                for j in range(self.train_repeat):
# sample minibatch
minibatch = self.mem.getMinibatch()
# train the network
self.net.train(minibatch, epoch)
# increase number of training steps for epsilon decay
self.total_train_steps += 1
    def test(self, test_steps, epoch=0):
        # just make sure there are history_length screens to form a state
        self._restartRandom()
        # play given number of steps
        for i in range(test_steps):
# perform game step
self.step(self.exploration_rate_test)
def play(self, num_games):
        # just make sure there are history_length screens to form a state
        self._restartRandom()
        for i in range(num_games):
# play until terminal state
terminal = False
while not terminal:
action, reward, screen, terminal = self.step(self.exploration_rate_test)
# add experiences to replay memory for visualization
self.mem.add(action, reward, screen, terminal)
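# Typical driver loop (a hypothetical sketch; the environment, replay
# memory, network and args objects are assumed to follow the interfaces
# used above, and the args attribute names here are illustrative):
#
#   agent = Agent(env, mem, net, args)
#   agent.play_random(args.random_steps)       # pre-fill replay memory
#   for epoch in range(args.epochs):
#       agent.train(args.train_steps, epoch)   # epsilon-greedy training
#       agent.test(args.test_steps, epoch)     # evaluation with test epsilon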
|
1645174
|
import sys
from setuptools import setup, find_packages, Extension
import numpy as np
if '--use-cython' in sys.argv:
USE_CYTHON = True
sys.argv.remove('--use-cython')
else:
USE_CYTHON = False
ext = '.pyx' if USE_CYTHON else '.c'
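# By default the shipped C sources are compiled; --use-cython regenerates
# them from the .pyx files first. Hypothetical invocation:
#   python setup.py build_ext --inplace --use-cython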
# cppext = '' if USE_CYTHON else 'pp'
extensions = [
Extension(
"deepgraph._triu_indices",
["deepgraph/_triu_indices" + ext],
include_dirs=[np.get_include()],
# language='c++',
),
Extension(
"deepgraph._find_selected_indices",
["deepgraph/_find_selected_indices" + ext],
include_dirs=[np.get_include()]
)
]
if USE_CYTHON:
from Cython.Build import cythonize
extensions = cythonize(
extensions,
compiler_directives={'language_level': sys.version_info[0]}
)
setup(
name="DeepGraph",
version='0.2.3',
packages=find_packages(),
author="<NAME>",
author_email="<EMAIL>",
url='https://github.com/deepgraph/deepgraph/',
download_url='https://github.com/deepgraph/deepgraph/tarball/v0.2.3',
description=("Analyze Data with Pandas-based Networks."),
long_description=open('README.rst').read(),
install_requires=['numpy>=1.6',
'pandas>=0.17.0'],
license="BSD",
classifiers=[
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Cython',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Physics'],
ext_modules=extensions,
package_data={'deepgraph': ['../tests/*.py',
'../LICENSE.txt',
'./*.pyx',
'./*.c',
'./*.cpp',
]},
)
|
1645195
|
import numpy as np
class Agent(object):
def __init__(self, k, policy, init_exploration, prior=0, gamma=None):
self.policy = policy
self.k = k
self.prior = prior
self.gamma = gamma
        self._value_estimates = prior * np.ones(self.k)  # estimated mean reward per arm
self.action_attempts = np.zeros(self.k)
self.t = 0
self.last_action = None
self.init_exploration = init_exploration
def reset(self):
"""
Resets the agent's memory to an initial state.
"""
self._value_estimates[:] = self.prior * np.ones(self.k)
self.action_attempts[:] = np.zeros(self.k)
self.last_action = None
self.t = 0
def choose(self):
if self.t < self.init_exploration:
action = np.random.randint(self.k)
else:
action = self.policy.choose(self)
self.last_action = action
return action
    def observe(self, reward):
        # incremental mean update: Q += g * (reward - Q); g = 1/n gives the
        # sample average, a fixed gamma gives exponential recency weighting
self.action_attempts[self.last_action] += 1
if self.gamma is None:
g = 1 / self.action_attempts[self.last_action]
else:
g = self.gamma
q = self._value_estimates[self.last_action]
self._value_estimates[self.last_action] += g * (reward - q)
self.t += 1
@property
def value_estimates(self):
return self._value_estimates
class ContextualAgent(Agent):
"""
( linUCB disjoint model)
"""
def __init__(self, k, d, policy, init_exploration, prior=0, gamma=None):
super().__init__(k, policy, init_exploration, prior, gamma)
self.d = d
self.memory = {action: {'A': np.identity(self.d),
'b': np.zeros((self.d, 1))} for action in range(self.k)}
self.states = np.array([])
self.reset()
def reset(self):
self._value_estimates[:] = self.prior * np.ones(self.k)
self.action_attempts[:] = 0
self.last_action = None
self.t = 0
self.memory = {action: {'A': np.identity(self.d),
'b': np.zeros((self.d, 1))} for action in range(self.k)}
self.states = np.array([])
# FIXME
def get_state(self, bandit):
self.states = bandit.states
for action, memory in self.memory.items():
A = memory['A']
b = memory['b']
            A_inv = np.linalg.inv(A)
            theta_hat = np.dot(A_inv, b)  # ridge-regression coefficient estimate
            x_t = self.states[action]
            self._value_estimates[action] = np.dot(x_t.T, theta_hat)  # predicted payoff
def observe(self, reward):
self.action_attempts[self.last_action] += 1
self.memory[self.last_action]['A'] += np.outer(self.states[self.last_action],
self.states[self.last_action])
self.memory[self.last_action]['b'] += reward * self.states[self.last_action].reshape((self.d, 1))
self.t += 1
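# Minimal usage sketch (hypothetical; the policy and bandit objects are
# assumed to expose the interfaces used above):
#   agent = ContextualAgent(k=3, d=5, policy=policy, init_exploration=10)
#   agent.get_state(bandit)   # refresh value estimates from the context
#   action = agent.choose()
#   agent.observe(reward)     # ridge-style update of A and b for that arm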
|
1645197
|
from trex_stl_lib.api import *
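# Note: the ${...} tokens below are template placeholders; they must be
# substituted with concrete addresses/ports before this profile is loaded.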
class STLS1(object):
    def create_stream(self):
base_pkt = Ether()/IP(dst="${DST_ADDRESS}")/UDP(dport=${DST_PORT})
size = 1440
pad = max(0, size - len(base_pkt) - 4) * 'x'
vm = STLVM()
vm.tuple_var(name="tuple", ip_min="${SRC_ADDRESS}", ip_max="${SRC_ADDRESS}",
port_min=${SRC_PORT}, port_max=${SRC_PORT2}, limit_flows=10000)
vm.write(fv_name="tuple.ip", pkt_offset="IP.src")
vm.fix_chksum()
vm.write(fv_name="tuple.port", pkt_offset="UDP.sport")
pkt = STLPktBuilder(pkt=base_pkt/pad, vm=vm)
return STLStream(packet=pkt, mode=STLTXCont())
    def get_streams(self, direction=0, **kwargs):
return [self.create_stream()]
# dynamic load - used for trex console or simulator
def register():
return STLS1()
# start -f trex.py -m 10mbps -p 0
|
1645202
|
import pandas as pd
import numpy as np
from trading_gym.envs.portfolio_gym.portfolio_gym import PortfolioTradingGym
np.random.seed(64)
def create_mock_data(order_book_ids, start_date="2019-01-01", end_date="2022-01-02", number_feature=3):
trading_dates = pd.date_range(start=start_date, end=end_date, freq="D")
number = len(trading_dates) * len(order_book_ids)
multi_index = pd.MultiIndex.from_product([order_book_ids, trading_dates], names=["order_book_id", "datetime"])
mock_data = pd.DataFrame(np.random.randn(number, number_feature + 1), index=multi_index,
columns=["feature1", "feature2", "feature3", "returns"])
mock_data["returns"] = mock_data["returns"] / 100 # 当期收益率
mock_data["returns"] = round(mock_data["returns"], 4)
return mock_data
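# Each test below replays a fixed list of orders (target weights) through
# the gym and checks the reported holdings h_t against precomputed values,
# asserted to one decimal place.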
def test_single_array_R():
order_book_ids = ["000001.XSHE"]
mock_data = create_mock_data(order_book_ids=order_book_ids, start_date="2019-01-01", end_date="2019-01-14")
sequence_window = 1
env = PortfolioTradingGym(data_df=mock_data, sequence_window=sequence_window, add_cash=True)
state = env.reset()
h_t_list = []
orderlist = [0.5, 0.8, 0.4, 1.0, 0.0, -0.5, -0.7, -0.4, -1.0, 0.3]
for i in range(len(orderlist)):
next_state, reward, done, info = env.step([orderlist[i], 0])
h_t_list.append(info["h_t"])
'''
000001.XSHE 2019-01-01 0.0219
2019-01-02 -0.0103
2019-01-03 0.0175
2019-01-04 -0.0017
2019-01-05 -0.0039
2019-01-06 0.0059
2019-01-07 -0.0049
2019-01-08 -0.0003
2019-01-09 -0.0136
2019-01-10 0.0068
2019-01-11 0.0077
2019-01-12 0.0136
2019-01-13 -0.0022
2019-01-14 -0.0012
'''
expected_h_t = ([494850, 500050], [809848.6, 198999.898], [402853.3822, 605369.6309],
[1004290.9, 0], [0, 1004391.359], [-499734.9212, 1506737.699], [-704690.4898, 1712075.95],
[-397473.9834, 1410480.594], [-1019895.146, 2026216.001], [304220.9, 704495.1425])
np.testing.assert_almost_equal(h_t_list, expected_h_t, decimal=1)
def test_single_number_R():
order_book_ids = ["000001.XSHE"]
mock_data = create_mock_data(order_book_ids=order_book_ids, start_date="2019-01-01", end_date="2019-01-14")
sequence_window = 1
env = PortfolioTradingGym(data_df=mock_data, sequence_window=sequence_window, add_cash=True)
state = env.reset()
h_t_list = []
orderlist = [0.5, 0.8, 0.4, 1.0, 0.0, -0.5, -0.7, -0.4, -1.0, 0.3]
for i in range(len(orderlist)):
next_state, reward, done, info = env.step(orderlist[i])
h_t_list.append(info["h_t"])
expected_h_t = ([494850, 500050], [809848.6, 198999.898], [402853.3822, 605369.6309],
[1004290.9, 0], [0, 1004391.359], [-499734.9212, 1506737.699], [-704690.4898, 1712075.95],
[-397473.9834, 1410480.594], [-1019895.146, 2026216.001], [304220.9, 704495.1425])
np.testing.assert_almost_equal(h_t_list, expected_h_t, decimal=1)
def test_single_cashfalse_R():
order_book_ids = ["000001.XSHE"]
mock_data = create_mock_data(order_book_ids=order_book_ids, start_date="2019-01-01", end_date="2019-01-14")
sequence_window = 1
env = PortfolioTradingGym(data_df=mock_data, sequence_window=sequence_window, add_cash=False)
state = env.reset()
h_t_list = []
orderlist = [0.5, 0.8, 0.4, 1.0, 0.0, -0.5, -0.7, -0.4, -1.0, 0.3]
for i in range(len(orderlist)):
next_state, reward, done, info = env.step(orderlist[i])
h_t_list.append(info["h_t"])
expected_h_t = ([494850], [809848.6], [402853.3822],[1004290.9], [0], [-499734.9212], [-704690.4898],
[-397473.9834], [-1019895.146], [304220.9])
np.testing.assert_almost_equal(h_t_list, expected_h_t, decimal=1)
if __name__ == "__main__":
#test_single_array_R()
test_single_number_R()
#test_single_cashfalse_R()
'''
Results here are the same as those in test_single_stock.py.
'''
|
1645219
|
import math as m
import pygame, random, sys
from pygame.locals import *
pygame.font.init()
white=(255, 255, 255)
green=(51, 255, 51)
red=(255, 0, 0)
yellow=(255, 255, 51)
blue=(0, 0, 255)
black=(0, 0, 0)
pink=(255, 0, 255)
xmin=0
xmax=1
ymin=0
ymax=1
radius=0.12
t=0
N=4 # No of hard disks inside the container
DISPLAYSURF=pygame.display.set_mode((500, 500))
pygame.display.set_caption('direct-disks')
DISPLAYSURF.fill(white)
clock=pygame.time.Clock()
myfont=pygame.font.SysFont('Comic Sans MS', 30)
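# Direct sampling of N hard disks: draw all centres uniformly in the box
# and accept the configuration only if every pair of centres is at least
# one diameter (2*radius) apart; otherwise reject and start over.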
def prepare():
pos=[[random.uniform(radius, 1.-radius), random.uniform(radius, 1.-radius)]]
for l in range(1, N):
p=[random.uniform(radius, 1.-radius), random.uniform(radius, 1.-radius)]
if min([m.sqrt((p[0]-P[0])**2+(p[1]-P[1])**2) for P in pos])>=2*radius: pos+=[p, ]
if len(pos)==N: return pos
else: return prepare()
prev=prepare()  # prepare the initial state using the same rejection sampling
while True:
for event in pygame.event.get():
if event.type==QUIT:
pygame.image.save(DISPLAYSURF, 'direct-disks.png')
pygame.quit()
sys.exit()
DISPLAYSURF.fill(white)
textsurface=myfont.render('Steps = '+str(t), False, black)
DISPLAYSURF.blit(textsurface, (120, 10))
pos=[[random.uniform(radius, 1.-radius), random.uniform(radius, 1.-radius)]]
for l in range(1, N):
p=[random.uniform(radius, 1.-radius), random.uniform(radius, 1.-radius)]
if min([m.sqrt((p[0]-P[0])**2+(p[1]-P[1])**2) for P in pos])>=2*radius: pos+=[p, ]
        print(pos)
if len(pos)==N:
pygame.draw.circle(DISPLAYSURF, red, (int(pos[0][0]*500), int(pos[0][1]*500)), int(radius*500), 0)
pygame.draw.circle(DISPLAYSURF, blue, (int(pos[1][0]*500), int(pos[1][1]*500)), int(radius*500), 0)
pygame.draw.circle(DISPLAYSURF, yellow, (int(pos[2][0]*500), int(pos[2][1]*500)), int(radius*500), 0)
pygame.draw.circle(DISPLAYSURF, green, (int(pos[3][0]*500), int(pos[3][1]*500)), int(radius*500), 0)
prev=pos[:]
t+=1
else:
pygame.draw.circle(DISPLAYSURF, red, (int(prev[0][0]*500), int(prev[0][1]*500)), int(radius*500), 0)
pygame.draw.circle(DISPLAYSURF, blue, (int(prev[1][0]*500), int(prev[1][1]*500)), int(radius*500), 0)
pygame.draw.circle(DISPLAYSURF, yellow, (int(prev[2][0]*500), int(prev[2][1]*500)), int(radius*500), 0)
pygame.draw.circle(DISPLAYSURF, green, (int(prev[3][0]*500), int(prev[3][1]*500)), int(radius*500), 0)
## clock.tick(40)
pygame.display.update()
|
1645226
|
from typing import List
class SolutionStrManipulation:
def expand(self, S: str) -> List[str]:
blocks = S.replace("{", " ").replace("}", " ").strip().split()
dict_list = [sorted(block.split(',')) for block in blocks]
results = []
def backtrack(curr, comb):
if curr == len(dict_list):
results.append("".join(comb))
return
for letter in dict_list[curr]:
comb[curr] = letter
backtrack(curr+1, comb)
comb = [""] * len(dict_list)
backtrack(0, comb)
return results
class Solution:
def expand(self, S: str) -> List[str]:
dict_list = []
curr = 0
while curr < len(S):
if S[curr] == "{":
curr += 1
start_bracket = curr
while S[curr] != "}":
curr += 1
options = S[start_bracket:curr].split(",")
dict_list.append(sorted(options))
else:
dict_list.append([S[curr]])
curr += 1
results = []
def backtrack(curr, comb):
if curr == len(dict_list):
results.append("".join(comb))
return
for letter in dict_list[curr]:
comb[curr] = letter
backtrack(curr+1, comb)
comb = [""] * len(dict_list)
backtrack(0, comb)
return results
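# Example (this appears to be LeetCode 1087, "Brace Expansion"):
#   Solution().expand("{a,b}c{d,e}f") -> ["acdf", "acef", "bcdf", "bcef"]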
|
1645257
|
import numpy as np
import torch
import pandas as pd
import os
import shutil
import argparse
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error as sk_MAE
from tabulate import tabulate
import random, time
import pickle  # needed by load_metrics below
def set_model_properties(crystal_property):
    if crystal_property in ['poisson-ratio','band-gap','absolute-energy','fermi-energy','formation-energy','new-property']:
        norm_action = None
        classification = None
    elif crystal_property == 'is_metal':
        norm_action = 'classification-1'
        classification = 1
    elif crystal_property == 'is_not_metal':
        norm_action = 'classification-0'
        classification = 1
    else:
        norm_action = 'log'
        classification = None
    return norm_action, classification
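# e.g. set_model_properties('band-gap') -> (None, None)
#      set_model_properties('is_metal') -> ('classification-1', 1)
#      any other property name          -> ('log', None)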
def torch_MAE(tensor1,tensor2):
return torch.mean(torch.abs(tensor1-tensor2))
def torch_accuracy(pred_tensor,true_tensor):
_,pred_tensor = torch.max(pred_tensor,dim=1)
correct = (pred_tensor==true_tensor).sum().float()
total = pred_tensor.size(0)
accuracy_ans = correct/total
return accuracy_ans
def output_training(metrics_obj,epoch,estop_val,extra='---'):
header_1, header_2 = 'MSE | e-stop','MAE | TIME'
if metrics_obj.c_property in ['is_metal','is_not_metal']:
header_1,header_2 = 'Cross_E | e-stop','Accuracy | TIME'
train_1,train_2 = metrics_obj.training_loss1[epoch],metrics_obj.training_loss2[epoch]
valid_1,valid_2 = metrics_obj.valid_loss1[epoch],metrics_obj.valid_loss2[epoch]
tab_val = [['TRAINING',f'{train_1:.4f}',f'{train_2:.4f}'],['VALIDATION',f'{valid_1:.4f}',f'{valid_2:.4f}'],['E-STOPPING',f'{estop_val}',f'{extra}']]
output = tabulate(tab_val,headers= [f'EPOCH # {epoch}',header_1,header_2],tablefmt='fancy_grid')
print(output)
return output
def load_metrics():
saved_metrics = pickle.load(open("MODELS/metrics_.pickle", "rb", -1))
return saved_metrics
|
1645306
|
import dash_bootstrap_components as dbc
from dash import html
from .util import make_subheading
nav_items = [
dbc.NavItem(dbc.NavLink("Active", active=True, href="#")),
dbc.NavItem(dbc.NavLink("A link", href="#")),
dbc.NavItem(dbc.NavLink("Another link", href="#")),
dbc.NavItem(dbc.NavLink("Disabled", disabled=True, href="#")),
dbc.DropdownMenu(
[dbc.DropdownMenuItem("Item 1"), dbc.DropdownMenuItem("Item 2")],
label="Dropdown",
nav=True,
),
]
nav1 = dbc.Col(
dbc.Nav(
nav_items,
),
md=6,
xs=12,
)
nav2 = dbc.Col(
dbc.Nav(nav_items, pills=True),
md=6,
xs=12,
)
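# nav1 uses the default link styling; nav2 renders the same items as pills.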
nav = html.Div(
[make_subheading("Nav", "nav"), dbc.Row([nav1, nav2])],
className="mb-4",
)
|
1645318
|
import numpy as np
import matplotlib.pyplot as plt
GENERATE_POINTS_DIST = './data/generatePoints_distance.txt'
GENERATE_POINTS = './data/generatePoints.txt'
r = np.random.RandomState(24)
o = r.randn(400, 2)
o[:, 0] += 2
o[:, 1] += 6
u = r.randn(400, 2)
u[:, 0] += 4
u[:, 1] -= 0.5
v = r.randn(400, 2)
v[:, 0] += 7
v[:, 1] -= 0.5
p = r.randn(400, 2)
q = r.randn(400, 2) + 3
# q[:, 0] += 3
# q[:, 1] += 9
s = r.randn(400, 2) + 6
t = np.concatenate((o, p, q, s, u, v), axis=0)
with open(GENERATE_POINTS, 'w', encoding='utf-8') as f:
for pos in range(len(t)):
cor = t[pos]
f.write(str(pos) + ' ' + str(cor[0]) + ' ' + str(cor[1]) + '\n')
d = lambda x, y: np.sqrt(np.power((x[0] - y[0]), 2) + np.power((x[1] - y[1]), 2))
with open(GENERATE_POINTS_DIST, 'w', encoding='utf-8') as f:
for i in range(len(t)):
for j in range(i + 1, len(t)):
distance = d(t[i], t[j])
f.write(str(i) + ' ' + str(j) + ' ' + str(distance) + '\n')
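# Writing all unordered pairs is O(n^2): 2400 points -> 2,878,800 lines.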
# Without labels
x, y = t[:, 0], t[:, 1]
plt.plot(x, y, 'ok', markersize=1, alpha=0.5)
# plt.axis([-3, 10, -3, 9])
plt.xlabel('x')
plt.ylabel('y')
plt.title('Generated Points Plot')
plt.savefig('./images/generatedPoints.png')
plt.close()
color = {0: 'c', 1: 'r', 2: 'g', 3: 'b', 4: 'm', 5: 'y'}
cluster = [o, p, q, s, u, v]
for i in range(len(cluster)):
cur = cluster[i]
x, y = cur[:, 0], cur[:, 1]
plt.scatter(x, y, s=1, c=color[i], alpha=0.7, label=i + 1)
plt.legend()
plt.xlabel('x')
plt.ylabel('y')
plt.title('Generated Points with Labels')
plt.savefig('./images/generatedColoredPoints.png')
plt.show()
|