| id | content |
|---|---|
11571333
|
from __future__ import absolute_import, division, print_function
import tensorflow as tf
import sys
sys.path.append("..")
from stg_node import *
from utils.learning import *
class InputTensorSummarizer(object):
def __init__(self, features, labels,
mode, hps,
node, robot_node,
neighbors_via_edge_type,
instance_connected_to_robot,
edge_state_combine_method,
edge_influence_combine_method):
self.hps = hps
TD = {} # tensor_dict
self.robot_traj = robot_traj = features[robot_node]
self.our_traj = our_traj = features[node]
self.extras = extras = features["extras"]
self.traj_lengths = features["traj_lengths"]
self.features = features
self.node = node
self.robot_node = robot_node
self.neighbors_via_edge_type = neighbors_via_edge_type
self.connected_edge_types = neighbors_via_edge_type.keys()
self.instance_connected_to_robot = instance_connected_to_robot
self.edge_state_combine_method = edge_state_combine_method
self.edge_influence_combine_method = edge_influence_combine_method
our_present = str(self.node) + "_present"
robot_present = str(self.robot_node) + "_present"
our_future = str(self.node) + "_future"
robot_future = str(self.robot_node) + "_future"
self.node_type_connects_to_robot = False
for edge_type in self.connected_edge_types:
if 'robot' in edge_type:
self.node_type_connects_to_robot = True
break
with tf.variable_scope(self.node.type, reuse=tf.AUTO_REUSE):
with tf.variable_scope("data_rearranging"):
if mode == tf.estimator.ModeKeys.TRAIN:
mhl, ph = self.hps.minimum_history_length, self.hps.prediction_horizon
self.bag_idx = bag_idx = features["bag_idx"]
self.prediction_timesteps = mhl - 1 + tf.mod(tf.random_uniform(self.traj_lengths.shape,
maxval=2**31-1,
dtype=tf.int32),
self.traj_lengths-mhl-ph+1)
TD[robot_present] = extract_subtensor_per_batch_element(robot_traj, self.prediction_timesteps) # [bs, state_dim]
TD[our_present] = extract_subtensor_per_batch_element(our_traj, self.prediction_timesteps) # [bs, state_dim]
TD["extras_present"] = extract_subtensor_per_batch_element(extras, self.prediction_timesteps) # [bs, extras_dim]
TD["bag_idx"] = extract_subtensor_per_batch_element(bag_idx, self.prediction_timesteps) # [bs, 1]
TD[robot_future] = tf.stack([extract_subtensor_per_batch_element(robot_traj, self.prediction_timesteps+i+1)
for i in range(self.hps.prediction_horizon)], axis=1) # [bs, ph, state_dim]
TD[our_future] = tf.stack([extract_subtensor_per_batch_element(labels, self.prediction_timesteps+i+1)
for i in range(self.hps.prediction_horizon)], axis=1) # [bs, ph, state_dim]
elif mode == tf.estimator.ModeKeys.EVAL:
self.bag_idx = bag_idx = features["bag_idx"]
TD[robot_present] = self.extract_ragged_subarray(robot_traj) # [nbs, state_dim]
TD[our_present] = self.extract_ragged_subarray(our_traj) # [nbs, state_dim]
TD["extras_present"] = self.extract_ragged_subarray(extras) # [nbs, extras_dim]
TD["bag_idx"] = self.extract_ragged_subarray(bag_idx) # [nbs, 1]
TD[robot_future] = tf.stack([self.extract_ragged_subarray(robot_traj, i+1)
for i in range(self.hps.prediction_horizon)], axis=1) # [nbs, ph, state_dim]
TD[our_future] = tf.stack([self.extract_ragged_subarray(labels, i+1)
for i in range(self.hps.prediction_horizon)], axis=1) # [nbs, ph, state_dim]
elif mode == tf.estimator.ModeKeys.PREDICT:
TD[robot_present] = self.extract_subarray_ends(robot_traj) # [bs, state_dim]
TD[our_present] = self.extract_subarray_ends(our_traj) # [bs, state_dim]
TD["extras_present"] = self.extract_subarray_ends(extras) # [bs, extras_dim]
TD[robot_future] = features[robot_future] # [bs, ph, state_dim]
our_prediction_present = tf.concat([TD[our_present][:,p:p+1]
for p in hps.pred_indices], axis=1) # [bs/nbs, pred_dim]
TD["joint_present"] = tf.concat([TD[robot_present], our_prediction_present], axis=1) # [bs/nbs, state_dim+pred_dim]
# Node History
TD["history_encoder"] = self.encode_node_history(mode)
batch_size = tf.shape(TD["history_encoder"])[0]
# Node Edges
TD["edge_encoders"] = [self.encode_edge(mode, edge_type, self.neighbors_via_edge_type[edge_type]) for edge_type in self.connected_edge_types] # List of [bs/nbs, enc_rnn_dim]
TD["total_edge_influence"] = self.encode_total_edge_influence(TD["edge_encoders"], batch_size, mode) # [bs/nbs, 4*enc_rnn_dim]
# Tiling for multiple samples
if mode == tf.estimator.ModeKeys.PREDICT:
# This tiling is done because:
# a) we must consider the prediction case where there are many candidate robot future actions,
# b) the edge and history encoders are all the same regardless of which candidate future robot action we're evaluating.
TD["joint_present"] = tf.tile(TD["joint_present"], [tf.shape(features[robot_future])[0], 1])
TD[robot_present] = tf.tile(TD[robot_present], [tf.shape(features[robot_future])[0], 1])
TD["history_encoder"] = tf.tile(TD["history_encoder"], [tf.shape(features[robot_future])[0], 1])
TD["total_edge_influence"] = tf.tile(TD["total_edge_influence"], [tf.shape(features[robot_future])[0], 1])
concat_list = list()
# Every node has an edge-influence encoder (which could just be zero).
concat_list.append(TD["total_edge_influence"]) # [bs/nbs, 4*enc_rnn_dim]
# Every node has a history encoder.
concat_list.append(TD["history_encoder"]) # [bs/nbs, enc_rnn_dim]
if self.instance_connected_to_robot:
TD[self.robot_node.type + "_robot_future_encoder"] = self.encode_node_future(TD[robot_present],
TD[robot_future],
mode,
self.robot_node.type + '_robot')
# [bs/nbs, 4*enc_rnn_dim]
concat_list.append(TD[self.robot_node.type + "_robot_future_encoder"])
else:
# Four times because we're trying to mimic a bi-directional RNN's output (which is c and h from both ends).
concat_list.append(tf.zeros([batch_size, 4*self.hps.enc_rnn_dim_future[0]]))
TD["x"] = tf.concat(concat_list, axis=1) # [bs/nbs, (4 + 1 + 4)*enc_rnn_dim]
if mode == tf.estimator.ModeKeys.TRAIN or mode == tf.estimator.ModeKeys.EVAL:
TD[self.node.type + "_future_encoder"] = self.encode_node_future(TD[our_present],
TD[our_future],
mode,
self.node.type) # [bs/nbs, 4*enc_rnn_dim]
self.tensor_dict = TD
def encode_multiple_input_states(self, input_feature_list, mode):
with tf.variable_scope("multiple_input_encoder"):
cell = stacked_rnn_cell(self.hps.rnn_cell,
self.hps.rnn_cell_kwargs,
self.hps.enc_rnn_dim_multiple_inputs,
self.hps.rnn_io_dropout_keep_prob,
mode)
# THIS IS BROKEN NOW!
# axis=1 because then we get size [batch_size, num_inputs, max_time, depth]
encoded_edges = tf.stack(input_feature_list, axis=1)
print(encoded_edges.get_shape())
outputs, _ = tf.nn.bidirectional_dynamic_rnn(cell, cell, input_feature_list,
sequence_length=len(input_feature_list),
dtype=tf.float32,
time_major=False)
# outputs, _ = tf.nn.dynamic_rnn(cell, node_history, self.traj_lengths,
# dtype=tf.float32,
# time_major=False) # [bs, max_time, enc_rnn_dim]
if mode == tf.estimator.ModeKeys.TRAIN:
return extract_subtensor_per_batch_element(outputs, self.prediction_timesteps)
elif mode == tf.estimator.ModeKeys.EVAL:
return self.extract_ragged_subarray(outputs) # [nbs, enc_rnn_dim]
elif mode == tf.estimator.ModeKeys.PREDICT:
return self.extract_subarray_ends(outputs) # [bs, enc_rnn_dim]
def encode_edge(self, mode, edge_type, connected_nodes):
with tf.variable_scope(edge_type, reuse=tf.AUTO_REUSE):
with tf.variable_scope("edge_encoder"):
cell = stacked_rnn_cell(self.hps.rnn_cell,
self.hps.rnn_cell_kwargs,
self.hps.enc_rnn_dim_edge,
self.hps.rnn_io_dropout_keep_prob,
mode)
if mode == tf.estimator.ModeKeys.PREDICT:
naming_func = lambda x: str(x)
else:
naming_func = lambda x: x
input_feature_list = [self.features[node] for node in connected_nodes]
if self.edge_state_combine_method == 'sum':
# Used in Structural-RNN.
stacked_edge_states = tf.stack(input_feature_list, axis=0)
combined_neighbors = tf.reduce_sum(stacked_edge_states, axis=0)
elif self.edge_state_combine_method == 'max':
# Used in NLP, e.g. max over word embeddings.
stacked_edge_states = tf.stack(input_feature_list, axis=0)
combined_neighbors = tf.reduce_max(stacked_edge_states, axis=0)
elif self.edge_state_combine_method == 'mean':
# Least destructive method that allows for a fixed output size.
stacked_edge_states = tf.stack(input_feature_list, axis=0)
combined_neighbors = tf.reduce_mean(stacked_edge_states, axis=0)
joint_history = tf.concat([combined_neighbors, self.our_traj, self.extras], 2, name="joint_history")
outputs, _ = tf.nn.dynamic_rnn(cell, joint_history, self.traj_lengths,
dtype=tf.float32,
time_major=False) # [bs, max_time, enc_rnn_dim]
if mode == tf.estimator.ModeKeys.TRAIN:
return extract_subtensor_per_batch_element(outputs, self.prediction_timesteps)
elif mode == tf.estimator.ModeKeys.EVAL:
return self.extract_ragged_subarray(outputs) # [nbs, enc_rnn_dim]
elif mode == tf.estimator.ModeKeys.PREDICT:
return self.extract_subarray_ends(outputs) # [bs, enc_rnn_dim]
def encode_total_edge_influence(self, encoded_edges, batch_size, mode):
with tf.variable_scope(self.node.type, reuse=tf.AUTO_REUSE):
with tf.variable_scope("total_edge_influence_encoder"):
if self.edge_influence_combine_method == 'sum':
stacked_encoded_edges = tf.stack(encoded_edges, axis=0)
combined_edges = tf.reduce_sum(stacked_encoded_edges, axis=0)
elif self.edge_influence_combine_method == 'max':
stacked_encoded_edges = tf.stack(encoded_edges, axis=0)
combined_edges = tf.reduce_max(stacked_encoded_edges, axis=0)
elif self.edge_influence_combine_method == 'bi-rnn':
if len(encoded_edges) == 0:
# Four times because we're trying to mimic a bi-directional
# RNN's output (which is c and h from both ends).
combined_edges = tf.zeros([batch_size, 4*self.hps.enc_rnn_dim_edge_influence[0]])
else:
cell = stacked_rnn_cell(self.hps.rnn_cell,
self.hps.rnn_cell_kwargs,
self.hps.enc_rnn_dim_edge_influence,
self.hps.rnn_io_dropout_keep_prob,
mode)
# axis=1 because then we get size [batch_size, max_time, depth]
encoded_edges = tf.stack(encoded_edges, axis=1)
_, state = tf.nn.bidirectional_dynamic_rnn(cell, cell, encoded_edges,
dtype=tf.float32,
time_major=False)
combined_edges = tf.concat([unpack_RNN_state(state[0]), unpack_RNN_state(state[1])], axis=1)
return combined_edges
def encode_node_history(self, mode):
with tf.variable_scope(self.node.type, reuse=tf.AUTO_REUSE):
with tf.variable_scope("history_encoder"):
cell = stacked_rnn_cell(self.hps.rnn_cell,
self.hps.rnn_cell_kwargs,
self.hps.enc_rnn_dim_history,
self.hps.rnn_io_dropout_keep_prob,
mode)
node_history = tf.concat([self.our_traj, self.extras], 2, name="node_history")
outputs, _ = tf.nn.dynamic_rnn(cell, node_history, self.traj_lengths,
dtype=tf.float32,
time_major=False) # [bs, max_time, enc_rnn_dim]
if mode == tf.estimator.ModeKeys.TRAIN:
return extract_subtensor_per_batch_element(outputs, self.prediction_timesteps)
elif mode == tf.estimator.ModeKeys.EVAL:
return self.extract_ragged_subarray(outputs) # [nbs, enc_rnn_dim]
elif mode == tf.estimator.ModeKeys.PREDICT:
return self.extract_subarray_ends(outputs) # [bs, enc_rnn_dim]
def encode_node_future(self, node_present, node_future, mode, scope):
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
with tf.variable_scope("future_encoder"):
cell = stacked_rnn_cell(self.hps.rnn_cell,
self.hps.rnn_cell_kwargs,
self.hps.enc_rnn_dim_future,
self.hps.rnn_io_dropout_keep_prob,
mode)
initial_state = project_to_RNN_initial_state(cell, node_present)
_, state = tf.nn.bidirectional_dynamic_rnn(cell, cell, node_future,
initial_state_fw=initial_state,
dtype = tf.float32,
time_major=False)
return tf.concat([unpack_RNN_state(state[0]), unpack_RNN_state(state[1])], axis=1)
def extract_ragged_subarray(self, tensor, offset=0): # defines a new "batch size", call it "nbs"
mhl, ph = self.hps.minimum_history_length, self.hps.prediction_horizon
return tf.boolean_mask(tensor[:,mhl-1+offset:,:], #tensor.shape[1]
tf.sequence_mask(self.traj_lengths-mhl-ph+1, tf.shape(tensor)[1]-mhl+1-offset))
def extract_subarray_ends(self, tensor, offset=0): # ON THE CHOPPING BLOCK
return extract_subtensor_per_batch_element(tensor, self.traj_lengths-1+offset)
|
11571349
|
from functools import wraps
from disco.bot.command import CommandEvent
def require(*perms):
def func_wrap(func):
@wraps(func)
def wrapper(self, event: CommandEvent, *args, **kwargs):
perm_code = event.channel.get_permissions(event.author.id)
if perm_code.can(*perms):
func(self, event, *args, **kwargs)
else:
event.msg.reply("Sorry, but you do not have sufficient privileges to do this.")
return wrapper
return func_wrap
def parse_member(func):
@wraps(func)
def wrapper(self, event: CommandEvent, member: str, *args, **kwargs):
if member.isnumeric():
real_member = event.guild.members.get(int(member))
elif len(event.msg.mentions) == 1:
real_member = event.guild.members.get(next(iter(event.msg.mentions))) # Epic Hacks lmao
else:
for guild_member in event.guild.members.values():
if "#".join([guild_member.user.username, guild_member.user.discriminator]) == member:
real_member = guild_member
break
else:
real_member = None
if real_member is None:
event.msg.reply("Sorry, but we could not find the user ({user})".format(user=member))
else:
func(self, event, real_member, *args, **kwargs)
return wrapper
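# Illustrative usage sketch (not part of this module): both decorators are
# intended to wrap disco-py plugin commands. The plugin class, command name and
# the Permissions import path below are assumptions, not confirmed by this file.
#
# from disco.bot import Plugin
# from disco.types.permissions import Permissions
#
# class ModerationPlugin(Plugin):
#     @Plugin.command('kick', '<member:str>')
#     @require(Permissions.KICK_MEMBERS)
#     @parse_member
#     def kick_command(self, event, member):
#         # parse_member has already resolved `member` (ID, mention or name#discrim)
#         # to a guild member, and require has verified the caller's permissions.
#         event.msg.reply('Resolved member: {}'.format(member.user.username))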
|
11571355
|
def application(environ, start_response):
    content_length = int(environ.get('CONTENT_LENGTH') or 0)
    body = environ['wsgi.input'].read(content_length)
    start_response('200 OK', [('Content-Length', str(len(body)))])
return [body]
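# A minimal sketch (not part of the original snippet) for serving this echo
# application locally with the standard library's wsgiref server.
if __name__ == '__main__':
    from wsgiref.simple_server import make_server
    with make_server('127.0.0.1', 8000, application) as httpd:
        # Any request body sent to http://127.0.0.1:8000/ is echoed back verbatim.
        httpd.serve_forever()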
|
11571376
|
import sys
from os.path import join, isdir, sep
from os import listdir
from mock import patch
from nose.tools import raises
fake_path = join('tmp', 'file_fake', '')
def random_string_gen(range1=12):
import string
import random
return ''.join(random.choice(string.ascii_uppercase) for i in range(range1))
# redirect output for testing
from contextlib import contextmanager
try: # compatibility with python2 and python3.
from StringIO import StringIO
except ImportError:
from io import StringIO
@contextmanager
def captured_output():
new_out, new_err = StringIO(), StringIO()
old_out, old_err = sys.stdout, sys.stderr
try:
sys.stdout, sys.stderr = new_out, new_err
yield sys.stdout, sys.stderr
finally:
sys.stdout, sys.stderr = old_out, old_err
def init_imports():
try:
import errno
import menpo
import menpodetect
import menpofit
return 1
except ImportError:
return -1
def test_init_functions():
assert(init_imports() == 1) # confirm that all imports work
p1 = './' + random_string_gen(25)
msg = random_string_gen(23)
from ..utils import check_if_path
with captured_output() as (out, err): # check_if_path prints the right message for non-existent paths.
check_if_path(p1, msg)
output = out.getvalue().strip()
assert(output == msg)
assert(check_if_path('.', '') is True) # should return True, since this is a path.
# fake_path = '/tmp/test.fake/'
# tt = check_if_path(fake_path, '')
# mock1.assert_called_once_with(fake_path)
def test_crop_rescale_img():
from .pipeline_aux import crop_rescale_img
from menpo.image import Image
import numpy as np
from menpo.shape import PointCloud
test_img = Image(np.random.random([100, 100]))
test_img.landmarks['PT'] = PointCloud([[20, 20], [20, 40], [40, 80], [40, 20]])
res_im = crop_rescale_img(test_img.copy()) # crop image and check reduced shapes
assert(res_im.shape[0] < test_img.shape[0])
assert(res_im.shape[1] < test_img.shape[1])
res_im2 = crop_rescale_img(test_img.copy(), crop_reading=1, pix_thres=40) # check pixel threshold
assert(res_im2.shape[0] < test_img.shape[0])
res_im3 = crop_rescale_img(test_img.copy(), crop_reading=1, pix_thres=400) # test that the image remains the same
assert(res_im3.shape[1] > test_img.shape[1]-3)
def test_pipeline_aux():
# @patch.object(pipeline_aux, 'load_images')
# def test_pipeline_aux(mock_load):
from .pipeline_aux import check_img_type, load_images
with patch('sys.stdout', new=StringIO()) as fake_out: # check_img_type for non valid path
check_img_type(['greg', 'l1', 'm1'], fake_path)
assert('valid path' in fake_out.getvalue())
def test_pipeline_aux_load_images():
from .pipeline_aux import load_images
with patch('sys.stdout', new=StringIO()) as fake_out:
# check for negative number of images as max_images
load_images(listdir('.'), '.', '.', '', max_images=-5)
assert('negative' in fake_out.getvalue())
# since python files (*.py) are here, it will fail reading an image at least once.
assert('Ignoring' in fake_out.getvalue())
with patch('sys.stdout', new=StringIO()) as fake_out: # check for positive number of images
load_images(listdir('.'), '.', '.', '', max_images=5)
assert('negative' not in fake_out.getvalue())
with patch('sys.stdout', new=StringIO()) as fake_out: # check for not valid path
if not isdir(fake_path):
ret = load_images(listdir('.'), fake_path, fake_path, '', max_images=5)
print(fake_out)
assert('not a valid path' in fake_out.getvalue())
assert(ret == [])
@raises(AssertionError)
def test_strip_separators_in_the_end_error():
from ..utils import strip_separators_in_the_end
strip_separators_in_the_end(9)
def test_strip_separators_in_the_end():
from ..utils import strip_separators_in_the_end
name = 'fake1'
name1 = name + sep
assert(name == strip_separators_in_the_end(name1)) # one sep in the end
assert(name == strip_separators_in_the_end(name)) # no sep in the end
name1 += sep*3
assert(name == strip_separators_in_the_end(name1)) # several sep in the end
    assert('' == strip_separators_in_the_end(''))  # empty string stays empty
|
11571388
|
from . import nodes
from .lexer import LlvmIrLexer
from .parser import LlvmIrParser
from .codegenerator import CodeGenerator
def llvm_to_ir(source):
""" Convert llvm assembly code into an IR-module """
llvm = LlvmIrFrontend()
ir_module = llvm.compile(source)
return ir_module
class LlvmIrFrontend:
def __init__(self):
context = nodes.Context()
self.lexer = LlvmIrLexer(context)
self.parser = LlvmIrParser(context)
self.codegen = CodeGenerator()
def compile(self, f):
src = f.read()
if f.name:
self.lexer.filename = f.name
tokens = self.lexer.tokenize(src, eof=True)
self.parser.init_lexer(tokens)
module = self.parser.parse_module()
return self.codegen.generate(module)
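# Usage sketch (not part of the module; 'input.ll' is a hypothetical path).
# compile() calls f.read() and reads f.name, so pass an open file object:
#
#     with open('input.ll', 'r') as f:
#         ir_module = llvm_to_ir(f)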
|
11571412
|
import asyncio
from dataclasses import asdict
from model.Price import Price
from services.fetch import fetch_json
from services.s3 import write_to_s3
from services.time import current_time
async def bitcoin_price(session, s3_client):
coingecko, gemini, blockchain = await asyncio.gather(
fetch_json(session, "https://api.coingecko.com/api/v3/exchange_rates"),
fetch_json(session, "https://api.gemini.com/v1/pubticker/btcusd"),
fetch_json(session, "https://api.blockchain.info/stats")
)
prices = [
Price(
source="coingecko",
price=coingecko["rates"]["usd"]["value"]
),
Price(
source="gemini",
price=float(gemini["last"])
),
Price(
source="blockchain",
price=blockchain["market_price_usd"]
)
]
prices_document = {
'last_updated': current_time(),
'prices': [asdict(price) for price in prices]
}
await write_to_s3(prices_document, 'bitcoin_price', client=s3_client)
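# Hypothetical driver sketch (not part of the original module): fetch_json is
# called with a client session and write_to_s3 with an S3 client above, so an
# aiohttp session plus a boto3 client are assumed here purely for wiring.
if __name__ == '__main__':
    import aiohttp
    import boto3

    async def _run():
        s3_client = boto3.client('s3')
        async with aiohttp.ClientSession() as session:
            await bitcoin_price(session, s3_client)

    asyncio.run(_run())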
|
11571416
|
import cv2
import torch.hub
import os
import model
from PIL import Image
from torchvision import transforms
from grad_cam import BackPropagation
import time
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import threading
import vlc
from os.path import dirname, join
from twilio.rest import Client
import urllib.request
import requests  # used in send() below
import smbus
import json
class MMA7455():
bus = smbus.SMBus(1)
def __init__(self):
self.bus.write_byte_data(0x1D, 0x16, 0x55) # Setup the Mode
self.bus.write_byte_data(0x1D, 0x10, 0) # Calibrate
self.bus.write_byte_data(0x1D, 0x11, 0) # Calibrate
self.bus.write_byte_data(0x1D, 0x12, 0) # Calibrate
self.bus.write_byte_data(0x1D, 0x13, 0) # Calibrate
self.bus.write_byte_data(0x1D, 0x14, 0) # Calibrate
self.bus.write_byte_data(0x1D, 0x15, 0) # Calibrate
def getValueX(self):
return self.bus.read_byte_data(0x1D, 0x06)
def getValueY(self):
return self.bus.read_byte_data(0x1D, 0x07)
def getValueZ(self):
return self.bus.read_byte_data(0x1D, 0x08)
current_dir = dirname(__file__)
# Alarm sound file
file = 'alarm.mp3'
# Sound player start
p = vlc.MediaPlayer(current_dir+'/'+file)
timebasedrow= time.time()
timebasedis= time.time()
timerundrow= time.time()
timerundis= time.time()
face_cascade = cv2.CascadeClassifier(current_dir+'/haar_models/haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier(current_dir+'/haar_models/haarcascade_eye.xml')
MyModel="BlinkModel.t7"
shape = (24,24)
classes = [
'Close',
'Open',
]
eyess=[]
cface=0
sens=30
mma = MMA7455()
# Obtaining the X, Y and Z values.
xmem=mma.getValueX()
ymem=mma.getValueY()
zmem=mma.getValueZ()
x = mma.getValueX()
y = mma.getValueY()
z = mma.getValueZ()
# Creating the base accelerometer values.
if(xmem > 127):
xmem=xmem-255
if(ymem > 127):
ymem=ymem-255
if(zmem > 127):
zmem=zmem-255
if(x > 127):
x=x-255
if(y > 127):
y=y-255
if(z > 127):
z=z-255
def send():
# Your Account SID from twilio.com/console
account_sid = "XXXXXXXX<KEY>"
# Your Auth Token from twilio.com/console
auth_token = "<PASSWORD>"
client = Client(account_sid, auth_token)
phone = "+XXXXXXXXXXXX"
print('crash')
send_url = 'http://ip-api.com/json'
r = requests.get(send_url)
j = json.loads(r.text)
text="The Driver Crash Here: "
text+="http://maps.google.com/maps?q=loc:{},{}".format(j['lat'],j['lon'])
print(text)
    message = client.messages.create(to=phone, from_="+XXXXXXXXXXXX", body=text)
print(message.sid)
time.sleep(10)
    p.stop()  # stop the alarm player
def preprocess(image_path):
global cface
transform_test = transforms.Compose([
transforms.ToTensor()
])
image = cv2.imread(image_path['path'])
faces = face_cascade.detectMultiScale(
image,
scaleFactor=1.1,
minNeighbors=5,
minSize=(1, 1),
flags=cv2.CASCADE_SCALE_IMAGE
)
if len(faces) == 0:
...
else:
cface=1
(x, y, w, h) = faces[0]
face = image[y:y + h, x:x + w]
cv2.rectangle(image,(x,y),(x+w,y+h),(255,255,0),2)
roi_color = image[y:y+h, x:x+w]
"""
Depending on the quality of your camera, this number can vary
between 10 and 40, since this is the "sensitivity" to detect the eyes.
"""
sensi=20
eyes = eye_cascade.detectMultiScale(face,1.3, sensi)
i=0
for (ex,ey,ew,eh) in eyes:
(x, y, w, h) = eyes[i]
cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
eye = face[y:y + h, x:x + w]
eye = cv2.resize(eye, shape)
eyess.append([transform_test(Image.fromarray(eye).convert('L')), eye, cv2.resize(face, (48,48))])
i=i+1
cv2.imwrite(current_dir+'/temp-images/display.jpg',image)
def eye_status(image, name, net):
img = torch.stack([image[name]])
bp = BackPropagation(model=net)
probs, ids = bp.forward(img)
actual_status = ids[:, 0]
prob = probs.data[:, 0]
if actual_status == 0:
prob = probs.data[:,1]
#print(name,classes[actual_status.data], probs.data[:,0] * 100)
return classes[actual_status.data]
def func(imag,modl):
drow(images=[{'path': imag, 'eye': (0,0,0,0)}],model_name=modl)
def drow(images, model_name):
global eyess
global cface
global timebasedrow
global timebasedis
global timerundrow
global timerundis
net = model.Model(num_classes=len(classes))
checkpoint = torch.load(os.path.join(current_dir+'/model', model_name), map_location=torch.device('cpu'))
net.load_state_dict(checkpoint['net'])
net.eval()
flag =1
status=""
for i, image in enumerate(images):
if(flag):
preprocess(image)
flag=0
if cface==0:
image = cv2.imread(current_dir+"/temp-images/display.jpg")
image = cv2.putText(image, 'No face Detected', (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2, cv2.LINE_AA)
cv2.imwrite(current_dir+'/temp-images/display.jpg',image)
timebasedrow= time.time()
timebasedis= time.time()
timerundrow= time.time()
timerundis= time.time()
elif(len(eyess)!=0):
eye, eye_raw , face = eyess[i]
image['eye'] = eye
image['raw'] = eye_raw
image['face'] = face
timebasedrow= time.time()
timerundrow= time.time()
for index, image in enumerate(images):
status = eye_status(image, 'eye', net)
if(status =="Close"):
timerundis= time.time()
if((timerundis-timebasedis)>1.5):
image = cv2.imread(current_dir+'/temp-images/display.jpg')
image = cv2.putText(image, 'Distracted', (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2, cv2.LINE_AA)
cv2.imwrite(current_dir+'/temp-images/display.jpg',image)
if(not(p.is_playing())):
p.play()
else:
p.stop()
else:
timerundrow= time.time()
if((timerundrow-timebasedrow)>3):
if(not(p.is_playing())):
p.play()
image = cv2.imread(current_dir+'/temp-images/display.jpg')
image = cv2.putText(image, 'Drowsy', (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2, cv2.LINE_AA)
cv2.imwrite(current_dir+'/temp-images/display.jpg',image)
def main():
while 1:
global eyess
global cface
eyess=[]
cface=0
ret, img = cap.read()
cv2.imwrite(current_dir+'/temp-images/img.jpg',img)
func(current_dir+'/temp-images/img.jpg',MyModel)
def readAccel():
while 1:
try:
x = mma.getValueX()
y = mma.getValueY()
z = mma.getValueZ()
if(x > 127):
x=x-255
if(y > 127):
y=y-255
if(z > 127):
z=z-255
# Send sms if crash
if(abs(xmem-x)>sens or abs(ymem-y)>sens or abs(zmem-z)>sens):
send()
print('Crash')
except:
...
cap = cv2.VideoCapture(0)
timebasedrow= time.time()
timebasedis= time.time()
timerundrow= time.time()
timerundis= time.time()
r = threading.Thread(target=readAccel, name='readAccel')
m = threading.Thread(target=main, name='main')
r.start()
m.start()
|
11571418
|
from service.appservice import AppService
if __name__ == '__main__':
service = AppService(new_toolchain=True)
|
11571454
|
import requests
from resources.lib.soundcloud.api_interface import ApiInterface
class ApiPublic(ApiInterface):
"""This class uses the official SoundCloud API."""
api_host = "https://api.soundcloud.com/"
def _do_request(self, path, payload):
return requests.get(self.api_host + path, params=payload).json()
def search(self, query, kind):
pass
def charts(self, filters):
pass
def call(self, url):
pass
def discover(self, selection):
pass
def resolve_id(self, id):
pass
def resolve_url(self, url):
pass
def resolve_media_url(self, url):
pass
|
11571473
|
import os.path
from typing import List
import pytest
from click.testing import CliRunner
from localstack.cli.lpm import cli, console
from localstack.services.install import CommunityInstallerRepository, Installer
@pytest.fixture
def runner():
return CliRunner()
@pytest.mark.skip_offline
def test_list(runner, monkeypatch):
monkeypatch.setattr(console, "no_color", True)
result = runner.invoke(cli, ["list"])
assert result.exit_code == 0
assert "elasticmq/community" in result.output
@pytest.mark.skip_offline
def test_install_with_non_existing_package_fails(runner):
result = runner.invoke(cli, ["install", "elasticmq", "funny"])
assert result.exit_code == 1
assert "unable to locate installer for package funny" in result.output
@pytest.mark.skip_offline
def test_install_failure_returns_non_zero_exit_code(runner, monkeypatch):
def failing_installer():
raise Exception("failing installer")
def successful_installer():
pass
def patched_get_installer(self) -> List[Installer]:
return [
("failing-installer", failing_installer),
("successful-installer", successful_installer),
]
monkeypatch.setattr(CommunityInstallerRepository, "get_installer", patched_get_installer)
result = runner.invoke(cli, ["install", "successful-installer", "failing-installer"])
assert result.exit_code == 1
assert "one or more package installations failed." in result.output
@pytest.mark.skip_offline
def test_install_with_package(runner):
from localstack.services.install import INSTALL_PATH_ELASTICMQ_JAR
result = runner.invoke(cli, ["install", "elasticmq"])
assert result.exit_code == 0
assert os.path.exists(INSTALL_PATH_ELASTICMQ_JAR)
|
11571530
|
import torch
class AffineLandmarkTransformation(torch.nn.Module):
"""
Layer to apply a given transformation matrix to a set of landmarks
"""
def __init__(self):
super().__init__()
def forward(self, lmk_tensor, affine_tensor, inverse=False):
"""
the actual transformation
Parameters
----------
lmk_tensor : :class:`torch.Tensor`
the landmarks to transform (of shape N x Num_landmarks x 2)
affine_tensor : :class:`torch.Tensor`
the transformation to apply (of shape N x 6)
inverse : bool, optional
            whether to apply the given transformation or its inverse
(the default is False)
Returns
-------
:class:`torch.Tensor`
the transformed landmarks
"""
A = torch.zeros((affine_tensor.size(0), 2, 2),
device=affine_tensor.device)
A[:, 0] = affine_tensor[:, :2].clone()
A[:, 1] = affine_tensor[:, 2:4].clone()
t = affine_tensor[:, 4:6].clone()
if inverse:
A = A.inverse()
t = torch.bmm(
(-t).view(affine_tensor.size(0), -1, 2), A.permute(0, 2, 1))
t = t.squeeze(1)
output = torch.bmm(lmk_tensor.view(affine_tensor.size(0), -1, 2), A)
t = t.unsqueeze(1)
output = output + t
output = output.view(affine_tensor.size(0), -1, 2)
return output
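# Minimal usage sketch (random values; shapes follow the docstring above).
if __name__ == '__main__':
    layer = AffineLandmarkTransformation()
    lmks = torch.rand(4, 68, 2)    # N x Num_landmarks x 2
    affine = torch.rand(4, 6)      # N x 6: two 2x2 matrix rows, then translation
    out = layer(lmks, affine)
    print(out.shape)               # torch.Size([4, 68, 2])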
|
11571564
|
from .bbox_head import BBoxHead
from .convfc_bbox_head import ConvFCBBoxHead, SharedFCBBoxHead
from .centripetal_mask import Centripetal_mask
__all__ = [
'BBoxHead', 'ConvFCBBoxHead', 'SharedFCBBoxHead', 'Centripetal_mask'
]
|
11571572
|
import random as r
# Function for OTP generation
def otpgen():
otp=""
for i in range(5):
otp+=str(r.randint(1,9))
print("Your One Time Password is ")
print(otp)
otpgen()
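# Note (not in the original): random.randint is not cryptographically secure.
# A sketch of the same idea using the standard-library secrets module:
#
#     import secrets
#     otp = "".join(str(secrets.randbelow(10)) for _ in range(5))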
|
11571586
|
from django import template
from django.conf import settings
from django.contrib import admin
from django.contrib.admin import AdminSite
from django.urls import resolve, reverse
register = template.Library()
def get_semantic_sidebar(app_list, current_app):
semantic_sidebar = getattr(settings, "SEMANTIC_SIDEBAR", None)
if semantic_sidebar:
ordered = []
for app_label in semantic_sidebar:
for app in app_list:
is_current = app["app_label"] == current_app
app["is_current"] = is_current
if app_label == app["app_label"]:
ordered.append(app)
app_list = ordered
return app_list
def get_app_label(resolver_match):
if "app_label" in resolver_match.kwargs:
return resolver_match.kwargs.get("app_label")
else:
# Reconstruct from url_name.
url_name = resolver_match.url_name
# Exclude model and action.
parts = url_name.split("_")[:-2]
# Return parts.
return "_".join(parts)
@register.simple_tag(takes_context=True)
def get_app_list(context):
request = context["request"]
resolver_match = resolve(request.path_info)
admin_name = resolver_match.namespace
current_app = get_app_label(resolver_match)
admin_site = get_admin_site(admin_name)
app_list = admin_site.get_app_list(request)
return get_semantic_sidebar(app_list, current_app)
def get_admin_site(current_app):
try:
resolver_match = resolve(reverse("%s:index" % current_app))
for func_closure in resolver_match.func.func_closure:
if isinstance(func_closure.cell_contents, AdminSite):
return func_closure.cell_contents
except Exception:
pass
return admin.site
def get_admin_url(request, admin_site):
try:
url = "{}:index".format(admin_site)
url = reverse(url)
except Exception:
pass
else:
return url
@register.simple_tag(takes_context=True)
def admin_apps(context):
return get_app_list(context)
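# Illustrative configuration sketch (app labels are hypothetical): the sidebar
# built by get_semantic_sidebar() is driven by the SEMANTIC_SIDEBAR setting,
# e.g. in settings.py:
#
#     SEMANTIC_SIDEBAR = ["auth", "blog", "shop"]
#
# Apps appear in this order; apps not listed are omitted from the sidebar.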
|
11571598
|
from .best_config import BEST_CONFIGS
from .dgl_graph import *
from .utils import *
from .evaluator import *
from .logger import Logger
|
11571609
|
import numpy as np
import scipy.io as sio
import argparse
from camera import Camera
from plotting import *
# A very simple, but useful method to take the difference between the
# first and second element (usually for 2D vectors)
def diff(x):
return x[1] - x[0]
'''
FORM_INITIAL_VOXELS create a basic grid of voxels ready for carving
Arguments:
xlim - The limits of the x dimension given as [xmin xmax]
ylim - The limits of the y dimension given as [ymin ymax]
zlim - The limits of the z dimension given as [zmin zmax]
num_voxels - The approximate number of voxels we desire in our grid
Returns:
voxels - An ndarray of size (N, 3) where N is approximately equal the
num_voxels of voxel locations.
voxel_size - The distance between the locations of adjacent voxels
(a voxel is a cube)
Our initial voxels will create a rectangular prism defined by the x,y,z
limits. Each voxel will be a cube, so you'll have to compute the
approximate side-length (voxel_size) of these cubes, as well as how many
cubes you need to place in each dimension to get around the desired
number of voxel. This can be accomplished by first finding the total volume of
the voxel grid and dividing by the number of desired voxels. This will give an
approximate volume for each cubic voxel, which you can then use to find the
side-length. The final "voxels" output should be a ndarray where every row is
the location of a voxel in 3D space.
'''
def form_initial_voxels(xlim, ylim, zlim, num_voxels):
# TODO: Implement this method!
x_dim = xlim[-1] - xlim[0]
y_dim = ylim[-1] - ylim[0]
z_dim = zlim[-1] - zlim[0]
total_volume = x_dim * y_dim * z_dim
voxel_volume = float(total_volume / num_voxels)
voxel_size = np.cbrt(voxel_volume)
    x_voxel_num = int(np.round(x_dim / voxel_size))
    y_voxel_num = int(np.round(y_dim / voxel_size))
    z_voxel_num = int(np.round(z_dim / voxel_size))
    x_coor = np.linspace(xlim[0]+0.5*voxel_size, xlim[0]+(0.5+x_voxel_num-1)*voxel_size, x_voxel_num)
    y_coor = np.linspace(ylim[0]+0.5*voxel_size, ylim[0]+(0.5+y_voxel_num-1)*voxel_size, y_voxel_num)
    z_coor = np.linspace(zlim[0]+0.5*voxel_size, zlim[0]+(0.5+z_voxel_num-1)*voxel_size, z_voxel_num)
XX, YY, ZZ = np.meshgrid(x_coor, y_coor, z_coor)
voxels = np.vstack((XX.reshape(-1), YY.reshape(-1), ZZ.reshape(-1))).reshape(3, -1).T
return voxels, voxel_size
'''
GET_VOXEL_BOUNDS: Gives a nice bounding box in which the object will be carved
from. We feed these x/y/z limits into the construction of the inital voxel
cuboid.
Arguments:
cameras - The given data, which stores all the information
associated with each camera (P, image, silhouettes, etc.)
estimate_better_bounds - a flag that simply tells us whether to set tighter
bounds. We can carve based on the silhouette we use.
num_voxels - If estimating a better bound, the number of voxels needed for
a quick carving.
Returns:
xlim - The limits of the x dimension given as [xmin xmax]
ylim - The limits of the y dimension given as [ymin ymax]
zlim - The limits of the z dimension given as [zmin zmax]
The current method is to simply use the camera locations as the bounds. In the
section underneath the TODO, please implement a method to find tighter bounds:
One such approach would be to do a quick carving of the object on a grid with
very few voxels. From this coarse carving, we can determine tighter bounds. Of
course, these bounds may be too strict, so we should have a buffer of one
voxel_size around the carved object.
'''
def get_voxel_bounds(cameras, estimate_better_bounds = False, num_voxels = 4000):
camera_positions = np.vstack([c.T for c in cameras])
xlim = [camera_positions[:,0].min(), camera_positions[:,0].max()]
ylim = [camera_positions[:,1].min(), camera_positions[:,1].max()]
zlim = [camera_positions[:,2].min(), camera_positions[:,2].max()]
# For the zlim we need to see where each camera is looking.
camera_range = 0.6 * np.sqrt(diff( xlim )**2 + diff( ylim )**2)
for c in cameras:
viewpoint = c.T - camera_range * c.get_camera_direction()
zlim[0] = min( zlim[0], viewpoint[2] )
zlim[1] = max( zlim[1], viewpoint[2] )
# Move the limits in a bit since the object must be inside the circle
xlim = xlim + diff(xlim) / 4 * np.array([1, -1])
ylim = ylim + diff(ylim) / 4 * np.array([1, -1])
if estimate_better_bounds:
# TODO: Implement this method!
voxels, voxel_size = form_initial_voxels(xlim, ylim, zlim, num_voxels)
for c in cameras:
voxels = carve(voxels, c)
xlim = [voxels[0][0]-1.5*voxel_size, voxels[0][0]+1.5*voxel_size]
ylim = [voxels[0][1]-1.5*voxel_size, voxels[0][1]+1.5*voxel_size]
zlim = [voxels[0][2]-1.5*voxel_size, voxels[0][2]+1.5*voxel_size]
return xlim, ylim, zlim
'''
CARVE: carves away voxels that are not inside the silhouette contained in
the view of the camera. The resulting voxel array is returned.
Arguments:
voxels - an Nx3 matrix where each row is the location of a cubic voxel
camera - The camera we are using to carve the voxels with. Useful data
stored in here are the "silhouette" matrix, "image", and the
projection matrix "P".
Returns:
voxels - a subset of the argument passed that are inside the silhouette
'''
def carve(voxels, camera):
# TODO: Implement this method!
# find all corresponding image points of voxels
homo_voxels = np.hstack((voxels, np.ones((voxels.shape[0], 1)))).T
# keep track of voxels index
N = voxels.shape[0]
voxel_index = np.arange(0, N)
# project from 3D to 2D, projection matrix: (3, 4)
P = camera.P
img_voxels = P.dot(homo_voxels)
# normalize
img_voxels /= img_voxels[2, :]
# drop out z
img_voxels = img_voxels[0:2, :].T
# check whether the voxel points are in range of image
img_y_max, img_x_max = camera.silhouette.shape
img_y_min = 0; img_x_min = 0
voxelX = img_voxels[:, 0]
x_range_filter = np.all([voxelX > img_x_min, voxelX < img_x_max], axis=0)
img_voxels = img_voxels[x_range_filter, :]
voxel_index = voxel_index[x_range_filter]
voxelY = img_voxels[:, 1]
y_range_filter = np.all([voxelY > img_y_min, voxelY < img_y_max], axis=0)
img_voxels = img_voxels[y_range_filter, :]
voxel_index = voxel_index[y_range_filter]
# check whether the point is in the silhouette
img_voxels = img_voxels.astype(int)
silhouette_filter = (camera.silhouette[img_voxels[:, 1], img_voxels[:, 0]] == 1)
voxel_index = voxel_index[silhouette_filter]
return voxels[voxel_index, :]
'''
ESTIMATE_SILHOUETTE: Uses a very naive and color-specific heuristic to generate
the silhouette of an object
Arguments:
im - The image containing a known object. An ndarray of size (H, W, C).
Returns:
silhouette - An ndarray of size (H, W), where each pixel location is 0 or 1.
If the (i,j) value is 0, then that pixel location in the original image
does not correspond to the object. If the (i,j) value is 1, then that
that pixel location in the original image does correspond to the object.
'''
def estimate_silhouette(im):
return np.logical_and(im[:,:,0] > im[:,:,2], im[:,:,0] > im[:,:,1] )
if __name__ == '__main__':
estimate_better_bounds = True
use_true_silhouette = True
frames = sio.loadmat('frames.mat')['frames'][0]
cameras = [Camera(x) for x in frames]
# Generate the silhouettes based on a color heuristic
if not use_true_silhouette:
for i, c in enumerate(cameras):
c.true_silhouette = c.silhouette
c.silhouette = estimate_silhouette(c.image)
if i == 0:
plt.figure()
plt.subplot(121)
plt.imshow(c.true_silhouette, cmap = 'gray')
plt.title('True Silhouette')
plt.subplot(122)
plt.imshow(c.silhouette, cmap = 'gray')
plt.title('Estimated Silhouette')
plt.show()
# Generate the voxel grid
# You can reduce the number of voxels for faster debugging, but
# make sure you use the full amount for your final solution
num_voxels = 6e6
xlim, ylim, zlim = get_voxel_bounds(cameras, estimate_better_bounds)
# This part is simply to test forming the initial voxel grid
voxels, voxel_size = form_initial_voxels(xlim, ylim, zlim, 4000)
plot_surface(voxels)
voxels, voxel_size = form_initial_voxels(xlim, ylim, zlim, num_voxels)
# Test the initial carving
voxels = carve(voxels, cameras[0])
if use_true_silhouette:
plot_surface(voxels)
# Result after all carvings
for c in cameras:
voxels = carve(voxels, c)
plot_surface(voxels, voxel_size)
|
11571620
|
import itertools
import socket
import unittest
from stompest._backwards import nextMethod
from stompest.error import StompConnectTimeout
from stompest.protocol import StompFailoverUri, StompFailoverTransport
from stompest.tests import mock
class StompFailoverUriTest(unittest.TestCase):
def test_configuration(self):
uri = 'tcp://localhost:61613'
configuration = StompFailoverUri(uri)
self.assertEqual(configuration.brokers, [{'host': 'localhost', 'protocol': 'tcp', 'port': 61613}])
self.assertEqual(configuration.options, {'priorityBackup': False, 'initialReconnectDelay': 10, 'reconnectDelayJitter': 0, 'maxReconnectDelay': 30000, 'backOffMultiplier': 2.0, 'startupMaxReconnectAttempts': 0, 'maxReconnectAttempts':-1, 'useExponentialBackOff': True, 'randomize': True})
uri = 'tcp://123.456.789.0:61616?randomize=true,maxReconnectAttempts=-1,priorityBackup=true'
configuration = StompFailoverUri(uri)
self.assertTrue(configuration.options['randomize'])
self.assertEqual(configuration.options['priorityBackup'], True)
self.assertEqual(configuration.options['maxReconnectAttempts'], -1)
self.assertEqual(configuration.brokers, [{'host': '123.456.789.0', 'protocol': 'tcp', 'port': 61616}])
uri = 'failover:(tcp://primary:61616,tcp://secondary:61616)?randomize=false,maxReconnectAttempts=2,backOffMultiplier=3.0'
configuration = StompFailoverUri(uri)
self.assertEqual(configuration.uri, uri)
self.assertFalse(configuration.options['randomize'])
self.assertEqual(configuration.options['backOffMultiplier'], 3.0)
self.assertEqual(configuration.options['maxReconnectAttempts'], 2)
self.assertEqual(configuration.brokers, [
{'host': 'primary', 'protocol': 'tcp', 'port': 61616},
{'host': 'secondary', 'protocol': 'tcp', 'port': 61616}
])
def test_configuration_invalid_uris(self):
for uri in [
'tcp://:61613', 'tcp://61613', 'tcp:localhost:61613', 'tcp:/localhost',
'tcp://localhost:', 'tcp://localhost:a', 'tcp://localhost:61613?randomize=1', 'tcp://localhost:61613?randomize=True',
            'tcp://localhost:61613??=False', 'tcp://localhost:61613?a=False', 'tcp://localhost:61613?maxReconnectDelay=False',
'failover:(tcp://primary:61616, tcp://secondary:61616)', 'failover:tcp://primary:61616, tcp://secondary:61616',
'failover:tcp://primary:61616,tcp://secondary:61616)', 'failover:(tcp://primary:61616,tcp://secondary:61616',
]:
self.assertRaises(ValueError, lambda: StompFailoverUri(uri))
class StompFailoverTest(unittest.TestCase):
def test_time_scales_and_reconnect_attempts(self):
uri = 'failover:tcp://remote1:61615,tcp://localhost:61616,tcp://remote2:61617?randomize=false,startupMaxReconnectAttempts=3,initialReconnectDelay=7,backOffMultiplier=3.0,maxReconnectAttempts=1'
protocol = StompFailoverTransport(uri)
expectedDelaysAndBrokers = [
(0, {'host': 'remote1', 'protocol': 'tcp', 'port': 61615}),
(0.007, {'host': 'localhost', 'protocol': 'tcp', 'port': 61616}),
(0.021, {'host': 'remote2', 'protocol': 'tcp', 'port': 61617}),
(0.063, {'host': 'remote1', 'protocol': 'tcp', 'port': 61615})
]
self._test_failover(iter(protocol), expectedDelaysAndBrokers)
expectedDelaysAndBrokers = [
(0, {'host': 'remote1', 'protocol': 'tcp', 'port': 61615}),
(0.007, {'host': 'localhost', 'protocol': 'tcp', 'port': 61616})
]
self._test_failover(iter(protocol), expectedDelaysAndBrokers)
uri = 'failover:(tcp://remote1:61615,tcp://localhost:61616)?randomize=false,startupMaxReconnectAttempts=3,initialReconnectDelay=7,maxReconnectDelay=8,maxReconnectAttempts=0'
protocol = StompFailoverTransport(uri)
expectedDelaysAndBrokers = [
(0, {'host': 'remote1', 'protocol': 'tcp', 'port': 61615}),
(0.007, {'host': 'localhost', 'protocol': 'tcp', 'port': 61616}),
(0.008, {'host': 'remote1', 'protocol': 'tcp', 'port': 61615}),
(0.008, {'host': 'localhost', 'protocol': 'tcp', 'port': 61616})
]
self._test_failover(iter(protocol), expectedDelaysAndBrokers)
expectedDelaysAndBrokers = [
(0, {'host': 'remote1', 'protocol': 'tcp', 'port': 61615})
]
self._test_failover(iter(protocol), expectedDelaysAndBrokers)
uri = 'failover:(tcp://remote1:61615,tcp://localhost:61616)?randomize=false,startupMaxReconnectAttempts=2,initialReconnectDelay=3,useExponentialBackOff=false'
protocol = StompFailoverTransport(uri)
expectedDelaysAndBrokers = [
(0, {'host': 'remote1', 'protocol': 'tcp', 'port': 61615}),
(0.003, {'host': 'localhost', 'protocol': 'tcp', 'port': 61616}),
(0.003, {'host': 'remote1', 'protocol': 'tcp', 'port': 61615})
]
self._test_failover(iter(protocol), expectedDelaysAndBrokers)
def test_priority_backup(self):
uri = 'failover:tcp://remote1:61616,tcp://localhost:61616,tcp://127.0.0.1:61615,tcp://remote2:61616?startupMaxReconnectAttempts=3,priorityBackup=true,randomize=false'
protocol = StompFailoverTransport(uri)
self._test_failover(iter(protocol), [
(0, {'host': 'localhost', 'protocol': 'tcp', 'port': 61616}),
(0.01, {'host': '127.0.0.1', 'protocol': 'tcp', 'port': 61615}),
(0.02, {'host': 'remote1', 'protocol': 'tcp', 'port': 61616}),
(0.04, {'host': 'remote2', 'protocol': 'tcp', 'port': 61616})
])
@mock.patch('socket.gethostbyname')
def test_priority_backup_localhost_lookup(self, mock_gethostbyname):
local_ip = '1.2.3.4'
uri = 'failover:tcp://remote1:61616,tcp://localhost:61616,tcp://127.0.0.1:61615,tcp://%s:61616?startupMaxReconnectAttempts=3,priorityBackup=true,randomize=false' % local_ip
protocol = StompFailoverTransport(uri)
mock_gethostbyname.side_effect = lambda *_args, **_kwargs: local_ip
self._test_failover(iter(protocol), [
(0, {'host': 'localhost', 'protocol': 'tcp', 'port': 61616}),
(0.01, {'host': '127.0.0.1', 'protocol': 'tcp', 'port': 61615}),
(0.02, {'host': local_ip, 'protocol': 'tcp', 'port': 61616}),
(0.04, {'host': 'remote1', 'protocol': 'tcp', 'port': 61616}),
])
@mock.patch('socket.gethostbyname')
def test_priority_backup_broken_localhost_lookup(self, mock_gethostbyname):
local_ip = '1.2.3.4'
uri = 'failover:tcp://remote1:61616,tcp://localhost:61616,tcp://127.0.0.1:61615,tcp://%s:61616?startupMaxReconnectAttempts=3,priorityBackup=true,randomize=false' % local_ip
protocol = StompFailoverTransport(uri)
def _broken_gethostbyname(host):
raise socket.gaierror()
mock_gethostbyname.side_effect = _broken_gethostbyname
self._test_failover(iter(protocol), [
(0, {'host': 'localhost', 'protocol': 'tcp', 'port': 61616}),
(0.01, {'host': '127.0.0.1', 'protocol': 'tcp', 'port': 61615}),
(0.02, {'host': 'remote1', 'protocol': 'tcp', 'port': 61616}),
(0.04, {'host': local_ip, 'protocol': 'tcp', 'port': 61616}),
])
def test_randomize(self):
uri = 'failover:tcp://remote1:61616,tcp://localhost:61616,tcp://127.0.0.1:61615,tcp://remote2:61616?priorityBackup=true,randomize=true,startupMaxReconnectAttempts=3'
protocol = StompFailoverTransport(uri)
localShuffled = remoteShuffled = 0
localHosts = ['localhost', '127.0.0.1']
remoteHosts = ['remote1', 'remote2']
while (localShuffled * remoteShuffled) == 0:
protocol = StompFailoverTransport(uri)
hosts = [broker['host'] for (broker, _) in itertools.islice(protocol, 4)]
self.assertEqual(set(hosts[:2]), set(localHosts))
if (hosts[:2] != localHosts):
localShuffled += 1
self.assertEqual(set(hosts[2:]), set(remoteHosts))
if (hosts[2:] != remoteHosts):
remoteShuffled += 1
def test_jitter(self):
uri = 'failover:tcp://remote1:61616?useExponentialBackOff=false,startupMaxReconnectAttempts=1,reconnectDelayJitter=4'
for j in itertools.count():
protocol = iter(StompFailoverTransport(uri))
nextProtocol = nextMethod(protocol)
nextProtocol()
_, delay = nextProtocol()
self.assertTrue(abs(delay - 0.01) < 0.004)
if (j > 10) and (abs(delay - 0.01) > 0.003):
break
def _test_failover(self, brokersAndDelays, expectedDelaysAndBrokers):
for (expectedDelay, expectedBroker) in expectedDelaysAndBrokers:
nextBrokerAndDelay = nextMethod(brokersAndDelays)
broker, delay = nextBrokerAndDelay()
self.assertEqual(delay, expectedDelay)
self.assertEqual(broker, expectedBroker)
self.assertRaises(StompConnectTimeout, nextBrokerAndDelay)
if __name__ == '__main__':
unittest.main()
|
11571621
|
from unittest import TestCase
from idewavecore.errors.storage import OverwriteFrozenFieldError
from idewavecore.session import Storage, ItemFlag
TEST_KEY = 'key'
TEST_VALUE = 'value'
TEST_ANOTHER_VALUE = 'another value'
TEST_NON_EXISTENT_KEY = 'this key not exists'
class TestStorage(TestCase):
def setUp(self) -> None:
self.storage = Storage()
def test_set_and_get_item(self):
self.storage.set_items([
{
TEST_KEY: {
'value': TEST_VALUE
}
}
])
self.assertEqual(self.storage.get_value(TEST_KEY), TEST_VALUE)
def test_clean_temporary_fields(self):
self.storage.set_items([
{
TEST_KEY: {
'value': TEST_VALUE
}
}
])
self.storage.clean_temporary_fields()
self.assertIsNone(self.storage.get_value(TEST_KEY))
def test_not_clean_persistent_fields(self):
self.storage.set_items([
{
TEST_KEY: {
'value': TEST_VALUE,
'flags': ItemFlag.PERSISTENT
}
}
])
self.storage.clean_temporary_fields()
self.assertIsNotNone(self.storage.get_value(TEST_KEY))
self.assertEqual(self.storage.get_value(TEST_KEY), TEST_VALUE)
def test_cannot_change_frozen_field(self):
self.storage.set_items([
{
TEST_KEY: {
'value': TEST_VALUE,
'flags': ItemFlag.FROZEN
}
}
])
with self.assertRaises(OverwriteFrozenFieldError):
self.storage._set_item(TEST_KEY, TEST_ANOTHER_VALUE)
self.assertEqual(self.storage.get_value(TEST_KEY), TEST_VALUE)
def test_get_non_existent_value(self):
self.assertIsNone(self.storage.get_value(TEST_NON_EXISTENT_KEY))
|
11571646
|
import numpy as np
import scipy.sparse
from utils import SGD_regression_test_error
class RandomBinning(object):
def __init__(self, D, lifetime, M):
""" Sets up a random binning object for the isotropic Laplacian kernel in D dimensions.
A random binning object is a 3-tuple (widths, shifts, keys) where
- widths is a list of D reals, specifying bin widths in each input dimension
- shifts is a list of D reals, specifying bin shifts
- keys is a dictionary int -> int giving sequential numbers to non-empty bins
"""
self.widths = [np.array([np.random.gamma(shape=2, scale=1.0 / lifetime) for _ in range(D)]) for _ in range (M)]
self.shifts = [np.array([np.random.uniform(low=0.0, high=width) for width in widths]) for widths in self.widths]
self.keys = {}
self.C = 0
self.M = M
self.D = D
def get_features(self, X, M=None, expand=True):
""" Returns unnormalized Random binning features for the provided datapoints X (one datapoint in each row).
:param X: Matrix of dimensions NxD, containing N datapoints (one in each row).
:param expand: Specifies whether new features should be created if a datapoint lies in a bin
that has been empty so far. (True for training, False for testing.)
:return: Sparse binary matrix of dimensions NxC, where C is the number of generated features.
Each row is the feature expansion of one datapoint and contains at most M ones.
"""
N = np.shape(X)[0]
if M is None:
M = self.M
assert M <= self.M
# stacking experiment
X_stack = np.tile(X, self.M)
shifts_stack = np.concatenate(self.shifts)
widths_stack = np.concatenate(self.widths)
X_coordinates = np.ceil((X_stack - shifts_stack) / widths_stack).astype(int)
# compute indices
row_indices = []
col_indices = []
X_coordinates.flags.writeable = False
feature_from_repetition = []
for m in range(M):
X_coords = X_coordinates[:, (self.D*m):(self.D*(m+1))]
X_coords.flags.writeable = False
for n, coordinates in enumerate(X_coords):
coordinates.flags.writeable = False
#h = hash(coordinates.data)
h = tuple(coordinates.tolist())
if (m, h) in self.keys:
row_indices.append(n)
col_indices.append(self.keys[(m, h)])
elif expand:
row_indices.append(n)
col_indices.append(self.C)
self.keys[(m, h)] = self.C
feature_from_repetition.append(m)
self.C += 1
# construct features
values = [1]*len(row_indices)
Z = scipy.sparse.coo_matrix((values, (row_indices, col_indices)), shape=(N, self.C))
return Z.tocsr(), np.array(feature_from_repetition)
def random_binning_features(X, lifetime, R_max):
D = X.shape[1]
rb = RandomBinning(D, lifetime, R_max)
return rb.get_features(X)
def evaluate_random_binning(X, y, X_test, y_test, M, lifetime, delta):
# construct random binning features
rb = RandomBinning(X.shape[1], lifetime, M)
    Z, _ = rb.get_features(X)
    Z = Z / np.sqrt(M)
    Z_test, _ = rb.get_features(X_test, expand=False)
    Z_test = Z_test / np.sqrt(M)
# solve primal problem using SGD
SGD_epochs = 10
error_test = SGD_regression_test_error(Z, y, Z_test, y_test, delta, SGD_epochs)
    print('RB lg_lifetime = %.2f; C = %d; error_test = %.2f%%'
          % (np.log2(lifetime), np.shape(Z)[1], error_test))
return error_test
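# Minimal usage sketch on synthetic data (not part of the original module).
if __name__ == '__main__':
    np.random.seed(0)
    X_train = np.random.randn(100, 3)
    X_new = np.random.randn(10, 3)
    rb = RandomBinning(D=3, lifetime=1.0, M=8)
    Z_train, feature_reps = rb.get_features(X_train)        # bins created here
    Z_new, _ = rb.get_features(X_new, expand=False)          # reuse training bins
    print(Z_train.shape)
    print(Z_new.shape)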
|
11571659
|
from __future__ import annotations
import typing
from typing_extensions import TypedDict
from . import block_crud
from . import block_normalize
from ctc import spec
if typing.TYPE_CHECKING:
class BlockGasStats(TypedDict):
base_fee: int | float | None
min_gas_price: int | float | None
median_gas_price: int | float | None
mean_gas_price: float | None
max_gas_price: int | float | None
gas_used: int
gas_limit: int
n_transactions: int
class BlocksGasStats(TypedDict):
min_base_fee: int | float | None
median_base_fee: int | float | None
mean_base_fee: int | float | None
max_base_fee: int | float | None
min_gas_price: int | float | None
min_median_gas_price: int | float | None
min_mean_gas_price: int | float | None
min_max_gas_price: int | float | None
median_min_gas_price: int | float | None
median_median_gas_price: int | float | None
median_mean_gas_price: int | float | None
median_max_gas_price: int | float | None
mean_min_gas_price: int | float | None
mean_median_gas_price: int | float | None
mean_gas_price: int | float | None
mean_max_gas_price: int | float | None
max_min_gas_price: int | float | None
max_median_gas_price: int | float | None
max_mean_gas_price: int | float | None
max_gas_price: int | float | None
min_gas_used: int | float | None
median_gas_used: int | float | None
mean_gas_used: int | float | None
max_gas_used: int | float | None
min_gas_limit: int | float | None
median_gas_limit: int | float | None
mean_gas_limit: int | float | None
max_gas_limit: int | float | None
min_n_transactions: int | float | None
median_n_transactions: int | float | None
mean_n_transactions: int | float | None
max_n_transactions: int | float | None
n_blocks: int
async def async_get_block_gas_stats(
block: spec.BlockNumberReference | spec.Block,
normalize: bool = True,
provider: spec.ProviderSpec = None,
) -> BlockGasStats:
"""get gas statistics for a given block"""
if isinstance(block, dict):
block_data = block
else:
block_data = await block_crud.async_get_block(
block, include_full_transactions=True, provider=provider
)
return get_block_gas_stats(block_data, normalize=normalize)
def get_block_gas_stats(
block: spec.Block,
normalize: bool = True,
) -> BlockGasStats:
import numpy as np
base_fee: int | float | None = block.get('base_fee_per_gas')
if len(block['transactions']) > 0:
if isinstance(block['transactions'][0], str):
raise Exception(
'transaction data not in block, use include_full_transactions=True when retrieving block'
)
gas_prices: list[int | float] = [
typing.cast(spec.Transaction, transaction)['gas_price']
for transaction in block['transactions']
]
if normalize:
gas_prices = [gas_price / 1e9 for gas_price in gas_prices]
if base_fee is not None:
base_fee = base_fee / 1e9
min_gas_price = min(gas_prices)
median_gas_price = float(np.median(gas_prices))
mean_gas_price = sum(gas_prices) / len(gas_prices)
max_gas_price = max(gas_prices)
else:
min_gas_price = None
median_gas_price = None
mean_gas_price = None
max_gas_price = None
return {
'base_fee': base_fee,
'min_gas_price': min_gas_price,
'median_gas_price': median_gas_price,
'mean_gas_price': mean_gas_price,
'max_gas_price': max_gas_price,
'gas_used': block['gas_used'],
'gas_limit': block['gas_limit'],
'n_transactions': len(block['transactions']),
}
async def async_get_gas_stats_by_block(
blocks: typing.Sequence[spec.BlockNumberReference | spec.Block],
normalize: bool = True,
provider: spec.ProviderSpec = None,
) -> list[BlockGasStats]:
import asyncio
coroutines = [
async_get_block_gas_stats(
block=block,
normalize=normalize,
provider=provider,
)
for block in blocks
]
return await asyncio.gather(*coroutines)
async def async_get_blocks_gas_stats(
blocks: typing.Sequence[spec.BlockNumberReference] | None = None,
start_block: spec.BlockNumberReference | None = None,
end_block: spec.BlockNumberReference | None = None,
normalize: bool = True,
provider: spec.ProviderSpec = None,
) -> BlocksGasStats:
"""get gas statistics aggregated over multiple blocks"""
if blocks is None:
if start_block is None or end_block is None:
raise Exception(
'must specify blocks or {start_block and end_block}'
)
start_block = await block_normalize.async_block_number_to_int(
block=start_block, provider=provider
)
end_block = await block_normalize.async_block_number_to_int(
block=end_block, provider=provider
)
blocks = list(range(start_block, end_block + 1))
blocks_gas_stats = await async_get_gas_stats_by_block(
blocks=blocks,
normalize=normalize,
provider=provider,
)
return aggregate_blocks_gas_stats(blocks_gas_stats=blocks_gas_stats)
def _mmmm(
items: typing.Sequence[int | float | None],
) -> list[int | float | None]:
import numpy as np
    non_none = [item for item in items if item is not None]
    if len(non_none) == 0:
        return [None, None, None, None]
    median = np.median(non_none)
    if type(non_none[0]) is int:
        typed_median: int | float = int(median)
    else:
        typed_median = float(median)
    return [
        min(non_none),
        typed_median,
        np.mean(non_none),
        max(non_none),
    ]
def aggregate_blocks_gas_stats(
blocks_gas_stats: typing.Sequence[BlockGasStats],
) -> BlocksGasStats:
base_fees = [stats['base_fee'] for stats in blocks_gas_stats]
min_gas_prices = [stats['min_gas_price'] for stats in blocks_gas_stats]
median_gas_prices = [
stats['median_gas_price'] for stats in blocks_gas_stats
]
mean_gas_prices = [stats['mean_gas_price'] for stats in blocks_gas_stats]
max_gas_prices = [stats['max_gas_price'] for stats in blocks_gas_stats]
gas_useds = [stats['gas_used'] for stats in blocks_gas_stats]
gas_limits = [stats['gas_limit'] for stats in blocks_gas_stats]
n_transactionss = [stats['n_transactions'] for stats in blocks_gas_stats]
min_base_fee, median_base_fee, mean_base_fee, max_base_fee = _mmmm(
base_fees
)
(
min_gas_price,
median_min_gas_price,
mean_min_gas_price,
max_min_gas_price,
) = _mmmm(min_gas_prices)
(
min_median_gas_price,
median_median_gas_price,
mean_median_gas_price,
max_median_gas_price,
) = _mmmm(median_gas_prices)
(
min_mean_gas_price,
median_mean_gas_price,
mean_gas_price,
max_mean_gas_price,
) = _mmmm(mean_gas_prices)
(
min_max_gas_price,
median_max_gas_price,
mean_max_gas_price,
max_gas_price,
) = _mmmm(max_gas_prices)
(
min_gas_used,
median_gas_used,
mean_gas_used,
max_gas_used,
) = _mmmm(gas_useds)
(
min_gas_limit,
median_gas_limit,
mean_gas_limit,
max_gas_limit,
) = _mmmm(gas_limits)
(
min_n_transactions,
median_n_transactions,
mean_n_transactions,
max_n_transactions,
) = _mmmm(n_transactionss)
return {
'min_base_fee': min_base_fee,
'median_base_fee': median_base_fee,
'mean_base_fee': mean_base_fee,
'max_base_fee': max_base_fee,
#
'min_gas_price': min_gas_price,
'min_median_gas_price': min_median_gas_price,
'min_mean_gas_price': min_mean_gas_price,
'min_max_gas_price': min_max_gas_price,
#
'median_min_gas_price': median_min_gas_price,
'median_median_gas_price': median_median_gas_price,
'median_mean_gas_price': median_mean_gas_price,
'median_max_gas_price': median_max_gas_price,
#
'mean_min_gas_price': mean_min_gas_price,
'mean_median_gas_price': mean_median_gas_price,
'mean_gas_price': mean_gas_price,
'mean_max_gas_price': mean_max_gas_price,
#
'max_min_gas_price': max_min_gas_price,
'max_median_gas_price': max_median_gas_price,
'max_mean_gas_price': max_mean_gas_price,
'max_gas_price': max_gas_price,
#
'min_gas_used': min_gas_used,
'median_gas_used': median_gas_used,
'mean_gas_used': mean_gas_used,
'max_gas_used': max_gas_used,
#
'min_gas_limit': min_gas_limit,
'median_gas_limit': median_gas_limit,
'mean_gas_limit': mean_gas_limit,
'max_gas_limit': max_gas_limit,
#
'min_n_transactions': min_n_transactions,
'median_n_transactions': median_n_transactions,
'mean_n_transactions': mean_n_transactions,
'max_n_transactions': max_n_transactions,
#
'n_blocks': len(blocks_gas_stats),
}
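# Hedged usage sketch (not part of the module above): how the block-range helper
# might be called from a script. The block numbers are placeholders and the
# default provider resolution of the surrounding library is assumed.
async def _example_gas_stats():
    stats = await async_get_blocks_gas_stats(
        start_block=14_000_000,
        end_block=14_000_010,
        normalize=True,
    )
    print(stats['mean_gas_price'], stats['n_blocks'])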
|
11571676
|
import numpy as np
from cykhash import unique_int64, unique_int32, unique_float64, unique_float32
from cykhash import unique_stable_int64, unique_stable_int32, unique_stable_float64, unique_stable_float32
UNIQUE={
np.float64 : unique_float64,
np.float32 : unique_float32,
np.int64 : unique_int64,
np.int32 : unique_int32,
}
UNIQUE_STABLE = {
np.float64 : unique_stable_float64,
np.float32 : unique_stable_float32,
np.int64 : unique_stable_int64,
np.int32 : unique_stable_int32,
}
class UniqueArange:
params = [
[np.float64, np.float32, np.int64, np.int32],
[1_000, 2_000, 8_000, 10_000, 100_000, 1_000_000, 10_000_000, 100_000_000],
]
param_names = ["dtype", "M"]
def setup(self, dtype, M):
self.array = np.arange(M, dtype=dtype)
def time_unique(self, dtype, M):
UNIQUE[dtype](self.array)
def time_unique_stable(self, dtype, M):
UNIQUE_STABLE[dtype](self.array)
def peakmem_unique(self, dtype, M):
UNIQUE[dtype](self.array)
def peakmem_unique_stable(self, dtype, M):
UNIQUE_STABLE[dtype](self.array)
class UniqueRandomDivFactor10:
params = [
[np.float64, np.float32, np.int64, np.int32],
[1_000, 2_000, 8_000, 10_000, 100_000, 1_000_000, 10_000_000, 100_000_000],
]
param_names = ["dtype", "M"]
def setup(self, dtype, M):
np.random.seed(42)
self.array = np.random.randint(0, M//10, M).astype(dtype)
def time_unique(self, dtype, M):
UNIQUE[dtype](self.array)
def time_unique_stable(self, dtype, M):
UNIQUE_STABLE[dtype](self.array)
def peakmem_unique(self, dtype, M):
UNIQUE[dtype](self.array)
def peakmem_unique_stable(self, dtype, M):
UNIQUE_STABLE[dtype](self.array)
class UniqueRandomDivFactor10Add220:
params = [
[np.float64, np.float32, np.int64, np.int32],
[1_000, 2_000, 8_000, 10_000, 100_000, 1_000_000, 10_000_000, 100_000_000],
]
param_names = ["dtype", "M"]
def setup(self, dtype, M):
np.random.seed(42)
self.array = (np.random.randint(0, M//10, M)+2**26).astype(dtype)
def time_unique(self, dtype, M):
UNIQUE[dtype](self.array)
def time_unique_stable(self, dtype, M):
UNIQUE_STABLE[dtype](self.array)
class UniqueRandomMulFactor10:
params = [
[np.float64, np.float32, np.int64, np.int32],
[1_000, 2_000, 8_000, 10_000, 100_000, 1_000_000, 10_000_000, 100_000_000],
]
param_names = ["dtype", "M"]
def setup(self, dtype, M):
np.random.seed(42)
self.array = np.random.randint(0, M*10, M).astype(dtype)
def time_unique(self, dtype, M):
UNIQUE[dtype](self.array)
def time_unique_stable(self, dtype, M):
UNIQUE_STABLE[dtype](self.array)
class UniqueSingle:
params = [
[np.float64, np.float32, np.int64, np.int32],
[10_000_000, 100_000_000],
]
param_names = ["dtype", "M"]
def setup(self, dtype, M):
self.array = np.ones(M, dtype=dtype)
def peakmem_unique(self, dtype, M):
UNIQUE[dtype](self.array)
def peakmem_unique_stable(self, dtype, M):
UNIQUE_STABLE[dtype](self.array)
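# Hedged note: these classes follow the airspeed-velocity (asv) benchmark
# convention (params/param_names/setup plus time_*/peakmem_* methods) and asv
# normally instantiates them itself. A minimal manual smoke test, assuming
# cykhash and numpy are installed, could look like this:
if __name__ == "__main__":
    bench = UniqueArange()
    bench.setup(np.int64, 1_000)
    bench.time_unique(np.int64, 1_000)
    bench.time_unique_stable(np.int64, 1_000)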
|
11571696
|
import os
import numpy as np
import _pickle as pickle
import pandas as pd
# for e2e
long_dir = './rnastralign/long/pseudoknot_ct'
short_dir = './rnastralign/short/pseudoknot_ct'
long_list = os.listdir(long_dir)
short_list = os.listdir(short_dir)
pred_list = list(filter(lambda x: 'pred' in x, long_list+short_list))
pseudoknot_f1 = list(map(lambda x: float(x.split('_')[0]), pred_list))
print('Exact f1: ', np.average(pseudoknot_f1))
# for rnastructure
with open('../data/rnastralign_test_pseudoknot_tag.pickle', 'rb') as f:
pseudoknot_tag = pickle.load(f)
# load rna structure data
filepath = './traditional_method_results/rnastralign/results_no_shift/RNAStructure.tsv'
df = pd.read_csv(filepath, sep='\t', header=None)
rnastructure_f1 = df.iloc[:, -1].values
rnastructure_pse_f1 = list()
for i in range(len(pseudoknot_tag)):
if pseudoknot_tag[i][-1]:
rnastructure_pse_f1.append(rnastructure_f1[i])
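# The RNAStructure pseudoknot-only scores are collected above but never reported;
# a hedged completion (mirroring the e2e average printed earlier) would be:
print('RNAStructure pseudoknot f1: ', np.average(rnastructure_pse_f1))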
|
11571698
|
from lib.utils.import_helper import import_module_with_default
ALL_PLUGIN_EXPORTERS = import_module_with_default(
"exporter_plugin", "ALL_PLUGIN_EXPORTERS", default=[]
)
# No default exporter is provided
ALL_EXPORTERS = ALL_PLUGIN_EXPORTERS
def get_exporter(name: str):
for exporter in ALL_EXPORTERS:
if exporter.exporter_name == name:
return exporter
raise ValueError(f"Unknown exporter name {name}")
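# Hedged illustration (not part of the plugin contract defined elsewhere): any
# object exposing an exporter_name attribute and present in ALL_EXPORTERS can be
# resolved by get_exporter. _ExampleExporter below is made up for this sketch.
class _ExampleExporter:
    exporter_name = "example"
if __name__ == "__main__":
    ALL_EXPORTERS = ALL_EXPORTERS + [_ExampleExporter]
    assert get_exporter("example") is _ExampleExporter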
|
11571765
|
from precise.skaters.portfolioutil.portfunctions import portfolio_variance
from precise.skaters.portfoliostatic.diagportfactory import diagonal_portfolio_factory
from typing import List
from itertools import zip_longest
from precise.skaters.locationutil.vectorfunctions import normalize
def diagonal_portfolio_variance(cov=None, pre=None):
"""
Variance of the unit min-var portfolio
(Used in some hierarchical methods to allocate capital)
"""
w = diagonal_portfolio_factory(pre=pre, cov=cov)
    return portfolio_variance(cov=cov, w=w)
def diagonal_allocation_factory(covs: List, pres: List = None) -> List[float]:
""" Allocate capital between portfolios using either cov or pre matrices
:param covs: List of covariance matrices
:param pres: List of precision matrices
:return: Capital allocation vector
"""
if pres is None:
pres = []
# Remark: This was used in <NAME>'s original HRP portfolio paper https://papers.ssrn.com/sol3/papers.cfm?abstract_id=2708678
return normalize([ 1/diagonal_portfolio_variance(cov=cov, pre=pre) for cov, pre in zip_longest(covs, pres, fillvalue=None) ])
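# Minimal usage sketch (illustration only, assuming the precise package is
# installed): allocate capital between two toy 2-asset portfolios given only
# their diagonal covariance matrices.
if __name__ == '__main__':
    import numpy as np
    toy_covs = [np.diag([0.04, 0.09]), np.diag([0.01, 0.16])]
    print(diagonal_allocation_factory(covs=toy_covs))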
|
11571778
|
import unittest
import requests
from src.config import PYTHON_MODULE_PORT, MESSAGE_LOCAL_WRONG_ID, TEST_RUN_EDITS
from src.helper import env
class APIProductPostTest(unittest.TestCase):
def setUp(self):
self.local_id_wrong = 0
self.local_id = 1
self.url = f'http://localhost:{PYTHON_MODULE_PORT}/admin/{self.local_id}/product'
self.url_wrong_id = f'http://localhost:{PYTHON_MODULE_PORT}/admin/{self.local_id_wrong}/product/1'
self.status_code = 200
self.product_name = 'Bordar'
self.currency = 'EUR'
self.price_type = 'UNIT'
self.price = 10.5
self.new_name = 'Llenites'
self.product_id = 1
def test_status_code(self):
response = requests.put(f'{self.url}/{self.product_id}', json=dict())
self.assertEqual(response.status_code, self.status_code)
def test_local_id_wrong(self):
request_body = dict(
name=self.product_name,
currency=self.currency,
price=self.price,
price_type=self.price_type,
local_id=self.local_id_wrong
)
response = requests.put(self.url_wrong_id, json=request_body).json()
self.assertEqual(response.get('error'), True)
self.assertEqual(response.get('message'), MESSAGE_LOCAL_WRONG_ID)
def test_edit(self):
if env.run_modifications() or TEST_RUN_EDITS:
request_body = dict(name=self.new_name)
response = requests.put(f'{self.url}/{self.product_id}', json=request_body).json()
self.assertEqual(response.get('error'), False)
self.assertEqual(response.get('response').get('edited'), True)
else:
self.assertTrue(True)
|
11571786
|
import stgfunc as stg
import lvsfunc as lvf
import EoEfunc as eoe
from vsdpir import DPIR
import vapoursynth as vs
from stgfunc import depth
from vsutil import insert_clip
from debandshit import dumb3kdb
from lvsfunc.util import get_prop
from vardautomation import FileInfo
from vardautomation import PresetWEB
from vardefunc.noise import Graigasm
from .constants import graigasm_args
from vardefunc.misc import merge_chroma
from vardefunc.util import finalise_output
from stgfunc.utils import replace_squaremask
from typing import Tuple, List, Dict, Optional
from vsutil import get_y, join, plane, iterate
core = vs.core
core.max_cache_size = 16 * 2 ** 10
class SeleProFiltering:
FUNI: FileInfo
BILI: FileInfo
lowpass: List[int]
Oycore: stg.oyster.Core
dfttest_args: Dict[str, int]
def __init__(
self, FUNI: FileInfo, BILI: FileInfo,
OP_ED: Optional[Tuple[Optional[Tuple[int, int]], Optional[Tuple[int, int]]]] = None,
OP_START_REPLACE: bool = False
):
self.FUNI = FUNI
self.BILI = BILI
self.OP_ED = OP_ED
self.OP_START_REPLACE = OP_START_REPLACE
@finalise_output()
def workraw_filterchain(self):
src = depth(self.FUNI.clip_cut, 16)
ref = src.rgvs.RemoveGrain(16)
denoise = eoe.dn.BM3D(src, 1.95, 1, 'fast', fast=True, ref=ref, skip_basic=True, chroma=False)
deband = dumb3kdb(denoise, 8, 24)
        return deband.grain.Add(0.25)
@finalise_output()
def filterchain(self):
self.mix_sources()
if self.OP_ED:
self.mix_OP_ED(self.FUNI)
self.mix_OP_ED(self.BILI)
y = depth(get_y(self.FUNI.clip_cut), 16)
self.__setup_oyster(y, plane(self.FUNI.clip_cut, 1))
y1, y2 = self.__freqmerging_luma(y)
u, v = self.__freqmerging_chroma()
diff_mask = self.__masks_diff(y1, y2)
detail_mask = self.__masks_detail(y)
linemask = self.__masks_linemask(y2)
exp_linemask = self.__masks_exp_linemask(linemask)
detail_mask2 = self.__masks_detail2(detail_mask, exp_linemask, diff_mask)
details = y1.std.MaskedMerge(y, detail_mask2)
y_masked = details.std.MaskedMerge(y2, linemask)
merge = join([y_masked, u, v])
denoise = self.__denoise(merge)
if self.OP_ED:
denoise = self.__scenefilter_OP_ED(denoise)
custom = self.custom_scenefiltering(denoise, merge)
grain = self.__graining(custom, detail_mask2)
return grain.std.MaskedMerge(custom, linemask)
def mix_sources(self) -> None:
pass
def mix_OP_ED(self, file: FileInfo) -> None:
if self.OP_ED[0]:
OP_AV1 = stg.src(r".\Extra\NCs\Source\SELECTION PROJECT OPテーマ 「Glorious Days」_AV1.mp4", ref=file.clip_cut)[:2158]
OP_START, OP_ENDIN = self.OP_ED[0]
texture = OP_AV1.grain.Add(20, 3, 0.07, 0.12, 69420, True).bilateral.Gaussian(1)
merge = file.clip_cut[OP_START:OP_ENDIN + 1]
if self.OP_START_REPLACE:
FUNI03 = FileInfo(r".\Source\[SubsPlease] Selection Project - 03 (1080p) [4C3303CD].mkv", (240, 0), preset=PresetWEB)
OP_EP03 = FUNI03.clip_cut[480:2637 + 1][0:195 + 1]
merge = insert_clip(merge, OP_EP03, 0)
deband = OP_AV1.bilateral.Gaussian(1.5)
merge = replace_squaremask(merge, deband, (727, 50, 599, 516), (None, 195))
merge = replace_squaremask(merge, texture, (993, 50, 464, 516), (2042, None))
merge = replace_squaremask(merge, OP_AV1, (624, 50, 833, 516), (2042, None))
black = merge.std.BlankClip(length=1)
white = black.std.Invert()
merge = black + merge[1:195] + white + merge[196:2041] + white + merge[2042:]
file.clip_cut = insert_clip(file.clip_cut, merge, OP_START)
file.clip_cut = lvf.rfs(file.clip_cut, self.FUNI.clip_cut, (OP_START, OP_START + 56))
if self.OP_ED[1]:
ED_START, ED_ENDIN = self.OP_ED[1]
ED_VP9 = stg.src(r".\Extra\NCs\Source\SELECTION PROJECT EDテーマ 「Only One Yell」_VP9.webm", ref=file.clip_cut)
ED_CUT = file.clip_cut[ED_START:ED_ENDIN + 1]
            while ED_VP9.num_frames < ED_CUT.num_frames:
ED_VP9 += ED_VP9[-1]
merge = replace_squaremask(ED_CUT, ED_VP9, (993, 50, 464, 516), (1791, None))
file.clip_cut = insert_clip(file.clip_cut, merge, ED_START)
def custom_scenefiltering(self, denoise: vs.VideoNode, merge: vs.VideoNode):
return denoise
def __setup_oyster(self, y: vs.VideoNode, chroma: vs.VideoNode) -> None:
self.Oycore = stg.oyster.Core()
self.lowpass = [0.0, 0.0, 0.12, 1024.0, 1.0, 1024.0]
self.dfttest_args = dict(smode=0, sosize=0, tbsize=1, tosize=0, tmode=0)
self.block_mask = depth(self.Oycore.GenBlockMask(y), 32)
self.block_mask_uv = depth(self.Oycore.GenBlockMask(chroma), 32)
def __freqmerging_luma(self, funi: vs.VideoNode = None, bili: vs.VideoNode = None) -> Tuple[vs.VideoNode, vs.VideoNode]:
funi_y, bili_y = depth(get_y(funi or self.FUNI.clip_cut), get_y(bili or self.BILI.clip_cut), 32)
funi_y_ref = stg.oyster.Basic(funi_y, None, 6, 1, 2400, True)
bili_y_ref = stg.oyster.Basic(bili_y, None, 6, 1, 2800, True)
args = dict(sbsize=9, slocation=self.lowpass, **self.dfttest_args)
funif = core.dfttest.DFTTest(funi_y, **args)
bilif = core.dfttest.DFTTest(bili_y, **args)
funi_y_reff = core.dfttest.DFTTest(funi_y_ref, **args)
bili_y_reff = core.dfttest.DFTTest(bili_y_ref, **args)
funi_mer_y = core.std.MergeDiff(bilif, core.std.MakeDiff(funi_y, funif))
bili_mer_y = core.std.MergeDiff(funif, core.std.MakeDiff(bili_y, bilif))
funi_y_ref_mer = core.std.MergeDiff(bili_y_reff, core.std.MakeDiff(funif, funi_y_reff))
bili_y_ref_mer = core.std.MergeDiff(funi_y_reff, core.std.MakeDiff(bilif, bili_y_reff))
freqmerged_funi = core.std.MaskedMerge(
funi_mer_y, funi_y_ref_mer, self.block_mask
).std.Merge(funi_mer_y, 4 / 7)
freqmerged_bili = core.std.MaskedMerge(
bili_mer_y, bili_y_ref_mer, self.block_mask
).std.Merge(funi_y_ref_mer, 2 / 5) # Intentional Funi here
return depth(freqmerged_funi, freqmerged_bili, 16)
def ___freqmerge_chroma(self, clip_y: vs.VideoNode, filt_y: vs.VideoNode) -> vs.VideoNode:
ref = stg.oyster.Basic(clip_y, None, 6, 1, True)
ref = self.Oycore.FreqMerge(filt_y, ref, 9, self.lowpass)
mer_y = self.Oycore.FreqMerge(filt_y, clip_y, 9, self.lowpass)
freqmerged = core.std.MaskedMerge(
mer_y, ref, self.block_mask_uv
).std.Merge(ref, 3 / 5)
return depth(freqmerged, 16)
def __freqmerging_chroma(self, funi: vs.VideoNode = None, bili: vs.VideoNode = None) -> Tuple[vs.VideoNode, vs.VideoNode]:
funi, bili = funi or self.FUNI.clip_cut, bili or self.BILI.clip_cut
ub, uf = depth(plane(bili, 1), plane(funi, 1), 32)
vb, vf = depth(plane(bili, 2), plane(funi, 2), 32)
return self.___freqmerge_chroma(ub, uf), self.___freqmerge_chroma(vb, vf)
def __masks_diff(self, y1: vs.VideoNode, y2: vs.VideoNode) -> vs.VideoNode:
_diff_mask = core.std.MakeDiff(y1, y2).std.PlaneStats()
black = _diff_mask.std.BlankClip()[0].get_frame(0)
thr = 100 << 8
def ___diff_mask_process(f, n) -> vs.VideoNode:
low_range = get_prop(f, 'PlaneStatsMin', int) <= thr
            hig_range = get_prop(f, 'PlaneStatsMax', int) >= (1 << 16) - 1 - thr
return f if low_range or hig_range else black
diff_mask = _diff_mask.std.ModifyFrame(
_diff_mask, ___diff_mask_process
)
return diff_mask.std.BinarizeMask(135 << 8).std.Inflate()
def __masks_detail(self, y: vs.VideoNode) -> vs.VideoNode:
return stg.mask.linemask(y)
def __masks_linemask(self, y: vs.VideoNode) -> vs.VideoNode:
return stg.mask.tcanny(y, 0.0275, True).std.Inflate()
def __masks_exp_linemask(self, linemask: vs.VideoNode) -> vs.VideoNode:
exp_linemask = linemask.std.BinarizeMask(24 << 8)
exp_linemask = iterate(
iterate(
exp_linemask,
lambda x: x.std.Maximum().std.Minimum(), 10
), lambda x: x.std.Inflate().std.Deflate(), 5
)
exp_linemask = exp_linemask.bilateral.Gaussian(1.5, 1)
return iterate(exp_linemask, core.std.Deflate, 15)
def __masks_linemask3(self, linemask: vs.VideoNode, exp_linemask: vs.VideoNode, diff_mask: vs.VideoNode) -> vs.VideoNode:
return core.std.Expr([linemask, exp_linemask, diff_mask], 'y x 2 * - 4 * z +').std.Limiter()
def __masks_detail2(self, details_mask: vs.VideoNode, exp_linemask: vs.VideoNode, diff_mask: vs.VideoNode) -> vs.VideoNode:
return core.std.Expr([details_mask, exp_linemask, diff_mask], 'x y - z +').std.Limiter()
def __masks_deband(
self, linemask: vs.VideoNode, exp_linemask: vs.VideoNode, linemask3: vs.VideoNode, diff_mask: vs.VideoNode
) -> vs.VideoNode:
return core.std.Expr([
core.std.Expr([
linemask, exp_linemask, linemask3, diff_mask
], 'y x 2 * - 4 * z log - a +'), linemask3
], 'x y -').std.Limiter()
def __denoise(self, clip: vs.VideoNode) -> vs.VideoNode:
denoise_bm3d = eoe.dn.BM3D(clip, 1.75, 1, ['np', 'high'], chroma=False)
return stg.denoise.KNLMeansCL(
denoise_bm3d, sigma=[None, 0.37],
contraSharpening=True, ref_clip=clip
)
def __scenefilter_OP_ED(self, clip: vs.VideoNode) -> vs.VideoNode:
if self.OP_ED[0]:
OP_START, OP_ENDIN = self.OP_ED[0]
deband = dumb3kdb(
DPIR(
depth(clip.resize.Spline64(format=vs.RGB24, matrix_in=1), 32), 4.35
).resize.Spline64(format=clip.format.id, matrix=1), 16, 12
)
return lvf.rfs(clip, deband, (OP_START + 1, OP_START + 56))
return clip
def __graining(self, clip: vs.VideoNode, detail_mask: vs.VideoNode) -> vs.VideoNode:
grain_mask = core.adg.Mask(clip.std.PlaneStats(), 8)
grain_mask = core.std.Expr([grain_mask, detail_mask], 'x y -')
y = get_y(clip)
pref = iterate(y, core.std.Maximum, 2).std.Convolution([1] * 9)
grainY = Graigasm(**graigasm_args).graining(y, prefilter=pref)
return merge_chroma(grainY, clip)
|
11571813
|
import datetime
import logging
from queries import pool
import queries
from tornado import gen, ioloop, web
class ExampleHandler(web.RequestHandler):
SQL = 'SELECT * FROM pg_stat_activity'
@gen.coroutine
def get(self):
try:
result = yield self.application.session.query(self.SQL)
except queries.OperationalError as error:
logging.error('Error connecting to the database: %s', error)
raise web.HTTPError(503)
rows = []
for row in result.items():
row = dict([(k, v.isoformat()
if isinstance(v, datetime.datetime) else v)
for k, v in row.items()])
rows.append(row)
result.free()
self.finish({'pg_stat_activity': rows})
class ReportHandler(web.RequestHandler):
@gen.coroutine
def get(self):
self.finish(pool.PoolManager.report())
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
application = web.Application([
(r'/', ExampleHandler),
(r'/report', ReportHandler)
], debug=True)
application.session = queries.TornadoSession()
application.listen(8000)
ioloop.IOLoop.instance().start()
|
11571828
|
import FWCore.ParameterSet.Config as cms
from GeneratorInterface.Pythia6Interface.pythiaDefault_cff import *
generator = cms.EDFilter("Pythia6GeneratorFilter",
pythiaVerbosity = cms.untracked.bool(False),
comEnergy = cms.double(10000.0),
PythiaParameters = cms.PSet(
# Default (mostly empty - to keep PYTHIA default) card file
# Name of the set is "pythiaDefault"
pythiaDefaultBlock,
# User cards - name is "myParameters"
myParameters = cms.vstring('PMAS(32,1)= 5000. !mass of Zprime',
'MSEL=0 !(D=1) to select between full user control (0, then use MSUB) and some preprogrammed alternative',
'MSTP(44) = 3 !only select the Z process',
'MSUB(141) = 1 !ff gamma z0 Z0',
'MSTJ(11)=3 ! Choice of the fragmentation function',
'MSTJ(22)=2 !Decay those unstable particles',
'MSTP(2)=1 !which order running alphaS',
'MSTP(33)=0 !(D=0) inclusion of K factors in (=0: none, i.e. K=1)',
'MSTP(51)=7 !structure function chosen',
'MSTP(81)=1 !multiple parton interactions 1 is Pythia default',
'MSTP(82)=4 !Defines the multi-parton model',
'MSTU(21)=1 !Check on possible errors during program execution',
'PARJ(71)=10. !for which ctau 10 mm',
'PARP(82)=1.9 !pt cutoff for multiparton interactions',
'PARP(89)=1000. !sqrts for which PARP82 is set',
'PARP(84)=0.4 !Multiple interactions: matter distribution Registered by <EMAIL>',
'PARP(90)=0.16 !Multiple interactions: rescaling power Registered by <EMAIL>',
'PMAS(5,1)=4.2 !mass of b quark',
'PMAS(6,1)=175. !mass of top quark',
'PMAS(23,1)=91.187 !mass of Z',
'PMAS(24,1)=80.22 !mass of W',
'MDME(289,1)= 1 !d dbar',
'MDME(290,1)= 1 !u ubar',
'MDME(291,1)= 1 !s sbar',
'MDME(292,1)= 1 !c cbar',
'MDME(293,1)= 0 !b bar',
'MDME(294,1)= 0 !t tbar',
'MDME(295,1)= 0 !4th gen Q Qbar',
'MDME(296,1)= 0 !4th gen Q Qbar',
'MDME(297,1)= 0 !e e',
'MDME(298,1)= 0 !neutrino e e',
'MDME(299,1)= 0 ! mu mu',
'MDME(300,1)= 0 !neutrino mu mu',
'MDME(301,1)= 0 !tau tau',
'MDME(302,1)= 0 !neutrino tau tau',
'MDME(303,1)= 0 !4th generation lepton',
'MDME(304,1)= 0 !4th generation neutrino',
'MDME(305,1)= 0 !W W',
'MDME(306,1)= 0 !H charged higgs',
'MDME(307,1)= 0 !Z',
'MDME(308,1)= 0 !Z',
'MDME(309,1)= 0 !sm higgs',
'MDME(310,1)= 0 !weird neutral higgs HA'),
# This is a vector of ParameterSet names to be read, in this order
        # The first one comes from the pythiaDefaultBlock included above
        # The last one is simply my additional parameters
parameterSets = cms.vstring('pythiaDefault',
'myParameters')
)
)
|
11571890
|
from ask_sdk_core.skill_builder import SkillBuilder
from ask_sdk.standard import StandardSkillBuilder
from ask_sdk_core.utils import is_request_type, is_intent_name
from ask_sdk_model.dialog.delegate_directive import DelegateDirective
import random
import re
import os
#################
# Set Up Logger #
#################
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
###########
# Globals #
###########
DB = os.environ.get('DDB_NAME', None)
if DB:
sb = StandardSkillBuilder(
table_name=DB, auto_create_table=True)
else:
logger.info("DB Name not set")
sb = SkillBuilder()
###########
# Outputs #
###########
SKILL_NAME = "<NAME>"
#Randomize speechcons
SPEECHCONS = {
'excited': [
"high five",
"hurray",
"kaboom",
"kerching",
"magnificent",
"okey dokey",
"splendid",
"way to go",
"wahoo",
"well done",
"yahoo",
"yippee"
],
'apology': [
"aw man",
"aww applesauce",
"d'oh",
"fiddlesticks",
"good grief",
"gracious me",
"my bad",
"my goodness",
"oof",
"oops",
"ruh roh",
"shucks",
"uh oh",
"whoops"
]
}
logger.info("Outputs are set")
#############
# Utilities #
#############
def get_speechcon(emotion):
    speechcon = random.choice(SPEECHCONS[emotion])
    return '<say-as interpret-as="interjection">%s</say-as>.' % speechcon
def prepare_card(text):
return re.sub('<[^<]+?>', '', text)
def set_session_attr(handler_input, key, value):
handler_input.attributes_manager.session_attributes[key]=value
return handler_input
def append_session_attr(handler_input, key, value):
    if value is not None:
if key in handler_input.attributes_manager.session_attributes:
handler_input.attributes_manager.session_attributes[key].append(value)
else:
handler_input.attributes_manager.session_attributes[key] = []
handler_input.attributes_manager.session_attributes[key].append(value)
return handler_input
def get_session_attr(handler_input, key):
try:
return handler_input.attributes_manager.session_attributes.get(key, None)
except:
return None
def get_slot(handler_input, slot):
try:
return handler_input.request_envelope.request.intent.slots.get(slot, None)
except:
return None
def del_session_attr(handler_input, key):
try:
del handler_input.attributes_manager.session_attributes[key]
return
except:
return
def save_data(handler_input):
try:
handler_input.attributes_manager.persistent_attributes = handler_input.attributes_manager.session_attributes
handler_input.attributes_manager.save_persistent_attributes()
return True
except:
return False
def get_data_from_database(handler_input):
try:
        handler_input.attributes_manager.session_attributes = \
            handler_input.attributes_manager.persistent_attributes
except:
return None
SPEECH = {
'End': "Goodbye!",
'Exception': "Sorry, there was some problem. Please try again!",
'LaunchRequest': "Welcome to %s! To begin making ice cream just say, Lets start." % SKILL_NAME,
'CancelIntent': "Okay. To start over you can say, Lets start. If you would like to exit the %s, just say, stop." % SKILL_NAME,
'StopIntent': "Thank you for making ice cream together! Come back soon!",
'HelpIntent': "If you would like to start making Ice Cream, just say, lets start. To add a topping, just say, I want sprinkles",
'addTopping': "%s If you would like to add more toppings just tell me what you would like. To start over, just say, lets start." % get_speechcon('excited'),
'addToppingNoIceCream': "That sounded like a topping, but I did not get your ice cream flavor yet. Just tell me flavor of ice cream you would like.",
'Start': "Now, every good ice cream starts with a base, so tell me what flavor of ice cream do you want?",
'StartReplay': "What flavor of ice cream do you want?",
'IceCreamBuilder': "%s Now, what are some toppings you would like to add?" % get_speechcon('excited'),
'IceCreamBuilderWithTopping': "%s If you would like to add toppings just tell me what you would like. When you are done adding toppings, just say, all done." % get_speechcon('excited'),
'Finish': "Alright! What a wonderful %s ice cream with %s! To make a new ice cream just say, lets start!",
'FinishNoState': "%s It does not appear that you have created an ice cream yet. To start, just say, Lets start. To exit, just say, close" % get_speechcon('apology'),
'FinishNoTopping': "Alright! What a wonderful %s ice cream. Next time, add some toppings by saying, Add fudge. To make a new ice cream just say, lets start!",
'FinishNoFlavor': "%s It does not appear that you have created an ice cream yet. To start, just say, Lets start. To exit, just say, close" % get_speechcon('apology'),
'Fallback': "%s I cannot help you with that. To start making ice cream, just say, lets start. For help, you can say, help" % get_speechcon('apology')
}
REPROMPT = {
'addTopping': "To add a topping just tell me what you want to add. To start over, just say, lets start.",
'IceCreamBuilder': "To add a topping just tell me what you want to add. To start over, just say, lets start.",
'IceCreamBuilderWithTopping': "To add a topping just tell me what you want to add. To start over, just say, all done.",
'Fallback': "To start making ice cream, just say, lets start. For help, you can say, help"
}
logger.info("Utilities are set")
############
# Handlers #
############
## Custom Intents ##
@sb.request_handler(can_handle_func=is_intent_name("IceCreamBuilderIntent"))
def ice_cream_builder_intent_handler(handler_input):
"""Handler for Ice Cream Builder Intent."""
# type: (HandlerInput) -> Response
logger.info("Entering IceCreamBuilderIntent")
flavor = get_slot(handler_input, slot='flavor')
topping = get_slot(handler_input, slot='topping')
if flavor.value:
handler_input = set_session_attr(handler_input, key='FLAVOR', value=flavor.value)
logger.info("Set flavor to %s" % flavor.value)
if topping.value:
logger.info("Set topping to %s" % topping.value)
handler_input = append_session_attr(handler_input, key='TOPPING', value=topping.value)
speech_text = SPEECH['IceCreamBuilderWithTopping']
reprompt = REPROMPT['IceCreamBuilderWithTopping']
handler_input = set_session_attr(handler_input, key='STATE', value='TOPPING')
else:
speech_text = SPEECH['IceCreamBuilder']
reprompt = REPROMPT['IceCreamBuilder']
handler_input = set_session_attr(handler_input, key='STATE', value='FLAVOR')
return handler_input.response_builder.speak(speech_text).ask(reprompt).set_should_end_session(
False).response
else:
handler_input.response_builder.add_directive(DelegateDirective())
return handler_input.response_builder.response
@sb.request_handler(can_handle_func=is_intent_name("StartIntent"))
def start_intent_handler(handler_input):
"""Handler for Start Intent."""
# type: (HandlerInput) -> Response
logger.info("Entering StartIntent")
if get_session_attr(handler_input, key='STATE'):
speech_text = SPEECH['StartReplay']
del_session_attr(handler_input, 'TOPPING')
del_session_attr(handler_input, 'FLAVOR')
else:
speech_text = SPEECH['Start']
handler_input = set_session_attr(handler_input, key='STATE', value='START')
return handler_input.response_builder.speak(speech_text).set_should_end_session(
False).response
@sb.request_handler(can_handle_func=is_intent_name("FinishIntent"))
def finish_intent_handler(handler_input):
"""Handler for Finish Intent."""
# type: (HandlerInput) -> Response
logger.info("Entering FinishIntent")
if get_session_attr(handler_input, key='STATE'):
flavor = get_session_attr(handler_input, key='FLAVOR')
topping = get_session_attr(handler_input, key='TOPPING')
toppingList = ""
if topping:
if len(topping) > 1:
                topping[-1] = 'and ' + topping[-1]
for t in topping:
toppingList += "%s, " % t
#remove last comma and space
toppingList = toppingList[:-2]
else:
toppingList = topping[0]
if not flavor:
speech_text = SPEECH['FinishNoFlavor']
elif not toppingList:
speech_text = SPEECH['FinishNoTopping'] % flavor
handler_input = set_session_attr(handler_input, key='STATE', value='Finish')
else:
speech_text = SPEECH['Finish'] % (flavor, toppingList)
handler_input = set_session_attr(handler_input, key='STATE', value='Finish')
else:
speech_text = SPEECH['FinishNoState']
return handler_input.response_builder.speak(speech_text).set_should_end_session(
False).response
@sb.request_handler(can_handle_func=is_intent_name("addToppingIntent"))
def add_topping_intent_handler(handler_input):
"""Handler for Add Topping Intent."""
# type: (HandlerInput) -> Response
logger.info("Entering addToppingIntent")
topping = get_slot(handler_input, slot='topping')
if topping:
handler_input = append_session_attr(handler_input, key='TOPPING', value=topping.value)
speech_text = SPEECH['addTopping']
reprompt = REPROMPT['addTopping']
handler_input = set_session_attr(handler_input, key='STATE', value='Topping')
return handler_input.response_builder.speak(speech_text).ask(reprompt).set_should_end_session(
False).response
else:
handler_input.response_builder.add_directive(DelegateDirective())
return handler_input.response_builder.response
## Built-in Intents ##
@sb.request_handler(can_handle_func=is_request_type("LaunchRequest"))
def launch_request_handler(handler_input):
"""Handler for Skill Launch."""
# type: (HandlerInput) -> Response
logger.info("Entering LaunchRequest")
get_data_from_database(handler_input)
speech_text = SPEECH['LaunchRequest']
return handler_input.response_builder.speak(speech_text).set_should_end_session(
False).response
@sb.request_handler(can_handle_func=is_intent_name("AMAZON.HelpIntent"))
def help_intent_handler(handler_input):
"""Handler for Help Intent."""
# type: (HandlerInput) -> Response
logger.info("Entering HelpIntent")
speech_text = SPEECH['HelpIntent']
return handler_input.response_builder.speak(speech_text).ask(
speech_text).response
@sb.request_handler(can_handle_func=is_intent_name("AMAZON.StopIntent"))
def stop_intent_handler(handler_input):
"""Handler for Stop Intent."""
# type: (HandlerInput) -> Response
logger.info("Entering AMAZON.StopIntent")
speech_text = SPEECH['StopIntent']
return handler_input.response_builder.speak(speech_text).set_should_end_session(
True).response
@sb.request_handler(can_handle_func=is_intent_name("AMAZON.CancelIntent"))
def cancel_intent_handler(handler_input):
"""Handler for Cancel Intent."""
# type: (HandlerInput) -> Response
logger.info("Entering AMAZON.CancelIntent")
speech_text = SPEECH['CancelIntent']
return handler_input.response_builder.speak(speech_text).set_should_end_session(
False).response
@sb.request_handler(can_handle_func=is_intent_name("AMAZON.FallbackIntent"))
def fallback_handler(handler_input):
"""AMAZON.FallbackIntent is only available in en-US locale.
This handler will not be triggered except in that locale,
so it is safe to deploy on any locale.
"""
# type: (HandlerInput) -> Response
logger.info("Entering AMAZON.FallbackIntent")
speech = SPEECH['Fallback']
reprompt = REPROMPT['Fallback']
handler_input.response_builder.speak(speech).ask(reprompt)
return handler_input.response_builder.response
@sb.request_handler(can_handle_func=is_request_type("SessionEndedRequest"))
def session_ended_request_handler(handler_input):
"""Handler for Session End."""
# type: (HandlerInput) -> Response
logger.info("Entering AMAZON.SessionEndedRequest")
save_data(handler_input)
return handler_input.response_builder.response
logger.info("Handlers are set")
handler = sb.lambda_handler()
|
11571898
|
import glob
from PIL import Image, ImageOps
from utils import IMAGE_SIZE
if __name__ == "__main__":
background_rate = 0
border_rate = 0
content_rate = 0
folder = "./dataset/training/segmentation_mask/"
images = glob.glob(f'{folder}*.png')
number_of_images = len(images)
print(f"Starting training data analysis for {number_of_images} images")
for filename in images:
print(f"Analysing image {filename}")
size = IMAGE_SIZE, IMAGE_SIZE
image = Image.open(filename)
width, height = image.size
if height >= width:
delta_w = height - width
delta_h = 0
else:
delta_w = 0
delta_h = width - height
padding = (delta_w // 2, delta_h // 2, delta_w - (delta_w // 2), delta_h - (delta_h // 2))
image = ImageOps.expand(image, padding).resize(size, Image.NEAREST)
pixels = image.load()
width, height = image.size
number_of_pixels_per_image = width * height
background_counter = 0
border_counter = 0
content_counter = 0
for x in range(width):
for y in range(height):
rgba_info = pixels[x, y]
r_channel = rgba_info[0]
g_channel = rgba_info[1]
b_channel = rgba_info[2]
if r_channel == 255:
border_counter += 1
elif g_channel == 255:
content_counter += 1
elif b_channel == 255:
background_counter += 1
elif r_channel == 0 and g_channel == 0 and b_channel == 0:
background_counter += 1
else:
print("ERROR: INVALID PIXEL")
background_rate += background_counter / number_of_pixels_per_image
border_rate += border_counter / number_of_pixels_per_image
content_rate += content_counter / number_of_pixels_per_image
image.close()
print(f" - Percentage of background in files = {background_rate / number_of_images}")
print(f" - Percentage of border in files = {border_rate / number_of_images}")
print(f" - Percentage of content in files = {content_rate / number_of_images}")
|
11571908
|
import datetime
import json
import logging
import multiprocessing
from pathlib import Path
import time
import flask
import pandas as pd
import werkzeug
from topicsexplorer import database
from topicsexplorer import utils
from topicsexplorer import workflow
# Initialize logging with logfile in tempdir:
utils.init_logging(logging.INFO)
# Initialize Flask application:
web = utils.init_app("topicsexplorer")
@web.route("/")
def index():
"""Home page."""
logging.debug("Rendering home page template...")
utils.init_db(web)
return flask.render_template("index.html", help=True)
@web.route("/help")
def help():
"""Help page."""
logging.debug("Rendering help page template...")
return flask.render_template("help.html", go_back=True)
@web.route("/error")
def error():
"""Error page."""
with utils.LOGFILE.open("r", encoding="utf-8") as logfile:
log = logfile.read().split("\n")[-20:]
return flask.render_template(
"error.html", reset=True, log="\n".join(log), tempdir=utils.TEMPDIR
)
@web.route("/modeling", methods=["POST"])
def modeling():
"""Modeling page."""
logging.debug("Calling modeling page endpoint...")
# Must be global to use anywhere:
global start
global process
start = time.time()
logging.info("Initializing topic modeling process...")
logging.info("Started topic modeling process.")
workflow.wrapper()
logging.debug("Rendering modeling page template...")
return flask.render_template("modeling.html", abort=True)
@web.route("/overview-topics")
def overview_topics():
"""Topics overview page."""
logging.debug("Calling topics overview page endpoint...")
logging.info("Get document-topic distributions...")
response = get_document_topic_distributions()
document_topic = pd.read_json(response, orient="index")
logging.info("Get token frequencies...")
response = get_token_frequencies()
token_freqs = json.loads(response)
logging.info("Add frequencies to weights...")
document_topic = document_topic.multiply(token_freqs, axis=0)
logging.info("Sum the weights...")
dominance = document_topic.sum(axis=0)
logging.info("Scale weights...")
proportions = utils.scale(dominance)
proportions = pd.Series(proportions, index=dominance.index)
proportions = proportions.sort_values(ascending=False)
# Convert pandas.Series to a 2-D array:
proportions = list(utils.series2array(proportions))
corpus_size = get_corpus_size()
number_topics = get_number_of_topics()
logging.debug("Rendering topics overview template...")
return flask.render_template(
"overview-topics.html",
current="topics",
help=True,
reset=True,
topics=True,
documents=True,
document_topic_distributions=True,
parameters=True,
export_data=True,
proportions=proportions,
corpus_size=corpus_size,
number_topics=number_topics,
)
@web.route("/overview-documents")
def overview_documents():
"""Documents overview page."""
logging.debug("Calling documents overview page endpoint...")
sizes = pd.DataFrame(get_textfile_sizes(), columns=["title", "size"])
proportions = utils.scale(sizes["size"])
proportions = pd.Series(proportions, index=sizes["title"])
proportions = proportions.sort_values(ascending=False)
# Convert pandas.Series to a 2-D array:
proportions = list(utils.series2array(proportions))
corpus_size = get_corpus_size()
return flask.render_template(
"overview-documents.html",
current="documents",
help=True,
reset=True,
topics=True,
documents=True,
document_topic_distributions=True,
parameters=True,
export_data=True,
proportions=proportions,
corpus_size=corpus_size,
)
@web.route("/document-topic-distributions")
def document_topic_distributions():
"""Document-topic distributions page."""
logging.debug("Calling document-topic distributions endpoint...")
logging.debug("Rendering document-topic distributions page template...")
return flask.render_template(
"document-topic-distributions.html",
current="document-topic-distributions",
help=True,
reset=True,
topics=True,
documents=True,
document_topic_distributions=True,
parameters=True,
export_data=True,
)
@web.route("/topics/<topic>")
def topics(topic):
"""Topic page."""
logging.debug("Calling topic page endpoint...")
logging.info("Get topics...")
topics = json.loads(get_topics())
logging.info("Get document-topic distributions...")
document_topic = pd.read_json(get_document_topic_distributions(), orient="index")
logging.info("Get topic similarity matrix...")
topic_similarites = pd.read_json(get_topic_similarities())
logging.info("Get related documents...")
related_docs = document_topic[topic].sort_values(ascending=False)[:10]
related_docs_proportions = utils.scale(related_docs, minimum=70)
related_docs_proportions = pd.Series(
related_docs_proportions, index=related_docs.index
)
related_docs_proportions = related_docs_proportions.sort_values(ascending=False)
# Convert pandas.Series to a 2-D array:
related_docs_proportions = list(utils.series2array(related_docs_proportions))
logging.info("Get related words...")
related_words = topics[topic][:15]
logging.info("Get similar topics...")
similar_topics = topic_similarites[topic].sort_values(ascending=False)[1:4]
logging.debug("Rendering topic page template...")
return flask.render_template(
"detail-topic.html",
current="topics",
help=True,
reset=True,
topics=True,
documents=True,
document_topic_distributions=True,
parameters=True,
export_data=True,
topic=topic,
similar_topics=similar_topics.index,
related_words=related_words,
related_documents=related_docs_proportions,
)
@web.route("/documents/<title>")
def documents(title):
"""Document page."""
logging.debug("Calling document page endpoint...")
logging.info("Get textfiles...")
text = get_textfile(title)
logging.info("Get document-topics distributions...")
document_topic = pd.read_json(get_document_topic_distributions(), orient="index")
logging.info("Get document similarity matrix...")
document_similarites = pd.read_json(get_document_similarities())
logging.info("Get related topics...")
related_topics = document_topic.loc[title].sort_values(ascending=False) * 100
distribution = list(related_topics.to_dict().items())
logging.info("Get similar documents...")
similar_docs = document_similarites[title].sort_values(ascending=False)[1:4]
logging.debug("Use only the first 10000 characters (or less) from document...")
text = (
text
if len(text) < 10000
else "{}... This was an excerpt of the original text.".format(text[:10000])
)
logging.debug("Split paragraphs...")
text = text.split("\n\n")
n = get_number_of_topics()
top_topics = [
"{} most relevant".format(n) if int(n) >= 10 else n,
"Top {}".format(n),
]
logging.debug("Rendering document page template...")
return flask.render_template(
"detail-document.html",
current="documents",
help=True,
reset=True,
topics=True,
documents=True,
document_topic_distributions=True,
parameters=True,
export_data=True,
title=title,
text=text,
distribution=distribution,
similar_documents=similar_docs.index,
related_topics=related_topics.index,
top_topics=top_topics,
)
@web.route("/parameters")
def parameters():
    """Parameter page."""
logging.debug("Calling parameters page endpoint...")
logging.info("Get parameters...")
data = json.loads(get_parameters())[0]
info = json.loads(data)
logging.debug("Rendering parameters page template...")
return flask.render_template(
"overview-parameters.html",
current="parameters",
parameters=True,
help=True,
reset=True,
topics=True,
documents=True,
document_topic_distributions=True,
export_data=True,
**info
)
# API endpoints:
@web.route("/api/status")
def get_status():
"""Current modeling status."""
seconds = int(time.time() - start)
elapsed_time = datetime.timedelta(seconds=seconds)
with utils.LOGFILE.open("r", encoding="utf-8") as logfile:
messages = logfile.readlines()
message = messages[-1].strip()
message = utils.format_logging(message)
return "Elapsed time: {}<br>{}".format(elapsed_time, message)
@web.route("/api/document-topic-distributions")
def get_document_topic_distributions():
"""Document-topics distributions."""
return database.select("document_topic_distributions")
@web.route("/api/topics")
def get_topics():
"""Topics."""
return database.select("topics")
@web.route("/api/document-similarities")
def get_document_similarities():
"""Document similarity matrix."""
return database.select("document_similarities")
@web.route("/api/topic-similarities")
def get_topic_similarities():
"""Topic similarity matrix."""
return database.select("topic_similarities")
@web.route("/api/textfiles/<title>")
def get_textfile(title):
"""Textfiles."""
return database.select("textfile", title=title)
@web.route("/api/stopwords")
def get_stopwords():
"""Stopwords."""
return database.select("stopwords")
@web.route("/api/token-frequencies")
def get_token_frequencies():
"""Token frequencies per document."""
return database.select("token_freqs")
@web.route("/api/parameters")
def get_parameters():
"""Model parameters."""
return json.dumps(database.select("parameters"))
@web.route("/api/textfile-sizes")
def get_textfile_sizes():
"""Textfile sizes."""
return database.select("textfile_sizes")
@web.route("/api/corpus-size")
def get_corpus_size():
"""Corpus size."""
return str(len(get_textfile_sizes()))
@web.route("/api/number-topics")
def get_number_of_topics():
"""Number of topics."""
return str(len(json.loads(get_topics())))
@web.route("/export/<filename>")
def export(filename):
    """Data archive."""
    if filename == "topicsexplorer-data.zip":
utils.export_data()
path = Path(utils.TEMPDIR, filename)
return flask.send_file(filename_or_fp=str(path))
@web.errorhandler(werkzeug.exceptions.HTTPException)
def handle_http_exception(e):
    """Handle errors."""
return error()
for code in werkzeug.exceptions.default_exceptions:
web.errorhandler(code)(handle_http_exception)
@web.after_request
def add_header(r):
"""Clear cache after request."""
r.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
r.headers["Pragma"] = "no-cache"
r.headers["Expires"] = "0"
r.headers["Cache-Control"] = "public, max-age=0"
return r
@web.teardown_appcontext
def close_connection(exception):
"""Close connection to SQLite database."""
db = getattr(flask.g, "_database", None)
if db is not None:
db.close()
|
11571931
|
from typing import List
from pydantic import Field
from tarkov import models
from tarkov.fleamarket.models import OfferId
from tarkov.inventory.types import ItemId, TemplateId
from tarkov.inventory_dispatcher.models import ActionModel, ActionType
class RequiredItem(models.Base):
id: ItemId
count: int
class RagfairBuyOffer(models.Base):
offer_id: OfferId = Field(alias="id")
count: int
requirements: List[RequiredItem] = Field(alias="items")
class RagfairOfferRequirement(models.Base):
template_id: TemplateId = Field(alias="_tpl")
count: int
level: int
side: int
onlyFunctional: bool
class Buy(ActionModel):
Action: ActionType
offers: List[RagfairBuyOffer]
class Add(ActionModel):
Action: ActionType
sellInOnePiece: bool
items: List[ItemId]
requirements: List[RagfairOfferRequirement]
|
11571970
|
import tensorflow as tf
import argparse
import gym
import pybullet_envs
import roboschool
from mvc.envs.wrappers import BatchEnvWrapper, MuJoCoWrapper
from mvc.controllers.ppo import PPOController
from mvc.controllers.eval import EvalController
from mvc.models.networks.ppo import PPONetwork, PPONetworkParams
from mvc.models.metrics import Metrics
from mvc.models.rollout import Rollout
from mvc.view import View
from mvc.interaction import interact
def make_envs(env_name, num_envs, reward_scale):
return [MuJoCoWrapper(gym.make(env_name), reward_scale)\
for _ in range(num_envs)]
def main(args):
# environments
env = BatchEnvWrapper(
make_envs(args.env, args.num_envs, args.reward_scale), args.render)
env.seed(args.seed)
eval_env = BatchEnvWrapper(
make_envs(args.env, args.num_envs, args.reward_scale))
eval_env.seed(args.seed)
num_actions = env.action_space.shape[0]
# network parameters
params = PPONetworkParams(fcs=args.layers, num_actions=num_actions,
state_shape=env.observation_space.shape,
num_envs=args.num_envs,
batch_size=args.batch_size, epsilon=args.epsilon,
learning_rate=args.lr, grad_clip=args.grad_clip,
value_factor=args.value_factor,
entropy_factor=args.entropy_factor)
# deep neural network
network = PPONetwork(params)
# rollout buffer
rollout = Rollout()
# metrics
saver = tf.train.Saver()
metrics = Metrics(args.name, args.log_adapter, saver)
# controller
controller = PPOController(network, rollout, metrics, args.num_envs,
args.time_horizon, args.epoch, args.batch_size,
args.gamma, args.lam, args.final_steps,
args.log_interval, args.save_interval,
args.eval_interval)
# view
view = View(controller)
# evaluation
eval_controller = EvalController(network, metrics, args.eval_episodes)
eval_view = View(eval_controller)
# save hyperparameters
metrics.log_parameters(vars(args))
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# save model graph for debugging
metrics.set_model_graph(sess.graph)
if args.load is not None:
saver.restore(sess, args.load)
interact(env, view, eval_env, eval_view, batch=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--time-horizon', type=int,
default=2048, help='interval to update')
parser.add_argument('--num-envs', type=int, default=1,
help='the number of environments')
parser.add_argument('--epoch', type=int, default=10,
help='epoch of training')
parser.add_argument('--batch-size', type=int, default=64,
help='batch size of training')
parser.add_argument('--gamma', type=float, default=0.99,
help='discount factor')
parser.add_argument('--lam', type=float, default=0.95,
help='lambda of generalized advantage estimation')
parser.add_argument('--log-interval', type=int, help='interval of logging')
parser.add_argument('--final-steps', type=int, default=10 ** 6,
help='the number of training steps')
parser.add_argument('--layers', type=int, nargs='+', default=[64, 64],
help='layer units')
parser.add_argument('--epsilon', type=float, default=0.2,
help='clipping factor')
parser.add_argument('--lr', type=float, default=3e-4, help='learning rate')
parser.add_argument('--grad-clip', type=float, default=0.5,
help='gradient clipping')
parser.add_argument('--value-factor', type=float, default=1.0,
help='value loss weight')
parser.add_argument('--entropy-factor', type=float, default=0.0,
help='entropy loss weight')
parser.add_argument('--env', type=str, default='Pendulum-v0',
help='training environment')
parser.add_argument('--reward-scale', type=float, default=1.0,
help='reward scaling')
parser.add_argument('--name', type=str, default='experiment',
help='experiment name')
parser.add_argument('--log-adapter', type=str, default='tfboard',
help='log adapter (visdom, tfboard)')
parser.add_argument('--save-interval', type=int, default=2048 * 50,
help='interval of saving parameters')
parser.add_argument('--load', type=str, help='path to model')
parser.add_argument('--eval-interval', type=int, default=2048 * 10,
help='interval of evaluation')
parser.add_argument('--eval-episodes', type=int, default=10,
help='the number of evaluation episode')
parser.add_argument('--render', action='store_true',
help='show frames of environment')
parser.add_argument('--seed', type=int, default=0,
help='Random seed of environment')
args = parser.parse_args()
main(args)
|
11571971
|
import os
import deprecation
import numpy as np
import tensorflow as tf
from kungfu._utils import _log_event
from kungfu.python import propose_new_size
from kungfu.tensorflow.initializer import BroadcastGlobalVariablesOp
from kungfu.tensorflow.ops import (all_reduce, consensus, current_cluster_size,
resize_cluster_from_url,
step_based_schedule)
class KungFuElasticTrainHook(tf.train.SessionRunHook):
@deprecation.deprecated()
def __init__(self, schedule, max_step, model_dir, save_final_model=False):
self._schedule = schedule
self._max_step = max_step
self._model_dir = model_dir
self._save_final_model = save_final_model
self._need_sync = True
def _build_resize_op(self, config, step):
new_size_op = step_based_schedule(config, step)
resize_op = resize_cluster_from_url()
return resize_op, new_size_op
def begin(self):
self._sync_op = BroadcastGlobalVariablesOp()
self._step = 0
self._step_place = tf.placeholder(dtype=tf.int32, shape=())
self._sync_step_op = all_reduce(self._step_place, op='max')
self._resize_op, self._new_size_op = self._build_resize_op(
self._schedule, self._step_place)
def after_create_session(self, sess, coord):
pass
def before_run(self, run_context):
if self._step >= self._max_step: # shouldn't happen
print('request_stop before kungfu_step: %d' % (self._step))
# run_context.request_stop()
# FIXME: force quit
if self._need_sync:
is_first = self._step == 0
if is_first:
_log_event('BEFORE first _sync_step_op')
self._step = run_context.session.run(
self._sync_step_op, feed_dict={self._step_place: self._step})
if is_first:
_log_event('BEFORE first _sync_op')
run_context.session.run(self._sync_op)
if is_first:
_log_event('AFTER first _sync_op')
self._need_sync = False
def after_run(self, run_context, run_values):
new_size = run_context.session.run(
self._new_size_op, feed_dict={self._step_place: self._step})
propose_new_size(new_size)
changed, detached = run_context.session.run(self._resize_op)
if detached:
run_context.request_stop()
return
if changed:
print('changed on %d' % (self._step))
self._need_sync = True
self._step += 1
if self._step >= self._max_step:
print('request_stop on kungfu_step: %d' % (self._step))
run_context.request_stop()
def end(self, sess):
print('stopped at step: %d' % (self._step))
if self._save_final_model:
self.save(sess, 'final')
def save(self, sess, idx):
vs = tf.global_variables()
d = dict()
for t in vs:
v = sess.run(t)
d[t.name] = v
np.savez(os.path.join(self._model_dir, 'variables-%s.npz' % (idx)),
**d)
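# Hedged usage sketch: the hook attaches to a tf.estimator training loop like any
# other SessionRunHook. model_fn, input_fn and the schedule string are
# placeholders, not values defined by this module.
def _example_usage(model_fn, input_fn, schedule, max_step, model_dir):
    estimator = tf.estimator.Estimator(model_fn=model_fn, model_dir=model_dir)
    hook = KungFuElasticTrainHook(schedule, max_step, model_dir)
    estimator.train(input_fn=input_fn, hooks=[hook], max_steps=max_step)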
|
11572074
|
from __future__ import print_function
import uuid
import msgpack
import raft.tcp as tcp
class NoConnection(Exception):
pass
class RaftClient(object):
def __init__(self, server):
self.tcp = tcp.TCP(0, 'client')
self.tcp.start()
self.msgs = {}
self.tcp.connect(server)
if not self.tcp.u2c:
            # wait up to 0.5 seconds for the initial connection
self.tcp.recv(0.5)
if not self.tcp.u2c:
raise NoConnection
self.leader = next(iter(self.tcp.u2c.keys()))
def _send(self, rpc, msgid):
self.tcp.send(rpc, self.leader)
msgids = self.poll(0.5)
        if not msgids or msgid not in msgids:
return # XXX put real recovery logic here
msg = self.msgs[msgid][0]
if msg['type'] == 'cr_rdr':
self.leader = msg['leader']
print("redirected to %s! %s" % (self.leader, msg['addr']))
self.tcp.connect(msg['addr'])
del self.msgs[msgid]
return self._send(rpc, msgid)
def poll(self, timeout=0):
ans = self.tcp.recv(timeout)
if not ans:
return
msgids = set()
for _, msgs in ans:
for msg in msgs:
msg = msgpack.unpackb(msg, use_list=False)
msgid = msg['id']
msgids.add(msgid)
ums = self.msgs.get(msgid, [])
ums.append(msg)
self.msgs[msgid] = ums
return msgids
def send(self, data):
msgid = uuid.uuid4().hex
rpc = self.cq_rpc(data, msgid)
self._send(rpc, msgid)
return msgid
def update_hosts(self, config):
msgid = uuid.uuid4().hex
rpc = self.pu_rpc(config, msgid)
self._send(rpc, msgid)
return msgid
def cq_rpc(self, data, msgid):
# client query rpc
rpc = {
'type': 'cq',
'id': msgid,
'data': data
}
return msgpack.packb(rpc)
def pu_rpc(self, config, msgid):
# protocol update rpc
rpc = {
'type': 'pu',
'id': msgid,
'config': config
}
return msgpack.packb(rpc)
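# Hedged usage sketch: the exact address format expected by raft.tcp.TCP.connect
# is not shown here, so the server value below is only a placeholder assumption.
if __name__ == '__main__':
    client = RaftClient(('127.0.0.1', 9000))
    msgid = client.send({'op': 'noop'})
    client.poll(1.0)
    print(client.msgs.get(msgid))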
|
11572105
|
import click
import json, cPickle
import os, glob, subprocess
import h5py
from music21 import *
from constants import *
@click.group()
def analysis():
"""Performs various analyses."""
pass
@click.command()
def embed_note():
text_to_utf = { v:k for k,v in json.load(open(SCRATCH_DIR + '/utf_to_txt.json', 'rb')).items() }
utf_to_idx = json.load(open(SCRATCH_DIR + '/concat_corpus.json', 'rb'))['token_to_idx']
in_text = []
for utf in utf_to_idx: # we iterate across 'concat_corpus.json' because it omits unseen symbols (like the torch-rnn model)
in_text.append(utf)
in_text = ''.join(in_text)
input_fp = SCRATCH_DIR + '/input.utf'
open(input_fp, 'wb').write(in_text)
subprocess.call(' '.join([
'zsh',
'~/bachbot/scripts/analysis/embed_note.zsh',
input_fp,
'~/data',
]), shell = True)
@click.command()
def embed_chords():
text_to_utf = { v:k for k,v in json.load(open(SCRATCH_DIR + '/utf_to_txt.json', 'rb')).items() }
utf_to_idx = json.load(open(SCRATCH_DIR + '/concat_corpus.json', 'rb'))['token_to_idx']
chords = [
chord.Chord(['C3','E3','G3','C4']),
chord.Chord(['E3','G3','C4','E4']),
chord.Chord(['G3','C4','E4','G4']),
chord.Chord(['A3', 'C4', 'E4', 'A4'])
]
for c in chords:
in_text = [START_DELIM]
for note in c:
data = (note.pitch.midi, True)
text_data = str(data)
in_text.append(text_to_utf[text_data])
in_text.append(text_to_utf[CHORD_BOUNDARY_DELIM])
in_text = ''.join(in_text)
input_fp = SCRATCH_DIR + '/input.utf'
open(input_fp, 'wb').write(in_text)
# key, mode = harmony.chordSymbolFigureFromChord(c, True) # TODO: use this
out_dir = "/home/fl350/data/chord_embed/\'{0}\'".format(c.fullName)
#print out_dir, key, mode
subprocess.call(' '.join([
'zsh',
'~/bachbot/scripts/analysis/embed_note.zsh',
input_fp,
out_dir,
]), shell = True)
map(analysis.add_command, [
embed_note,
embed_chords
])
|
11572111
|
import subprocess
from torchpack.utils.logging import logger
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str)
parser.add_argument('--name', type=str)
parser.add_argument('--space', type=str)
parser.add_argument('--pr', type=float, nargs='+')
parser.add_argument('--mode', type=str)
args = parser.parse_args()
pres = ['python',
'examples/eval.py',
f'examples/configs/'
f'{args.dataset}/{args.name}/eval/x2/real/opt2/pruned/300.yml',
'--jobs=5',
'--run-dir']
with open(f"logs/x2/pruned/{args.dataset}.{args.name}."
f"{'-'.join(list(map(str, args.pr)))}.{args.space}."
f"{args.mode}.pruned.txt",
'a') as wfid:
for prune_ratio in args.pr:
if 'maxwell' in args.space:
n_blk = 4
else:
n_blk = 8
exp = f'runs/{args.dataset}.{args.name}.prune.searched.x2.noise.' \
f'opt2.setting0.pr{prune_ratio}.{args.space}.{args.mode}'
logger.info(f"running command {pres + [exp]}")
subprocess.call(pres + [exp], stderr=wfid)
|
11572129
|
from pulpo_forms.fieldtypes import TextField
from pulpo_forms.fieldtypes import FieldFactory
class TextAreaField(TextField.TextField):
"""
Validator for text area is the same as simple TextField
"""
template_name = "text_area/template.html"
edit_template_name = "text_area/template_edit.html"
prp_template_name = "text_area/properties.html"
def get_assets():
return ['js/fields/TextAreaField.js']
def get_styles():
return ['css/fields/TextAreaField.css']
def __str__(self):
return "Multi Line Text"
FieldFactory.FieldFactory.register('TextAreaField', TextAreaField)
|
11572142
|
from ._misc import RegressBoxes, RegressDims, UpsampleLike, Anchors, ClipBoxes # noqa: F401
from .filter_detections import FilterDetections # noqa: F401
from .fit_road_planes import FitRoadPlanes # noqa: F401
|
11572156
|
import GLWindow
import moderngl
from PIL import Image, ImageDraw, ImageFont
import numpy as np
wnd = GLWindow.create_window()
ctx = moderngl.create_context()
prog = ctx.program(
vertex_shader='''
#version 330
in vec2 in_vert;
in vec3 in_text;
out vec3 v_text;
void main() {
v_text = in_text;
gl_Position = vec4(in_vert, 0.0, 1.0);
}
''',
fragment_shader='''
#version 330
uniform sampler2DArray Texture;
in vec3 v_text;
out vec4 f_color;
void main() {
f_color = texture(Texture, v_text);
}
''',
)
x1 = np.cos(np.linspace(0.0, 2.0 * np.pi, 3, False)) * 0.3 * 9/16 - 0.4
y1 = np.sin(np.linspace(0.0, 2.0 * np.pi, 3, False)) * 0.3
z1 = np.full(3, 0.0)
x2 = np.cos(np.linspace(0.0, 2.0 * np.pi, 4, False)) * 0.3 * 9/16
y2 = np.sin(np.linspace(0.0, 2.0 * np.pi, 4, False)) * 0.3
z2 = np.full(4, 1.0)
x3 = np.cos(np.linspace(0.0, 2.0 * np.pi, 5, False)) * 0.3 * 9/16 + 0.4
y3 = np.sin(np.linspace(0.0, 2.0 * np.pi, 5, False)) * 0.3
z3 = np.full(5, 2.0)
shapes = [
np.dstack([x1, y1, x1, y1, z1]).astype('f4'),
np.dstack([x2, y2, x2, y2, z2]).astype('f4'),
np.dstack([x3, y3, x3, y3, z3]).astype('f4'),
]
vbo = ctx.buffer(b''.join(shape.tobytes() for shape in shapes))
dbo = ctx.buffer(np.array([
3, 1, 0, 0, 0,
4, 1, 3, 0, 0,
5, 1, 7, 0, 0,
], dtype='i4'))
images = [
Image.new('RGB', (256, 256), 'red'),
Image.new('RGB', (256, 256), 'green'),
Image.new('RGB', (256, 256), 'blue'),
]
merged = b''.join(img.tobytes() for img in images)
tex = ctx.texture_array((256, 256, 3), 3, merged)
tex.use()
vao = ctx.simple_vertex_array(prog, vbo, 'in_vert', 'in_text')
while wnd.update():
ctx.clear(1.0, 1.0, 1.0, 1.0)
# vao.mglo.render(moderngl.TRIANGLE_FAN, 3, 0, 1)
# vao.mglo.render(moderngl.TRIANGLE_FAN, 4, 3, 1)
# vao.mglo.render(moderngl.TRIANGLE_FAN, 5, 7, 1)
vao.render_indirect(dbo, moderngl.TRIANGLE_FAN)
|
11572211
|
import os
import pickle
import re
import nltk
import numpy as np
import pandas as pd
from nltk.corpus import stopwords
from nltk.tokenize import regexp_tokenize
from tqdm import tqdm
from sklearn.neighbors import KernelDensity
import gensim
import torch
import torch.utils.data as Data
import torchtext.vocab as vocab
from torch.utils.data import Dataset
import torch.nn.functional as F
from utils import sentence_tokenize, transform_format, check_ack_word
def read_all_unlabeled(limited_unlabeled_data):
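    # NOTE: data_path, max_seq_num, max_seq_len and embedding_size are not
    # parameters of this function; they are assumed to be module-level
    # constants defined elsewhere in the original project (cf. the defaults
    # used by read_data below).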
with open(data_path + 'labeled_data.pkl', 'rb') as f:
labeled_data = pickle.load(f)
# {mid: sentences, labels}
with open(data_path + 'unlabeled_data.pkl', 'rb') as f:
unlabeled_data = pickle.load(f)
# {mid: message}
with open(data_path + 'mid2target.pkl', 'rb') as f:
mid2target = pickle.load(f)
# {mid: target, team_size}
with open(data_path + 'label_mapping.pkl', 'rb') as f:
label_mapping = pickle.load(f)
print(label_mapping)
try:
with open(data_path + 'vocab_2.pkl', 'rb') as f:
vocab = pickle.load(f)
print('unk words: ', vocab.unk_count)
print('vocab size: ', vocab.vocab_size)
except:
vocab = Vocab(unlabeled_data=unlabeled_data,
labeled_data=labeled_data, embedding_size=embedding_size)
with open(data_path + 'vocab_2.pkl', 'wb') as f:
pickle.dump(vocab, f)
print('unk words: ', vocab.unk_count)
print('vocab size: ', vocab.vocab_size)
all_ids = [k for k in limited_unlabeled_data]
train_unlabeled_dataset = Loader_unlabeled(
vocab, limited_unlabeled_data, all_ids, mid2target, max_seq_num, max_seq_len)
return train_unlabeled_dataset
def read_data(data_path, n_labeled_data=300, n_unlabeled_data=-1, max_seq_num=8, max_seq_len=64, embedding_size=128):
with open(data_path + 'labeled_data.pkl', 'rb') as f:
labeled_data = pickle.load(f)
# {mid: sentences, labels}
with open(data_path + 'unlabeled_data.pkl', 'rb') as f:
unlabeled_data = pickle.load(f)
# {mid: message}
with open(data_path + 'mid2target.pkl', 'rb') as f:
mid2target = pickle.load(f)
# {mid: target, team_size}
with open(data_path + 'label_mapping.pkl', 'rb') as f:
label_mapping = pickle.load(f)
print(label_mapping)
try:
with open(data_path + 'vocab_2.pkl', 'rb') as f:
vocab = pickle.load(f)
print('unk words: ', vocab.unk_count)
print('vocab size: ', vocab.vocab_size)
except:
vocab = Vocab(unlabeled_data=unlabeled_data,
labeled_data=labeled_data, embedding_size=embedding_size)
with open(data_path + 'vocab_2.pkl', 'wb') as f:
pickle.dump(vocab, f)
print('unk words: ', vocab.unk_count)
print('vocab size: ', vocab.vocab_size)
np.random.seed(1)
labeled_data_ids = list(labeled_data.keys())
np.random.shuffle(labeled_data_ids)
unlabeled_data_ids = list(unlabeled_data.keys())
np.random.shuffle(unlabeled_data_ids)
if len(labeled_data_ids) > 1000:
n_labeled_data = min(len(labeled_data_ids)-800, n_labeled_data)
else:
n_labeled_data = min(len(labeled_data_ids)-500, n_labeled_data)
train_labeled_ids = labeled_data_ids[:n_labeled_data]
if n_unlabeled_data == -1:
n_unlabeled_data = len(unlabeled_data_ids)
train_unlabeled_ids = unlabeled_data_ids[:n_unlabeled_data]
if len(labeled_data_ids) > 1000:
val_ids = labeled_data_ids[-800:-400]
test_ids = labeled_data_ids[-400:]
else:
val_ids = labeled_data_ids[-500:-300]
test_ids = labeled_data_ids[-300:]
train_labeled_dataset = Loader_labeled(
vocab, labeled_data, train_labeled_ids, mid2target, label_mapping, max_seq_num, max_seq_len)
train_unlabeled_dataset = Loader_unlabeled(
vocab, unlabeled_data, train_unlabeled_ids, mid2target, max_seq_num, max_seq_len)
val_dataset = Loader_labeled(
vocab, labeled_data, val_ids, mid2target, label_mapping, max_seq_num, max_seq_len)
test_dataset = Loader_labeled(
vocab, labeled_data, test_ids, mid2target, label_mapping, max_seq_num, max_seq_len)
n_class_sentence = 0
for (u,v) in label_mapping.items():
if v!= 0:
n_class_sentence += 1
n_class_sentence += 1
doc_label = []
for (u,v) in mid2target.items():
doc_label.append(v)
n_class_doc = max(doc_label) + 1
print("#Labeled: {}, Unlabeled {}, Val {}, Test {}, N class {}, {}".format(
len(train_labeled_ids), len(train_unlabeled_ids), len(val_ids), len(test_ids), n_class_sentence, n_class_doc))
return train_labeled_dataset, train_unlabeled_dataset, val_dataset, test_dataset, vocab, n_class_sentence, n_class_doc
class Vocab(object):
def __init__(self, unlabeled_data=None, labeled_data=None, embedding_size=128, max_seq_num=6, max_seq_len=128):
self.word2id = {}
self.id2word = {}
self.pattern = r"""(?x)
(?:[A-Z]\.)+
|\$?\d+(?:\.\d+)?%?
|\w+(?:[-']\w+)*
|\.\.\.
|(?:[.,;"'?():-_`])
"""
self.english_punctuations = []
# ',', '.', ':', ';', '(', ')', '[', ']', '@', '#', '%', '*', '\"', '=', '^', '_', '~', '-']
self.build_vocab(unlabeled_data, labeled_data,
embedding_size, max_seq_num, max_seq_len)
self.vocab_size = len(self.word2id)
self.embed = self.build_embed_matrix(embedding_size)
def build_vocab(self, unlabeled_data, labeled_data, embedding_size, max_seq_num, max_seq_len):
sentences = []
words = []
if unlabeled_data is not None:
for (u, v) in unlabeled_data.items():
try:
results = re.compile(r'http[a-zA-Z0-9.?/&=:#%_-]*', re.S)
dd = results.sub(" <website> ", v)
results = re.compile(r'www.[a-zA-Z0-9.?/&=:#%_-]*', re.S)
dd = results.sub(" <website> ", dd)
results = re.compile(r'[a-zA-Z0-9.?/&=:#%_-]*.(com|net|org|io|gov|me|edu)', re.S)
dd = results.sub(" <website> ", dd)
sents = sentence_tokenize(dd)
for j in range(0, len(sents)):
a = regexp_tokenize(
transform_format(sents[j]), self.pattern)
temp = []
for k in range(0, len(a)):
if a[k] not in self.english_punctuations and check_ack_word(a[k]) == 1:
if a[k].isdigit():
a[k] = '<number>'
elif a[k][0] == '$':
a[k] = '<money>'
elif a[k][-1] == '%':
a[k] = '<percentage>'
temp.append(a[k].lower())
words.append(a[k].lower())
if len(temp) > 0:
sentences.append(temp)
except:
#print(u,v)
#exit()
pass
if labeled_data is not None:
for (u, v) in labeled_data.items():
for i in range(0, len(v[0])):
v[0][i] = str(v[0][i])
try:
results = re.compile(r'http[a-zA-Z0-9.?/&=:#%_-]*', re.S)
dd = results.sub(" <website> ", v[0][i])
results = re.compile(r'www.[a-zA-Z0-9.?/&=:#%_-]*', re.S)
dd = results.sub(" <website> ", dd)
results = re.compile(r'[a-zA-Z0-9.?/&=:#%_-]*.(com|net|org|io|gov|me|edu)', re.S)
dd = results.sub(" <website> ", dd)
except:
print(u, v)
print(v[0][i])
exit()
a = regexp_tokenize(transform_format(dd), self.pattern)
temp = []
for k in range(0, len(a)):
if a[k] not in self.english_punctuations and check_ack_word(a[k]) == 1:
if a[k].isdigit():
a[k] = '<number>'
elif a[k][0] == '$':
a[k] = '<money>'
elif a[k][-1] == '%':
a[k] = '<percentage>'
temp.append(a[k].lower())
words.append(a[k].lower())
if len(temp) > 0:
sentences.append(temp)
word_frequency = {}
for i in range(0, len(words)):
if words[i] in word_frequency:
word_frequency[words[i]] += 1
else:
word_frequency[words[i]] = 1
self.model = gensim.models.Word2Vec(
sentences, size=embedding_size, window=5, min_count=1, iter=20, negative=50)
x = 4
self.word2id['<pad>'] = 0
self.id2word[0] = '<pad>'
self.word2id['<sos>'] = 2
self.id2word[2] = '<sos>'
self.word2id['<eos>'] = 3
self.id2word[3] = '<eos>'
self.unk_count = 0
for i in range(0, len(sentences)):
for j in range(0, len(sentences[i])):
if word_frequency[sentences[i][j].lower()] >= 2:
if sentences[i][j].lower() in self.model:
if sentences[i][j].lower() in self.word2id:
pass
else:
self.word2id[sentences[i][j].lower()] = x
self.id2word[x] = sentences[i][j].lower()
x = x + 1
else:
self.word2id['<unk>'] = 1
self.id2word[1] = '<unk>'
self.unk_count += 1
def build_embed_matrix(self, embedding_size):
X = np.random.normal(loc=0.0, scale=1.0, size=[
self.vocab_size, embedding_size])
X[0] = np.zeros([1, embedding_size])
#X[0] = np.zeros([1, embedding_size])
#X[0] = np.zeros([1, embedding_size])
for (u, v) in self.id2word.items():
if v in self.model:
vector = self.model.wv[v]
X[u] = vector
return X
class Loader_labeled(Dataset):
def __init__(self, vocab, labeled_data, ids, target, label_set, max_seq_num=8, max_seq_len=64):
self.vocab = vocab
self.labeled_data = labeled_data
self.ids = ids
self.target = target
self.max_seq_len = max_seq_len
self.max_seq_num = max_seq_num
self.pattern = r"""(?x)
(?:[A-Z]\.)+
|\$?\d+(?:\.\d+)?%?
|\w+(?:[-']\w+)*
|\.\.\.
"""
self.english_punctuations = []
# ',', '.', ':', ';', '(', ')', '[', ']', '@', '#', '%', '*', '\"', '=', '^', '_', '~', '-']
self.label_set = label_set
self.kde = KernelDensity(bandwidth=0.5, kernel='gaussian')
self.load_data(labeled_data, ids)
def __len__(self):
return len(self.ids)
def __getitem__(self, idx):
mid = self.ids[idx]
sents, l, sent_len, doc_len= self.message[mid]
message_target = self.lookup_score(mid)
labels = np.array([10] * self.max_seq_num)
doc_len = np.array(doc_len)
sent_length = np.array([0] * self.max_seq_num)
# select labeled sent
mask1 = np.array([0] * self.max_seq_num)
# select unlabeled sent
mask2 = np.array([0] * self.max_seq_num)
# select padded sent
mask3 = np.array([1] * self.max_seq_num)
# select unpadded sent
mask4 = np.array([0] * self.max_seq_num)
for i in range(0, len(l)):
labels[i] = l[i]
sent_length[i] = sent_len[i]
if l[i] != 10:
mask1[i] = 1
mask2[i] = 0
mask3[i] = 0
mask4[i] = 1
if l[i] == 10:
mask1[i] = 0
mask2[i] = 1
mask3[i] = 0
mask4[i] = 1
message_vec = torch.LongTensor(self.message2id(sents))
return (message_vec, labels, message_target, mask1, mask2, mask3, mask4, mid, sent_length, doc_len)
def lookup_score(self, id):
return self.target[id]
def lookup_label_id(self, s):
return self.label_set[s]
def message2id(self, message):
X = np.zeros([self.max_seq_num, self.max_seq_len])
for i in range(0, len(message)):
for j, si in enumerate(message[i]):
if i < self.max_seq_num and j < self.max_seq_len:
try:
id = self.vocab.word2id[si.lower()]
X[i][j] = id
except:
X[i][j] = 1
for i in range(len(message), self.max_seq_num):
X[i][0] = 2
X[i][1] = 3
return X
def load_data(self, labeled_data, ids):
self.message = {}
labels_esit = []
for i in ids:
sentences = []
labels = []
doc_len = []
sent_len = []
sents, l = labeled_data[i]
for j in range(0, len(sents)):
sents[j] = str(sents[j])
results = re.compile(r'www.[a-zA-Z0-9.?/&=:#%_-]*', re.S)
dd = results.sub(" <website> ", sents[j])
results = re.compile(r'http[a-zA-Z0-9.?/&=:#%_-]*', re.S)
dd = results.sub(" <website> ", dd)
results = re.compile(r'[a-zA-Z0-9.?/&=:#%_-]*.(com|net|org|io|gov|me|edu)', re.S)
dd = results.sub(" <website> ", dd)
a = regexp_tokenize(transform_format(dd), self.pattern)
temp = []
for k in range(0, len(a)):
if a[k] not in self.english_punctuations and check_ack_word(a[k]) == 1:
if a[k].isdigit():
a[k] = '<number>'
elif a[k][0] == '$':
a[k] = '<money>'
elif a[k][-1] == '%':
a[k] = '<percentage>'
temp.append(a[k].lower())
if len(temp) > 0:
temp_ = ['<sos>']
for k in range(0, min(len(temp), self.max_seq_len -2)):
temp_.append(temp[k])
temp_.append('<eos>')
sentences.append(temp_)
labels.append(self.lookup_label_id(l[j]))
labels_esit.append(self.lookup_label_id(l[j]))
sent_len.append(len(temp_) - 1)
doc_len.append(len(sents) - 1)
self.message[i] = (sentences, labels, sent_len, doc_len)
x_d = set()
for (u, v) in self.label_set.items():
x_d.add(v)
x_d = np.array(list(x_d))
self.kde.fit(np.array(labels_esit)[:, None])
self.dist = self.kde.score_samples(x_d[:, None])
self.esit_dist = F.softmax(torch.tensor(self.dist), dim = -1)
class Loader_unlabeled(Dataset):
def __init__(self, vocab, unlabeled_data, ids, target, max_seq_num=8, max_seq_len=64):
self.vocab = vocab
self.unlabeled_data = unlabeled_data
#self.ids = ids
self.target = target
self.max_seq_num = max_seq_num
self.max_seq_len = max_seq_len
self.pattern = r"""(?x)
(?:[A-Z]\.)+
|\$?\d+(?:\.\d+)?%?
|\w+(?:[-']\w+)*
|\.\.\.
"""
self.english_punctuations = []
# ',', '.', ':', ';', '(', ')', '[', ']', '@', '#', '%', '*', '\"', '=', '^', '_', '~', '-']
self.load_data(unlabeled_data, ids)
def __len__(self):
return len(self.ids)
def __getitem__(self, idx):
mid = self.ids[idx]
sents, l, sent_len, doc_len = self.message[mid]
message_target = self.lookup_score(mid)
doc_len = np.array(doc_len)
sent_length = np.array([0] * self.max_seq_num)
labels = np.array([10] * self.max_seq_num)
# select labeled sent
mask1 = np.array([0] * self.max_seq_num)
# select unlabeled sent
mask2 = np.array([0] * self.max_seq_num)
# select padded sent
mask3 = np.array([1] * self.max_seq_num)
# select unpadded sent
mask4 = np.array([0] * self.max_seq_num)
for i in range(0, len(l)):
labels[i] = l[i]
sent_length[i] = sent_len[i]
if l[i] != 10:
mask1[i] = 1
mask2[i] = 0
mask3[i] = 0
mask4[i] = 1
if l[i] == 10:
mask1[i] = 0
mask2[i] = 1
mask3[i] = 0
mask4[i] = 1
message_vec = torch.LongTensor(self.message2id(sents))
return (message_vec, labels, message_target, mask1, mask2, mask3, mask4, mid, sent_length, doc_len)
def message2id(self, message):
X = np.zeros([self.max_seq_num, self.max_seq_len])
for i in range(0, len(message)):
for j, si in enumerate(message[i]):
if i < self.max_seq_num and j < self.max_seq_len:
try:
id = self.vocab.word2id[si.lower()]
X[i][j] = id
except:
X[i][j] = 1
for i in range(len(message), self.max_seq_num):
X[i][0] = 2
X[i][1] = 3
return X
def lookup_score(self, id):
return self.target[id]
def load_data(self, unlabeled_data, ids):
self.message = {}
self.ids = []
self.data_num = 0
for i in ids:
try:
sentences = []
labels = []
doc = unlabeled_data[i]
doc_len = []
sent_len = []
doc += '.'
results = re.compile(r'http[a-zA-Z0-9.?/&=:#%_-]*', re.S)
dd = results.sub(" <website> ", doc)
results = re.compile(r'www.[a-zA-Z0-9.?/&=:#%_-]*', re.S)
dd = results.sub(" <website> ", dd)
results = re.compile(r'[a-zA-Z0-9.?/&=:#%_-]*.(com|net|org|io|gov|me|edu)', re.S)
dd = results.sub(" <website> ", dd)
sents = sentence_tokenize(dd)
# print(sents)
for j in range(0, len(sents)):
a = regexp_tokenize(
transform_format(sents[j]), self.pattern)
temp = []
for k in range(0, len(a)):
if a[k] not in self.english_punctuations and check_ack_word(a[k]) == 1:
if a[k].isdigit():
a[k] = '<number>'
elif a[k][0] == '$':
a[k] = '<money>'
elif a[k][-1] == '%':
a[k] = '<percentage>'
temp.append(a[k].lower())
if len(temp) > 0:
temp_ = ['<sos>']
for k in range(0, min(len(temp), self.max_seq_len - 2)):
temp_.append(temp[k])
temp_.append('<eos>')
sentences.append(temp_)
labels.append(10)
sent_len.append(len(temp_) - 1)
doc_len.append(min(len(sents) - 1, self.max_seq_num - 1))
self.message[i] = (sentences[:self.max_seq_num],
labels[:self.max_seq_num], sent_len[:self.max_seq_num], doc_len)
self.ids.append(i)
except:
if str(doc) != "nan":
print(doc)
pass
|
11572244
|
import re
from .default import DefaultParser
class KarmaParser(DefaultParser):
name = "karma"
def command_matches(self, command):
return "karma" in command
def num_passed(self, result):
return self.num_total(result) - self.num_failed(result)
def num_total(self, result):
# Executed 2 of 2 (1 FAILED)
m = re.findall('Executed (\d+) of (\d+)', result.cleaned_output)
return int(m[-1][1])
def num_failed(self, result):
# You'll see either
# Executed 2 of 2 (1 FAILED)
# Executed 1 of 1 SUCCESS
# Note that karma rewrites the screen as it goes,
# so you have to grab the last one.
fails = re.findall(
'Executed (\d+) of (\d+) \((\d+) FAILED\)',
result.cleaned_output
)
if len(fails) > 0:
return int(fails[-1][-1])
else:
m = re.findall(
'Executed (\d+) of (\d+) SUCCESS',
result.cleaned_output
)
if len(m) > 0:
return 0
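# For reference, the patterns above applied to the sample line quoted in the
# comments behave like this (doctest-style note only, not executed at import
# time):
#
#   >>> re.findall(r'Executed (\d+) of (\d+)', 'Executed 2 of 2 (1 FAILED)')
#   [('2', '2')]
#   >>> re.findall(r'Executed (\d+) of (\d+) \((\d+) FAILED\)', 'Executed 2 of 2 (1 FAILED)')
#   [('2', '2', '1')]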
|
11572245
|
from agileutil.rpc.transport import TcpTransport, UdpTransport, HttpTransport, ClientUdpTransport, RpcTransport
from agileutil.rpc.serialize import BinarySerialize, JsonSerialize
from multiprocessing import cpu_count
class RpcProtocal(object):
__slots__ = ('transport')
def __init__(self):
self.transport = RpcTransport()
def serialize(self, obj):
serializer = None
if self.serializeType == 'bin':
serializer = BinarySerialize()
elif self.serializeType == 'json':
serializer = JsonSerialize()
if serializer == None:
raise Exception('unknown serializeType')
return serializer.serialize(obj)
def unserialize(self, msg):
serializer = None
if self.serializeType == 'bin':
serializer = BinarySerialize()
elif self.serializeType == 'json':
serializer = JsonSerialize()
if serializer == None:
raise Exception('unknown serializeType')
return serializer.unserialize(msg)
def parseRequest(self, package):
func = package['func']
args = package['args']
kwargs = package['kwargs']
return func, args, kwargs
class HttpProtocal(RpcProtocal):
def __init__(self, host, port, worker = cpu_count(), serializeType = 'bin', timeout = 10, poolConnection=5, poolMaxSize = 20, maxRetries = 3):
RpcProtocal.__init__(self)
self.host = host
self.port = port
self.worker = worker
self.serializeType = serializeType
self.timeout = timeout
self.transport = HttpTransport(host, port, timeout, poolConnection, poolMaxSize, maxRetries)
class TcpProtocal(RpcProtocal):
def __init__(self, host, port, serializeType = 'bin', timeout = 10):
RpcProtocal.__init__(self)
self.serializeType = serializeType
self.timeout = timeout
self.transport = TcpTransport(host, port, timeout)
class UdpProtocal(RpcProtocal):
def __init__(self, host, port, serializeType = 'bin', timeout = 10):
RpcProtocal.__init__(self)
self.serializeType = serializeType
self.timeout = timeout
self.transport = UdpTransport(host, port)
class ClientUdpProtocal(UdpProtocal):
def __init__(self, host, port, serializeType = 'bin', timeout = 10):
UdpProtocal.__init__(self, host, port, serializeType=serializeType, timeout=timeout)
self.serializeType = serializeType
self.host = host
self.port = port
self.timeout = timeout
self.transport = ClientUdpTransport(host, port, timeout)
def newTransport(self):
self.transport.close()
self.transport = ClientUdpTransport(self.host, self.port, self.timeout)
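# Illustration of the request envelope parseRequest expects (doctest-style
# note only; transports and serializers are not exercised here):
#
#   >>> RpcProtocal().parseRequest({'func': 'add', 'args': (1, 2), 'kwargs': {}})
#   ('add', (1, 2), {})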
|
11572249
|
import unittest
from tinyber.c_nodes import int_max_size_type
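# int_max_size_type(lo, hi) (from tinyber.c_nodes) picks the narrowest C
# integer type able to represent the closed range [lo, hi]: unsigned widths
# when lo >= 0, signed widths otherwise, stepping through 8/16/32/64 bits and
# raising NotImplementedError for ranges wider than 64 bits -- this is the
# behaviour the tests below pin down.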
class TestBasic(unittest.TestCase):
def test_int8(self):
size = "int8_t"
self.assertEqual(int_max_size_type(-2**7, 0), size)
self.assertEqual(int_max_size_type(-1, 0), size)
self.assertEqual(int_max_size_type(-1, 2**7 - 1), size)
self.assertEqual(int_max_size_type(-2**7, 2**7 - 1), size)
self.assertNotEqual(int_max_size_type(0, 2**7), size)
self.assertNotEqual(int_max_size_type(0, 2**7 - 1), size)
def test_int16(self):
size = "int16_t"
self.assertEqual(int_max_size_type(-1, 256), size)
self.assertEqual(int_max_size_type(-1, 2**15 - 1), size)
self.assertEqual(int_max_size_type(-2**15, 2**15 - 1), size)
self.assertNotEqual(int_max_size_type(0, 2**15), size)
self.assertNotEqual(int_max_size_type(0, 2**15 - 1), size)
def test_int32(self):
size = "int32_t"
self.assertEqual(int_max_size_type(-1, 2**16), size)
self.assertEqual(int_max_size_type(-1, 2**31 - 1), size)
self.assertEqual(int_max_size_type(-2**31, 2**31 - 1), size)
self.assertNotEqual(int_max_size_type(-1, 2**31), size)
self.assertNotEqual(int_max_size_type(0, 2**31), size)
def test_int64(self):
size = "int64_t"
self.assertEqual(int_max_size_type(-1, 2**32), size)
self.assertEqual(int_max_size_type(-1, 2**63 - 1), size)
self.assertEqual(int_max_size_type(-2**63, 2**63 - 1), size)
self.assertNotEqual(int_max_size_type(0, 2**63), size)
with self.assertRaises(NotImplementedError):
int_max_size_type(-1, 2**63)
with self.assertRaises(NotImplementedError):
int_max_size_type(-2**64, 0)
def test_uint8(self):
size = "uint8_t"
self.assertEqual(int_max_size_type(0, 0), size)
self.assertEqual(int_max_size_type(0, 2**8 - 1), size)
# self.assertNotEqual(int_max_size_type(0, -1), size)
self.assertNotEqual(int_max_size_type(0, 2**8), size)
def test_uint16(self):
size = "uint16_t"
self.assertEqual(int_max_size_type(0, 256), size)
self.assertEqual(int_max_size_type(0, 2**16 - 1), size)
self.assertNotEqual(int_max_size_type(0, 2**16), size)
def test_uint32(self):
size = "uint32_t"
self.assertEqual(int_max_size_type(0, 2**16), size)
self.assertEqual(int_max_size_type(0, 2**32 - 1), size)
self.assertNotEqual(int_max_size_type(0, 2**32), size)
def test_uint64(self):
size = "uint64_t"
self.assertEqual(int_max_size_type(0, 2**32), size)
self.assertEqual(int_max_size_type(0, 2**64 - 1), size)
with self.assertRaises(NotImplementedError):
int_max_size_type(0, 2**64)
if __name__ == '__main__':
unittest.main()
|
11572322
|
import numpy as np
import ora
from ora import *
import pytest
pytest.importorskip("ora.np")
#-------------------------------------------------------------------------------
# FIXME: Combine.
TIME_TYPES = (SmallTime, Unix32Time, Unix64Time, Time, NsTime, HiTime, Time128)
# FIXME: Put the offset dtype in an attribute.
@pytest.mark.parametrize("Time,offset_dtype", [
(SmallTime , np.dtype("uint32")),
(Unix32Time , np.dtype("int32")),
(Unix64Time , np.dtype("int64")),
(Time , np.dtype("uint64")),
(NsTime , np.dtype("int64")),
(HiTime , np.dtype("uint64")),
])
def test_to_offset(Time, offset_dtype):
arr = np.array([
Time.MIN,
Time.from_offset(0),
Time.MAX,
Time.INVALID,
Time.MISSING,
])
off = ora.np.to_offset(arr)
assert off.dtype == offset_dtype
assert len(off) == 5
assert off[0] == Time.MIN.offset
assert off[1] == 0
assert off[2] == Time.MAX.offset
|
11572325
|
from aws_cfn_update.rest_api_body_updater import RestAPIBodyUpdater
import json
def test_new_resource_name():
updater = RestAPIBodyUpdater()
updater.resource_name = 'RestAPI'
result = updater.new_resource_name('RestAPI')
assert result == 'RestAPIv1'
result = updater.new_resource_name(result)
assert result == 'RestAPIv2'
updater.resource_name = 'Restv2Api'
result = updater.new_resource_name('Restv2Apiv3')
assert result == 'Restv2Apiv4'
def test_matching_names():
updater = RestAPIBodyUpdater()
updater.resource_name = 'RestAPI'
updater.template = {
'Resources':
{
'RestAPIv10': {'Type': 'AWS::ApiGateway::RestApi'},
'RestApi': {'Type': 'AWS::ApiGateway::RestApi'},
'Restv2APiv1': {'Type': 'AWS::ApiGateway::RestApi'},
'RestAPIv1': {'Type': 'AWS::ApiGateway::RestApi'},
'RestAPIv0': {'Type': 'AWS::ApiGateway::RestApi'},
'RestAPIv_01': {'Type': 'AWS::ApiGateway::RestApi'}
}
}
result = updater.find_matching_resources()
assert result == ['RestAPIv0', 'RestAPIv1', 'RestAPIv10']
updater.resource_name = 'RestApi'
result = updater.find_matching_resources()
assert result == ['RestApi']
updater.resource_name = 'NotARestApi'
result = updater.find_matching_resources()
assert not result
sample = {
"Resources": {
"Deployment": {
"Type": "AWS::ApiGateway::Deployment",
"Properties": {
"RestApiId": {
"Ref": "RestAPI"
}
}
},
"RestAPI": {
"Type": "AWS::ApiGateway::RestApi",
"Properties": {
"EndpointConfiguration": {
"Types": [
"REGIONAL"
]
},
"Body": {
"swagger": "2.0"
}
}
}
}
}
def test_replace_body():
updater = RestAPIBodyUpdater()
updater.resource_name = 'RestAPI'
updater.template = json.loads(json.dumps(sample))
updater.body = {'swagger': '2.0', 'description': 'a new one'}
updater.update_template()
assert updater.dirty
assert 'description' in updater.template['Resources']['RestAPI']['Properties']['Body']
assert updater.template['Resources']['RestAPI']['Properties']['Body']['description'] == 'a new one'
def test_add_new_version():
updater = RestAPIBodyUpdater()
updater.resource_name = 'RestAPI'
updater.template = json.loads(json.dumps(sample))
updater.body = {'swagger': '2.0', 'description': 'a new one'}
updater.add_new_version = True
updater.update_template()
assert updater.dirty
assert 'RestAPI' not in updater.template['Resources']
assert 'RestAPIv1' in updater.template['Resources']
assert 'description' in updater.template['Resources']['RestAPIv1']['Properties']['Body']
assert updater.template['Resources']['RestAPIv1']['Properties']['Body']['description'] == 'a new one'
multiple = {
"Resources": {
"Deployment": {
"Type": "AWS::ApiGateway::Deployment",
"Properties": {
"RestApiId": {
"Ref": "RestAPIv3"
}
}
},
"RestAPI": {
"Type": "AWS::ApiGateway::RestApi",
"Properties": {
"EndpointConfiguration": {
"Types": [
"REGIONAL"
]
},
"Body": {
"swagger": "2.0"
}
}
},
"RestAPIv2": {
"Type": "AWS::ApiGateway::RestApi",
"Properties": {
"EndpointConfiguration": {
"Types": [
"REGIONAL"
]
},
"Body": {
"swagger": "2.0"
}
}
},
"RestAPIv3": {
"Type": "AWS::ApiGateway::RestApi",
"Properties": {
"EndpointConfiguration": {
"Types": [
"GLOBAL"
]
},
"Body": {
"swagger": "2.0"
}
}
}
}
}
def test_add_new_version_keep_two():
updater = RestAPIBodyUpdater()
updater.resource_name = 'RestAPI'
updater.template = json.loads(json.dumps(multiple))
updater.body = {'swagger': '2.0', 'description': 'a new one'}
updater.add_new_version = True
updater.keep = 2
updater.update_template()
assert updater.dirty
assert 'RestAPI' not in updater.template['Resources']
assert 'RestAPIv2' not in updater.template['Resources']
assert 'RestAPIv3' in updater.template['Resources']
assert 'RestAPIv4' in updater.template['Resources']
assert 'GLOBAL' == updater.template['Resources']['RestAPIv4']['Properties']['EndpointConfiguration']['Types'][0]
assert 'description' in updater.template['Resources']['RestAPIv4']['Properties']['Body']
assert updater.template['Resources']['RestAPIv4']['Properties']['Body']['description'] == 'a new one'
assert updater.template['Resources']['Deployment']['Properties']['RestApiId']['Ref'] == 'RestAPIv4'
def test_load_and_merge():
updater = RestAPIBodyUpdater()
updater.resource_name = 'RestAPI'
updater.template = json.loads(json.dumps(sample))
updater.api_gateway_extensions = 'tests/aws-extensions.yaml'
updater.open_api_specification = 'tests/api-specification.yaml'
updater.body = updater.load_and_merge_swagger_body()
updater.update_template()
assert updater.dirty
assert updater.template['Resources']['RestAPI']['Properties']['Body'] == updater.body
|
11572326
|
import sys
import os
import config.parser as config
from brain.helpers.sql import session_transaction
from brain.models.sqlobjects import User, Base
from sqlalchemy import create_engine
from irma.common.base.exceptions import IrmaDatabaseResultNotFound,\
IrmaDatabaseError
if len(sys.argv) not in (4, 5):
print("usage: {0} <username> <rmqvhost> <ftpuser>\n"
" with <username> a string\n"
" <rmqvhost> the rmqvhost used for the frontend\n"
" <ftpuser> the ftpuser used by the frontend\n"
"example: {0} test1 mqfrontend frontend"
"".format(sys.argv[0]))
sys.exit(1)
name = sys.argv[1]
rmqvhost = sys.argv[2]
ftpuser = sys.argv[3]
# Auto-create directory for sqlite db
db_name = os.path.abspath(config.sqldb.dbname)
dirname = os.path.dirname(db_name)
if not os.path.exists(dirname):
print("SQL directory does not exist {0}"
"..creating".format(dirname))
os.makedirs(dirname)
os.chmod(dirname, 0o777)
elif not (os.path.isdir(dirname)):
print("Error. SQL directory is a not a dir {0}"
"".format(dirname))
raise IrmaDatabaseError("Can not create Brain database dir")
if not os.path.exists(db_name):
# touch like method to create a rw-rw-rw- file for db
open(db_name, 'a').close()
os.chmod(db_name, 0o666)
# Retrieve database information
engine = create_engine(config.sqldb.url, echo=config.sql_debug_enabled())
# and create the database tables if they do not exist yet
Base.metadata.create_all(engine)
with session_transaction() as session:
try:
user = User.get_by_rmqvhost(session, rmqvhost=rmqvhost)
print("rmqvhost {0} is already assigned to user {1}. "
"Updating with new parameters.".format(user.name, user.rmqvhost))
session.query(User)\
.filter_by(id=user.id)\
.update({'name': name, 'ftpuser': ftpuser})
except IrmaDatabaseResultNotFound:
user = User(name=name, rmqvhost=rmqvhost, ftpuser=ftpuser)
session.add(user)
|
11572340
|
import os
import socket
sockname = os.environ['KAA_SOCKNAME']
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect(sockname)
s.send(b'ok\n')
s.recv(4096)
s.close()
|
11572373
|
from corehq.apps.reports.filters.fixtures import AsyncLocationFilter
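# NOTE: RestrictedLocationDrillDown is referenced below but not imported in
# this snippet; it is assumed to be imported or defined elsewhere in the
# original module.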
class RestrictedAsyncLocationFilter(AsyncLocationFilter):
def load_locations_json(self, loc_id):
user = self.request.couch_user
if user.has_permission(self.domain, 'access_all_locations'):
return super(RestrictedAsyncLocationFilter, self).load_locations_json(loc_id)
return RestrictedLocationDrillDown(domain=self.domain, user=user).get_locations_json()
|
11572398
|
import logging
import math
import ConfigParser
import numpy as np
from nn_dataflow import ConvLayer
from src.utils.utils import ceil_a_by_b, log2, lookup_pandas_dataframe
from src.simulator.stats import Stats
from src.simulator.loop_stack import LoopStack
from src.optimizer.optimizer import optimize_for_order, get_stats_fast
from src.simulator.accelerator import Accelerator
from sram.sram_stats import get_sram_dataframe, get_sram_data
import os
import pandas
class Simulator(object):
"""
Simulator class
"""
def __init__(self, config_file='conf.ini', verbose=False, energy_costs=None):
# custom energy cost
self.energy_costs = energy_costs
self.config_file = config_file
self.config = ConfigParser.ConfigParser()
self.config.read(config_file)
systolic_dim = [self.config.getint('accelerator', 'a'),
1,
self.config.getint('accelerator', 'c')]
if verbose:
log_level = logging.DEBUG
else:
log_level = logging.INFO
# logging.basicConfig(level=log_level)
self.logger = logging.getLogger('{}.{}'.format(__name__, 'Simulator'))
self.logger.setLevel(log_level)
self.logger.debug("Creating Simulator Object")
self.logger.debug("Systolic Array dimentions: {}".format(systolic_dim))
mem_if_width = self.config.getint('system', 'if_width')
self.logger.debug("Memory Interface Bit-Width: {}-bits".format(mem_if_width))
pmax = self.config.getint('accelerator', 'high_prec')
pmin = self.config.getint('accelerator', 'low_prec')
self.logger.debug("High Precision: {}-bits".format(pmax))
self.logger.debug("Low Precision: {}-bits".format(pmin))
# Using half the size assuming double buffering
sram = {}
sram['act'] = self.config.getint('accelerator', 'Act_SRAM')
self.logger.debug("Activation SRAM size: {:,} Bytes".format(sram['act']))
sram['wgt'] = self.config.getint('accelerator', 'Wgt_SRAM')
self.logger.debug("Weight SRAM size: {:,} Bytes".format(sram['wgt']))
sram['out'] = self.config.getint('accelerator', 'Out_SRAM')
self.logger.debug("Output SRAM size: {:,} Bytes".format(sram['out']))
frequency = self.config.getint('accelerator', 'frequency')
self.logger.debug('Frequency: {:,} Hz'.format(frequency))
hp_peak_throughput = systolic_dim[0] * \
systolic_dim[1] * \
systolic_dim[2]
peak_throughput = hp_peak_throughput * \
(int(pmax / pmin) ** 2)
self.logger.debug('Lowest precision: Peak Throughput: {:,} Ops/cycle'.format(peak_throughput))
self.logger.debug('Highest precision: Peak Throughput: {:,} Ops/cycle'.format(hp_peak_throughput))
N = systolic_dim[0]
beta = systolic_dim[1]
M = systolic_dim[2]
assert beta == 1
self.accelerator = Accelerator(N, M, pmax, pmin, sram, mem_if_width, frequency)
##################################################
# Get stats for SRAM
frequency = self.accelerator.frequency
tech_node = 45
voltage = 0.85
sram_csv = 'hardware_sweep/sram_results.csv'
self.sram_df = get_sram_dataframe(tech_node, voltage, int(frequency * 1.e-6), './sram/data',
logpath='./sram/mcpat.sram/SampleScirpts/RunLog')
def get_area(self):
frequency = self.accelerator.frequency
##################################################
N = self.accelerator.N
M = self.accelerator.M
pmax = self.accelerator.pmax
pmin = self.accelerator.pmin
wbuf_size = self.accelerator.sram['wgt'] * 8
ibuf_size = self.accelerator.sram['act'] * 8
obuf_size = self.accelerator.sram['out'] * 8
wbuf_bank = N * 2
ibuf_bank = N * 2
obuf_bank = 2
wbuf_bits = (pmax * pmax / pmin) * M
ibuf_bits = (pmax * pmax / pmin)
obuf_bits = 32 * M
wbuf_word = ceil_a_by_b(wbuf_size, wbuf_bank * wbuf_bits)
ibuf_word = ceil_a_by_b(ibuf_size, ibuf_bank * ibuf_bits)
obuf_word = ceil_a_by_b(obuf_size, obuf_bank * obuf_bits)
##################################################
wbuf_area, wbuf_leak_power, wbuf_read_energy, wbuf_write_energy = get_sram_data(self.sram_df, wbuf_bits, wbuf_size/8, wbuf_bank, 2)
self.logger.debug('WBUF :')
self.logger.debug('\tBanks : {0:>8}'.format(wbuf_bank))
self.logger.debug('\tBitWidth : {0:>8}'.format(wbuf_bits))
self.logger.debug('\tWords : {0:>8}'.format(wbuf_word))
self.logger.debug('\tTotal Size (kBytes) : {0:>8}'.format(wbuf_size/8./1024.))
self.logger.debug('\tArea : {0:>8.2f}'.format(wbuf_area))
self.logger.debug('\tLeak Energy (per clock) : {0:>8.6f}'.format(wbuf_leak_power))
self.logger.debug('\tRead Energy (per bit) (nJ) : {0:>8.6f}'.format(wbuf_read_energy))
self.logger.debug('\tWrite Energy (per bit) (nJ) : {0:>8.6f}'.format(wbuf_write_energy))
##################################################
ibuf_area, ibuf_leak_power, ibuf_read_energy, ibuf_write_energy = get_sram_data(self.sram_df, ibuf_bits, ibuf_size/8, ibuf_bank, 2)
self.logger.debug('IBUF :')
self.logger.debug('\tBanks : {0:>8}'.format(ibuf_bank))
self.logger.debug('\tBitWidth : {0:>8}'.format(ibuf_bits))
self.logger.debug('\tWords : {0:>8}'.format(ibuf_word))
self.logger.debug('\tTotal Size (kBytes) : {0:>8}'.format(ibuf_size/8./1024.))
self.logger.debug('\tArea : {0:>8.2f}'.format(ibuf_area))
self.logger.debug('\tLeak Energy (per clock) : {0:>8.6f}'.format(ibuf_leak_power))
self.logger.debug('\tRead Energy (per bit) (nJ) : {0:>8.6f}'.format(ibuf_read_energy))
self.logger.debug('\tWrite Energy (per bit) (nJ) : {0:>8.6f}'.format(ibuf_write_energy))
##################################################
obuf_area, obuf_leak_power, obuf_read_energy, obuf_write_energy = get_sram_data(self.sram_df, obuf_bits, obuf_size/8, obuf_bank, 2)
self.logger.debug('OBUF :')
self.logger.debug('\tBanks : {0:>8}'.format(obuf_bank))
self.logger.debug('\tBitWidth : {0:>8}'.format(obuf_bits))
self.logger.debug('\tWords : {0:>8}'.format(obuf_word))
self.logger.debug('\tTotal Size (kBytes) : {0:>8}'.format(obuf_size/8./1024.))
self.logger.debug('\tArea : {0:>8.2f}'.format(obuf_area))
self.logger.debug('\tLeak Energy (per clock) : {0:>8.6f}'.format(obuf_leak_power))
self.logger.debug('\tRead Energy (per bit) (nJ) : {0:>8.6f}'.format(obuf_read_energy))
self.logger.debug('\tWrite Energy (per bit) (nJ) : {0:>8.6f}'.format(obuf_write_energy))
##################################################
# Get stats for systolic array
core_csv = os.path.join('./results', 'systolic_array_synth.csv')
core_synth_data = pandas.read_csv(core_csv)
lookup_dict = {}
lookup_dict['Max Precision (bits)'] = pmax
lookup_dict['Min Precision (bits)'] = pmin
lookup_dict['N'] = N
lookup_dict['M'] = M
core_data = lookup_pandas_dataframe(core_synth_data, lookup_dict)
if len(core_data) == 0:
lookup_dict['N'] = 4
lookup_dict['M'] = 4
core_data = lookup_pandas_dataframe(core_synth_data, lookup_dict)
assert len(core_data) == 1
core_area = float(core_data['Area (um^2)']) * 1.e-6 * (N * M) / 16.
core_dyn_power = float(core_data['Dynamic Power (nW)']) * (N * M) / 16.
core_dyn_energy = core_dyn_power / float(core_data['Frequency'])
core_leak_power = float(core_data['Leakage Power (nW)']) * (N * M) / 16.
core_leak_energy = core_leak_power / float(core_data['Frequency'])
else:
core_area = float(core_data['Area (um^2)']) * 1.e-6
core_dyn_power = float(core_data['Dynamic Power (nW)'])
core_dyn_energy = core_dyn_power / float(core_data['Frequency'])
core_leak_power = float(core_data['Leakage Power (nW)'])
core_leak_energy = core_leak_power / float(core_data['Frequency'])
self.logger.debug('Core :')
self.logger.debug('\tDimensions : {0}x{1}-systolic array'.format(N, M))
self.logger.debug('\tMax-Precision : {}'.format(pmax))
self.logger.debug('\tMin-Precision : {}'.format(pmin))
self.logger.debug('\tLeak Energy (nJ) : {}'.format(core_leak_energy))
self.logger.debug('\tDynamic Energy (nJ) : {}'.format(core_dyn_energy))
self.logger.debug('\tArea (mm^2) : {}'.format(core_area))
##################################################
return core_area, wbuf_area, ibuf_area, obuf_area
def get_energy_cost(self):
if self.energy_costs is not None:
return self.energy_costs
frequency = self.accelerator.frequency
##################################################
N = self.accelerator.N
M = self.accelerator.M
pmax = self.accelerator.pmax
pmin = self.accelerator.pmin
wbuf_size = self.accelerator.sram['wgt'] * 8
ibuf_size = self.accelerator.sram['act'] * 8
obuf_size = self.accelerator.sram['out'] * 8
wbuf_bank = N * 2
ibuf_bank = N * 2
obuf_bank = 2
wbuf_bits = (pmax * pmax / pmin) * M
ibuf_bits = (pmax * pmax / pmin)
obuf_bits = 32 * M
wbuf_word = ceil_a_by_b(wbuf_size, wbuf_bank * wbuf_bits)
ibuf_word = ceil_a_by_b(ibuf_size, ibuf_bank * ibuf_bits)
obuf_word = ceil_a_by_b(obuf_size, obuf_bank * obuf_bits)
##################################################
wbuf_area, wbuf_leak_power, wbuf_read_energy, wbuf_write_energy = get_sram_data(self.sram_df, wbuf_bits, wbuf_size/8, wbuf_bank, 2)
self.logger.debug('WBUF :')
self.logger.debug('\tBanks : {0:>8}'.format(wbuf_bank))
self.logger.debug('\tBitWidth : {0:>8}'.format(wbuf_bits))
self.logger.debug('\tWords : {0:>8}'.format(wbuf_word))
self.logger.debug('\tTotal Size (kBytes) : {0:>8}'.format(wbuf_size/8./1024.))
self.logger.debug('\tArea : {0:>8.2f}'.format(wbuf_area))
self.logger.debug('\tLeak Energy (per clock) : {0:>8.6f}'.format(wbuf_leak_power))
self.logger.debug('\tRead Energy (per bit) (nJ) : {0:>8.6f}'.format(wbuf_read_energy))
self.logger.debug('\tWrite Energy (per bit) (nJ) : {0:>8.6f}'.format(wbuf_write_energy))
##################################################
ibuf_area, ibuf_leak_power, ibuf_read_energy, ibuf_write_energy = get_sram_data(self.sram_df, ibuf_bits, ibuf_size/8, ibuf_bank, 2)
self.logger.debug('IBUF :')
self.logger.debug('\tBanks : {0:>8}'.format(ibuf_bank))
self.logger.debug('\tBitWidth : {0:>8}'.format(ibuf_bits))
self.logger.debug('\tWords : {0:>8}'.format(ibuf_word))
self.logger.debug('\tTotal Size (kBytes) : {0:>8}'.format(ibuf_size/8./1024.))
self.logger.debug('\tArea : {0:>8.2f}'.format(ibuf_area))
self.logger.debug('\tLeak Energy (per clock) : {0:>8.6f}'.format(ibuf_leak_power))
self.logger.debug('\tRead Energy (per bit) (nJ) : {0:>8.6f}'.format(ibuf_read_energy))
self.logger.debug('\tWrite Energy (per bit) (nJ) : {0:>8.6f}'.format(ibuf_write_energy))
##################################################
obuf_area, obuf_leak_power, obuf_read_energy, obuf_write_energy = get_sram_data(self.sram_df, obuf_bits, obuf_size/8, obuf_bank, 2)
self.logger.debug('OBUF :')
self.logger.debug('\tBanks : {0:>8}'.format(obuf_bank))
self.logger.debug('\tBitWidth : {0:>8}'.format(obuf_bits))
self.logger.debug('\tWords : {0:>8}'.format(obuf_word))
self.logger.debug('\tTotal Size (kBytes) : {0:>8}'.format(obuf_size/8./1024.))
self.logger.debug('\tArea : {0:>8.2f}'.format(obuf_area))
self.logger.debug('\tLeak Energy (per clock) : {0:>8.6f}'.format(obuf_leak_power))
self.logger.debug('\tRead Energy (per bit) (nJ) : {0:>8.6f}'.format(obuf_read_energy))
self.logger.debug('\tWrite Energy (per bit) (nJ) : {0:>8.6f}'.format(obuf_write_energy))
##################################################
# Get stats for systolic array
core_csv = os.path.join('./results', 'systolic_array_synth.csv')
core_synth_data = pandas.read_csv(core_csv)
lookup_dict = {}
lookup_dict['Max Precision (bits)'] = pmax
lookup_dict['Min Precision (bits)'] = pmin
lookup_dict['N'] = N
lookup_dict['M'] = M
core_data = lookup_pandas_dataframe(core_synth_data, lookup_dict)
if len(core_data) == 0:
lookup_dict['N'] = 4
lookup_dict['M'] = 4
core_data = lookup_pandas_dataframe(core_synth_data, lookup_dict)
assert len(core_data) == 1
core_area = float(core_data['Area (um^2)']) * 1.e-6 * (N * M) / 16.
core_dyn_power = float(core_data['Dynamic Power (nW)']) * (N * M) / 16.
core_dyn_energy = core_dyn_power / float(core_data['Frequency'])
core_leak_power = float(core_data['Leakage Power (nW)']) * (N * M) / 16.
core_leak_energy = core_leak_power / float(core_data['Frequency'])
else:
core_area = float(core_data['Area (um^2)']) * 1.e-6
core_dyn_power = float(core_data['Dynamic Power (nW)'])
core_dyn_energy = core_dyn_power / float(core_data['Frequency'])
core_leak_power = float(core_data['Leakage Power (nW)'])
core_leak_energy = core_leak_power / float(core_data['Frequency'])
self.logger.debug('Core :')
self.logger.debug('\tDimensions : {0}x{1}-systolic array'.format(N, M))
self.logger.debug('\tMax-Precision : {}'.format(pmax))
self.logger.debug('\tMin-Precision : {}'.format(pmin))
self.logger.debug('\tLeak Energy (nJ) : {}'.format(core_leak_energy))
self.logger.debug('\tDynamic Energy (nJ) : {}'.format(core_dyn_energy))
self.logger.debug('\tArea (mm^2) : {}'.format(core_area))
##################################################
total_leak_energy = core_leak_energy + (wbuf_leak_power + ibuf_leak_power + obuf_leak_power) * 1.e9 / frequency
return total_leak_energy, core_dyn_energy, wbuf_read_energy, wbuf_write_energy, ibuf_read_energy, ibuf_write_energy, obuf_read_energy, obuf_write_energy
def __str__(self):
ret = ''
ret += 'Simulator object'
ret += '\n'
ret += '\tMax supported precision: {}'.format(self.accelerator.pmax)
ret += '\n'
ret += '\tMin supported precision: {}'.format(self.accelerator.pmin)
ret += '\n'
ret += '\tSystolic array size: {} -inputs x {} -outputs'.format(
self.accelerator.N,
self.accelerator.M)
ret += '\n'
ret += '\tWbuf size: {:,} Bytes'.format(self.accelerator.sram['wgt'])
ret += '\n'
ret += '\tIbuf size: {:,} Bytes'.format(self.accelerator.sram['act'])
ret += '\n'
ret += '\tObuf size: {:,} Bytes'.format(self.accelerator.sram['out'])
ret += '\n'
ret += 'Double buffering enabled. Sizes of SRAM are halved'
return ret
def loop_estimate_stats(self, loop_instruction, verbose=False):
"""
args:
loop_instruction: Loops for the NN.
index 0 = outer loop
index -1 = inner loop
"""
# The following loop promotes Memory accesses to improve reuse
loop_instruction.promote_mem_ops(self.accelerator.sram)
# get stats
stats = loop_instruction.get_stats(self.accelerator, verbose)
return stats
def get_FC_cycles(self, Ni, No,
iprec, wprec,
batch_size=1):
"""
Get number of cycles required for Fully-Connected Layer.
args:
Ni: Input neurons
No: Output neurons
iprec: Precision for activations (bits)
wprec: Precision for weights (bits)
batch_size: Batch size for the layer
description:
This function calls the get_conv_cycles function
"""
total_cycles = self.get_conv_cycles(1, 1, 1, Ni, No, iprec, wprec, batch_size)
return total_cycles
def get_perf_factor(self, iprec, wprec):
iprec = max(iprec, self.accelerator.pmin)
wprec = max(wprec, self.accelerator.pmin)
return int(self.accelerator.pmax / iprec) * int(self.accelerator.pmax / wprec)
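        # e.g. with pmax=8, pmin=2 and 2-bit activations/weights this returns
        # (8/2) * (8/2) = 16, matching the peak-throughput scaling in __init__.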
def get_conv_cycles(self, K, O, S, IC, OC, iprec, wprec, batch_size=1, im2col=False):
"""
        Get number of cycles required for a Convolution Layer.
args:
K: Kernel Size
O: Output Size
S: Input Stride
IC: Input Channels
OC: Output Channels
iprec: Precision for activations (bits)
wprec: Precision for weights (bits)
batch_size: Batch size for the layer
description:
            This function does an exhaustive search to find the optimal
Tiling and Ordering parameters
assumptions:
(1) uses an estimate of the compute cycles, instead of actually
simulating the number of cycles
"""
B = batch_size
I = (O - 1) * S + K
# We do not tile the "K" dimension and compute an entire 2-D conv at a
# time
num_O_tiles = int(math.ceil(log2(O))) + 1
num_IC_tiles = int(math.ceil(log2(IC))) + 1
num_OC_tiles = int(math.ceil(log2(math.ceil(float(OC)/self.accelerator.M)))) + 1
num_B_tiles = int(math.ceil(log2(B))) + 1
self.logger.debug('Number of O Tiles: {}'.format(num_O_tiles))
self.logger.debug('Number of IC Tiles: {}'.format(num_IC_tiles))
self.logger.debug('Number of OC Tiles: {}'.format(num_OC_tiles))
self.logger.debug('Number of B Tiles: {}'.format(num_B_tiles))
best_instructions_dict = {}
conv_params = self.accelerator, K, O, S, IC, OC, B, iprec, wprec, im2col, self.get_energy_cost()
best_instructions, best_tiling, best_order, _, _ = optimize_for_order(conv_params)
stats = get_stats_fast(conv_params, best_tiling, best_order, verbose=False)
act_reads = stats.reads['act']
wgt_reads = stats.reads['wgt']
out_reads = stats.reads['out']
dram_reads = stats.reads['dram']
out_writes = stats.writes['out']
dram_writes = stats.writes['dram']
best_cycles = stats.total_cycles
num_ops = O * O * K * K * IC * OC * B
# self.logger.debug('Best Operations: {}'.format(best_operations))
self.logger.debug('Conv Layer')
self.logger.debug('Num of ops: {}'.format(num_ops))
self.logger.debug('Kernel Size: {}x{}x{}x{}'.format(K, K, IC, OC))
self.logger.debug('Output Size: {}x{}x{}'.format(O, O, OC))
self.logger.debug('Stride Size: {}x{}'.format(S, S))
self.logger.debug('Input Size: {}x{}x{}'.format(I, I, IC))
self.logger.debug('Max Precision: {}'.format(self.accelerator.pmax))
self.logger.debug('Min Precision: {}'.format(self.accelerator.pmin))
self.logger.debug('Activation Precision: {}'.format(iprec))
self.logger.debug('Weight Precision: {}'.format(wprec))
self.logger.debug('Performance Factor: {}'.format(self.get_perf_factor(iprec, wprec)))
self.logger.debug('Total Cycles: {:,}'.format(best_cycles))
cycles_per_batch = ceil_a_by_b(best_cycles, batch_size)
self.logger.debug('Total Cycles per batch: {:,}'.format(cycles_per_batch))
ops_per_cycle = float(num_ops) / best_cycles
self.logger.debug('Ops/Cycle: {:,.2f}'.format(ops_per_cycle))
ops_per_cycle_per_pe = float(ops_per_cycle) / (self.accelerator.N * self.accelerator.M)
self.logger.debug('Ops/Cycle/PE: {:,.4}'.format(ops_per_cycle_per_pe))
return stats, best_instructions
def get_cycles(self, layer, batch_size=1):
if isinstance(layer, ConvLayer):
return self.get_conv_cycles(layer.sfil, # K
layer.hofm, # Oh == Ow
layer.htrd, # S
layer.nifm, # NI
layer.nofm, # NO
layer.iprec, # Activation Precision
layer.wprec, # Weight Precision
batch_size, # Batch Size
                                        im2col=layer.im2col) # im2col flag
|
11572406
|
from setuptools import setup, find_packages
setup(
name='django-node-assets',
version='0.9.9',
description='The Django application that allows install and serve assets via Node.js package '
'manager infrastructure.',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/whitespy/django-node-assets',
long_description=open('README.rst').read(),
packages=find_packages(),
include_package_data=True,
keywords='django assets staticfiles Node.js npm package.json',
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Framework :: Django',
'Framework :: Django :: 2.1',
'Framework :: Django :: 2.2',
'Framework :: Django :: 3.0',
'Framework :: Django :: 3.1',
'Framework :: Django :: 3.2',
]
)
|
11572444
|
import os
from flask import Flask, jsonify, request
from discord_interactions import verify_key_decorator, InteractionType, InteractionResponseType, InteractionResponseFlags
CLIENT_PUBLIC_KEY = os.getenv('CLIENT_PUBLIC_KEY')
app = Flask(__name__)
@app.route('/interactions', methods=['POST'])
@verify_key_decorator(CLIENT_PUBLIC_KEY)
def interactions():
if request.json['type'] == InteractionType.APPLICATION_COMMAND:
return jsonify({
'type': InteractionResponseType.CHANNEL_MESSAGE_WITH_SOURCE,
'data': {
'content': 'Hello world'
}
})
elif request.json['type'] == InteractionType.MESSAGE_COMPONENT:
return jsonify({
'type': InteractionResponseType.CHANNEL_MESSAGE_WITH_SOURCE,
'data': {
'content': 'Hello, you interacted with a component.',
'flags': InteractionResponseFlags.EPHEMERAL
}
})
|
11572446
|
from __future__ import absolute_import
from rlib import jit
from rlib.debug import make_sure_not_resized
from rtruffle.node import Node
from som.interpreter.ast.frame import (
create_frame_args,
create_frame_3,
create_frame_2,
create_frame_1,
)
from som.vmobjects.method import AbstractMethod
def get_printable_location(node):
assert isinstance(node, AstMethod)
return node.source_section.identifier
jitdriver_1 = jit.JitDriver(
greens=["node"],
get_printable_location=get_printable_location,
reds=["rcvr"],
is_recursive=True,
    # the next line is a workaround for a likely bug in RPython
# for some reason, the inlining heuristics default to "never inline" when
# two different jit drivers are involved (in our case, the primitive
# driver, and this one).
# the next line says that calls involving this jitdriver should always be
# inlined once (which means that things like Integer>>< will be inlined
# into a while loop again, when enabling this driver).
should_unroll_one_iteration=lambda self: True,
)
jitdriver_2 = jit.JitDriver(
greens=["node"],
get_printable_location=get_printable_location,
reds=["rcvr", "arg"],
is_recursive=True,
should_unroll_one_iteration=lambda self: True,
)
jitdriver_3 = jit.JitDriver(
greens=["node"],
get_printable_location=get_printable_location,
reds=["rcvr", "arg1", "arg2"],
is_recursive=True,
should_unroll_one_iteration=lambda self: True,
)
jitdriver_args = jit.JitDriver(
greens=["node"],
get_printable_location=get_printable_location,
reds=["rcvr", "args"],
is_recursive=True,
should_unroll_one_iteration=lambda self: True,
)
class _Invokable(Node):
"""
Only needed to work around RPython type system.
Otherwise the parent field would point to a non-Node type (AstMethod)
"""
_immutable_fields_ = ["expr_or_sequence?"]
_child_nodes_ = ["expr_or_sequence"]
def __init__(self, expr_or_sequence):
Node.__init__(self)
self.expr_or_sequence = self.adopt_child(expr_or_sequence)
class AstMethod(AbstractMethod):
_immutable_fields_ = [
"invokable",
"_arg_inner_access[*]",
"_size_frame",
"_size_inner",
"_embedded_block_methods",
"source_section",
"_lexical_scope",
]
def __init__(
self,
signature,
expr_or_sequence,
arg_inner_access,
size_frame,
size_inner,
embedded_block_methods,
source_section,
lexical_scope,
):
AbstractMethod.__init__(self, signature)
assert isinstance(arg_inner_access, list)
make_sure_not_resized(arg_inner_access)
self._arg_inner_access = arg_inner_access
self._size_frame = size_frame
self._size_inner = size_inner
self._embedded_block_methods = embedded_block_methods
self.source_section = source_section
self.invokable = _Invokable(expr_or_sequence)
self._lexical_scope = lexical_scope
def set_holder(self, value):
self._holder = value
for method in self._embedded_block_methods:
method.set_holder(value)
@jit.elidable_promote("all")
def get_number_of_arguments(self):
return self._signature.get_number_of_signature_arguments()
def invoke_1(node, rcvr): # pylint: disable=no-self-argument
jitdriver_1.jit_merge_point(node=node, rcvr=rcvr)
frame = create_frame_1(
rcvr,
node._size_frame,
node._size_inner,
)
return node.invokable.expr_or_sequence.execute(frame)
def invoke_2(node, rcvr, arg): # pylint: disable=no-self-argument
jitdriver_2.jit_merge_point(node=node, rcvr=rcvr, arg=arg)
frame = create_frame_2(
rcvr,
arg,
node._arg_inner_access[0],
node._size_frame,
node._size_inner,
)
return node.invokable.expr_or_sequence.execute(frame)
def invoke_3(node, rcvr, arg1, arg2): # pylint: disable=no-self-argument
jitdriver_3.jit_merge_point(node=node, rcvr=rcvr, arg1=arg1, arg2=arg2)
frame = create_frame_3(
rcvr,
arg1,
arg2,
node._arg_inner_access,
node._size_frame,
node._size_inner,
)
return node.invokable.expr_or_sequence.execute(frame)
def invoke_args(node, rcvr, args): # pylint: disable=no-self-argument
assert args is not None
make_sure_not_resized(args)
jitdriver_args.jit_merge_point(node=node, rcvr=rcvr, args=args)
frame = create_frame_args(
rcvr,
args,
node._arg_inner_access,
node._size_frame,
node._size_inner,
)
return node.invokable.expr_or_sequence.execute(frame)
def inline(self, mgenc):
mgenc.merge_into_scope(self._lexical_scope)
self.invokable.expr_or_sequence.adapt_after_inlining(mgenc)
return self.invokable.expr_or_sequence
def adapt_after_outer_inlined(self, removed_ctx_level, mgenc_with_inlined):
self.invokable.expr_or_sequence.adapt_after_outer_inlined(
removed_ctx_level, mgenc_with_inlined
)
if removed_ctx_level == 1:
self._lexical_scope.drop_inlined_scope()
|
11572458
|
import synapse.lib.module as s_module
class GovCnModule(s_module.CoreModule):
def getModelDefs(self):
modl = {
'types': (
('gov:cn:icp',
('int', {}),
{'doc': 'A Chinese Internet Content Provider ID.'},
),
('gov:cn:mucd',
('int', {}),
{'doc': 'A Chinese PLA MUCD.'},
),
),
'forms': (
('gov:cn:icp', {}, (
('org', ('ou:org', {}), {
'doc': 'The org with the Internet Content Provider ID.',
}),
)),
                # TODO - Add 'org' as a secondary property to mucd?
('gov:cn:mucd', {}, ()),
)
}
name = 'gov:cn'
return ((name, modl), )
|
11572535
|
import math
from bluegraph.core.utils import *
from bluegraph.core.utils import _aggregate_values
def test_normalize_to_set():
assert(
normalize_to_set({1, 2, 3}) == {1, 2, 3})
assert(
normalize_to_set(1) == {1})
assert(
normalize_to_set(math.nan) == set())
assert(
normalize_to_set("lala") == {"lala"})
assert(
normalize_to_set("lala") == set(["lala"]))
def test_safe_intersection():
assert(
safe_intersection("lala", {"lala", 1}) == {"lala"})
assert(
safe_intersection("lala", "lala") == {"lala"})
assert(
safe_intersection("lala", math.nan) == set())
def test_str_to_set():
assert(str_to_set("{'1', '2', '3'}") == {'1', '2', '3'})
def test_top_n():
d = {
"a": 4,
"b": 5,
"c": 1,
"d": 6,
"e": 10
}
assert(top_n(d, 3) == ['e', 'd', 'b'])
assert(top_n(d, 3, smallest=True) == ["c", "a", "b"])
def test_aggregate_values():
assert(
_aggregate_values(["1", {"1", "2"}, {"2", "3"}]) ==
{"1", "2", "3"})
def test_element_has_type():
assert(element_has_type({"M", "N", "O"}, {"M", "N"}))
assert(not element_has_type({"M", "N", "O"}, {"M", "K"}))
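# A minimal sketch of the two helpers exercised above, inferred only from the
# behaviour these tests pin down (the real bluegraph implementations may
# differ); kept as a comment so it does not shadow the imported functions:
#
#   def normalize_to_set(value):
#       if isinstance(value, set):
#           return value
#       if isinstance(value, float) and math.isnan(value):
#           return set()
#       return {value}
#
#   def safe_intersection(a, b):
#       return normalize_to_set(a) & normalize_to_set(b)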
|
11572601
|
import sys
import os
import argparse
import xmltodict
import json
from datetime import datetime
from glob import glob
from concurrent.futures import ThreadPoolExecutor
import re
MAX_WORKERS = 50
FORCE_LIST = ('course', 'section', 'schedule',
'instructor', 'attribute', 'tag',
'learningObjective')
MAX_SECTIONS = 20
BASE_TIME = datetime.strptime('', '')
TIME_FORMAT = '%I:%M:%S %p'
def postprocessor(path, key, value):
if value and key[:-1] in FORCE_LIST:
return key, value[key[:-1]]
else:
return key, value
def parse_time(s):
if not s:
return 0
dt = datetime.strptime(s, TIME_FORMAT)
return (dt - BASE_TIME).total_seconds()
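# Note: BASE_TIME is datetime.strptime('', ''), i.e. 1900-01-01 00:00:00, so
# parse_time returns seconds since midnight. Worked example:
#   parse_time('01:30:00 PM') -> 13*3600 + 30*60 = 48600.0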
def parse_courses_xml(doc):
body = xmltodict.parse(doc, force_list=FORCE_LIST,
postprocessor=postprocessor)
courses = body['xml']['courses']
if not courses:
return []
result = []
for course in courses:
PLUCK_KEYS = ('year', 'subject', 'code', 'title', 'description',
'repeatable', 'grading', 'unitsMin', 'unitsMax')
generated = {k: course[k] for k in PLUCK_KEYS}
generated['sections'] = []
generated['gers'] = []
# Metadata
generated['objectID'] = course['administrativeInformation']['courseId']
generated['number'] = '{} {}'.format(course['subject'], course['code'])
# GER list for filtering
if course['gers']:
generated['gers'] = [x.strip() for x in course['gers'].split(',')]
# Generate all possible units counts (for filtering)
min_units = int(course['unitsMin'])
max_units = int(course['unitsMax'])
generated['units'] = list(range(min_units, max_units + 1))
if course['sections']:
total_sections = len(course['sections'])
generated['totalSections'] = total_sections
course['sections'].sort(key=lambda c: c['sectionNumber'])
for section in course['sections'][:MAX_SECTIONS]:
PLUCK_SECTION_KEYS = ('term', 'termId', 'sectionNumber',
'component', 'notes', 'classId',
'currentClassSize', 'maxClassSize')
gen_section = {k: section[k] for k in PLUCK_SECTION_KEYS}
gen_section['schedules'] = []
# Timestamps for sort / filter
for schedule in section['schedules']:
PLUCK_SCHEDULE_KEYS = ('startTime', 'endTime', 'location')
gen_schedule = {k: schedule[k]
for k in PLUCK_SCHEDULE_KEYS}
if schedule['instructors']:
gen_schedule['instructors'] = [({
'name': i['name'], 'sunet': i['sunet']
}) for i in schedule['instructors']]
else:
gen_schedule['instructors'] = []
gen_schedule['startTimestamp'] = parse_time(
schedule['startTime'])
gen_schedule['endTimestamp'] = parse_time(
schedule['endTime'])
# Fix whitespace in days
if schedule['days']:
gen_schedule['days'] = \
re.sub(r'\s+', ' ', schedule['days']).strip()
else:
gen_schedule['days'] = None
gen_section['schedules'].append(gen_schedule)
generated['sections'].append(gen_section)
result.append(generated)
return result
def process_file(name, f, dest):
print(' Processing', name)
try:
courses = parse_courses_xml(f.read())
with open(dest, 'w+') as f:
json.dump(courses, f, indent=4)
except Exception as e:
print(' Error encountered processing', name, e)
raise e
print(' Processed', name)
def main():
parser = argparse.ArgumentParser(description='fast-courses parser')
parser.add_argument('--key', '-k', type=str)
parser.add_argument('--outdir', '-o', type=str, required=True)
parser.add_argument('--pattern', '-p', type=str)
parser.add_argument('files', nargs='*', type=argparse.FileType('r'),
default=[sys.stdin])
args = parser.parse_args()
print('Serializing ExploreCourses XML to JSON...')
os.makedirs(args.outdir, exist_ok=True)
if args.pattern:
names = glob(args.pattern)
files = [open(n, 'r') for n in names]
else:
files = args.files
with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
for f in files:
if args.key:
base = args.key
else:
base = os.path.splitext(os.path.basename(f.name))[0]
dest = os.path.join(args.outdir, base + '.json')
# process_file(base, f, dest)
executor.submit(process_file, base, f, dest)
print('Finished serializing ExploreCourses XML to JSON!')
if __name__ == '__main__':
main()
|
11572604
|
from os import system
from time import sleep
from random import randrange
def RunManyJobs():
# run many simple jobs
jobPath = 'autotest/simple_one_cpu.job'
task = ''
for i in range(0, 500):
task += 'run ' + jobPath + ';'
cmd = './prun -c "' + task + '"'
system(cmd)
jobPath = 'autotest/simple_many_cpu.job'
task = ''
for i in range(0, 500):
task += 'run ' + jobPath + ';'
cmd = './prun -c "' + task + '"'
system(cmd)
# send many large source code files
jobPath = 'autotest/many_code.job'
task = ''
for i in range(0, 1):
task += 'run ' + jobPath + ';'
cmd = './prun -c "' + task + '"'
system(cmd)
# run many medium jobs
jobPath = 'autotest/medium.job'
task = ''
for i in range(0, 50):
task += 'run ' + jobPath + ';'
cmd = './prun -c "' + task + '"'
system(cmd)
# run many heavy jobs
jobPath = 'autotest/heavy.job'
task = ''
for i in range(0, 10):
task += 'run ' + jobPath + ';'
cmd = './prun -c "' + task + '"'
system(cmd)
# run many meta jobs
jobPath = 'test.meta'
task = ''
for i in range(0, 50):
task += 'run ' + jobPath + ';'
cmd = './prun -c "' + task + '"'
system(cmd)
# run cron jobs
task = 'run cron.job; run cron.meta'
cmd = './prun -c "' + task + '"'
system(cmd)
def RunHeavyJobs():
# run many heavy jobs
jobPath = 'autotest/heavy.job'
task = ''
for i in range(0, 10):
task += 'run ' + jobPath + ';'
cmd = './prun -c "' + task + '"'
system(cmd)
def AddExistingUser():
task = ''
for i in range(0, 100):
task += 'add localhost groupx;'
cmd = './prun -c "' + task + '"'
system(cmd)
def StopAll():
cmd = './prun -c "stopall"'
system(cmd)
def DeleteGroup():
task = 'deleteg groupx; deleteg hosts_group1'
cmd = './prun -c "' + task + '"'
system(cmd)
def AddGroup():
task = 'addg hosts_group1'
cmd = './prun -c "' + task + '"'
system(cmd)
def JobInfo(jobId):
task = 'info ' + str(jobId)
cmd = './prun -c "' + task + '"'
system(cmd)
def StopJob(jobId):
task = 'stop ' + str(jobId)
cmd = './prun -c "' + task + '"'
system(cmd)
def Stat():
task = 'stat; jobs; ls;'
cmd = './prun -c "' + task + '"'
system(cmd)
# check job stopping
RunManyJobs()
AddExistingUser()
StopAll()
for i in range(0, 10):
RunManyJobs()
StopAll()
sleep(2)
StopAll()
for i in range(0, 5):
RunHeavyJobs()
sleep(1)
StopAll()
# check group removal
for i in range(0, 5):
RunManyJobs()
sleep(1)
DeleteGroup()
sleep(1)
AddGroup()
StopAll()
# check job info statistics and certain job stopping
RunManyJobs()
for i in range(0, 100):
jobId = randrange(0, 1000)
JobInfo(jobId)
jobId = randrange(0, 1000)
StopJob(jobId)
StopAll()
# check statistics
RunManyJobs()
for i in range(0, 1000):
Stat()
print('done')
|
11572615
|
import asyncio
from unittest import mock
import pytest
from aiohttp import web
from aiohttp_cors import CorsConfig, APP_CONFIG_KEY
from aiohttp_cors import ResourceOptions, CorsViewMixin, custom_cors
DEFAULT_CONFIG = {
'*': ResourceOptions()
}
CLASS_CONFIG = {
'*': ResourceOptions()
}
CUSTOM_CONFIG = {
'www.client1.com': ResourceOptions(allow_headers=['X-Host'])
}
class SimpleView(web.View, CorsViewMixin):
async def get(self):
return web.Response(text="Done")
class SimpleViewWithConfig(web.View, CorsViewMixin):
cors_config = CLASS_CONFIG
async def get(self):
return web.Response(text="Done")
class CustomMethodView(web.View, CorsViewMixin):
cors_config = CLASS_CONFIG
async def get(self):
return web.Response(text="Done")
@custom_cors(CUSTOM_CONFIG)
async def post(self):
return web.Response(text="Done")
@pytest.fixture
def _app():
return web.Application()
@pytest.fixture
def cors(_app):
ret = CorsConfig(_app, defaults=DEFAULT_CONFIG)
_app[APP_CONFIG_KEY] = ret
return ret
@pytest.fixture
def app(_app, cors):
    # a trick to install the CORS config into the app via the fixture dependency
return _app
def test_raise_exception_when_cors_not_configure():
request = mock.Mock()
request.app = {}
view = CustomMethodView(request)
with pytest.raises(ValueError):
view.get_request_config(request, 'post')
async def test_raises_forbidden_when_config_not_found(app):
app[APP_CONFIG_KEY].defaults = {}
request = mock.Mock()
request.app = app
request.headers = {
'Origin': '*',
'Access-Control-Request-Method': 'GET'
}
view = SimpleView(request)
with pytest.raises(web.HTTPForbidden):
await view.options()
def test_method_with_custom_cors(app):
"""Test adding resource with web.View as handler"""
request = mock.Mock()
request.app = app
view = CustomMethodView(request)
assert hasattr(view.post, 'post_cors_config')
assert asyncio.iscoroutinefunction(view.post)
config = view.get_request_config(request, 'post')
assert config.get('www.client1.com') == CUSTOM_CONFIG['www.client1.com']
def test_method_with_class_config(app):
"""Test adding resource with web.View as handler"""
request = mock.Mock()
request.app = app
view = SimpleViewWithConfig(request)
assert not hasattr(view.get, 'get_cors_config')
config = view.get_request_config(request, 'get')
assert config.get('*') == CLASS_CONFIG['*']
def test_method_with_default_config(app):
"""Test adding resource with web.View as handler"""
request = mock.Mock()
request.app = app
view = SimpleView(request)
assert not hasattr(view.get, 'get_cors_config')
config = view.get_request_config(request, 'get')
assert config.get('*') == DEFAULT_CONFIG['*']
|
11572678
|
import psycopg2
from flask import request
from structlog import get_logger
from tenacity import retry, wait_random_exponential, retry_if_exception_type, stop_after_delay
from entityservice import database as db
from entityservice.cache.active_runs import set_run_state_deleted
from entityservice.database import delete_run_data, get_similarity_file_for_run
from entityservice.views.auth_checks import abort_if_run_doesnt_exist, abort_if_invalid_results_token
from entityservice.views.serialization import RunDescription
from entityservice.tasks import delete_minio_objects
logger = get_logger()
def get(project_id, run_id):
log = logger.bind(pid=project_id, rid=run_id)
log.info("request description of a run")
authorize_run_detail(project_id, run_id)
log.debug("request for run description authorized")
with db.DBConn() as conn:
log.debug("Retrieving run description from database")
run_object = db.get_run(conn, run_id)
return RunDescription().dump(run_object)
@retry(wait=wait_random_exponential(multiplier=1, max=60),
retry=retry_if_exception_type(psycopg2.IntegrityError),
stop=stop_after_delay(120))
def _delete_run(run_id, log):
# Retry if a db integrity error occurs (e.g. when a worker is writing to the same row)
# Randomly wait up to 2^x * 1 seconds between each retry until the range reaches 60 seconds, then randomly up to 60 seconds afterwards
set_run_state_deleted(run_id)
with db.DBConn() as conn:
log.debug("Retrieving run details from database")
similarity_file = get_similarity_file_for_run(conn, run_id)
delete_run_data(conn, run_id)
return similarity_file
def delete(project_id, run_id):
log = logger.bind(pid=project_id, rid=run_id)
log.debug("request to delete run")
authorize_run_detail(project_id, run_id)
log.debug("approved request to delete run")
similarity_file = _delete_run(run_id, log)
log.debug("Deleted run from database")
if similarity_file:
log.debug("Queuing task to remove similarities file from object store")
delete_minio_objects.delay([similarity_file], project_id)
return '', 204
def authorize_run_detail(project_id, run_id):
# Check the project and run resources exist
abort_if_run_doesnt_exist(project_id, run_id)
# Check the caller has a valid results token. Yes it should be renamed.
abort_if_invalid_results_token(project_id, request.headers.get('Authorization'))
|
11572683
|
import os
import torch
import torch.nn as nn
from torch.utils.model_zoo import load_url
from .universal import UniversalProcess
from .util import remove_layer
from .util import initialize_weights
__all__ = ['resnet50']
model_urls = {
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
}
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None,
base_width=64):
super(Bottleneck, self).__init__()
width = int(planes * (base_width / 64.))
self.conv1 = nn.Conv2d(inplanes, width, 1, bias=False)
self.bn1 = nn.BatchNorm2d(width)
self.conv2 = nn.Conv2d(width, width, 3,
stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(width)
self.conv3 = nn.Conv2d(width, planes * self.expansion, 1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000,
large_feature_map=False, attribution_method='CAM',
**kwargs):
super(ResNet, self).__init__()
stride_l3 = 1 if large_feature_map else 2
self.inplanes = 64
self.num_classes = num_classes
self.block = block
self.attribution_method = attribution_method
self.process = UniversalProcess(attribution_method)
self.conv1 = nn.Conv2d(
3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], stride=1)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=stride_l3)
self.layer4 = self._make_layer(block, 512, layers[3], stride=1)
self.conv_last = nn.Conv2d(
512 * block.expansion, num_classes, kernel_size=1)
self.attention = self._get_attention()
initialize_weights(self.modules(), init_mode='xavier')
self.process.set_layers(self.conv_last, self.attention)
def forward(self, x, labels=None, superclass=None, return_cam=False):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
f_former = self.layer4(x)
f = self.conv_last(f_former)
if return_cam:
f = f.detach()
f_former = f_former.detach()
cams = self.process.get_cam(f, f_former, labels,
superclass, return_cam)
return cams
else:
inputs = self.process.input_for_loss(f, f_former)
return {'logits': inputs['logits'],
'probs': inputs['probs'],
'features': inputs['inputs']}
def _make_layer(self, block, planes, blocks, stride):
layers = self._layer(block, planes, blocks, stride)
return nn.Sequential(*layers)
def _layer(self, block, planes, blocks, stride):
downsample = get_downsampling_layer(
self.inplanes, block, planes, stride)
layers = [block(self.inplanes, planes, stride, downsample)]
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes))
return layers
def _get_attention(self):
return nn.Sequential(
nn.Conv2d(512 * self.block.expansion, 1, kernel_size=1),
nn.ReLU(inplace=True),
)
def get_downsampling_layer(inplanes, block, planes, stride):
outplanes = planes * block.expansion
if stride == 1 and inplanes == outplanes:
return
else:
return nn.Sequential(
nn.Conv2d(inplanes, outplanes, 1, stride, bias=False),
nn.BatchNorm2d(outplanes),
)
def load_pretrained_model(model, path=None):
strict_rule = True
if path:
state_dict = torch.load(os.path.join(path, 'resnet50.pth'))
else:
state_dict = load_url(model_urls['resnet50'], progress=True)
state_dict = remove_layer(state_dict, 'fc')
strict_rule = False
model.load_state_dict(state_dict, strict=strict_rule)
return model
def resnet50(pretrained=False, pretrained_path=None, **kwargs):
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model = load_pretrained_model(model, path=pretrained_path)
return model
|
11572695
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('dashboard', views.dashboard, name='dashboard'),
path('corpus', views.tstart, name='corpus'),
path('translate', views.new, name='translate'),
path('corpusinput', views.corpusinput, name='corpusinput'),
path('getinput', views.getinput, name='getinput'),
path('translate_new', views.translate_new, name='translate_new'),
path('add_control_scheme', views.add_control_scheme, name='add_control_scheme'),
#path('translate_single_sentence', views.translate_single_sentence, name='translate_single_sentence'),
path('pushoutput', views.pushoutput, name='pushoutput'),
path('getoutput', views.getoutput, name='getoutput'),
path('preview', views.end, name='preview'),
path('transdelete', views.transdelete, name='transdelete'),
path('exportcsv', views.export_keystroke_csv, name='export_keystroke_csv'),
    path('set_keyboard_controls', views.set_keyboard_controls, name='set_keyboard_controls'),
    path('iscontrolschemedefined', views.isControlSchemeDefined, name='iscontrolschemedefined'),
]
|
11572701
|
import numpy as np
import param
from holoviews import Store, Options, Layout, HoloMap, Histogram, Dimension
try:  # HoloViews <1.8
    from holoviews.operation import TreeOperation
except ImportError:  # HoloViews >=1.8
    from featuremapper.analysis import TreeOperation
class WeightIsotropy(TreeOperation):
"""
Computes a histogram of azimuths between the positional
preferences of pre- and post-synaptic neurons weighted
by the connection strength and normalized relative to
the orientation preference.
Useful for determining whether lateral connection are
anisotropic along the axis of preferred orientation.
"""
num_bins = param.Integer(default=20, doc="""
Number of bins in the histogram.""")
roi = param.NumericTuple(default=(-0.5, -0.5, 0.5, 0.5), doc="""
Region of interest supplied as a four-tuple of the
form (left, bottom, right, top)""")
projections = param.List(default=[], doc="""
List of tuples of the form (sheet, projection).""")
threshold = param.Integer(default=70, doc="""
Threshold as a percentile of the weight field.""")
min_dist = param.Number(default=0.1, doc="""
Minimum distance in sheet coordinates""")
symmetric = param.Boolean(default=True, doc="""
Whether to make isotropy symmetric.""")
@classmethod
def _last(cls, obj):
return obj.last if isinstance(obj, HoloMap) else obj
def _process(self, tree, key=None):
layout = Layout()
for s, p in self.p.projections:
# Get last element
orelmnt = self._last(tree.OrientationPreference[s])
selelmnt = self._last(tree.OrientationSelectivity[s])
xelmnt = self._last(tree.XPreference[s])
yelmnt = self._last(tree.YPreference[s])
# If any preference has not been supplied skip analysis
if any(not o for o in [orelmnt, xelmnt, yelmnt]):
return Layout()
# Flatten preferences
xpref_arr = xelmnt.dimension_values(2)
ypref_arr = yelmnt.dimension_values(2)
sel_arr = selelmnt.dimension_values(2)
# Iterate over CFs in ROI
l, b, r, t = self.p.roi
lat_weights = tree.CFs[p]
azimuths, weights = [], []
for key, cf in lat_weights[l:r, b:t].items():
# Get preferences for a particular unit
unit_angle, unit_x, unit_y = orelmnt[key], xelmnt[key], yelmnt[key]
unit_sel = selelmnt[key]
weight_arr = cf.situated.dimension_values(2)
mask = weight_arr>np.percentile(weight_arr, self.p.threshold)
sel = sel_arr[mask]
weight = weight_arr[mask] * unit_sel * sel
xpref = xpref_arr[mask]
ypref = ypref_arr[mask]
# Compute x/y-preference differences between
# pre- and post-synaptic neurons
dx = xpref - unit_x
dy = ypref - unit_y
d = np.sqrt(dx**2 + dy**2)
# Compute angle between position preferences
# of the pre- and post-synaptic neurons
# Correcting for preferred orientation
conn_angle = np.arctan2(dy, dx)
delta = conn_angle - unit_angle
delta[delta<0] += np.pi*2
delta2 = delta + np.pi
delta2 %= np.pi*2
azimuths.append(delta[d>self.p.min_dist])
weights.append(weight[d>self.p.min_dist])
if self.p.symmetric:
azimuths.append(delta2[d>self.p.min_dist])
weights.append(weight[d>self.p.min_dist])
# Combine azimuths and weights for all CFs
azimuths = np.concatenate(azimuths)
weights = np.concatenate(weights)
# Compute histogram
bins, edges = np.histogram(azimuths, range=(0, 2*np.pi),
bins=self.p.num_bins, weights=weights, normed=True)
# Construct Elements
label =' '.join([s, p])
histogram = Histogram(bins, edges, group="Weight Isotropy",
kdims=[Dimension('Azimuth')], label=label)
layout.WeightIsotropy['_'.join([s, p])] = histogram
return layout
def circular_dist(a, b, period):
"""
Returns the distance between a and b (scalars) in a domain with `period` period.
"""
return np.minimum(np.abs(a - b), period - np.abs(a - b))
class WeightDistribution(TreeOperation):
"""
Computes histogram of the difference in feature
preference between pre- and post-synaptic neurons
weighted by the connection strength between them.
"""
feature = param.String(default='Orientation', doc="""
Feature to compute the distribution over""")
num_bins = param.Integer(default=10, doc="""
Number of histogram bins.""")
normalized = param.Boolean(default=False, doc="""
Whether to normalize the histogram""")
projections = param.List(default=[], doc="""
List of tuples of the form (sheet, projection).""")
weighted = param.Boolean(default=True)
def _process(self, tree, key=None):
preferences = tree[self.p.feature+'Preference']
layout = Layout()
for s, p in self.p.projections:
if s not in preferences:
continue
featurepref = preferences[s]
if isinstance(featurepref, HoloMap):
featurepref = featurepref.last
feature = featurepref.value_dimensions[0]
feature_arr = featurepref.data.flat
cfs = tree.CFs[p]
deltas, weights = [], []
for k, cf in cfs.items():
preferred = featurepref[k]
weight_arr = cf.situated.data.flat
feature_slice = feature_arr[weight_arr>0]
weight_slice = weight_arr[weight_arr>0]
if feature.cyclic:
feature_delta = circular_dist(preferred, feature_slice, feature.range[1])
else:
feature_delta = np.abs(feature_slice-preferred)
deltas.append(feature_delta)
weights.append(weight_slice)
deltas = np.concatenate(deltas)
weights = np.concatenate(weights)
bin_range = (0, feature.range[1]/2.) if feature.cyclic else None
bins, edges = np.histogram(deltas, range=bin_range, bins=self.p.num_bins,
weights=weights, normed=self.p.normalized)
# Construct Elements
label = ' '.join([s,p])
group = '%s Weight Distribution' % self.p.feature
histogram = Histogram(bins, edges, group=group, label=label,
kdims=[' '.join([self.p.feature, 'Difference'])],
vdims=['Weight'])
layout.WeightDistribution['_'.join([s,p])] = histogram
return layout
options = Store.options(backend='matplotlib')
options.Histogram.Weight_Isotropy = Options('plot', projection='polar', show_grid=True)
|
11572716
|
import unittest
from katas.kyu_6.group_in_10s import group_in_10s
class GroupInTensTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(group_in_10s(1, 2, 3), [[1, 2, 3]])
def test_equals_2(self):
self.assertEqual(group_in_10s(8, 12, 38, 3, 17, 19, 25, 35, 50),
[[3, 8], [12, 17, 19], [25], [35, 38], None, [50]])
def test_equals_3(self):
self.assertEqual(group_in_10s(12, 10, 11, 13, 25, 28, 29, 49, 51, 90),
[None, [10, 11, 12, 13], [25, 28, 29], None,
[49], [51], None, None, None, [90]])
def test_equals_4(self):
self.assertEqual(group_in_10s(), [])
def test_equals_5(self):
self.assertEqual(group_in_10s(100), [
None, None, None, None, None, None, None, None, None, None, [100]])
|
11572736
|
from abc import ABC, abstractmethod
from typing import Optional
from datahub.metadata.com.linkedin.pegasus2avro.schema import SchemaMetadata
class KafkaSchemaRegistryBase(ABC):
@abstractmethod
def get_schema_metadata(
self, topic: str, platform_urn: str
) -> Optional[SchemaMetadata]:
pass
|
11572743
|
FILEPATHS = ["/valid/file1", "/valid/file2"]
DIRPATHS = ["/valid/dir1", "/valid/dir2"]
USERS = ['xdsuper', 'samsuper']
GROUPS = ['xdgroup', 'samgroup']
PORTS = ["3000", "3001"]
NAMESERVICES = ["ns1", "ns2"]
INTERFACES = ["eth1", "eth2"]
IPS = ["127.0.0.1"]
PERMISSIONMASKS = ["007", "002"]
PERMISSIONCODES = ["rwx------", "rwxrwx---"]
ZKPORTADDRS = ["0.0.0.0:3000", "0.0.0.0:3001"]
ZKLIMIT = ["1", "10"]
ZKSIZE = ["1", "10"]
key_phrases_plural = [
"values are:",
"values are",
"options are:",
"options are",
"types are:",
"types are",
"Value may be:",
"Value may be",
"One of:",
"One of",
]
key_phrases_singular = [
"set to",
"replaced with",
]
|
11572781
|
from django.core.cache import cache
from django.core.cache.backends.base import DEFAULT_TIMEOUT
from rendertron.storage.base import RendertronStorage
def get_cache_key(request):
return "rendertron:{}".format(request.path)
class DjangoCacheStorage(RendertronStorage):
""" A storage class that uses Django's cache as storage """
@staticmethod
def get_default_options():
return {"TIMEOUT": DEFAULT_TIMEOUT, "VERSION": None}
def get_stored_response(self, request):
cache_key = get_cache_key(request)
cached = cache.get(cache_key, default=None, version=self.options.get("VERSION"))
if cached is not None:
return cached["response"], cached["meta"]
return None, None
def store_response(self, request, response, meta):
cache_key = get_cache_key(request)
cache.set(
cache_key,
{"response": response, "meta": meta},
timeout=self.options.get("TIMEOUT"),
version=self.options.get("VERSION"),
)
|
11572787
|
import os
import numpy as np
import math
import keras.backend as K
import matplotlib.pyplot as plt
from matplotlib import pylab  # used in plot_kinematics for figure sizing
import pickle
import time
import itertools
from scipy.ndimage.filters import gaussian_filter
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM, TimeDistributed, Dropout
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from itertools import product
from random import randint
from scipy import signal
import sys
from numpy import NaN, Inf, arange, isscalar, asarray, array
# Build the model
def construct_model(hidden = 32, lstm_layers = 2, input_dim = 15, output_dim = 1):
model = Sequential()
model.add(LSTM(input_shape = (input_dim,),input_dim=input_dim, output_dim=hidden, return_sequences=True))
for i in range(lstm_layers-1):
model.add(LSTM(output_dim = hidden, return_sequences=True))
model.add(TimeDistributed(Dense(output_dim, activation='sigmoid')))
model.compile(loss=weighted_binary_crossentropy, optimizer='adam', metrics=['accuracy'])
return model
def weighted_binary_crossentropy(y_true, y_pred):
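    # Per-element binary cross-entropy on the first output channel, weighted by
    # (y_true + 0.01) so positive (event) frames dominate the loss while
    # background frames still contribute a small amount.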
a1 = K.mean(np.multiply(K.binary_crossentropy(y_pred[0:1,:], y_true[0:1,:]),(y_true[0:1,:] + 0.01)), axis=-1)
# a2 = K.mean(np.multiply(K.binary_crossentropy(y_pred[1:2,:], y_true[1:2,:]),(y_true[1:2,:] + 0.01)), axis=-1)
# a1 = K.mean(np.multiply(K.binary_crossentropy(y_pred, y_true),(y_true + 0.01)), axis=-1)
return a1 #+ a2
# Build the model (this second definition overrides the one above: later LSTM
# layers are halved in width and the default output dimension is 2)
def construct_model(hidden = 32, lstm_layers = 2, input_dim = 15, output_dim = 2):
model = Sequential()
model.add(LSTM(input_shape = (input_dim,),input_dim=input_dim, output_dim=hidden, return_sequences=True))
for i in range(lstm_layers-1):
        model.add(LSTM(output_dim=hidden // 2**i, return_sequences=True))
model.add(TimeDistributed(Dense(output_dim, activation='sigmoid')))
model.compile(loss=weighted_binary_crossentropy, optimizer='adam', metrics=['accuracy'])
return model
def plot_history(history):
nepoch = len(history.history['loss'])
plt.plot(range(nepoch),history.history['loss'],'r')
plt.plot(range(nepoch),history.history['val_loss'],'b')
axes = plt.gca()
axes.set_ylim([0.001,0.005])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper right')
plt.show()
def peakdet(v, delta, x = None):
"""
Converted from MATLAB script at http://billauer.co.il/peakdet.html
Returns two arrays
function [maxtab, mintab]=peakdet(v, delta, x)
%PEAKDET Detect peaks in a vector
% [MAXTAB, MINTAB] = PEAKDET(V, DELTA) finds the local
% maxima and minima ("peaks") in the vector V.
% MAXTAB and MINTAB consists of two columns. Column 1
% contains indices in V, and column 2 the found values.
%
% With [MAXTAB, MINTAB] = PEAKDET(V, DELTA, X) the indices
% in MAXTAB and MINTAB are replaced with the corresponding
% X-values.
%
% A point is considered a maximum peak if it has the maximal
% value, and was preceded (to the left) by a value lower by
% DELTA.
% <NAME>, 3.4.05 (Explicitly not copyrighted).
% This function is released to the public domain; Any use is allowed.
"""
maxtab = []
mintab = []
if x is None:
x = arange(len(v))
v = asarray(v)
if len(v) != len(x):
sys.exit('Input vectors v and x must have same length')
if not isscalar(delta):
sys.exit('Input argument delta must be a scalar')
if delta <= 0:
sys.exit('Input argument delta must be positive')
mn, mx = Inf, -Inf
mnpos, mxpos = NaN, NaN
lookformax = True
for i in arange(len(v)):
this = v[i]
if this > mx:
mx = this
mxpos = x[i]
if this < mn:
mn = this
mnpos = x[i]
if lookformax:
if this < mx-delta:
maxtab.append((mxpos, mx))
mn = this
mnpos = x[i]
lookformax = False
else:
if this > mn+delta:
mintab.append((mnpos, mn))
mx = this
mxpos = x[i]
lookformax = True
return array(maxtab), array(mintab)
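# For example, peakdet([0, 1, 0, 2, 0], 0.5) returns the maxima [(1, 1), (3, 2)]
# and the single minimum [(2, 0)], each as a numpy array of (index, value) pairs.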
def load_file(filename, input_dim, output_dim, nseqlen = 128):
try:
R = np.loadtxt(filename, delimiter=',')
except:
return None
# find first event
positives1 = np.where(R[:,input_dim] > 0.5)
positives2 = np.where(R[:,input_dim + 1] > 0.5)
if len(positives1[0]) == 0 or len(positives2[0]) == 0:
return None
nstart = max(positives1[0][0], positives2[0][0])
    nstart = nstart - randint(15, nseqlen // 2)
if R.shape[0] < (nstart + nseqlen):
return None
X = R[nstart:(nstart + nseqlen),0:input_dim]
Y = R[nstart:(nstart + nseqlen),input_dim:(input_dim + output_dim)]
# Y = gaussian_filter(Y * 1.0, 1.0)
if (not Y.any()):
return None
if R[0,90] > R[R.shape[1]-1,90]:
cols = [ i for i in range(30,99) if (i % 3) == 0 or (i%3)==2]
X[:,cols] = -X[:,cols]
return X, Y.astype(int)[:,0:output_dim]
def load_data(fdir, input_dim, output_dim, nseqlen, nsamples = 100000):
files = os.listdir(fdir)
# Merge inputs from different files together
ids = []
inputs = np.zeros((len(files), nseqlen, input_dim))
outputs = np.zeros((len(files), nseqlen, output_dim))
n = 0
for i,filename in enumerate(files):
fname = "%s/%s" % (fdir, filename)
data = load_file(fname, input_dim, output_dim, nseqlen)
if not data:
continue
X, Y = data
inputs[n,:,:] = X
outputs[n,:,:] = Y
ids.append(filename)
n = n + 1
if n >= nsamples:
break
return inputs[0:n,:,:], outputs[0:n,:,:], ids
def peak_cmp(annotated, predicted):
dist = []
if len(predicted) == 0 or len(annotated) == 0:
return -1
if len(predicted) != len(annotated):
return -1
for a in annotated:
# if a > 120:
# continue
dist = dist + [min(np.abs(predicted - a))]
if not len(dist):
return -1
return min(dist)
def eval_prediction(likelihood, true, patient, plot = True, shift = 0):
sdist = []
peakind = peakdet(likelihood[:,0],0.5)
for k,v in peakind[0]:
if plot:
plt.axvline(x=k)
sdist.append(peak_cmp(np.where(true[:,0] > 0.5)[0], [k + shift for k,v in peakind[0]]))
# peakind = peakdet(likelihood[:,1],0.5)
# for k,v in peakind[0]:
# if plot:
# plt.axvline(x=k)
# sdist.append(peak_cmp(np.where(true[:,1] > 0.5)[0], [k for k,v in peakind[0]]))
if plot:
plt.plot(likelihood) # continous likelihood process
plt.plot(true) # spikes on events
plt.title(patient)
axes = plt.gca()
axes.set_xlim([0,true.shape[0]])
plt.show()
return sdist
def plot_stats(sdist):
plt.hist(sdist,100,[0, 100])
filtered = [k for k in sdist if k >= 0]
def off_by(threshold, filtered):
ob = [k for k in filtered if k <= threshold]
nel = float(len(filtered))
print("<= %d: %f" % (threshold, len(ob) / float(nel)))
print("Error distribution:")
off_by(1, filtered)
off_by(3, filtered)
off_by(5, filtered)
off_by(10, filtered)
off_by(60, filtered)
print("Mean distance: %f" % (np.mean(filtered)))
def plot_kinematics(filename, fdir="", ids = None, fromfile=False, input_dim = 15, output_dim = 15, model = None, cols = None):
if not fromfile:
ntrial = ids.index(filename)
X = inputs[ntrial,:,cols]
Y = outputs[ntrial,:,0:output_dim]
else:
R = np.loadtxt("%s/%s" % (fdir, filename), delimiter=',')
X = R[:,cols]
Y = R[:,input_dim:(input_dim + output_dim)]
likelihood = model.predict(X.reshape((1,-1,len(cols))))[0]
pylab.rcParams['figure.figsize'] = (5, 4)
eval_prediction(likelihood, Y, filename)
pylab.rcParams['figure.figsize'] = (15, 20)
print("Kinematics of %s" % (filename))
for i in range(15):
ax = plt.subplot(5,3,1+i)
ax.plot(X[:,i])
ax.set_xlim([0,X.shape[0]])
for x in np.where(Y[:,0] > 0.5)[0]:
plt.axvline(x=x, color='g', linewidth=2)
# for x in np.where(Y[:,1] > 0.5)[0]:
# plt.axvline(x=x,color="r")
plt.show()
|
11572817
|
import unittest
from katas.beta.credit_card_validifier import credit
class CreditTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(credit(6011364837263748), 'Discover')
def test_equals_2(self):
self.assertEqual(credit(5318273647283745), 'Master')
def test_equals_3(self):
self.assertEqual(credit(12345678910), 'Invalid')
def test_equals_4(self):
self.assertEqual(credit(371236473823676), 'AMEX')
def test_equals_5(self):
self.assertEqual(credit(4128374839283), 'VISA')
|
11572818
|
from dbeditor.database import Database
from dbeditor.loaders import import_to_table
from dbeditor.loaders.csv_loader import CSVLoader
def test_import_to_table(database: Database, csv_loader: CSVLoader) -> None:
table = database.get_table("second")
import_to_table(table, database.session, csv_loader)
with database.session as s:
assert s.query(table).all() == [
(1, 42, "example"),
(2, 7, "lorem ipsum"),
]
|
11572836
|
from .checkerro import CSPReportOnlyChecker
from .cspcheck_unsafeeval import CSPCheckUnsafeEval
class CSPReportOnlyUnsafeEvalChecker(CSPReportOnlyChecker):
def check(self, headers, opt_options=dict()):
csp = self.getcsp(headers)
if not csp:
return []
directive = csp.directive.SCRIPT_SRC
        values = self.effectiveDirectiveValues(headers, directive, opt_options)
        return CSPCheckUnsafeEval(csp, directive, values).check()
|
11572902
|
import importlib
import six
from scrapy.utils.misc import load_object
from . import connection, defaults
# TODO: add SCRAPY_JOB support.
class Scheduler(object):
"""Redis-based scheduler
Settings
--------
SCHEDULER_PERSIST : bool (default: False)
Whether to persist or clear redis queue.
SCHEDULER_FLUSH_ON_START : bool (default: False)
Whether to flush redis queue on start.
SCHEDULER_IDLE_BEFORE_CLOSE : int (default: 0)
How many seconds to wait before closing if no message is received.
SCHEDULER_QUEUE_KEY : str
Scheduler redis key.
SCHEDULER_QUEUE_CLASS : str
Scheduler queue class.
DUPEFILTER_KEY : str
Scheduler dupefilter redis key.
DUPEFILTER_CLASS : str
Scheduler dupefilter class.
SCHEDULER_SERIALIZER : str
Scheduler serializer.
"""
def __init__(self, server, persist, flush_on_start, queue_key, queue_cls, dupefilter_key, dupefilter_cls, idle_before_close):
"""Initialize scheduler.
Parameters
----------
server : Redis
The redis server instance.
persist : bool
Whether to flush requests when closing. Default is False.
flush_on_start : bool
Whether to flush requests on start. Default is False.
queue_key : str
Requests queue key.
queue_cls : str
Importable path to the queue class.
dupefilter_key : str
Duplicates filter key.
dupefilter_cls : str
Importable path to the dupefilter class.
idle_before_close : int
Timeout before giving up.
"""
if idle_before_close < 0:
raise TypeError("idle_before_close cannot be negative")
self.server = server
self.persist = persist
self.flush_on_start = flush_on_start
self.queue_key = queue_key
self.queue_cls = queue_cls
self.dupefilter_cls = dupefilter_cls
self.dupefilter_key = dupefilter_key
self.idle_before_close = idle_before_close
self.stats = None
def __len__(self):
return len(self.queue)
@classmethod
def from_settings(cls, settings):
kwargs = {
'persist': defaults.SCHEDULER_PERSIST,
'flush_on_start': defaults.SCHEDULER_FLUSH_ON_START,
'queue_key': defaults.SCHEDULER_QUEUE_KEY,
'queue_cls': defaults.SCHEDULER_QUEUE_CLASS,
'dupefilter_cls': defaults.DUPEFILTER_CLASS,
'dupefilter_key': defaults.DUPEFILTER_KEY,
'idle_before_close': defaults.SCHEDULER_IDLE_BEFORE_CLOSE,
}
optional = {
'SCHEDULER_PERSIST': 'persist',
'SCHEDULER_FLUSH_ON_START': 'flush_on_start',
'SCHEDULER_QUEUE_KEY': 'queue_key',
'SCHEDULER_QUEUE_CLASS': 'queue_cls',
'DUPEFILTER_CLASS': 'dupefilter_cls',
'DUPEFILTER_KEY': 'dupefilter_key',
'SCHEDULER_IDLE_BEFORE_CLOSE': 'idle_before_close',
}
for setting_name, name in optional.items():
val = settings.get(setting_name)
if val:
kwargs[name] = val
server = connection.from_settings(settings)
return cls(server=server, **kwargs)
@classmethod
def from_crawler(cls, crawler):
instance = cls.from_settings(crawler.settings)
# FIXME: for now, stats are only supported from this constructor
instance.stats = crawler.stats
return instance
def open(self, spider):
self.spider = spider
try:
self.queue = load_object(self.queue_cls)(
server=self.server,
spider=spider,
key=self.queue_key % {'spider': spider.name}
)
except TypeError as e:
raise ValueError("Failed to instantiate queue class '%s': %s",
self.queue_cls, e)
try:
if self.dupefilter_cls == 'scrapy_redis_bloomfilter_block_cluster.dupefilter.LockRFPDupeFilter':
self.df = load_object(self.dupefilter_cls)(
server=self.server,
key=self.dupefilter_key % {'spider': spider.name},
debug=spider.settings.getbool('DUPEFILTER_DEBUG', defaults.DUPEFILTER_DEBUG),
bit=spider.settings.getint('BLOOMFILTER_BIT', defaults.BLOOMFILTER_BIT),
hash_number=spider.settings.getint('BLOOMFILTER_HASH_NUMBER', defaults.BLOOMFILTER_HASH_NUMBER),
block_num=spider.settings.getint('BLOOMFILTER_BLOCK_NUM', defaults.BLOOMFILTER_BLOCK_NUM),
lock_key=spider.settings.get('DUPEFILTER_LOCK_KEY', defaults.DUPEFILTER_LOCK_KEY) % {'spider': spider.name},
lock_num=spider.settings.getint('DUPEFILTER_LOCK_NUM', defaults.DUPEFILTER_LOCK_NUM),
lock_timeout=spider.settings.getint('DUPEFILTER_LOCK_TIMEOUT', defaults.DUPEFILTER_LOCK_TIMEOUT)
)
elif self.dupefilter_cls == 'scrapy_redis_bloomfilter_block_cluster.dupefilter.ListLockRFPDupeFilter':
self.df = load_object(self.dupefilter_cls)(
server=self.server,
key=self.dupefilter_key % {'spider': spider.name},
debug=spider.settings.getbool('DUPEFILTER_DEBUG', defaults.DUPEFILTER_DEBUG),
bit=spider.settings.getint('BLOOMFILTER_BIT', defaults.BLOOMFILTER_BIT),
hash_number=spider.settings.getint('BLOOMFILTER_HASH_NUMBER', defaults.BLOOMFILTER_HASH_NUMBER),
block_num=spider.settings.getint('BLOOMFILTER_BLOCK_NUM', defaults.BLOOMFILTER_BLOCK_NUM),
lock_key=spider.settings.get('DUPEFILTER_LOCK_KEY', defaults.DUPEFILTER_LOCK_KEY) % {'spider': spider.name},
lock_num=spider.settings.getint('DUPEFILTER_LOCK_NUM', defaults.DUPEFILTER_LOCK_NUM),
lock_timeout=spider.settings.getint('DUPEFILTER_LOCK_TIMEOUT', defaults.DUPEFILTER_LOCK_TIMEOUT),
rules_list=spider.rules_list,
key_list=spider.settings.get('DUPEFILTER_KEY_LIST', defaults.DUPEFILTER_KEY_LIST) % {'spider': spider.name},
bit_list=spider.settings.getint('BLOOMFILTER_BIT_LIST', defaults.BLOOMFILTER_BIT_LIST),
hash_number_list=spider.settings.getint('BLOOMFILTER_HASH_NUMBER_LIST', defaults.BLOOMFILTER_HASH_NUMBER_LIST),
block_num_list=spider.settings.getint('BLOOMFILTER_BLOCK_NUM_LIST', defaults.BLOOMFILTER_BLOCK_NUM_LIST)
)
else:
self.df = load_object(self.dupefilter_cls)(
server=self.server,
key=self.dupefilter_key % {'spider': spider.name},
debug=spider.settings.getbool('DUPEFILTER_DEBUG', defaults.DUPEFILTER_DEBUG),
bit=spider.settings.getint('BLOOMFILTER_BIT', defaults.BLOOMFILTER_BIT),
hash_number=spider.settings.getint('BLOOMFILTER_HASH_NUMBER', defaults.BLOOMFILTER_HASH_NUMBER),
block_num=spider.settings.getint('BLOOMFILTER_BLOCK_NUM', defaults.BLOOMFILTER_BLOCK_NUM)
)
except TypeError as e:
raise ValueError("Failed to instantiate dupefilter class '%s': %s",
self.dupefilter_cls, e)
if self.flush_on_start:
self.flush()
# notice if there are requests already in the queue to resume the crawl
if len(self.queue):
spider.log("Resuming crawl from redis(%d requests scheduled)" % len(self.queue))
def close(self, reason):
if not self.persist:
self.flush()
def flush(self):
self.df.clear()
self.queue.clear()
def enqueue_request(self, request):
if not request.dont_filter and self.df.request_seen(request):
self.df.log(request, self.spider)
return False
if self.stats:
self.stats.inc_value('scheduler/enqueued/redis', spider=self.spider)
self.queue.push(request)
return True
def next_request(self):
        request = self.queue.pop(self.idle_before_close)  # PriorityQueue does not support self.idle_before_close
if request and self.stats:
self.stats.inc_value('scheduler/dequeued/redis', spider=self.spider)
return request
def has_pending_requests(self):
return len(self) > 0
|
11572932
|
import torch
from torch_geometric.nn import DMoNPooling
def test_dmon_pooling():
batch_size, num_nodes, channels, num_clusters = (2, 20, 16, 10)
x = torch.randn((batch_size, num_nodes, channels))
adj = torch.ones((batch_size, num_nodes, num_nodes))
mask = torch.randint(0, 2, (batch_size, num_nodes), dtype=torch.bool)
pool = DMoNPooling([channels, channels], num_clusters)
assert str(pool) == 'DMoNPooling(16, num_clusters=10)'
s, x, adj, spectral_loss, ortho_loss, cluster_loss = pool(x, adj, mask)
assert s.size() == (2, 20, 10)
assert x.size() == (2, 10, 16)
assert adj.size() == (2, 10, 10)
assert -1 <= spectral_loss <= 0
assert 0 <= ortho_loss <= 2
assert -1 <= cluster_loss <= 0
|
11572939
|
import cv2
import numpy as np
from numpy.core.arrayprint import printoptions
import skimage.io as io
# Create a VideoCapture object and read from input file
# If the input is the camera, pass 0 instead of the video file name
cap = cv2.VideoCapture('trimmed1.mp4')
# Check if camera opened successfully
if not cap.isOpened():
print("Error opening video stream or file")
picList=[]
# Read until video is completed
i=200
while cap.isOpened():
# Capture frame-by-frame
i-=1
if(i==0):
break
ret, frame = cap.read()
if ret == True:
if(i==2):
print(frame.shape)
picList.append(frame)
# Display the resulting frame
#cv2.imshow('Frame',frame)
# Press Q on keyboard to exit
if cv2.waitKey(25) & 0xFF == ord('q'):
break
# Break the loop
else:
break
# When everything done, release the video capture object
# cap.release()
# Closes all the frames
# cv2.destroyAllWindows()
arr = np.asarray(picList)
print(arr.shape)
meanimage = (np.sum(arr, axis=0) // len(picList)).astype(np.uint8)  # cast back to uint8 so imshow displays the averaged frame
print(meanimage.shape)
io.imshow(meanimage)
print(meanimage)
#cv2.imshow(meanimage)
cv2.imshow("lol", meanimage)
# waits for user to press any key
# (this is necessary to avoid Python kernel form crashing)
cv2.waitKey(0)
# closing all open windows
cv2.destroyAllWindows()
|
11572942
|
class Subtracao:
def __init__(self, expressao_esquerda, expressao_direita):
self.__expressao_esquerda = expressao_esquerda
self.__expressao_direita = expressao_direita
def avalia(self):
return (
self.__expressao_esquerda.avalia()
- self.__expressao_direita.avalia()
)
class Soma:
def __init__(self, expressao_esquerda, expressao_direita):
self.__expressao_esquerda = expressao_esquerda
self.__expressao_direita = expressao_direita
def avalia(self):
return (
self.__expressao_esquerda.avalia()
+ self.__expressao_direita.avalia()
)
class Numero:
def __init__(self, numero):
self.__numero = numero
def avalia(self):
return self.__numero
if __name__ == '__main__':
expressao_esquerda = Soma(Numero(10), Numero(20))
expressao_direita = Soma(Numero(5), Numero(2))
expressao_conta = Soma(expressao_esquerda, expressao_direita)
print(expressao_conta.avalia())
expressao_conta2 = Subtracao(Numero(100), Numero(70))
print(expressao_conta2.avalia())
|
11572975
|
import threading
import time
import sys
from unittest import TestCase
from apiritif.http import _EventRecorder, Event
class EventGenerator(threading.Thread):
def __init__(self, recorder, index, events_count):
self.recorder = recorder
self.index = index
self.events_count = events_count
self.events = [Event(response=self.index * (i + 1)) for i in range(self.events_count)]
super(EventGenerator, self).__init__(target=self._record_events)
def _record_events(self):
for event in self.events:
self.recorder.record_event(event)
time.sleep(0.1)
self.result_events = self.recorder.pop_events(from_ts=-1, to_ts=sys.maxsize)
class TestRecorder(TestCase):
    # The _EventRecorder class has to store separate events per thread.
    # Here each fake thread (EventGenerator) records events into a shared recorder,
    # then reads them back from that recorder after a short delay.
    # As a result, the written and read events should be identical.
def test_recorder_events_per_thread(self):
recorder = _EventRecorder()
event_generators = [EventGenerator(recorder, i, 3) for i in range(5)]
for generator in event_generators:
generator.start()
for generator in event_generators:
generator.join()
for generator in event_generators:
self.assertEqual(generator.events, generator.result_events)
|
11572978
|
import os
def remove_files(*filenames):
"""
Remove the files created during the test
"""
for filename in filenames:
try:
os.remove(filename)
except FileNotFoundError:
pass
|
11573010
|
class Solution:
def canPermutePalindrome(self, s: str) -> bool:
char_set = set()
for char in s:
if char in char_set:
char_set.remove(char)
else:
char_set.add(char)
return len(char_set) <= 1
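# For example, Solution().canPermutePalindrome("carerac") is True (it can be
# rearranged into the palindrome "racecar"), while "code" gives False.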
|
11573011
|
import re
import secrets
from yarl import URL
from tests.e2e.conftest import RunCLI
# TODO: remove --local-executor when project.yml parsing is fixed
async def test_seq_batch(run_cli: RunCLI) -> None:
random_text = secrets.token_hex(20)
captured = await run_cli(
["bake", "--local-executor", "seq", "--param", "token", random_text]
)
assert f"task_b_out: {random_text}" in captured.out
# Now test the cache:
captured = await run_cli(
["bake", "--local-executor", "seq", "--param", "token", random_text]
)
assert "cached" in captured.out
async def test_batch_with_local(
run_cli: RunCLI,
run_neuro_cli: RunCLI,
project_id: str,
username: str,
project_role: str,
cluster_name: str,
) -> None:
captured = await run_cli(
[
"bake",
"--local-executor",
"print-local",
"--param",
"file_path",
"rw_dir/initial_file",
]
)
assert f"file_content: initial_file_content" in captured.out
m = re.search(r"Task print_readme (job-\S+) is", captured.out)
assert m is not None
job_id = m[1]
job_uri = URL.build(scheme="job", host=cluster_name) / username / job_id
captured = await run_neuro_cli(["acl", "list", "--shared", str(job_uri)])
assert sorted(line.split() for line in captured.out.splitlines()) == [
[f"job:{job_id}", "write", project_role],
]
async def test_batch_action(run_cli: RunCLI) -> None:
captured = await run_cli(["bake", "prime-checks", "--local-executor"])
assert f"5 is prime" in captured.out
assert f"4 is not prime" in captured.out
assert f"3 is prime" in captured.out
|
11573049
|
import random
class PonyNamesGenerator:
def get_next_pony(self, epoch=0):
"""
"Бесконечный" рандомный генератор имен поней из My Little Pony.
Имена образуются путем рекомбинации половинок оригинальных имен из сериала.
Исчерпав все варианты, повторяет их заново (но в другом порядке), добавив постфикс с римской записью поколения. К примеру:
<NAME> II
<NAME> II
Rari Dash II
Генератор реализован через рекурсию, так что технически он не совсем бесконечный, но на несколько десятков тысяч вариантов точно можно расчитывать (при проверке на MacBook 70 000 комбинаций создавались, а 80 000 уже нет).
"""
names = self.new_names_portion()
random.shuffle(names)
for name in names:
if epoch == 0:
yield name
else:
yield f'{name} {self.roman_numerals(epoch + 1)}'
epoch += 1
yield from self.get_next_pony(epoch=epoch)
@classmethod
def new_names_portion(cls):
"""
Данный метод возвращает список имен, причем всегда одинаковый и в одинаковом порядке.
"""
container = []
cls.halfs_combinations(
container,
first_halfs = [
'Twilight',
'Apple',
'Flutter',
'Rari',
'Pinkie',
'Rainbow',
'Derpy',
],
second_halfs = [
' Sparkle',
'jack',
'shy',
'ty',
' Pie',
' Dash',
' Hooves',
],
)
cls.halfs_combinations(
container,
first_halfs = [
'Cad',
'Sky',
'Amo',
'Cele',
'Lu',
],
second_halfs = [
'ance',
'star',
're',
'stia',
'na',
],
prefix='Princess '
)
return container
@staticmethod
def halfs_combinations(container, first_halfs, second_halfs, prefix=''):
"""
Берем два списка с половинками имен и кладем их декартово произведение в список container.
При необходимости, добавляем префиксы.
"""
for half in first_halfs:
for half_2 in second_halfs:
new_name = f'{prefix}{half}{half_2}'
container.append(new_name)
@staticmethod
def roman_numerals(number):
"""
Генератор римских цифр, взят отсюда:
https://py.checkio.org/mission/roman-numerals/publications/mdeakyne/python-3/first/share/53882d47af904f942fc8daf06c0ed270/
"""
if number > 0:
ones = ["", "I", "II", "III", "IV", "V", "VI", "VII", "VIII", "IX"]
tens = ["", "X", "XX", "XXX", "XL", "L", "LX", "LXX", "LXXX", "XC"]
hunds = ["", "C", "CC", "CCC", "CD", "D", "DC", "DCC", "DCCC", "CM"]
thous = ["", "M", "MM", "MMM", "MMMM"]
thous = thous[number // 1000]
hunds = hunds[number // 100 % 10]
tens = tens[number // 10 % 10]
ones = ones[number % 10]
return thous + hunds + tens + ones
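# A minimal usage sketch (the exact names depend on the shuffle order):
#   generator = PonyNamesGenerator().get_next_pony()
#   first_names = [next(generator) for _ in range(3)]
# For reference, PonyNamesGenerator.roman_numerals(14) returns 'XIV'.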
|
11573070
|
import setuptools
from distutils.core import setup
setup(
name="open_energy_view",
version="0.1dev",
author="<NAME>",
author_email="<EMAIL>",
packages=setuptools.find_packages(),
license="MIT",
long_description=open("README.md").read(),
)
|
11573088
|
from collections import defaultdict
from enum import Enum
from sklearn.base import ClassifierMixin, RegressorMixin, BaseEstimator
from sklearn_pmml import pmml
from sklearn_pmml.convert.utils import pmml_row, assert_equal
from sklearn_pmml.convert.features import *
from pyxb.utils.domutils import BindingDOMSupport as bds
import numpy as np
class TransformationContext(object):
"""
Context holder object
"""
def __init__(self, schemas=None):
"""
:type schemas: dict[Schema, list[Feature]] | None
"""
if schemas is None:
schemas = {}
self.schemas = schemas
class ModelMode(Enum):
CLASSIFICATION = 'classification'
REGRESSION = 'regression'
class Schema(Enum):
INPUT = ('input', True, True)
"""
Schema used to define input variables. Short names allowed
"""
OUTPUT = ('output', True, True)
"""
Schema used to define output variables. Short names allowed. For the categorical variables the continuous
probability variables will be automatically created as <feature_name>.<feature_value>
"""
DERIVED = ('derived', False, False)
"""
Schema used to define derived features. Short names not allowed due to potential overlap with input variables.
"""
NUMERIC = ('numeric', False, False)
"""
Schema used to encode categorical features as numbers. Short names not allowed due to their overlap with
input variables
"""
MODEL = ('model', True, False)
"""
Schema used to define features fed into the sklearn estimator.
Short names allowed because these variables are not going into PMML.
"""
INTERNAL = ('internal', False, True)
"""
This schema may be used by complex converters to hide the variables used for internal needs
(e.g. the raw predictions of GBRT)
"""
CATEGORIES = ('categories', True, False)
"""
This schema is used to extend categorical outputs with probabilities of categories
"""
    def __init__(self, name, short_names_allowed, data_dict_eligible):
        self._name = name
        self._short_names_allowed = short_names_allowed
        self._data_dict_eligible = data_dict_eligible
@property
def namespace(self):
"""
The namespace corresponding to the schema
"""
return self._name
@property
def short_names_allowed(self):
"""
The schema allows usage of short names instead of fully-qualified names
"""
return self._short_names_allowed
@property
def eligible_for_data_dictionary(self):
"""
The variables defined in the schema should appear in the DataDictionary
"""
        return self._data_dict_eligible
def extract_feature_name(self, f):
"""
Extract the printed name of the feature.
:param f: feature to work with
:type f: Feature|str
"""
if self.short_names_allowed:
if isinstance(f, str):
return f
else:
return f.full_name
else:
return "{}.{}".format(self.namespace, f if isinstance(f, str) else f.full_name)
class EstimatorConverter(object):
"""
A new base class for the estimator converters
"""
EPSILON = 0.00001
SCHEMAS_IN_MINING_MODEL = {Schema.INPUT, Schema.INTERNAL}
def __init__(self, estimator, context, mode):
self.model_function = mode
self.estimator = estimator
self.context = context
assert not any(isinstance(_, DerivedFeature) for _ in context.schemas[Schema.INPUT]), \
'Input schema represents the input fields only'
assert all(isinstance(_, DerivedFeature) for _ in context.schemas[Schema.DERIVED]), \
'Derived schema represents the set of automatically generated fields'
assert not any(isinstance(_, DerivedFeature) for _ in context.schemas[Schema.OUTPUT]), \
'Only regular features allowed in output schema; use Output transformation if you want to transform values'
# create a new schema for categories probabilities
categories = []
for feature in context.schemas[Schema.OUTPUT]:
if isinstance(feature, CategoricalFeature):
for value in feature.value_list:
categories.append(RealNumericFeature(
name=value,
namespace=feature.name
))
context.schemas[Schema.CATEGORIES] = categories
def data_dictionary(self):
"""
Build a data dictionary and return a DataDictionary element.
DataDictionary contains feature types for all variables used in the PMML,
except the ones defined as Derived Features
"""
dd = pmml.DataDictionary()
for schema, fields in sorted(self.context.schemas.items(), key=lambda x: x[0].name):
assert isinstance(schema, Schema)
if schema.eligible_for_data_dictionary:
for f in fields:
data_field = pmml.DataField(
dataType=f.data_type.value,
name=schema.extract_feature_name(f),
optype=f.optype.value)
dd.DataField.append(data_field)
if isinstance(f, CategoricalFeature):
for v in f.value_list:
data_field.append(pmml.Value(value_=v))
return dd
def output(self):
"""
Output section of PMML contains all model outputs.
:return: pmml.Output
"""
output = pmml.Output()
# the response variables
for feature in self.context.schemas[Schema.OUTPUT]:
output_field = pmml.OutputField(
name=Schema.OUTPUT.extract_feature_name(feature),
feature='predictedValue'
)
output.append(output_field)
return output
def transformation_dictionary(self):
"""
Build a transformation dictionary and return a TransformationDictionary element
"""
td = pmml.TransformationDictionary()
# define a schema with all variables available for a model
encoded_schema = []
self.context.schemas[Schema.NUMERIC] = encoded_schema
idx = {}
# First, populate transformation dictionary for _all_ derived fields, because they can be requested later
for f in self.context.schemas[Schema.DERIVED]:
ef = RealNumericFeature(name=f.name)
df = pmml.DerivedField(
name=ef.full_name,
optype=ef.optype.value,
dataType=ef.data_type.value
)
df.append(f.transformation)
td.append(df)
assert f.name not in idx, 'Duplicate field definition: {}'.format(f.name)
idx[f.name] = ef
# second, define the numeric transformations for the categorical variables
for f in self.context.schemas[Schema.INPUT]:
assert f.name not in idx, 'Duplicate field definition: {}'.format(f.name)
if isinstance(f, CategoricalFeature):
ef = RealNumericFeature(name=f.name, namespace=Schema.NUMERIC.namespace)
# create a record in transformation dictionary with mapping from raw values into numbers
df = pmml.DerivedField(
name=ef.full_name,
optype=ef.optype.value,
dataType=ef.data_type.value
)
mv = pmml.MapValues(outputColumn='output', dataType=ef.data_type.value)
mv.append(pmml.FieldColumnPair(field=f.full_name, column='input'))
it = pmml.InlineTable()
for i, v in enumerate(f.value_list):
it.append(pmml_row(input=v, output=i))
td.append(df.append(mv.append(it)))
idx[f.name] = ef
else:
idx[f.name] = f
# now we can build a mirror of model schema into the numeric schema
self.context.schemas[Schema.NUMERIC] = [idx[f.name] for f in self.context.schemas[Schema.MODEL]]
return td
def model(self, verification_data=None):
"""
Build a mining model and return one of the MODEL-ELEMENTs
"""
pass
def model_verification(self, verification_data):
"""
Use the input verification_data, apply the transformations, evaluate the model response and produce the
ModelVerification element
:param verification_data: list of dictionaries or data frame
:type verification_data: dict[str, object]|pd.DataFrame
:return: ModelVerification element
"""
verification_data = pd.DataFrame(verification_data)
assert len(verification_data) > 0, 'Verification data can not be empty'
verification_input = pd.DataFrame(index=verification_data.index)
verification_model_input = pd.DataFrame(index=verification_data.index)
for key in self.context.schemas[Schema.INPUT]:
# all input features MUST be present in the verification_data
assert key.full_name in verification_data.columns, 'Missing input field "{}"'.format(key.full_name)
verification_input[Schema.INPUT.extract_feature_name(key)] = verification_data[key.full_name]
if isinstance(key, CategoricalFeature):
verification_model_input[Schema.INPUT.extract_feature_name(key)] = np.vectorize(key.to_number)(verification_data[key.full_name])
else:
verification_model_input[Schema.INPUT.extract_feature_name(key)] = verification_data[key.full_name]
for key in self.context.schemas[Schema.DERIVED]:
assert isinstance(key, DerivedFeature), 'Only DerivedFeatures are allowed in the DERIVED schema'
verification_model_input[key.full_name] = key.apply(verification_input)
# at this point we can check that MODEL schema contains only known features
for key in self.context.schemas[Schema.MODEL]:
assert Schema.MODEL.extract_feature_name(key) in verification_model_input.columns, \
'Unknown feature "{}" in the MODEL schema'.format(key.full_name)
# TODO: we can actually support multiple columns, but need to figure out the way to extract the data
# TODO: from the estimator properly
# building model results
assert len(self.context.schemas[Schema.OUTPUT]) == 1, 'Only one output is currently supported'
key = self.context.schemas[Schema.OUTPUT][0]
model_input = verification_model_input[list(map(Schema.MODEL.extract_feature_name, self.context.schemas[Schema.MODEL]))].values
model_results = np.vectorize(key.from_number)(self.estimator.predict(X=model_input))
if key.full_name in verification_data:
# make sure that if results are provided, the expected and actual values are equal
assert_equal(key, model_results, verification_data[key.full_name].values)
verification_input[Schema.OUTPUT.extract_feature_name(key)] = model_results
if isinstance(key, CategoricalFeature):
probabilities = self.estimator.predict_proba(X=model_input)
for i, key in enumerate(self.context.schemas[Schema.CATEGORIES]):
verification_input[Schema.CATEGORIES.extract_feature_name(key)] = probabilities[:, i]
fields = []
field_names = []
for s in [Schema.INPUT, Schema.OUTPUT, Schema.CATEGORIES]:
fields += self.context.schemas[s]
field_names += list(map(s.extract_feature_name, self.context.schemas[s]))
mv = pmml.ModelVerification(recordCount=len(verification_input), fieldCount=len(fields))
# step one: build verification schema
verification_fields = pmml.VerificationFields()
for key in fields:
if isinstance(key, NumericFeature):
vf = pmml.VerificationField(field=key.name, column=key.name, precision=self.EPSILON)
else:
vf = pmml.VerificationField(field=key.name, column=key.name)
verification_fields.append(vf)
mv.append(verification_fields)
# step two: build data table
it = pmml.InlineTable()
for data in verification_input.iterrows():
data = data[1]
row = pmml.row()
row_empty = True
for key in field_names:
if verification_input[key].dtype == object or not np.isnan(data[key]):
col = bds().createChildElement(key)
bds().appendTextChild(data[key], col)
row.append(col)
row_empty = False
if not row_empty:
it.append(row)
mv.append(it)
return mv
def mining_schema(self):
"""
Mining schema contains the model input features.
NOTE: In order to avoid duplicates, I've decided to remove output features from MiningSchema
NOTE: We don't need to specify any DERIVED/NUMERIC fields here, because PMML interpreter will create them
in a lazy manner.
"""
ms = pmml.MiningSchema()
if Schema.INPUT in self.SCHEMAS_IN_MINING_MODEL:
for f in sorted(self.context.schemas[Schema.INPUT], key=lambda _: _.full_name):
ms.append(pmml.MiningField(invalidValueTreatment=f.invalid_value_treatment.value, name=f.full_name))
for s in [Schema.OUTPUT, Schema.INTERNAL]:
if s in self.SCHEMAS_IN_MINING_MODEL:
for f in self.context.schemas.get(s, []):
ms.append(pmml.MiningField(
name=s.extract_feature_name(f),
usageType="predicted"
))
return ms
def header(self):
"""
Build and return Header element
"""
return pmml.Header()
def pmml(self, verification_data=None):
"""
Build PMML from the context and estimator.
Returns PMML element
"""
p = pmml.PMML(version="4.2")
p.append(self.header())
p.append(self.data_dictionary())
p.append(self.transformation_dictionary())
p.append(self.model(verification_data))
return p
class ClassifierConverter(EstimatorConverter):
"""
Base class for classifier converters.
It is required that the output schema contains only categorical features.
    The serializer will output result labels as output::feature_name and probabilities for each value of the result
    feature as output::feature_name::feature_value.
"""
def __init__(self, estimator, context):
"""
:param estimator: Estimator to convert
:type estimator: BaseEstimator
:param context: context to work with
:type context: TransformationContext
"""
super(ClassifierConverter, self).__init__(estimator, context, ModelMode.CLASSIFICATION)
assert isinstance(estimator, ClassifierMixin), 'Classifier converter should only be applied to the classification models'
for f in context.schemas[Schema.OUTPUT]:
assert isinstance(f, CategoricalFeature), 'Only categorical outputs are supported for classification task'
# create hidden variables for each categorical output
internal_schema = list(filter(lambda x: isinstance(x, CategoricalFeature), self.context.schemas[Schema.OUTPUT]))
self.context.schemas[Schema.INTERNAL] = internal_schema
def output(self):
"""
Output section of PMML contains all model outputs.
Classification tree output contains output variable as a label,
and <variable>.<value> as a probability of a value for a variable
:return: pmml.Output
"""
output = pmml.Output()
# the response variables
for feature in self.context.schemas[Schema.OUTPUT]:
output_field = pmml.OutputField(
name=Schema.OUTPUT.extract_feature_name(feature),
feature='predictedValue',
optype=feature.optype.value,
dataType=feature.data_type.value
)
output.append(output_field)
# the probabilities for categories; should only be populated for classification jobs
for feature in self.context.schemas[Schema.CATEGORIES]:
output_field = pmml.OutputField(
name=Schema.CATEGORIES.extract_feature_name(feature),
optype=feature.optype.value,
dataType=feature.data_type.value,
feature='probability',
targetField=Schema.INTERNAL.extract_feature_name(feature.namespace),
value_=feature.name
)
output.append(output_field)
return output
class RegressionConverter(EstimatorConverter):
def __init__(self, estimator, context):
super(RegressionConverter, self).__init__(estimator, context, ModelMode.REGRESSION)
|
11573095
|
import unittest
import math
import numpy
import pyglet
from pygly.viewport import Viewport
class test_viewport( unittest.TestCase ):
def setUp( self ):
pass
def tearDown( self ):
pass
def test_viewport_creation( self ):
window = pyglet.window.Window(
fullscreen = False,
width = 1024,
height = 512
)
viewport = Viewport(
[
[0, 0],
[1024, 512]
]
)
self.assertEqual(
viewport.x,
0,
"Viewport x incorrect"
)
self.assertEqual(
viewport.y,
0,
"Viewport y incorrect"
)
self.assertEqual(
viewport.width,
1024,
"Viewport width incorrect"
)
self.assertEqual(
viewport.height,
512,
"Viewport height incorrect"
)
self.assertEqual(
viewport.aspect_ratio,
2.0,
"Viewport aspect ratio incorrect"
)
if __name__ == '__main__':
unittest.main()
|
11573111
|
import unittest
from wifipumpkin3.exceptions.errors.dhcpException import DHCPServerSettingsError
from wifipumpkin3.core.common.platforms import Linux
from wifipumpkin3.core.ui.dhcpConfig import ui_DhcpSettingsClass
class TestUIDHCPSettings(unittest.TestCase):
def test_dhcp_error_message(self):
with self.assertRaises(DHCPServerSettingsError):
raise DHCPServerSettingsError("", "")
def test_read_file_exception(self):
pass
#self.result_content = ui_DhcpSettingsClass(self)
#self.result_content.start()
# def test_raise(self):
# raise DHCPServerSettingsError('DHCP Server', 'range ip error')
if __name__ == "__main__":
unittest.main()
|
11573117
|
import torch as ch
import numpy as np
from robustness.model_utils import make_and_restore_model
from robustness.tools.helpers import save_checkpoint
import sys
import utils
def chuck_inf_means(senses):
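    # For each row, average only the finite sensitivity values (inf entries are discarded).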
chucked = []
for i in range(senses.shape[0]):
x = senses[i]
chucked.append(np.mean(x[x != np.inf]))
return np.array(chucked)
if __name__ == "__main__":
model_arch = sys.argv[1]
model_type = sys.argv[2]
    drop_mode = sys.argv[3]
    random_drop = drop_mode == 'random'
    num_drop = int(sys.argv[4])
    model_path = sys.argv[5]
    if drop_mode not in ['random', 'most', 'least']:
        raise ValueError("Method of selecting neurons to drop not supported")
constants = utils.CIFAR10()
ds = constants.get_dataset()
model_kwargs = {
'arch': model_arch,
'dataset': ds,
'resume_path': model_type
}
# Get scaled delta values
senses = constants.get_deltas(model_type, model_arch)
(mean, std) = constants.get_stats(model_type, model_arch)
# Load model
model, _ = make_and_restore_model(**model_kwargs)
model.eval()
print("Dropping %d out of %d neurons" % (num_drop, senses.shape[0]))
    # Random weight drop-out if requested
if random_drop:
print("Random drop-out!")
worst_n = np.random.permutation(senses.shape[0])[:num_drop]
else:
# 99.7% interval
threshold = mean + 3 * std
        # Only consider neurons with any hopes of attacking (delta within some sensible range)
senses = utils.scaled_values(senses, mean, std)
senses = chuck_inf_means(senses)
        if drop_mode == 'most':
worst_n = np.argsort(np.abs(senses))[:num_drop]
else:
worst_n = np.argsort(-np.abs(senses))[:num_drop]
# Extract final weights matrix from model
with ch.no_grad():
model.state_dict().get("module.model.classifier.weight")[:, worst_n] = 0
# Save modified model
sd_info = {
'model': model.state_dict(),
'epoch': 1
}
save_checkpoint(sd_info, False, model_path)
|
11573149
|
from flask import current_app
from lxml import etree
from zeus.config import db
from zeus.constants import Severity
from zeus.models import StyleViolation
from .base import ArtifactHandler
class CheckstyleHandler(ArtifactHandler):
supported_types = frozenset(["application/x-checkstyle+xml", "text/xml+checkstyle"])
def process(self, fp):
try:
root = etree.fromstring(fp.read())
except Exception:
current_app.logger.exception("Failed to parse XML")
return []
job = self.job
# <file name="/var/lib/jenkins/workspace/Releases/ESLint Release/eslint/fullOfProblems.js">
# <error line="1" column="10" severity="error" message="'addOne' is defined but never used. (no-unused-vars)" source="eslint.rules.no-unused-vars" />
for f_node in root.iter("file"):
# name
filename = f_node.get("name")
for e_node in f_node.iter("error"):
# line, column, severity, message, source
db.session.add(
StyleViolation(
job=job,
repository_id=job.repository_id,
filename=filename,
severity=Severity[e_node.get("severity")],
message=e_node.get("message"),
source=e_node.get("source"),
lineno=e_node.get("line"),
colno=e_node.get("column"),
)
)
db.session.flush()
|
11573151
|
import sys
import os
import unittest
import shutil
import copy
from io import StringIO
sys.path.append(".")
from mock_gff3 import Create_generator
from mock_helper import import_data, gen_file
import annogesiclib.optimize_TSSpredator as ot
from mock_args_container import MockClass
class Mock_func(object):
def __init__(self):
self.example = Example()
def mock_run_TSSpredator_paralle(
self, config_files, tsspredator_path, processes):
pass
def mock_convert2gff(self, out_path, gff_files, args, test):
if not os.path.exists("test_folder/gffs"):
os.mkdir("test_folder/gffs")
gen_file("test_folder/gffs/aaa.gff", self.example.gff_file)
gff_files.append("test_folder/gffs/aaa.gff")
class TestOptimizeTSSpredator(unittest.TestCase):
def setUp(self):
self.example = Example()
self.mock_args = MockClass()
self.test_folder = "test_folder"
if (not os.path.exists(self.test_folder)):
os.mkdir(self.test_folder)
def tearDown(self):
if os.path.exists(self.test_folder):
shutil.rmtree(self.test_folder)
def test_initiate(self):
args = self.mock_args.mock()
args.height = 0.9
args.height_reduction = 0.8
args.factor = 0.9
args.factor_reduction = 0.8
args.base_height = 0.01
args.enrichment = 0.5
args.processing = 0.5
max_num, best_para, current_para, indexs = ot.initiate(args)
self.assertDictEqual(max_num, {
're_factor': 0.8, 'processing': 0.5, 'enrichment': 0.5,
'height': 0.9, 'base_height': 0.01, 're_height': 0.8,
'factor': 0.9})
self.assertDictEqual(best_para, {
're_factor': 0, 'processing': 0, 'enrichment': 0,
'height': 0, 'base_height': 0, 're_height': 0, 'factor': 0})
self.assertDictEqual(current_para, {
're_factor': 0, 'processing': 0, 'enrichment': 0,
'height': 0, 'base_height': 0, 're_height': 0,
'factor': 0})
self.assertDictEqual(indexs, {
'step': 0, 'change': False, 'num': 0, 'first': True,
'length': 0, 'exist': False, 'switch': 0, 'extend': False,
'count': 0})
def test_read_predict_manual_gff(self):
gff = os.path.join(self.test_folder, "test.gff")
gen_file(gff, self.example.gff_file)
args = self.mock_args.mock()
args.gene_length = 1000
num, gffs = ot.read_predict_manual_gff(gff, 1000)
self.assertEqual(num, 1)
self.assertEqual(gffs[0].start, 633)
def test_scoring_function(self):
stat_value = {"tp_rate": 0.8, "fp_rate": 0.0003, "tp": 100, "fp": 3}
best = {"tp_rate": 0.8, "fp_rate": 0.0005, "tp": 100,
"fp": 31, "fn": 45, "missing_ratio": 0.004}
ot.scoring_function(best, stat_value, self.example.indexs, 1000)
self.assertTrue(self.example.indexs["change"])
self.example.indexs["change"] = False
stat_value = {"tp_rate": 0.8, "fp_rate": 0.0004, "tp": 100, "fp": 13}
best = {"tp_rate": 0.8, "fp_rate": 0.0003, "tp": 100, "fp": 3}
ot.scoring_function(best, stat_value, self.example.indexs, 1000)
self.assertFalse(self.example.indexs["change"])
def test_load_stat_csv(self):
stat_file = os.path.join(self.test_folder, "stat.csv")
gen_file(stat_file, self.example.stat)
list_num = []
best_para = {}
datas = ot.load_stat_csv(
self.test_folder, list_num, self.example.best,
best_para, self.example.indexs, 1000, stat_file)
self.assertEqual(datas[0], 2)
self.assertDictEqual(datas[1], {
'fp': 230.0, 'tp': 789.0, 'missing_ratio': 0.29991126885536823,
'fp_rate': 8.15542105020548e-05, 'tp_rate': 0.7000887311446318,
'fn': 338.0})
self.assertDictEqual(datas[2], {
'processing': 5.2, 'base_height': 0.086, 'factor': 7.6,
're_height': 2.3, 're_factor': 5.5, 'enrichment': 3.1,
'height': 2.4})
def test_reload_data(self):
stat_file = os.path.join(self.test_folder, "stat.csv")
gen_file(stat_file, self.example.stat)
list_num = []
best_para = {}
log = open(os.path.join(self.test_folder, "test.log"), "w")
datas = ot.reload_data(self.test_folder, list_num, self.example.best,
best_para, self.example.indexs, 1000, stat_file, log)
self.assertDictEqual(datas[0], {
'base_height': 0.086, 'processing': 5.2,
'height': 2.4, 'enrichment': 3.1, 're_factor': 5.5,
're_height': 2.3, 'factor': 7.6})
self.assertDictEqual(datas[1], {
'tp_rate': 0.7000887311446318, 'tp': 789.0,
'fn': 338.0, 'fp': 230.0, 'fp_rate': 8.15542105020548e-05,
'missing_ratio': 0.29991126885536823})
def test_extend_data(self):
best_para = copy.deepcopy(self.example.best_para)
current_para = ot.extend_data(self.test_folder, self.example.best,
best_para, 100, "aaa")
self.assertDictEqual(current_para, best_para)
def test_run_random_part(self):
list_num = []
current_para = copy.deepcopy(self.example.ref_para)
para = ot.run_random_part(current_para, list_num,
self.example.max_nums, 1000,
self.example.indexs)
self.assertTrue(para != self.example.ref_para)
def test_run_large_change_part(self):
list_num = []
seeds = {"seed": 0, "pre_seed": []}
features = {"feature": "r", "pre_feature": ""}
current_para = copy.deepcopy(self.example.ref_para)
best_para = copy.deepcopy(self.example.best_para)
para = ot.run_large_change_part(seeds, features, self.example.indexs,
current_para, self.example.max_nums,
best_para, list_num)
self.assertTrue(para != self.example.ref_para)
self.assertTrue(para != best_para)
def test_gen_large_random(self):
list_num = []
index_large = {0: "height", 1: "re_height", 2: "factor",
3: "re_factor", 4:"base_height",
5: "enrichment", 6: "processing"}
best_para = copy.deepcopy(self.example.best_para)
para = ot.gen_large_random(self.example.max_nums, "height", 0.2,
list_num, 0.3, best_para, index_large,
self.example.indexs)
self.assertTrue(para != best_para)
self.assertTrue(para["height"] > para["re_height"])
def test_run_small_change_part(self):
seeds = {"seed": 0, "pre_seed": []}
features = {"feature": "l", "pre_feature": ""}
current_para = copy.deepcopy(self.example.ref_para)
list_num = []
best_para = copy.deepcopy(self.example.best_para)
para = ot.run_small_change_part(
seeds, features, self.example.indexs, current_para,
best_para, list_num, self.example.max_nums)
self.assertTrue(para != best_para)
def test_small_change(self):
list_num = []
best_para = copy.deepcopy(self.example.best_para)
para = ot.small_change(0.9, "height", 0.2, list_num, 0.5, best_para)
self.assertTrue(para != 0.5)
self.assertTrue(para > 0.2)
def test_plus_process(self):
list_num = []
actions = {"plus": False, "minus": False}
best_para = copy.deepcopy(self.example.best_para)
para = ot.plus_process("height", best_para, 0.9,
0.5, actions, list_num, 0.2)
self.assertEqual(para, 0.4)
def test_minus_process(self):
list_num = []
actions = {"plus": False, "minus": False}
best_para = copy.deepcopy(self.example.best_para)
para = ot.minus_process("height", best_para, 0.9,
0.5, actions, list_num, 0.1)
self.assertEqual(para, 0.2)
def test_compare_manual_predict(self):
out = StringIO()
manual = os.path.join(self.test_folder, "manual.gff")
predict = os.path.join(self.test_folder, "predict.gff")
gen_file(manual, self.example.manual_file)
gen_file(predict, self.example.gff_file)
para_list = [copy.deepcopy(self.example.best_para)]
args = self.mock_args.mock()
args.manual = manual
args.cores = 1
args.gene_length = 2000
args.cluster = 3
ot.compare_manual_predict(1000, para_list, [predict], self.test_folder,
out, args, self.example.mans, 3, 2000)
self.assertEqual(
out.getvalue(),
"1000\the_0.3_rh_0.2_fa_0.7_rf_0.3_bh_0.0_ef_2.5_pf_3.3\tTP=0\tTP_rate=0.0\tFP=2\tFP_rate=0.00100150225338007\tFN=2\tmissing_ratio=0.6666666666666666\n")
def test_compute_stat(self):
list_num = [self.example.best_para]
best_para = {'re_factor': 0.3, 'processing': 3.3, 'enrichment': 2.5,
'height': 0.5, 'base_height': 0.0, 're_height': 0.2,
'factor': 0.7}
self.example.indexs["change"] = True
best = {"tp_rate": 0.6, "fp_rate": 0.0025, "tp": 40, "fp": 32,
"fn": 45, "missing_ratio": 0.004}
datas = ot.compute_stat(self.example.best, best, best_para, 1,
list_num, self.test_folder,
self.example.indexs, "aaa")
self.assertDictEqual(datas[0], self.example.best_para)
self.assertDictEqual(datas[1], self.example.best)
def test_run_tss_and_stat(self):
list_num = [self.example.best_para]
seeds = {"seed": 0, "pre_seed": []}
features = {"feature": "l", "pre_feature": ""}
best_para = {'re_factor': 0.3, 'processing': 3.3, 'enrichment': 2.5,
'height': 0.5, 'base_height': 0.0, 're_height': 0.2,
'factor': 0.7}
current_para = {'re_factor': 0.3, 'processing': 2.3, 'enrichment': 2.5,
'height': 0.5, 'base_height': 0.2, 're_height': 0.2,
'factor': 0.7}
stat_out = StringIO()
wig = os.path.join(self.test_folder, "wig")
fasta = os.path.join(self.test_folder, "aaa.fa")
gff = os.path.join(self.test_folder, "aaa.gff")
if not os.path.exists(wig):
os.mkdir(wig)
gen_file(fasta, self.example.fasta)
gen_file(gff, self.example.gff_file)
output_prefix = ["test_aaa"]
ot.run_TSSpredator_paralle = Mock_func().mock_run_TSSpredator_paralle
ot.convert2gff = Mock_func().mock_convert2gff
args = self.mock_args.mock()
args.steps = 2000
args.cores = 1
args.tsspredator_path = "test"
args.libs = self.example.libs
args.program = "TSS"
args.cluster = 3
args.utr = 200
args.replicate = "all_2"
args.replicate_name = "rep"
args.project_strain = "aaa"
args.manual = os.path.join(self.test_folder, "manual.gff")
args.gene_length = 2000
log = open(os.path.join(self.test_folder, "test.log"), "w")
gen_file(args.manual, self.example.manual_file)
datas, set_config, run_tss = ot.run_tss_and_stat(
self.example.indexs, list_num, seeds, 0.4, 0.3,
self.test_folder, stat_out, best_para, current_para,
wig, fasta, gff, self.example.best, 3, args, "aaa",
self.example.mans, 2000, log, True, True)
self.assertFalse(datas[0])
def test_gen_config(self):
wig = os.path.join(self.test_folder, "wig")
if not os.path.exists(wig):
os.mkdir(wig)
fasta = os.path.join(self.test_folder, "aaa.fa")
gff = os.path.join(self.test_folder, "aaa.gff")
gen_file(fasta, self.example.fasta)
gen_file(gff, self.example.gff_file)
args = self.mock_args.mock()
args.libs = self.example.libs
args.cores = 1
args.cluster = 3
args.program = "TSS"
args.project_strain = "aaa"
args.replicate = "all_1"
args.utr = 200
args.replicate_name = "test"
filename = ot.gen_config(self.example.best_para, self.test_folder,
1, wig, fasta, gff, args, "aaa")
self.assertEqual(filename, "test_folder/config_1.ini")
data = import_data("test_folder/config_1.ini")
self.assertEqual("\n".join(data), self.example.config)
def test_comparison(self):
nums = {"overlap": 0, "predict": 0, "manual": 0}
for index in range(0, 3):
self.example.mans[index].attributes["print"] = False
self.example.gffs[index].attributes["print"] = False
args = self.mock_args.mock()
args.cluster = 3
args.gene_length = 2000
ot.comparison(self.example.mans, self.example.gffs, nums, args, 2000)
self.assertDictEqual(nums, {'manual': 1, 'predict': 2, 'overlap': 1})
def test_check_overlap(self):
nums = {"overlap": 0, "predict": 0, "manual": 0}
datas = ot.check_overlap(True, None, nums, 2000, self.example.mans[0],
self.example.gffs[0], 100)
self.assertFalse(datas[0])
self.assertEqual(datas[1], 140)
def test_print_lib(self):
libs = [{"condition": 1, "replicate": "a", "wig": "test_1.wig"},
{"condition": 2, "replicate": "a", "wig": "test_2.wig"}]
out = StringIO()
ot.print_lib(2, libs, out, self.test_folder, "aaa", ["a"])
self.assertEqual(
out.getvalue(),
"aaa_1a = test_folder/test_1.wig\naaa_2a = test_folder/test_2.wig\n")
def test_import_lib(self):
out = StringIO()
if not os.path.exists(os.path.join(self.test_folder, "wigs")):
os.mkdir(os.path.join(self.test_folder, "wigs"))
wig_folder = os.path.join(self.test_folder, "wigs", "tmp")
if not os.path.exists(wig_folder):
os.mkdir(wig_folder)
lib_dict = {"fp": [], "fm": [], "np": [], "nm": []}
gen_file(os.path.join(
wig_folder,
"GSM1649587_Hp26695_ML_B1_HS1_-TEX_forward_STRAIN_aaa.wig"),
"test")
gen_file(os.path.join(
wig_folder,
"GSM1649587_Hp26695_ML_B1_HS1_-TEX_reverse_STRAIN_aaa.wig"),
"test")
gen_file(os.path.join(
wig_folder,
"GSM1649588_Hp26695_ML_B1_HS1_-TEX_forward_STRAIN_aaa.wig"),
"test")
gen_file(os.path.join(
wig_folder,
"GSM1649588_Hp26695_ML_B1_HS1_-TEX_reverse_STRAIN_aaa.wig"),
"test")
args = self.mock_args.mock()
args.project_strain = "aaa"
args.program = "TSS"
args.libs = self.example.libs
lib_num = ot.import_lib(wig_folder, set(), lib_dict, out, "aaa.gff",
[], "aaa.fa", args, "aaa")
self.assertEqual(lib_num, 1)
def test_optimization_process(self):
current_para = copy.deepcopy(self.example.ref_para)
best_ref_para = copy.deepcopy(self.example.best_para)
list_num = [best_ref_para]
indexs = copy.deepcopy(self.example.indexs)
best_para = {'re_factor': 0.3, 'processing': 3.3, 'enrichment': 2.5,
'height': 0.6, 'base_height': 0.0, 're_height': 0.2,
'factor': 0.7}
stat_out = StringIO()
output_prefix = ["test_1"]
gen_file(os.path.join(self.test_folder, "manual.gff"),
self.example.manual_file)
if not os.path.exists(os.path.join(self.test_folder, "wigs")):
os.mkdir(os.path.join(self.test_folder, "wigs"))
wig_folder = os.path.join(self.test_folder, "wigs", "tmp")
if not os.path.exists(wig_folder):
os.mkdir(wig_folder)
gen_file(os.path.join(
wig_folder,
"GSM1649587_Hp26695_ML_B1_HS1_-TEX_forward_STRAIN_aaa.wig"),
"test")
gen_file(os.path.join(
wig_folder,
"GSM1649587_Hp26695_ML_B1_HS1_-TEX_reverse_STRAIN_aaa.wig"),
"test")
gen_file(os.path.join(
wig_folder,
"GSM1649588_Hp26695_ML_B1_HS1_-TEX_forward_STRAIN_aaa.wig"),
"test")
gen_file(os.path.join(
wig_folder,
"GSM1649588_Hp26695_ML_B1_HS1_-TEX_reverse_STRAIN_aaa.wig"),
"test")
ot.run_TSSpredator_paralle = Mock_func().mock_run_TSSpredator_paralle
ot.convert2gff = Mock_func().mock_convert2gff
args = self.mock_args.mock()
args.libs = self.example.libs
args.cores = 1
args.cluster = 3
args.program = "TSS"
args.project_strain = "aaa"
args.replicate = "all_1"
args.utr = 200
args.replicate_name = "test"
args.steps = 2
args.tsspredator_path = "test"
args.gene_length = 2000
args.manual = os.path.join(self.test_folder, "manual.gff")
log = open(os.path.join(self.test_folder, "test.log"), "w")
ot.optimization_process(
indexs, current_para, list_num, self.example.max_nums, best_para,
self.test_folder, stat_out, self.example.best, wig_folder,
"aaa.fa", "aaa.gff", 2, True, args, "aaa", self.example.mans, 2000, log)
self.assertDictEqual(best_para, {
're_height': 0.2, 'factor': 0.7, 'processing': 3.3,
'height': 0.6, 'base_height': 0.0, 're_factor': 0.3,
'enrichment': 2.5})
def test_optimization(self):
ot.run_TSSpredator_paralle = Mock_func().mock_run_TSSpredator_paralle
ot.convert2gff = Mock_func().mock_convert2gff
if not os.path.exists(os.path.join(self.test_folder, "wigs")):
os.mkdir(os.path.join(self.test_folder, "wigs"))
wig_folder = os.path.join(self.test_folder, "wigs", "tmp")
if not os.path.exists(wig_folder):
os.mkdir(wig_folder)
fasta = os.path.join(self.test_folder, "aaa.fa")
gff = os.path.join(self.test_folder, "aaa.gff")
gen_file(fasta, self.example.fasta)
gen_file(gff, self.example.gff_file)
output_prefix = ["test_1"]
args = self.mock_args.mock()
args.libs = self.example.libs
args.cores = 1
args.cluster = 3
args.program = "TSS"
args.project_strain = "aaa"
args.replicate = "all_1"
args.utr = 200
args.steps = 2
args.gene_length = 2000
args.height = 0.9
args.height_reduction = 0.8
args.factor = 0.9
args.factor_reduction = 0.8
args.base_height = 0.01
args.enrichment = 0.5
args.processing = 0.5
args.length = None
args.replicate_name = "test"
args.tsspredator_path = "test"
args.manual = os.path.join(self.test_folder, "manual.gff")
gen_file(args.manual, self.example.manual_file)
log = open(os.path.join(self.test_folder, "test.log"), "w")
args.output_folder = self.test_folder
os.mkdir(os.path.join(self.test_folder, "optimized_TSSpredator"))
ot.optimization(wig_folder, fasta, gff, args, args.manual, 2000, "aaa", log)
self.assertTrue(os.path.exists(os.path.join(
self.test_folder, "optimized_TSSpredator", "stat_aaa.csv")))
class Example(object):
libs = ["GSM1649587_Hp26695_ML_B1_HS1_-TEX_forward.wig:notex:1:a:+",
"GSM1649587_Hp26695_ML_B1_HS1_-TEX_reverse.wig:notex:1:a:-",
"GSM1649588_Hp26695_ML_B1_HS1_+TEX_forward.wig:tex:1:a:+",
"GSM1649588_Hp26695_ML_B1_HS1_+TEX_reverse.wig:tex:1:a:-"]
best = {"tp_rate": 0.8, "fp_rate": 0.0005, "tp": 100,
"fp": 3, "fn": 45, "missing_ratio": 0.004}
best_para = {'re_factor': 0.3, 'processing': 3.3, 'enrichment': 2.5,
'height': 0.3, 'base_height': 0.0, 're_height': 0.2,
'factor': 0.7}
max_nums = {'re_factor': 0.9, 'processing': 9.0, 'enrichment': 9.0,
'height': 0.9, 'base_height': 0.1, 're_height': 0.9,
'factor': 0.9}
ref_para = {'re_factor': 0.4, 'processing': 0.3, 'enrichment': 2.0,
'height': 0.5, 'base_height': 0.0, 're_height': 0.1,
'factor': 0.5}
indexs = {'step': 0, 'change': False, 'num': 0, 'first': True,
'length': 0, 'exist': False, 'switch': 0, 'extend': False,
'count': 0}
fasta = """>aaa
AGACTTCCTGATAGTTAAACACATGAGATGTTGGCGTACACACCCGGTGTTTACGTATACGTTACTATGATATTTAGAAAAAACCCGTGTATACGTTCGTGA"""
gff_file = """NC_000915.1 RefSeq TSS 633 633 . - . Name=nusB;gene=nusB;locus_tag=HP0001;ID=gene0;Dbxref=GeneID:898756;gbkey=Gene
NC_000915.1 RefSeq TSS 1105 1105 . - . Name=ribH;gene=ribH;locus_tag=HP0002;ID=gene1;Dbxref=GeneID:898768;gbkey=Gene"""
manual_file = """NC_000915.1 RefSeq TSS 633 633 . - . Name=nusB;gene=nusB;locus_tag=HP0001;ID=gene0;Dbxref=GeneID:898756;gbkey=Gene
NC_000915.1 RefSeq TSS 1125 1125 . - . Name=ribH;gene=ribH;locus_tag=HP0002;ID=gene1;Dbxref=GeneID:898768;gbkey=Gene"""
stat = """0 he_2.4_rh_2.3_fa_7.6_rf_5.5_bh_0.086_ef_3.1_pf_5.2 TP=789 TP_rate=0.7000887311446318 FP=230 FP_rate=8.15542105020548e-05 FN=338 missing_ratio=0.29991126885536823
1 he_1.4_rh_1.2_fa_7.5_rf_2.5_bh_0.149_ef_5.2_pf_5.0 TP=595 TP_rate=0.5279503105590062 FP=195 FP_rate=6.91437871647856e-05 FN=532 missing_ratio=0.4720496894409938"""
gffs_dict = [
{"seq_id": "aaa", "source": "Refseq", "feature": "TSS", "start": 140,
"end": 140, "phase": ".", "strand": "+", "score": "."},
{"seq_id": "aaa", "source": "Refseq", "feature": "TSS", "start": 30,
"end": 30, "phase": ".", "strand": "+", "score": "."},
{"seq_id": "aaa", "source": "Refseq", "feature": "TSS", "start": 430,
"end": 430, "phase": ".", "strand": "-", "score": "."}]
attributes_gffs = [
{"ID": "TSS0", "Name": "TSS_0", "locus_tag": "AAA_00001"},
{"ID": "TSS1", "Name": "TSS_1", "locus_tag": "AAA_00002"},
{"ID": "TSS2", "Name": "TSS_2", "locus_tag": "AAA_00003"}]
mans_dict = [
{"seq_id": "aaa", "source": "Refseq", "feature": "TSS", "start": 140,
"end": 142, "phase": ".", "strand": "+", "score": "."},
{"seq_id": "aaa", "source": "Refseq", "feature": "TSS", "start": 40,
"end": 40, "phase": ".", "strand": "+", "score": "."},
{"seq_id": "aaa", "source": "Refseq", "feature": "TSS", "start": 5167,
"end": 5167, "phase": ".", "strand": "-", "score": "."}]
attributes_mans = [
{"ID": "TSS0", "Name": "TSS_0", "locus_tag": "AAA_00001"},
{"ID": "TSS1", "Name": "TSS_1", "locus_tag": "AAA_00002"},
{"ID": "TSS2", "Name": "TSS_2", "locus_tag": "AAA_00003"}]
gffs = []
mans = []
for index in range(0, 3):
gffs.append(Create_generator(
gffs_dict[index], attributes_gffs[index], "gff"))
mans.append(Create_generator(
mans_dict[index], attributes_mans[index], "gff"))
config = """TSSinClusterSelectionMethod = HIGHEST
allowedCompareShift = 1
allowedRepCompareShift = 1
annotation_1 = test_folder/aaa.gff
fivePrimeMinus_1a = test_folder/wig/GSM1649588_Hp26695_ML_B1_HS1_+TEX_reverse.wig
fivePrimePlus_1a = test_folder/wig/GSM1649588_Hp26695_ML_B1_HS1_+TEX_forward.wig
genome_1 = test_folder/aaa.fa
idList = 1
maxASutrLength = 100
maxGapLengthInGene = 500
maxNormalTo5primeFactor = 3.3
maxTSSinClusterDistance = 4
maxUTRlength = 200
min5primeToNormalFactor = 2.5
minCliffFactor = 0.7
minCliffFactorDiscount = 0.3
minCliffHeight = 0.3
minCliffHeightDiscount = 0.2
minNormalHeight = 0.0
minNumRepMatches = 1
minPlateauLength = 0
mode = cond
normPercentile = 0.9
normalMinus_1a = test_folder/wig/GSM1649587_Hp26695_ML_B1_HS1_-TEX_reverse.wig
normalPlus_1a = test_folder/wig/GSM1649587_Hp26695_ML_B1_HS1_-TEX_forward.wig
numReplicates = 1
numberOfDatasets = 1
outputDirectory = test_folder/MasterTable_1
outputPrefix_1 = t
outputPrefix_2 = e
outputPrefix_3 = s
outputPrefix_4 = t
projectName = aaa
superGraphCompatibility = igb
texNormPercentile = 0.5
writeGraphs = 0
writeNocornacFiles = 0"""
table = """SuperPos SuperStrand mapCount detCount Genome detected enriched stepHeight stepFactor enrichmentFactor classCount Pos Strand Locus_tag sRNA/asRNA Product UTRlength GeneLength Primary Secondary Internal Antisense Automated Manual Putative sRNA Putative asRNA Comment Sequence -50 nt upstream + TSS (51nt)
179 - 1 1 test 1 1 4.45 31.93 8.69 1 179 - orphan orphan NA NA 0 0 0 0 1 0 0 0 ACCCTTGAATTGAGGGTGTTTTATACCTAAATTTAAAAAATGATGCTATAA
681 - 1 1 test 1 1 4.2 3.0 3.54 2 681 - HP0001 transcription antitermination protein NusB 48 417 1 0 0 0 1 0 0 0 GATTGAAAGAGCGGGCAGTAAAGCCGGCAATAAGGGCTTTGAAGCGATGAG
681 - 1 1 test 1 1 4.2 3.0 3.54 2 681 - HP0002 6%2C7-dimethyl-8-ribityllumazine synthase NA 471 0 0 1 0 1 0 0 0 GATTGAAAGAGCGGGCAGTAAAGCCGGCAATAAGGGCTTTGAAGCGATGAG
1361 - 1 1 test 1 1 24.16 6.31 3.98 2 1361 - HP0002 6%2C7-dimethyl-8-ribityllumazine synthase 256 471 1 0 0 0 1 0 0 0 TATGGGAATTTAGTGGTGGATATGCGCTCTTTAAAAATCATGCGAGAATTT
1361 - 1 1 test 1 1 24.16 6.31 3.98 2 1361 - HP0003 2-dehydro-3-deoxyphosphooctonate aldolase NA 831 0 0 1 0 1 0 0 0 TATGGGAATTTAGTGGTGGATATGCGCTCTTTAAAAATCATGCGAGAATTT
2689 - 1 1 test 1 1 10.15 10.67 11.97 2 2689 - HP0004 carbonic anhydrase IcfA 92 666 1 0 0 0 1 0 0 1 CAATGCGACACATAATTGCATGAAAGCCCTTTAAAGTGTAAAATAACGCCA
2689 - 1 1 test 1 1 10.15 10.67 11.97 2 2689 - HP0005 orotidine 5%27-phosphate decarboxylase NA 684 0 0 0 1 1 0 0 1 CAATGCGACACATAATTGCATGAAAGCCCTTTAAAGTGTAAAATAACGCCA"""
if __name__ == "__main__":
unittest.main()
|
11573159
|
import sys
def main():
new_version = sys.argv[1]
with open("Changelog.md") as stream:
contents = stream.read()
    if new_version not in contents:
sys.exit("new_version: %s not found in Changelog" % new_version)
if __name__ == "__main__":
main()
|
11573169
|
from ._package_utils \
import \
parse_package_full_name
from .errors \
import \
DepSolverError
from .constraints \
import \
Equal, GEQ, GT, LEQ, LT, Not
from .package \
import \
parse_package_full_name
from .requirement_parser \
import \
RawRequirementParser
from .version \
import \
MaxVersion, MinVersion, SemanticVersion
class Requirement(object):
"""Requirements instances represent a 'package requirement', that is a
package + version constraints.
Arguments
---------
name: str
PackageInfo name
specs: seq
Sequence of constraints
"""
@classmethod
def from_string(cls, requirement_string, version_factory=SemanticVersion.from_string):
"""Creates a new Requirement from a requirement string.
Arguments
---------
requirement_string: str
The requirement string, e.g. 'numpy >= 1.3.0'
Examples
--------
# This creates a requirement that will match any version of numpy
>>> Requirement.from_string("numpy")
numpy *
        # This creates a requirement that will only match versions of numpy >= 1.3.0
>>> Requirement.from_string("numpy >= 1.3.0")
numpy >= 1.3.0
"""
parser = RequirementParser(version_factory)
requirements = parser.parse(requirement_string)
if len(requirements) != 1:
raise DepSolverError("Invalid requirement string %r" % requirement_string)
else:
return requirements[0]
@classmethod
def from_package_string(cls, package_string, version_factory=SemanticVersion.from_string):
"""Creates a new Requirement from a package string.
This is equivalent to the requirement 'package.name == package.version'
Arguments
---------
package_string: str
The package string, e.g. 'numpy-1.3.0'
"""
name, version = parse_package_full_name(package_string)
return cls(name, [Equal(version)], version_factory)
def __init__(self, name, specs, version_factory=SemanticVersion.from_string):
self.name = name
self._min_bound = MinVersion()
self._max_bound = MaxVersion()
        # transform GT and LT into NOT + corresponding GEQ/LEQ
# Take the min of GEQ, max of LEQ
equals = set(req for req in specs if isinstance(req, Equal))
if len(equals) > 1:
self._cannot_match = True
self._equal = None
elif len(equals) == 1:
self._cannot_match = False
self._equal = version_factory(equals.pop().version)
self._min_bound = self._max_bound = self._equal
else:
self._cannot_match = False
self._equal = None
self._not_equals = set(version_factory(req.version) for req in specs if isinstance(req, Not))
gts = [req for req in specs if isinstance(req, GT)]
lts = [req for req in specs if isinstance(req, LT)]
geq = [req for req in specs if isinstance(req, GEQ)]
geq.extend(gts)
geq_versions = [version_factory(g.version) for g in geq]
if len(geq_versions) > 0:
self._min_bound = max(geq_versions)
leq = [req for req in specs if isinstance(req, LEQ)]
leq.extend(lts)
leq_versions = [version_factory(l.version) for l in leq]
if len(leq_versions) > 0:
self._max_bound = min(leq_versions)
self._not_equals.update(version_factory(gt.version) for gt in gts)
self._not_equals.update(version_factory(lt.version) for lt in lts)
if self._min_bound > self._max_bound:
self._cannot_match = True
def __repr__(self):
r = []
if self._cannot_match:
r.append("%s None" % self.name)
elif self._equal:
r.append("%s == %s" % (self.name, self._equal))
else:
if self._min_bound != MinVersion():
if self._min_bound in self._not_equals:
operator_string = ">"
else:
operator_string = ">="
r.append("%s %s %s" % (self.name, operator_string, self._min_bound))
if self._max_bound != MaxVersion():
if self._max_bound in self._not_equals:
operator_string = "<"
else:
operator_string = "<="
r.append("%s %s %s" % (self.name, operator_string, self._max_bound))
if self.is_universal:
r.append("%s *" % self.name)
for neq in self._not_equals:
if neq > self._min_bound and neq < self._max_bound:
r.append("%s != %s" % (self.name, neq))
return ", ".join(r)
@property
def is_universal(self):
"""Returns True if the requirement can matche any version."""
return self._min_bound == MinVersion() and self._max_bound == MaxVersion() \
and len(self._not_equals) == 0
def __eq__(self, other):
return repr(self) == repr(other)
def __hash__(self):
return hash(repr(self))
def _nonempty_interval_intersection(self, provider):
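        # True iff the version intervals [min_bound, max_bound] of self and provider overlap.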
return self._max_bound >= provider._min_bound and provider._max_bound >= self._min_bound
def matches(self, provider):
"""Return True if provider requirement and this requirement are
compatible.
Arguments
---------
provider: Requirement
The requirement to match
Examples
--------
>>> req = Requirement.from_string("numpy >= 1.3.0")
>>> req.matches(Requirement.from_string("numpy"))
True
>>> req.matches(Requirement.from_string("numpy >= 1.2.0"))
True
>>> req.matches(Requirement.from_string("numpy >= 1.4.0"))
True
"""
if self.name != provider.name:
return False
if self._cannot_match:
return False
if self._nonempty_interval_intersection(provider):
if self._equal or provider._equal:
return self._equal not in provider._not_equals \
and provider._equal not in self._not_equals
else:
return True
else:
return False
class RequirementParser(object):
def __init__(self, version_factory=SemanticVersion.from_string):
self._parser = RawRequirementParser()
self.version_factory = version_factory
def iter_parse(self, requirement_string):
for distribution_name, specs in self._parser.parse(requirement_string).items():
yield Requirement(distribution_name, specs, self.version_factory)
def parse(self, requirement_string):
return [r for r in self.iter_parse(requirement_string)]
|
11573187
|
import upwork
from upwork.routers.hr.freelancers import applications
from unittest.mock import patch
@patch.object(upwork.Client, "get")
def test_get_list(mocked_method):
applications.Api(upwork.Client).get_list({})
mocked_method.assert_called_with("/hr/v4/contractors/applications", {})
@patch.object(upwork.Client, "get")
def test_get_specific(mocked_method):
applications.Api(upwork.Client).get_specific("reference")
mocked_method.assert_called_with("/hr/v4/contractors/applications/reference")
|
11573199
|
import argparse
import tasks
def modify_command_options(opts):
if opts.dataset == 'voc':
opts.num_classes = 21
if opts.dataset == 'ade':
opts.num_classes = 150
if not opts.visualize:
opts.sample_num = 0
if opts.where_to_sim == 'GPU_server':
opts.net_pytorch = False
if opts.method is not None:
if opts.method == 'FT':
pass
if opts.method == 'LWF':
opts.loss_kd = 100
        if opts.method == 'CIL':
            opts.loss_CIL = 1
if opts.method == 'LWF-MC':
opts.icarl = True
opts.icarl_importance = 10
if opts.method == 'ILT':
opts.loss_kd = 100
opts.loss_de = 100
if opts.method == 'EWC':
opts.regularizer = "ewc"
opts.reg_importance = 1000
if opts.method == 'RW':
opts.regularizer = "rw"
opts.reg_importance = 1000
if opts.method == 'PI':
opts.regularizer = "pi"
opts.reg_importance = 1000
if opts.method == 'MiB':
opts.loss_kd = 10
opts.unce = True
opts.unkd = True
opts.init_balanced = True
if opts.method == 'SDR':
# Note: for the best results these hyperparameters may need to be changed.
# Typical ranges are:
# loss_kd : 1 - 100
# loss_de_prototypes : 1e-3 - 1e-1
# lfc (same value is used for both attractive and repulsive) : 1e-3 - 1e-2
# lfs : 1e-5 - 1e-3
# A kick-start could be to use loss_kd 10, loss_de_prototypes 1e-2, lfc 1e-3 and lfs 1e-4
opts.loss_kd = 100
opts.unce = True
opts.unkd = True
opts.loss_featspars = 1e-3
opts.lfs_normalization = 'max_maskedforclass'
opts.lfs_shrinkingfn = 'exponential'
opts.lfs_loss_fn_touse = 'ratio'
opts.loss_de_prototypes = 0.01
opts.loss_de_prototypes_sumafter = True
opts.lfc_sep_clust = 1e-3
opts.loss_fc = 1e-3
opts.no_overlap = not opts.overlap
opts.no_cross_val = not opts.cross_val
return opts
def get_argparser():
parser = argparse.ArgumentParser()
    # NB: on CPU this is not feasible because of the inplace_ABN functions.
    # On GPU_windows, apex needs to be removed since it is not supported.
    # On GPU_server, the code runs as it has been downloaded.
parser.add_argument('--where_to_sim', type=str, choices=['GPU_windows', 'GPU_server', 'CPU', 'CPU_windows'], default='GPU_server')
parser.add_argument("--net_pytorch", action='store_false', default=True,
help='whether to use default resnet from pytorch or to use the network as in MiB (default: True)')
# Performance Options
parser.add_argument("--local_rank", type=int, default=0)
parser.add_argument("--random_seed", type=int, default=42,
help="random seed (default: 42)")
parser.add_argument("--num_workers", type=int, default=1,
help='number of workers (default: 1)')
# Dataset Options
parser.add_argument("--data_root", type=str, default='data',
help="path to Dataset")
parser.add_argument("--dataset", type=str, default='voc',
choices=['voc', 'ade'], help='Name of dataset')
parser.add_argument("--num_classes", type=int, default=None,
help="num classes (default: None), set by method modify_command_options()")
# Method Options
# BE CAREFUL USING THIS, THEY WILL OVERRIDE ALL THE OTHER PARAMETERS.
# This argument serves to use default parameters for the methods defined in function: modify_command_options()
parser.add_argument("--method", type=str, default=None,
choices=['FT', 'LWF', 'LWF-MC', 'ILT', 'EWC', 'RW', 'PI', 'MiB', 'CIL', 'SDR'],
help="The method you want to use. BE CAREFUL USING THIS, IT MAY OVERRIDE OTHER PARAMETERS.")
# Train Options
parser.add_argument("--epochs", type=int, default=30,
help="epoch number (default: 30)")
parser.add_argument("--fix_bn", action='store_true', default=False,
help='fix batch normalization during training (default: False)')
parser.add_argument("--batch_size", type=int, default=8,
help='batch size (default: 8)')
parser.add_argument("--crop_size", type=int, default=512,
help="crop size (default: 513)")
parser.add_argument("--lr", type=float, default=0.007,
help="learning rate (default: 0.007)")
parser.add_argument("--momentum", type=float, default=0.9,
help='momentum for SGD (default: 0.9)')
parser.add_argument("--weight_decay", type=float, default=1e-4,
help='weight decay (default: 1e-4)')
parser.add_argument("--lr_policy", type=str, default='poly',
choices=['poly', 'step'], help="lr schedule policy (default: poly)")
parser.add_argument("--lr_decay_step", type=int, default=5000,
help="decay step for stepLR (default: 5000)")
parser.add_argument("--lr_decay_factor", type=float, default=0.1,
help="decay factor for stepLR (default: 0.1)")
parser.add_argument("--lr_power", type=float, default=0.9,
help="power for polyLR (default: 0.9)")
parser.add_argument("--bce", default=False, action='store_true',
help="Whether to use BCE or not (default: no)")
# whether to consider clustering on feature spaces as loss
parser.add_argument("--loss_fc", type=float, default=0., # Features Clustering
help="Set this hyperparameter to a value greater than "
"0 to enable features clustering loss")
parser.add_argument("--lfc_L2normalized", action='store_true', default=False,
help="enable features clustering loss L2 normalized (default False)")
parser.add_argument("--lfc_nobgr", action='store_true', default=False,
help="enable features clustering loss without background (default False)")
parser.add_argument("--lfc_orth_sep", action='store_true', default=False,
help="Orthogonal separation loss applied on the current prototypes only")
parser.add_argument("--lfc_orth_maxonly", action='store_true', default=False,
help="Orthogonal separation loss, only the maximum value is considered")
parser.add_argument("--lfc_sep_clust", type=float, default=0., # Separation of Clusters
help="Set this hyperparameter to a value greater than "
"0 to enable separation between clusters loss")
parser.add_argument("--lfc_sep_clust_ison_proto", action='store_true', default=False,
help="enable separation clustering loss on prototypes (default False)")
# whether to consider Soft Nearest Neighbor Loss (SNNL) as loss at features space
parser.add_argument("--loss_SNNL", type=float, default=0., # SNNL
help="Set this hyperparameter to a value greater than "
"0 to enable SNNL at feature level")
parser.add_argument("--loss_featspars", type=float, default=0., # features sparsification
help="Set this hyperparameter to a value greater than "
"0 to enable features sparsification loss")
parser.add_argument("--lfs_normalization", type=str, default='max_foreachfeature',
choices=['L1', 'L2', 'max_foreachfeature', 'max_maskedforclass', 'max_overall', 'softmax'],
help="The method you want to use to normalize lfs")
parser.add_argument("--lfs_shrinkingfn", type=str, default='squared',
choices=['squared', 'power3', 'exponential'],
help="The method you want to use to shrink the lfs")
parser.add_argument("--lfs_loss_fn_touse", type=str, default='ratio',
choices=['ratio', 'max_minus_ratio', 'lasso', 'entropy'],
help="The loss function you want to use for the lfs")
parser.add_argument("--loss_bgruncertainty", type=float, default=0.,
help="Set this hyperparameter to a value greater than "
"0 to enable background uncertainty loss")
parser.add_argument("--lbu_inverse", action='store_true', default=False,
help="enable inverse on lbu loss")
parser.add_argument("--lbu_mean", action='store_true', default=False,
help="enable lbu_mean on lbu loss")
parser.add_argument("--loss_CIL", type=float, default=0.,
help="Set this hyperparameter to a value greater than "
"0 to enable loss of CIL paper")
parser.add_argument("--feat_dim", type=float, default=2048,
help="Dimensionality of the features space (default: 2048 as in Resnet-101)")
# Validation Options
parser.add_argument("--val_on_trainset", action='store_true', default=False,
help="enable validation on train set (default: False)")
parser.add_argument("--cross_val", action='store_true', default=False,
help="If validate on training or on validation (default: Train)")
parser.add_argument("--crop_val", action='store_false', default=True,
help='do crop for validation (default: True)')
# Logging Options
parser.add_argument("--logdir", type=str, default='./logs',
help="path to Log directory (default: ./logs)")
parser.add_argument("--name", type=str, default='Experiment',
help="name of the experiment - to append to log directory (default: Experiment)")
parser.add_argument("--sample_num", type=int, default=0,
help='number of samples for visualization (default: 0)')
parser.add_argument("--debug", action='store_true', default=False,
help="verbose option")
parser.add_argument("--visualize", action='store_false', default=True,
help="visualization on tensorboard (def: Yes)")
parser.add_argument("--print_interval", type=int, default=10,
help="print interval of loss (default: 10)")
parser.add_argument("--val_interval", type=int, default=15,
help="epoch interval for eval (default: 15)")
parser.add_argument("--ckpt_interval", type=int, default=1,
help="epoch interval for saving model (default: 1)")
# Model Options
parser.add_argument("--backbone", type=str, default='resnet101',
                        choices=['resnet50', 'resnet101'], help='backbone for the body (def: resnet101)')
parser.add_argument("--output_stride", type=int, default=16,
choices=[8, 16], help='stride for the backbone (def: 16)')
parser.add_argument("--no_pretrained", action='store_true', default=False,
                        help='Whether to use pretrained or not (def: True)')
parser.add_argument("--norm_act", type=str, default="iabn_sync",
                        choices=['iabn_sync', 'iabn', 'abn', 'std'], help='Which BN to use (def: iabn_sync)')
parser.add_argument("--fusion-mode", metavar="NAME", type=str, choices=["mean", "voting", "max"], default="mean",
help="How to fuse the outputs. Options: 'mean', 'voting', 'max'")
parser.add_argument("--pooling", type=int, default=32,
help='pooling in ASPP for the validation phase (def: 32)')
# Test and Checkpoint options
parser.add_argument("--test", action='store_true', default=False,
help="Whether to train or test only (def: train and test)")
parser.add_argument("--ckpt", default=None, type=str,
help="path to trained model. Leave it None if you want to retrain your model")
# Parameters for Knowledge Distillation of ILTSS (https://arxiv.org/abs/1907.13372)
parser.add_argument("--freeze", action='store_true', default=False,
help="Use this to freeze the feature extractor in incremental steps")
parser.add_argument("--loss_de", type=float, default=0., # Distillation on Encoder
help="Set this hyperparameter to a value greater than "
"0 to enable distillation on Encoder (L2)")
parser.add_argument("--loss_de_maskedold", default=False, action='store_true',
help="If enabled, loss_de is masked to consider only old classes features (default: False)")
parser.add_argument("--loss_de_prototypes", type=float, default=0., # Distillation on Encoder
help="Set this hyperparameter to a value greater than "
"0 to enable loss_de with prototypes (idea 1b)")
parser.add_argument("--loss_de_prototypes_sumafter", action='store_true', default=False,
help="Whether to sum after of average during loss_DE")
parser.add_argument("--loss_de_cosine", action='store_true', default=False,
help="Use cosine similarity ad distillation function on the encoded features")
parser.add_argument("--loss_kd", type=float, default=0., # Distillation on Output
help="Set this hyperparameter to a value greater than "
"0 to enable Knowledge Distillation (Soft-CrossEntropy)")
# Parameters for EWC, RW, and SI (from <NAME> https://arxiv.org/abs/1801.10112)
parser.add_argument("--regularizer", default=None, type=str, choices=['ewc', 'rw', 'pi'],
help="regularizer you want to use. Default is None")
parser.add_argument("--reg_importance", type=float, default=1.,
help="set this par to a value greater than 0 to enable regularization")
parser.add_argument("--reg_alpha", type=float, default=0.9,
help="Hyperparameter for RW and EWC that controls the update of Fisher Matrix")
parser.add_argument("--reg_no_normalize", action='store_true', default=False,
help="If EWC, RW, PI must be normalized or not")
parser.add_argument("--reg_iterations", type=int, default=10,
help="If RW, the number of iterations after each the update of the score is done")
# Arguments for ICaRL (from https://arxiv.org/abs/1611.07725)
parser.add_argument("--icarl", default=False, action='store_true',
help="If enable ICaRL or not (def is not)")
parser.add_argument("--icarl_importance", type=float, default=1.,
help="the regularization importance in ICaRL (def is 1.)")
parser.add_argument("--icarl_disjoint", action='store_true', default=False,
help="Which version of icarl is to use (def: combined)")
parser.add_argument("--icarl_bkg", action='store_true', default=False,
help="If use background from GT (def: No)")
# METHODS
parser.add_argument("--init_balanced", default=False, action='store_true',
help="Enable Background-based initialization for new classes")
parser.add_argument("--unkd", default=False, action='store_true',
help="Enable Unbiased Knowledge Distillation instead of Knowledge Distillation")
parser.add_argument("--alpha", default=1., type=float,
help="The parameter to hard-ify the soft-labels. Def is 1.")
parser.add_argument("--unce", default=False, action='store_true',
help="Enable Unbiased Cross Entropy instead of CrossEntropy")
# Incremental parameters
parser.add_argument("--task", type=str, default="19-1", choices=tasks.get_task_list(),
help="Task to be executed (default: 19-1)")
parser.add_argument("--step", type=int, default=0,
help="The incremental step in execution (default: 0)")
# Consider the dataset as done in
# http://openaccess.thecvf.com/content_ICCVW_2019/papers/TASK-CV/Michieli_Incremental_Learning_Techniques_for_Semantic_Segmentation_ICCVW_2019_paper.pdf
# and https://arxiv.org/pdf/1911.03462.pdf : same as disjoint scenario (default) but with label of old classes in
# new images, if present.
parser.add_argument("--no_mask", action='store_true', default=False,
help="Use this to not mask the old classes in new training set, i.e. use labels of old classes"
" in new training set (if present)")
parser.add_argument("--overlap", action='store_true', default=False,
help="Use this to not use the new classes in the old training set")
parser.add_argument("--step_ckpt", default=None, type=str,
help="path to trained model at previous step. Leave it None if you want to use def path")
parser.add_argument('--opt_level', type=str, choices=['O0', 'O1', 'O2', 'O3'], default='O0')
return parser
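# Usage sketch: build the parser, parse, then let the method presets override the options, e.g.
#     opts = modify_command_options(get_argparser().parse_args())
# Passing `--method MiB --dataset voc` yields opts.loss_kd == 10 and opts.num_classes == 21.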
|
11573223
|
import warnings
from pathlib import Path
import context # noqa
import typer
from edsnlp.conjugator import conjugate
from edsnlp.pipelines.qualifiers.hypothesis.patterns import verbs_eds, verbs_hyp
from edsnlp.pipelines.qualifiers.negation.patterns import verbs as neg_verbs
from edsnlp.pipelines.qualifiers.reported_speech.patterns import verbs as rspeech_verbs
warnings.filterwarnings("ignore")
def conjugate_verbs(
output_path: Path = typer.Argument(
"edsnlp/resources/verbs.csv", help="Path to the output CSV table."
)
) -> None:
"""
Convenience script to automatically conjugate a set of verbs,
using mlconjug3 library.
"""
all_verbs = set(neg_verbs + rspeech_verbs + verbs_eds + verbs_hyp)
typer.echo(f"Conjugating {len(all_verbs)} verbs...")
df = conjugate(list(all_verbs))
typer.echo(f"Saving to {output_path}")
output_path.parent.mkdir(exist_ok=True, parents=True)
df.to_csv(output_path, index=False)
typer.echo("Done !")
if __name__ == "__main__":
typer.run(conjugate_verbs)
|
11573229
|
from __future__ import annotations
from typing import List, Dict
from reamber.algorithms.playField import PlayField
from reamber.algorithms.playField.parts.PFDrawable import PFDrawable
from reamber.base.RAConst import RAConst
class PFDrawBeatLines(PFDrawable):
def __init__(self,
divisions: List = None,
default_color: str = "#666666",
division_colors: Dict = None):
""" Draws beat lines by division specified
Supported Default Snap Colors: 1, 2, 3, 4, 5, 6, 8, 12, 16, 24, 32.
The colors can be found in RAConst.
        You can specify non-default snaps (including floats) and your custom division_colors.
The new colors will override the default colors if they overlap.
:param divisions: Defaults to [1, 2, 4], will draw 1/1, 1/2, 1/4 lines in the field
:param default_color: Default color to use when the snap color is not supported.
:param division_colors: A custom color dictionary to use. This can be specified if you want to override colors. \
A template can be found in RAConst.DIVISION_COLOR.
"""
self.divisions = divisions if divisions else [1, 2, 4] # Default divisions
self.default_color = default_color
# The unpacking operator resolves the dictionary as specified in the docstring
self.division_colors = \
{**RAConst.DIVISION_COLORS, **division_colors} if division_colors else RAConst.DIVISION_COLORS
def draw(self, pf: PlayField) -> PlayField:
""" Refer to __init__ """
# Need to draw it from most common to least common, else it'll overlap incorrectly
for division in sorted(self.divisions, reverse=True):
if division not in self.division_colors.keys():
color = self.default_color # Default color if val not found
else:
color = self.division_colors[division]
for beat in pf.m.bpms.snap_offsets(nths=division, last_offset=pf.m.stack().offset.max()):
pf.canvas_draw.line([pf.get_pos(beat),
pf.get_pos(beat, pf.keys)],
# [(0, pf.canvasH - int((beat - pf.start) / pf.durationPerPx)),
# (pf.canvasW - pf.padding, pf.canvasH - int((beat - pf.start) / pf.durationPerPx))],
fill=color)
return pf
|
11573230
|
from torch.utils.data import Dataset
import torch
import config
import numpy as np
from scipy.stats import norm
from tqdm import tqdm
class DKTDataset(Dataset):
def __init__(self, group, max_seq, min_seq, overlap_seq, user_performance, n_levels, mu_itv):
self.samples = group
self.max_seq = max_seq
self.min_seq = min_seq
self.overlap_seq = overlap_seq
self.user_performance = user_performance
self.data = []
self.n_levels = n_levels
self.mu_itv = mu_itv
self.mu_levels, self.std_levels = self._fit_norm(user_performance)
for user_id, (exercise, part, correctness, elapsed_time, lag_time_s, lag_time_m, lag_time_d, p_explanation) in tqdm(self.samples.items(), total=len(self.samples), desc="Loading Dataset"):
content_len = len(exercise)
if content_len < self.min_seq:
continue # skip sequence with too few contents
if content_len > self.max_seq:
initial = content_len % self.max_seq
if initial >= self.min_seq:
self.data.extend([(user_id, np.append([config.START], exercise[:initial]),
np.append([config.START], part[:initial]),
np.append([config.START], correctness[:initial]),
np.append([config.START], elapsed_time[:initial]),
np.append([config.START], lag_time_s[:initial]),
np.append([config.START], lag_time_m[:initial]),
np.append([config.START], lag_time_d[:initial]),
np.append([config.START], p_explanation[:initial]))])
for seq in range(content_len // self.max_seq):
start = initial + seq * self.max_seq
end = initial + (seq + 1) * self.max_seq
self.data.extend([(user_id, np.append([config.START], exercise[start: end]),
np.append([config.START], part[start: end]),
np.append([config.START], correctness[start: end]),
np.append([config.START], elapsed_time[start: end]),
np.append([config.START], lag_time_s[start: end]),
np.append([config.START], lag_time_m[start: end]),
np.append([config.START], lag_time_d[start: end]),
np.append([config.START], p_explanation[start: end]))])
else:
self.data.extend([(user_id, np.append([config.START], exercise),
np.append([config.START], part),
np.append([config.START], correctness),
np.append([config.START], elapsed_time),
np.append([config.START], lag_time_s),
np.append([config.START], lag_time_m),
np.append([config.START], lag_time_d),
np.append([config.START], p_explanation))])
def _fit_norm(self, user_perf):
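        # Fit one normal distribution over all users' performance, then derive n_levels
        # component means spaced mu_itv apart around it, each with an equal share of the variance.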
data = [d for d in user_perf.values()]
mu, std = norm.fit(data)
mu_levels = [mu - (self.n_levels - 1) * self.mu_itv / 2 + i * self.mu_itv for i in range(self.n_levels)]
std_levels = [np.sqrt(std ** 2 / self.n_levels) for _ in range(self.n_levels)]
return mu_levels, std_levels
def _predict_level(self, user_perf, mu_levels, std_levels):
probs = []
for mu, std in zip(mu_levels, std_levels):
probs.append(norm.pdf(user_perf, mu, std))
probs = np.array(probs)
probs = probs / sum(probs)
return probs
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
raw_user_id, raw_content_ids, raw_part, raw_correctness, raw_elapsed_time, raw_lag_time_s, raw_lag_time_m, raw_lag_time_d, raw_p_explan = self.data[idx]
if raw_user_id in self.user_performance:
user_per = self.user_performance[raw_user_id]
probs = self._predict_level(user_per, self.mu_levels, self.std_levels)
else:
probs = np.ones(len(self.mu_levels))
probs /= len(self.mu_levels)
seq_len = len(raw_content_ids)
input_content_ids = np.zeros(self.max_seq, dtype=np.int64)
input_part = np.zeros(self.max_seq, dtype=np.int64)
input_correctness = np.zeros(self.max_seq, dtype=np.int64)
input_elapsed_time = np.zeros(self.max_seq, dtype=np.int64)
input_lag_time_s = np.zeros(self.max_seq, dtype=np.int64)
input_lag_time_m = np.zeros(self.max_seq, dtype=np.int64)
input_lag_time_d = np.zeros(self.max_seq, dtype=np.int64)
input_p_explan = np.zeros(self.max_seq, dtype=np.int64)
label = np.zeros(self.max_seq, dtype=np.int64)
if seq_len == self.max_seq + 1: # START token
input_content_ids[:] = raw_content_ids[1:]
input_part[:] = raw_part[1:]
input_p_explan[:] = raw_p_explan[1:]
input_correctness[:] = raw_correctness[:-1]
input_elapsed_time[:] = np.append(raw_elapsed_time[0], raw_elapsed_time[2:])
input_lag_time_s[:] = np.append(raw_lag_time_s[0], raw_lag_time_s[2:])
input_lag_time_m[:] = np.append(raw_lag_time_m[0], raw_lag_time_m[2:])
input_lag_time_d[:] = np.append(raw_lag_time_d[0], raw_lag_time_d[2:])
label[:] = raw_correctness[1:] - 2
else:
input_content_ids[-(seq_len - 1):] = raw_content_ids[1:] # Delete START token
input_part[-(seq_len - 1):] = raw_part[1:]
input_p_explan[-(seq_len - 1):] = raw_p_explan[1:]
input_correctness[-(seq_len - 1):] = raw_correctness[:-1]
input_elapsed_time[-(seq_len - 1):] = np.append(raw_elapsed_time[0], raw_elapsed_time[2:])
input_lag_time_s[-(seq_len - 1):] = np.append(raw_lag_time_s[0], raw_lag_time_s[2:])
input_lag_time_m[-(seq_len - 1):] = np.append(raw_lag_time_m[0], raw_lag_time_m[2:])
input_lag_time_d[-(seq_len - 1):] = np.append(raw_lag_time_d[0], raw_lag_time_d[2:])
label[-(seq_len - 1):] = raw_correctness[1:] - 2
_input = {"content_id": input_content_ids.astype(np.int64),
"part": input_part.astype(np.int64),
"correctness": input_correctness.astype(np.int64),
"elapsed_time": input_elapsed_time.astype(np.int64),
"lag_time_s": input_lag_time_s.astype(np.int64),
"lag_time_m": input_lag_time_m.astype(np.int64),
"lag_time_d": input_lag_time_d.astype(np.int64),
"prior_explan": input_p_explan.astype(np.int64)}
return _input, label, probs
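# Minimal usage sketch (not part of the original file). It assumes `group` maps each
# user_id to the per-user feature arrays unpacked in __init__ and that config.START is
# defined; the placeholder values below are hypothetical.
if __name__ == "__main__":
    from torch.utils.data import DataLoader
    train_group = {}                      # placeholder: prepared elsewhere from the raw logs
    user_perf = {0: 0.5, 1: 0.7}          # placeholder: mean correctness per user
    dataset = DKTDataset(train_group, max_seq=100, min_seq=5, overlap_seq=20,
                         user_performance=user_perf, n_levels=4, mu_itv=0.1)
    loader = DataLoader(dataset, batch_size=64, shuffle=True)
    for batch_input, batch_label, batch_probs in loader:
        pass  # feed batch_input / batch_label / batch_probs into the training step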
|
11573233
|
import os
import sys
import copy
import logging
from checker import *
from .ofp import register_ofp_creators
from .ofp import OfpBase
from .ofp_match import SCE_MATCH
from .ofp_match import OfpMatchCreator
from .ofp_instruction import SCE_INSTRUCTIONS
from .ofp_instruction import OfpInstructionCreator
# YAML:
# flow_stats:
# cookie: 0
# match:
# in_port: 1
# eth_dst: "ff:ff:ff:ff:ff:ff"
# instructions:
# - apply_actions:
# actions:
# - output:
# port: 0
SCE_FLOW_STATS = "flow_stats"
class OfpFlowStatsCreator(OfpBase):
@classmethod
def create(cls, test_case_obj, dp, ofproto, ofp_parser, params):
# FlowStats.
kws = copy.deepcopy(params)
# match.
match = None
if SCE_MATCH in params:
match = OfpMatchCreator.create(test_case_obj,
dp, ofproto,
ofp_parser,
params[SCE_MATCH])
kws[SCE_MATCH] = match
# instructions.
instructions = []
if SCE_INSTRUCTIONS in params:
instructions = OfpInstructionCreator.create(test_case_obj,
dp, ofproto,
ofp_parser,
params[SCE_INSTRUCTIONS])
kws[SCE_INSTRUCTIONS] = instructions
# create FlowStats.
msg = ofp_parser.OFPFlowStats(**kws)
msg._set_targets(["table_id", "priority",
"idle_timeout", "hard_timeout", "flags",
"cookie", "packet_count", "byte_count",
"match", "instructions"])
return msg
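# Sketch of how this creator is typically driven (hypothetical values mirroring the
# YAML example above; test_case_obj, dp, ofproto and ofp_parser come from the test framework):
#   params = {"cookie": 0,
#             "match": {"in_port": 1, "eth_dst": "ff:ff:ff:ff:ff:ff"},
#             "instructions": [{"apply_actions": {"actions": [{"output": {"port": 0}}]}}]}
#   msg = OfpFlowStatsCreator.create(test_case_obj, dp, ofproto, ofp_parser, params)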
|
11573245
|
class MockSerial(object):
def __init__(self, response):
self.response = response
self.inputs = []
def write(self, message):
self.inputs.append(message)
def readlines(self):
return self.response
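# Minimal usage sketch (not from the original file): MockSerial can stand in for a
# pyserial-style port in tests, since the code under test only needs write()/readlines().
if __name__ == "__main__":
    mock_port = MockSerial(response=[b"OK\r\n"])
    mock_port.write(b"AT\r\n")             # recorded in mock_port.inputs instead of being sent
    assert mock_port.inputs == [b"AT\r\n"]
    assert mock_port.readlines() == [b"OK\r\n"]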
|
11573277
|
from typing import List
class Solution:
def maxAreaOfIsland(self, grid: List[List[int]]) -> int:
res = 0
m, n = len(grid), len(grid[0])
dirs = [(1,0),(-1,0),(0,1),(0,-1)]
for i in range(m):
for j in range(n):
if grid[i][j] == 1:
stack = []
stack.append((i,j))
grid[i][j] = '#'
cur_area = 1
while stack:
x, y = stack.pop()
for dx, dy in dirs:
nx = dx + x
ny = dy + y
if 0<=nx<m and 0<=ny<n and grid[nx][ny] == 1:
cur_area += 1
stack.append((nx,ny))
grid[nx][ny] = '#'
res = max(res, cur_area)
return res
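# Quick sanity check (illustrative, not part of the original solution): the three
# connected 1s form the largest island. Note the method mutates the input grid.
if __name__ == "__main__":
    grid = [[0, 1, 0],
            [1, 1, 0],
            [0, 0, 1]]
    assert Solution().maxAreaOfIsland(grid) == 3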
|
11573290
|
from contextlib import contextmanager
from mock import MagicMock
from mock import patch
import os
import chainer
from chainerrl.agents import a3c
import chainerrl.distribution
import gym
import numpy
import pytest
from chainerrl_visualizer import launch_visualizer
import chainerrl_visualizer.launcher as launcher
@contextmanager
def change_execution_dir(path):
cwd = os.getcwd()
try:
os.chdir(path)
yield
finally:
os.chdir(cwd)
# The argument must be a subclass of Agent; A3C is used here with no particular significance.
class MockAgent(MagicMock, a3c.A3C):
def __init__(self, *args, **kwargs):
super(MagicMock, self).__init__(*args, **kwargs)
@pytest.mark.parametrize('outs', [
(
chainer.Variable(numpy.zeros(1).reshape(1, 1)),
chainerrl.distribution.SoftmaxDistribution(chainer.Variable(numpy.zeros(2).reshape(1, 2)))
), (
chainerrl.action_value.DiscreteActionValue(chainer.Variable(numpy.ones(2).reshape(1, 2)))
)
])
def test_launch_visualizer(tmpdir, outs):
agent = MockAgent()
agent.model = MagicMock(side_effect=lambda *args: outs)
gymlike_env = MagicMock(spec=gym.Env)
action_meanings = {0: 'hoge', 1: 'fuga'}
    # These assertions check that the instances are called correctly.
    # In the target launcher function, the instances are called internally from a forked
    # process, so `assert_called` and shared values cannot be used.
    # It is not elegant, but touch files are used to record whether they were called.
websrv_called_touch = os.path.join(tmpdir, 'websrv_called.log')
worker_called_touch = os.path.join(tmpdir, 'worker_called.log')
def assert_server_called(*args):
assert len(args) == 12
assert id(args[0]) == id(agent)
assert id(args[1]) == id(gymlike_env)
open(websrv_called_touch, 'w').close()
web_server = MagicMock(side_effect=assert_server_called)
def assert_worker_called(*args):
assert len(args) == 6
assert id(args[0]) == id(agent)
assert id(args[1]) == id(gymlike_env)
        open(worker_called_touch, 'w').close()
job_worker = MagicMock(side_effect=assert_worker_called)
webbrowser = MagicMock()
webbrowser.open_new_tab = MagicMock()
with change_execution_dir(tmpdir):
with patch('chainerrl_visualizer.launcher.modify_gym_env_render') as modify_gymenv, \
patch('chainerrl_visualizer.launcher.web_server', web_server), \
patch('chainerrl_visualizer.launcher.job_worker', job_worker), \
patch('chainerrl_visualizer.launcher.webbrowser', webbrowser):
launch_visualizer(agent, gymlike_env, action_meanings)
modify_gymenv.assert_called_once()
assert os.path.exists(websrv_called_touch)
assert os.path.exists(worker_called_touch)
def test_launch_visualizer_canceled(tmpdir):
agent = MockAgent()
gymlike_env = MagicMock()
action_meanings = {0: 'hoge', 1: 'fuga'}
os.makedirs(os.path.join(tmpdir, 'log_space'))
with change_execution_dir(tmpdir):
with patch('chainerrl_visualizer.launcher.input', side_effect='n'), \
patch('chainerrl_visualizer.launcher.inspect_agent') as inspect_agent:
launch_visualizer(agent, gymlike_env, action_meanings)
inspect_agent.assert_not_called()
@pytest.mark.parametrize('act_list,err_kw', [
((1, 'a'), 'to be dictionary'),
({}, 'number of entries'),
({0: 'a', 2: 'b'}, 'Invalid key index')
])
def test_validate_action_meanings(act_list, err_kw):
with pytest.raises(Exception) as excinfo:
launcher.validate_action_meanings(act_list)
assert err_kw in str(excinfo.value)
def test_modify_gym_env_render():
gym_env, render = MagicMock(), MagicMock()
gym_env.render = render
launcher.modify_gym_env_render(gym_env)
gym_env.render()
render.assert_called_once_with(mode='rgb_array')
@pytest.mark.parametrize(
'input,expected', [('y', True), ('Y', True), ('n', False), ('N', False), ('x', False)])
def test_prepare_log_directory_existed(tmpdir, input, expected):
log_dir = os.path.join(tmpdir, 'log_space', 'rollouts')
os.makedirs(log_dir)
with patch('chainerrl_visualizer.launcher.input', side_effect=input):
prepared = launcher.prepare_log_directory(log_dir)
assert prepared == expected
class UnsupportedDistribution(chainerrl.distribution.SoftmaxDistribution):
def __init__(self):
pass
class UnsupportedActionValue(chainerrl.action_value.DiscreteActionValue):
def __init__(self):
pass
@pytest.mark.parametrize('outs,err_kw', [
(numpy.zeros(1), 'Model output type of ndarray'),
(chainer.Variable(numpy.zeros(1)), 'Outputs of model do not contain'),
(UnsupportedDistribution(), 'Distribution type UnsupportedDistribution'),
(UnsupportedActionValue(), 'ActionValue type UnsupportedActionValue')
])
def test_inspect_agent(outs, err_kw):
agent = MagicMock()
agent.model = MagicMock(side_effect=lambda *args: outs)
gymlike_env = MagicMock()
contains_rnn = MagicMock()
with pytest.raises(Exception) as excinfo:
launcher.inspect_agent(agent, gymlike_env, contains_rnn)
assert err_kw in str(excinfo.value)
def test_inspect_agent_recurrent_model():
    # In this test case, agent, model, and outs are deliberately nonsensical;
    # they exist only to exercise the error-handling logic.
outs = chainerrl.action_value.DiscreteActionValue(chainer.Variable(numpy.ones(2).reshape(1, 2)))
agent = MagicMock()
    # Check that the function works when the agent has no 'xp' attribute.
    # If the agent checks grow beyond this single case, split this into a separate test.
delattr(agent, 'xp')
agent.model = MagicMock(spec=chainerrl.recurrent.RecurrentChainMixin, return_value=outs)
gymlike_env = MagicMock()
contains_rnn = MagicMock()
launcher.inspect_agent(agent, gymlike_env, contains_rnn)
|
11573345
|
from __future__ import absolute_import, division, print_function
import logging
import sys
logging.basicConfig(
stream=sys.stdout,
level=logging.DEBUG,
format='%(asctime)s %(name)s-%(levelname)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
import numpy as np
import mdtraj as md
from demystifying import feature_extraction as fe, visualization, traj_preprocessing as tp
logger = logging.getLogger("VSD")
def run_VSD(working_dir="bio_input/VSD/", cluster_for_prediction=None, dt_for_prediction=10, multiclass=False):
data = np.load(working_dir + 'frame_i_j_contacts_dt1.npy')
cluster_indices = np.loadtxt(working_dir + 'clusters_indices.dat')
kwargs = {
'samples': data,
'labels': cluster_indices,
'filter_by_distance_cutoff': True,
'use_inverse_distances': True,
'n_splits': 3,
'n_iterations': 5,
'scaling': True,
'shuffle_datasets': True
}
if cluster_for_prediction is not None:
cluster_traj = md.load("{}/{}_dt{}.xtc".format(working_dir, cluster_for_prediction, dt_for_prediction),
top=working_dir + "alpha.pdb")
other_samples, _, _ = tp.to_distances(
traj=cluster_traj,
scheme="closest-heavy",
pairs="all-residues",
use_inverse_distances=True,
ignore_nonprotein=True,
periodic=True)
logger.debug("Loaded cluster samples for prediction of shape %s for state %s", other_samples.shape,
cluster_for_prediction)
cluster_traj = None # free memory
else:
other_samples = False
feature_extractors = [
fe.RandomForestFeatureExtractor(
classifier_kwargs={
'n_estimators': 100},
one_vs_rest=not multiclass,
**kwargs),
fe.KLFeatureExtractor(bin_width=0.1, **kwargs),
fe.MlpFeatureExtractor(
classifier_kwargs={
'hidden_layer_sizes': [100, ],
'max_iter': 100000,
'alpha': 0.0001},
activation="relu",
one_vs_rest=not multiclass,
per_frame_importance_samples=other_samples,
per_frame_importance_labels=None, # If None the method will use predicted labels for LRP
per_frame_importance_outfile="{}/mlp_perframe_importance_{}/"
"VSD_mlp_perframeimportance_{}_dt{}.txt".format(working_dir,
"multiclass" if multiclass else "binaryclass",
cluster_for_prediction,
dt_for_prediction),
**kwargs)
]
common_peaks = {
"R1-R4": [294, 297, 300, 303],
"K5": [306],
"R6": [309],
}
do_computations = True
filetype = "svg"
for extractor in feature_extractors:
logger.info("Computing relevance for extractors %s", extractor.name)
extractor.extract_features()
p = extractor.postprocessing(working_dir=working_dir,
pdb_file=working_dir + "alpha.pdb",
filter_results=False)
if do_computations:
p.average()
p.evaluate_performance()
p.persist()
else:
p.load()
visualization.visualize([[p]],
show_importance=True,
show_performance=False,
show_projected_data=False,
highlighted_residues=common_peaks,
outfile=working_dir + "{extractor}/importance_per_residue_{suffix}.{filetype}".format(
suffix="",
extractor=extractor.name,
filetype=filetype))
if do_computations:
visualization.visualize([[p]],
show_importance=False,
show_performance=True,
show_projected_data=False,
outfile=working_dir + "{extractor}/performance_{suffix}.{filetype}".format(
extractor=extractor.name,
suffix="",
filetype=filetype))
visualization.visualize([[p]],
show_importance=False,
show_performance=False,
show_projected_data=True,
outfile=working_dir + "{extractor}/projected_data_{suffix}.{filetype}".format(
extractor=extractor.name,
suffix="",
filetype=filetype))
logger.info("Done")
if __name__ == "__main__":
run_VSD(cluster_for_prediction="gamma")
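# To run multi-class feature extraction instead of the default one-vs-rest setup
# (sketch, same input files assumed):
#   run_VSD(cluster_for_prediction="gamma", multiclass=True)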
|
11573383
|
import logging
from sciencebeam_parser.models.training_data import (
AbstractTeiTrainingDataGenerator
)
LOGGER = logging.getLogger(__name__)
# based on:
# https://github.com/kermitt2/grobid/blob/0.7.0/grobid-core/src/main/java/org/grobid/core/engines/TableParser.java
ROOT_TRAINING_XML_ELEMENT_PATH = ['text', 'figure[@type="table"]']
# Note:
# The table training data generation is different to figures in
# how the following labels are mapped: `content`, `other`, `note`
TRAINING_XML_ELEMENT_PATH_BY_LABEL = {
'<figure_head>': ROOT_TRAINING_XML_ELEMENT_PATH + ['head'],
'<label>': ROOT_TRAINING_XML_ELEMENT_PATH + ['head', 'label'],
'<figDesc>': ROOT_TRAINING_XML_ELEMENT_PATH + ['figDesc'],
'<content>': ROOT_TRAINING_XML_ELEMENT_PATH + ['table'],
'<other>': ROOT_TRAINING_XML_ELEMENT_PATH + ['other'],
'<note>': ROOT_TRAINING_XML_ELEMENT_PATH + ['note']
}
class TableTeiTrainingDataGenerator(AbstractTeiTrainingDataGenerator):
DEFAULT_TEI_FILENAME_SUFFIX = '.table.tei.xml'
DEFAULT_DATA_FILENAME_SUFFIX = '.table'
def __init__(self):
super().__init__(
root_training_xml_element_path=ROOT_TRAINING_XML_ELEMENT_PATH,
training_xml_element_path_by_label=TRAINING_XML_ELEMENT_PATH_BY_LABEL,
use_tei_namespace=False,
root_tag='tei',
default_tei_filename_suffix=(
TableTeiTrainingDataGenerator.DEFAULT_TEI_FILENAME_SUFFIX
),
default_data_filename_suffix=(
TableTeiTrainingDataGenerator.DEFAULT_DATA_FILENAME_SUFFIX
),
default_tei_sub_directory='table/corpus/tei',
default_data_sub_directory='table/corpus/raw'
)
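# Usage sketch (assumption: this generator is driven like the other TEI training data
# generators sharing the AbstractTeiTrainingDataGenerator base class); instantiation alone is:
#   generator = TableTeiTrainingDataGenerator()
# The surrounding training-data pipeline then feeds it layout documents to produce the
# .table.tei.xml / .table files under table/corpus/tei and table/corpus/raw.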
|
11573398
|
from model_bakery import baker
from rest_framework import status
from rest_framework.authtoken.models import Token
from rest_framework.test import APITestCase
from restaurants.models import Restaurant
from .models import User
email = '<EMAIL>'
password = '<PASSWORD>'
nickname = 'nickname'
class UserRegisterTestCase(APITestCase):
url = '/users'
def test_should_create(self):
data = {
'email': email,
'password': password,
'nickname': None
}
response = self.client.post(self.url, data)
r = response.data
self.assertEqual(response.status_code, status.HTTP_201_CREATED, r)
self.assertEqual(r['email'], data['email'])
self.assertEqual(r['nickname'], data['nickname'])
self.assertFalse(User.objects.get(id=r['id']).is_active)
def test_without_email(self):
response = self.client.post(self.url, {'email': '', 'password': password})
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_email_format(self):
wrong_email = 'wrong@format'
response = self.client.post(self.url, {'email': wrong_email, 'password': password})
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_without_password(self):
response = self.client.post(self.url, {'email': email, 'password': ''})
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_with_duplicated_email(self):
duplicated_email = '<EMAIL>'
self.user = baker.make(User, email=duplicated_email, password=password)
response = self.client.post(self.url, {'email': duplicated_email, 'password': password})
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
class UserRetrieveTestCase(APITestCase):
def setUp(self) -> None:
self.user = baker.make('users.User')
baker.make('users.Profile', user=self.user)
def test_success(self):
self.client.force_authenticate(user=self.user)
response = self.client.get(f'/users/{self.user.id}')
r = response.data
self.assertEqual(response.status_code, status.HTTP_200_OK, r)
self.assertEqual(self.user.profile.nickname, r['nickname'])
self.assertEqual(self.user.profile.phone_num, r['phone_num'])
self.assertEqual(self.user.email, r['email'])
class UserAuthorizePhoneNumTestCase(APITestCase):
def setUp(self) -> None:
self.user = baker.make('users.User', is_active=False)
baker.make('users.Profile', user=self.user)
self.data = {
'phone_num': '010-1111-1111'
}
self.url = f'/users/{self.user.id}/authorize_phone_num'
def test_success(self):
response = self.client.patch(self.url, data=self.data)
r = response.data
self.assertEqual(response.status_code, status.HTTP_200_OK, r)
        authorized_user = User.objects.get(id=r['id'])
        self.assertEqual(self.data['phone_num'], r['phone_num'])
        self.assertEqual(authorized_user.profile.phone_num, r['phone_num'])
        self.assertTrue(authorized_user.is_active)
self.assertEqual(self.user.email, r['email'])
self.assertEqual(self.user.profile.nickname, r['nickname'])
# def test_fail_401(self):
# response = self.client.patch(self.url, data=self.data)
# self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED, response.data)
#
# def test_fail_403(self):
# self.client.force_authenticate(user=baker.make('users.User'))
# response = self.client.patch(self.url, data=self.data)
# self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data)
class UserUpdatePasswordTestCase(APITestCase):
def setUp(self) -> None:
self.old_password = '<PASSWORD>'
self.new_password = '<PASSWORD>'
self.user = baker.make('users.User', email=email, password=self.old_password)
baker.make('users.Profile', user=self.user)
        self.data = {'password': self.new_password}
self.url = f'/users/{self.user.id}/update_password'
def test_success(self):
self.client.force_authenticate(user=self.user)
response = self.client.patch(self.url, data=self.data)
self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
        response = self.client.post('/users/login', {'email': email, 'password': self.new_password})
self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
def test_fail_401(self):
response = self.client.patch(self.url, data=self.data)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED, response.data)
def test_fail_403(self):
self.client.force_authenticate(user=baker.make('users.User'))
response = self.client.patch(self.url, data=self.data)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data)
class UserUpdateNicknameTestCase(APITestCase):
def setUp(self) -> None:
self.old_nickname = 'old'
self.new_nickname = 'new'
self.user = baker.make('users.User')
baker.make('users.Profile', user=self.user, nickname=self.old_nickname)
self.data = {'nickname': self.new_nickname}
def test_success(self):
self.client.force_authenticate(user=self.user)
response = self.client.patch(f'/users/{self.user.id}', data=self.data)
r = response.data
self.assertEqual(response.status_code, status.HTTP_200_OK, r)
def test_fail_put(self):
self.client.force_authenticate(user=self.user)
response = self.client.put(f'/users/{self.user.id}', data=self.data)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED, response.data)
def test_fail_401(self):
response = self.client.patch(f'/users/{self.user.id}', data=self.data)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED, response.data)
def test_fail_403(self):
self.client.force_authenticate(user=baker.make('users.User'))
response = self.client.patch(f'/users/{self.user.id}', data=self.data)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data)
class UserLoginTestCase(APITestCase):
url = '/users/login'
def setUp(self) -> None:
self.user = baker.make('users.User', email=email, password=password)
def test_with_correct_info(self):
response = self.client.post(self.url, {'email': email, 'password': password})
r = response.data
self.assertEqual(response.status_code, status.HTTP_201_CREATED, r)
self.assertTrue('token' in r)
self.assertTrue(Token.objects.filter(user=self.user, key=r['token']).exists())
def test_without_password(self):
response = self.client.post(self.url, {'email': email})
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_with_wrong_password(self):
response = self.client.post(self.url, {'email': email, 'password': '<PASSWORD>'})
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_without_email(self):
response = self.client.post(self.url, {'password': password})
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_with_wrong_email(self):
response = self.client.post(self.url, {'email': '<EMAIL>', 'password': password})
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
class UserLogoutTestCase(APITestCase):
url = '/users/logout'
def setUp(self) -> None:
self.user = baker.make('users.User', email=email, password=password)
self.token = baker.make(Token, user=self.user)
def test_should_delete_token(self):
self.client.force_authenticate(user=self.user, token=self.token)
response = self.client.delete(self.url)
self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
self.assertFalse(Token.objects.filter(user_id=self.user.id).exists())
def test_should_denied_delete_token(self):
response = self.client.delete(self.url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertTrue(Token.objects.filter(user_id=self.user.id).exists())
class BookmarkListTest(APITestCase):
def setUp(self) -> None:
self.users = baker.make('users.User', _quantity=2)
baker.make('users.Bookmark', user=self.users[0])
baker.make('users.Bookmark', user=self.users[1])
def test_success(self):
self.client.force_authenticate(user=self.users[0])
response = self.client.get('/bookmarks')
self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
for r in response.data['results']:
self.assertTrue(Restaurant.objects.filter(id=r['id'], bookmark__user=self.users[0]).exists())
owner_comment_count = r['owner_comment_count']
self.assertTrue(owner_comment_count != 0)
|