| id (string, 3–8 chars) | content (string, 100–981k chars) |
|---|---|
11474313
|
import numpy as np
class ParticlesFilter:
def __init__(self, environment_config, robot_initial_config, predictor, params):
"""
Initialize the particles filter parameters.
:param environment_config: EnvironmentConfiguration
:param robot_initial_config: RobotConfiguration
:param predictor: PredictorNNSensorsNotNormalized
:param params: Params
"""
# params received from --cli execution
self.params = params
        # mean of the Gaussian noise
self.mu = 0
# standard deviation for translation
self.sigma_xy = self.params.SIGMA_XY
# standard deviation for rotation
self.sigma_theta = self.params.SIGMA_THETA
# environment configuration parameters
self.environment_config = environment_config
# robot initial state
self.robot_previous_config = robot_initial_config
# number of particles: 300,500,1000, etc...
self.number_of_particles = self.params.PARTICLES_NUMBER
# this object contains the trained models
self.predictor = predictor
# weights or importance factors for M particles
self.weights = [1/self.number_of_particles] * self.number_of_particles
# initial state for all the particles
if not self.params.GLOBAL_LOCALIZATION:
x = np.random.normal(robot_initial_config.x, self.sigma_xy, self.number_of_particles)
y = np.random.normal(robot_initial_config.y, self.sigma_xy, self.number_of_particles)
theta = np.random.normal(robot_initial_config.theta, self.sigma_theta, self.number_of_particles)
else:
x = np.random.uniform(0, environment_config.environment_dim_x, self.number_of_particles)
y = np.random.uniform(0, environment_config.environment_dim_y, self.number_of_particles)
theta = np.random.uniform(0, 360, self.number_of_particles)
# define the set of particles as [x, y, theta, weights]
self.particles = np.array([x, y, theta, self.weights])
def reset_particles(self, number_particles, sigma_xy, sigma_theta, position):
"""
This function is useful for changing the algorithm parameters while running the simulation.
It resets the number of particles and the noise.
:param number_particles: int
:param sigma_xy: float
:param sigma_theta: float
:param position: RobotConfiguration
        :return: brand new set of particles [x, y, theta, weights]
"""
# reset parameters
self.number_of_particles = int(number_particles)
self.sigma_xy = float(sigma_xy)
self.sigma_theta = float(sigma_theta)
# reset weight
self.weights = [1/self.number_of_particles for i in range(self.number_of_particles)]
# reset particles state
x = np.random.normal(position.x, self.sigma_xy, self.number_of_particles)
y = np.random.normal(position.y, self.sigma_xy, self.number_of_particles)
theta = np.random.normal(position.theta, self.sigma_theta, self.number_of_particles)
self.particles = np.array([x, y, theta, self.weights])
def get_particles(self, delta_movement, sensors, resampling=True):
"""
This is the CORE of the particles filter algorithm.
It generates a new set of particles
given the control action delta_movement, the sensors
measurements and it applies resampling
in the case that resampling parameter is set to true.
:param delta_movement: Array containing the
control action [d_x, d_y, d_theta]
:param sensors: Array containing sensor measurements
:param resampling: Boolean if true it applies resampling
:return: [x, y, theta, self.weights] set of particles
"""
# move the particles
self._apply_movement_to(self.particles, delta_movement)
particle_transpose = self.particles[0:3].transpose()
# calculate weights based on the models and the sensor measurements
self.particles[3], bad_data = self.predictor.get_particles_weight(particle_transpose,
sensors)
# normalize weights
self.normalize_weights()
# resampling based on weights
if resampling:
indexes = range(0, self.number_of_particles)
indexes = np.random.choice(indexes,
self.number_of_particles,
p=self.particles[3],
replace=True)
self.particles[0:3] = particle_transpose[indexes].transpose()
return self.particles
def normalize_weights(self):
"""
Normalize weights to sum up to 1
"""
self.particles[3] = self.particles[3] / self.particles[3].min()
self.particles[3] = self.particles[3] / self.particles[3].sum()
def _apply_movement_to(self, particles, delta_robot_config):
"""
Apply the control action delta_robot_config to all the particles + some noise.
:param particles: [x, y, theta, weight]
:param delta_robot_config: [x, y, theta]
"""
# get noise
noise_x = np.random.normal(self.mu, self.sigma_xy, self.number_of_particles)
noise_y = np.random.normal(self.mu, self.sigma_xy, self.number_of_particles)
noise_theta = np.random.normal(self.mu, self.sigma_theta, self.number_of_particles)
# move particles
particles[0] += delta_robot_config[0] + noise_x
particles[1] += delta_robot_config[1] + noise_y
particles[2] += delta_robot_config[2] + noise_theta
        # clip positions so particles stay inside the arena, and wrap orientation into [0, 360) degrees
particles[0] = np.clip(particles[0], 0, self.environment_config.environment_dim_x)
particles[1] = np.clip(particles[1], 0, self.environment_config.environment_dim_y)
particles[2] = (360 + particles[2]) % 360
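
# Usage sketch (hypothetical): env_config, robot_config, predictor and params
# stand in for the EnvironmentConfiguration, RobotConfiguration,
# PredictorNNSensorsNotNormalized and Params objects defined elsewhere in this
# project. The loop shows the intended predict/update cycle of the filter.
#
#   pf = ParticlesFilter(env_config, robot_config, predictor, params)
#   for delta_movement, sensors in robot_log:  # [d_x, d_y, d_theta], readings
#       particles = pf.get_particles(delta_movement, sensors, resampling=True)
#       pose_estimate = particles[0:3].mean(axis=1)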
|
11474360
|
from faster_than_requests import scraper
print(scraper(["https://nim-lang.org", "https://nim-lang.org"], html_tag="h1", case_insensitive=False, deduplicate_urls=False, threads=False))
|
11474361
|
from ....widgets import *
class USPhoneNumberInput(InputMask):
mask = {
'mask': '999-999-9999',
}
class USSocialSecurityNumberInput(InputMask):
mask = {
'mask': '999-99-9999',
}
class USZipCodeInput(InputMask):
mask = {
'mask': '99999-9999',
}
class USDecimalInput(DecimalInputMask):
thousands_sep = ','
decimal_sep = '.'
|
11474371
|
from constants import load_model
from model import load_gen_model, setUpModel
if __name__ == '__main__':
if load_model:
load_gen_model()
else:
setUpModel()
|
11474405
|
import torch
def convert_pytcv_model(model, model_pytcv):
    """Copy weights from model_pytcv into model, matching state-dict entries by order."""
    sd = model.state_dict()
    sd_pytcv = model_pytcv.state_dict()
    convert_dict = {}
    for key, key_pytcv in zip(sd.keys(), sd_pytcv.keys()):
        clean_key = '.'.join(key.split('.')[:-1])
        clean_key_pytcv = '.'.join(key_pytcv.split('.')[:-1])
        convert_dict[clean_key] = clean_key_pytcv
        if sd[key].shape != sd_pytcv[key_pytcv].shape:
            # shapes disagree: report the mismatch and drop into the debugger
            print(key, sd[key].shape, key_pytcv, sd_pytcv[key_pytcv].shape)
            import pdb; pdb.set_trace()
        else:
            sd[key].copy_(sd_pytcv[key_pytcv])
    return model
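
# Minimal sketch of how this might be exercised (the networks below are
# illustrative): copy weights between two structurally identical models and
# verify that one entry was transferred.
if __name__ == '__main__':
    import torch.nn as nn
    a = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
    b = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
    a = convert_pytcv_model(a, b)
    assert torch.equal(a.state_dict()['0.weight'], b.state_dict()['0.weight'])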
|
11474430
|
import random
a = random.randint(1, 100)
print("let's start the game. press enter")
print("you have only 5 chances to win the game")
for chance in range(1, 6):
    guess = int(input(f"enter guess no. {chance} - "))
    if guess > a:
        print("the no. you entered is greater than the required no.")
    elif guess < a:
        print("the no. you entered is less than the required no.")
    else:
        print("you chose the right no.")
        break
else:
    print("you lost the game")
|
11474436
|
from os_dbnetget.utils import binary_stdout
from os_dbnetget.commands.qdb import QDB
from os_dbnetget.commands.qdb.get.processor import Processor
class Get(QDB):
HELP = 'get data from qdb'
DESCRIPTION = 'Get data from qdb'
def __init__(self, config=None):
super(Get, self).__init__(config)
self.config.cmd = 'get'
self.config.processor = Processor(self.config)
def description(self):
return 'Get data from qdb\n engine: [%s]' % self.ENGINE_NAME
def process_arguments(self, args):
super(Get, self).process_arguments(args)
output = None
if args.output is None:
output = binary_stdout
else:
if not hasattr(args, 'output_type'):
output = open(args.output, 'ab')
elif args.output_type == 'single':
output = open(args.output, 'ab')
elif args.output_type == 'rotate':
from os_rotatefile import open_file
output = open_file(args.output, 'w')
self.config.output = output
def add_arguments(self, parser):
super(Get, self).add_arguments(parser)
parser.add_argument('-o', '--output',
help='output file (default: stdout)',
nargs='?',
dest='output',
)
try:
import os_rotatefile
parser.add_argument('-t', '--output-type',
help='output file type (default: single)',
choices=('single', 'rotate'),
default='single',
dest='output_type',
)
        except ImportError:
            pass
def run(self, args):
try:
super(Get, self).run(args)
finally:
try:
self.config.output.close()
            except Exception:
                pass
|
11474458
|
import argparse
import chainer
from chainer import iterators
import chainermn
from chainercv.utils import apply_to_iterator
from chainercv.utils import ProgressHook
from eval_semantic_segmentation import models
from eval_semantic_segmentation import setup
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--dataset', choices=('cityscapes', 'ade20k', 'camvid', 'voc'))
parser.add_argument('--model', choices=sorted(models.keys()))
parser.add_argument('--pretrained-model')
parser.add_argument('--batchsize', type=int)
parser.add_argument('--input-size', type=int, default=None)
args = parser.parse_args()
comm = chainermn.create_communicator('pure_nccl')
device = comm.intra_rank
dataset, eval_, model, batchsize = setup(
args.dataset, args.model, args.pretrained_model,
args.batchsize, args.input_size)
chainer.cuda.get_device_from_id(device).use()
model.to_gpu()
    if comm.rank != 0:
apply_to_iterator(model.predict, None, comm=comm)
return
it = iterators.MultithreadIterator(
dataset, batchsize * comm.size, repeat=False, shuffle=False)
in_values, out_values, rest_values = apply_to_iterator(
model.predict, it, hook=ProgressHook(len(dataset)), comm=comm)
# Delete an iterator of images to save memory usage.
del in_values
eval_(out_values, rest_values)
if __name__ == '__main__':
main()
|
11474488
|
import unittest
from katas.beta.sum_of_all_arguments import sum_all
class SumAllTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(sum_all(6, 2, 3), 11)
def test_equals_2(self):
self.assertEqual(sum_all(756, 2, 1, 10), 769)
def test_equals_3(self):
self.assertEqual(sum_all(76856, -32, 1981, 1076), 79881)
def test_equals_4(self):
self.assertEqual(sum_all(7, -3452, 1981, 1076), -388)
def test_false(self):
self.assertFalse(sum_all(1, -32, "codewars", 1076))
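
# A minimal implementation consistent with these tests (a sketch, not the
# actual katas.beta solution): sum the arguments, returning False when any
# argument is not a number.
#
#   def sum_all(*args):
#       if not all(isinstance(x, (int, float)) for x in args):
#           return False
#       return sum(args)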
|
11474542
|
import os.path
from airflow_plugins.operators import UnzipOperator
def test_unzip_operator():
folder = os.path.dirname(os.path.realpath(__file__))
op = UnzipOperator(task_id='dag_task',
path_to_zip_folder=folder,
path_to_zip_folder_pattern='*.py')
try:
op.execute({})
    except Exception:
        # swallow the error: the target is not a zip file
        pass
# check that the file is populated
assert op.path_to_zip_file is not None
|
11474587
|
from sys import stdin
a, b, c = map(int, stdin.readline().strip().split())
if a*a + b*b == c*c or a*a + c*c == b*b or b*b + c*c == a*a:
    print("yes")
elif a + b <= c or a + c <= b or b + c <= a:
    print("not a triangle")
else:
    print("no")
|
11474607
|
import numpy as np
from deeplookup import qnn, vis
from deeplookup.env import MalwareEnv
ROOT_DIR = ".h5"
dqn, dqn_history = qnn.fit_dqn(MalwareEnv(), root_dir=ROOT_DIR)
ddqn, ddqn_history = qnn.fit_dqn(MalwareEnv(), dueling=True, root_dir=ROOT_DIR)
ddpg, ddpg_history = qnn.fit_ddpg(MalwareEnv(), root_dir=ROOT_DIR)
def main():
for name, agent, history in [
("dqn", dqn, dqn_history),
("ddqn", ddqn, ddqn_history),
("ddpg", ddpg, ddpg_history),
]:
env = MalwareEnv(log=True)
test = agent.test(
env,
nb_episodes=1000,
visualize=False,
start_step_policy=env.start_step_policy,
nb_max_start_steps=10,
nb_max_episode_steps=100,
)
print(
f"name: {name} - "
f"avg: {np.average(test.history['episode_reward'])} - "
f"min: {np.min(test.history['episode_reward'])} - "
f"max: {np.max(test.history['episode_reward'])}"
)
p = vis.render_moving_average(
history,
"episode_reward",
"Среднее вознаграждение за эпизод",
"Вознаграждение",
)
p.figure.savefig(f"{ROOT_DIR}/images/{name}-reward.png", **vis.SAVE_KW)
p = vis.render_moving_average(history, "loss", "Ошибка обучения", "Ошибка")
p.figure.savefig(f"{ROOT_DIR}/images/{name}-loss.png", **vis.SAVE_KW)
p = vis.render_moving_average(
history, "mean_q", "Средняя ценность действий (Q)", "Ценность"
)
p.figure.savefig(f"{ROOT_DIR}/images/{name}-mean-q.png", **vis.SAVE_KW)
p = vis.render_actions_histogram(env)
p.figure.savefig(f"{ROOT_DIR}/images/{name}-hist.png", **vis.SAVE_KW)
if __name__ == "__main__":
main()
|
11474683
|
from .testbase import display_tree
from binarytree import AvlTree
def do_task():
a = AvlTree()
for value in [10, 20, 30, 40, 50, 25, 100, 28, 140]:
a.insert(value)
print(' display tree a:')
display_tree(a)
###########################################
b = a.clone()
b.insert(22)
b.insert(29)
print('\n\n\n display tree b:')
display_tree(b)
print('\n\n height tree b:', b.height())
###########################################
print('\n\n\n display tree a (again):')
display_tree(a)
###########################################
c = a.clone()
c.clear()
print('\n\n\n display tree c:')
display_tree(c)
print()
|
11474718
|
from setuptools import find_packages, setup
setup(
name="ray_lightning",
packages=find_packages(where=".", include="ray_lightning*"),
version="0.1.2",
author="<NAME>",
description="Ray distributed plugins for Pytorch Lightning.",
long_description="Custom Pytorch Lightning distributed plugins "
"built on top of distributed computing framework Ray.",
url="https://github.com/ray-project/ray_lightning_accelerators",
install_requires=["pytorch-lightning", "ray"])
|
11474725
|
import json
import numpy as np
from model import create_model
class Generator:
def __init__(self, weights_file, id2token_file, embedding_size, hidden_size):
self._model, self._id2token = self._load_model(
weights_file,
id2token_file,
embedding_size,
hidden_size
)
self._token2id = {token: id for id, token in enumerate(self._id2token)}
def generate(self,
seeds,
forbidden_tokens=(),
min_res_len=3, max_res_len=30,
callback=None
):
results = []
forbidden_ids = [self._token2id[token] if token in self._token2id else self._token2id['<unk>'] \
for token in forbidden_tokens]
for i, seed_tokens in enumerate(seeds):
seed_ids = [self._token2id[token] if token in self._token2id else self._token2id['<unk>'] \
for token in seed_tokens]
res = self._gen_seq(
seed_ids,
min_res_len, max_res_len,
end_tokens=[self._token2id['<eoc>']],
forbidden_ids=forbidden_ids
)
res_tokens = [self._id2token[id] for id in res]
results.append(res_tokens)
if callback:
callback(i, res_tokens, seed_tokens)
return results
def _load_model(self, weights_file, id2token_file, embedding_size, hidden_size):
with open(id2token_file, 'rb') as f:
id2token = json.loads(f.read().decode('utf-8'))
model = create_model(
seq_len=1,
n_input_nodes=len(id2token),
n_embedding_nodes=embedding_size,
n_hidden_nodes=hidden_size,
batch_size=1,
stateful=True
)
model.load_weights(weights_file)
return (model, id2token)
def _gen_seq(self,
seed,
min_len, max_len,
end_tokens,
forbidden_ids
):
generated = []
for id in seed:
ni_prob = self._model.predict(np.array(id)[None, None])[0, 0]
while True:
ni_prob /= ni_prob.sum()
next_id = np.random.choice(a=ni_prob.shape[-1], p=ni_prob)
generated.append(next_id)
if len(generated) >= min_len and next_id in end_tokens:
break
if next_id in forbidden_ids:
self._model.reset_states()
return self._gen_seq(seed, min_len, max_len, end_tokens, forbidden_ids)
ni_prob = self._model.predict(np.array(next_id)[None, None])[0, 0]
self._model.reset_states()
if len(generated) > max_len:
return self._gen_seq(seed, min_len, max_len, end_tokens, forbidden_ids)
return generated
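
# Usage sketch (file names and sizes below are placeholders, not values from
# the original project):
#
#   gen = Generator(
#       weights_file='weights.h5',
#       id2token_file='id2token.json',
#       embedding_size=128,
#       hidden_size=256,
#   )
#   results = gen.generate([['<bos>', 'hello']], forbidden_tokens=('<unk>',))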
|
11474800
|
class MockRequestGET:
"""
Mocked GET response for requests.get
"""
    def __init__(self, url):
        # requests' Response.text is a string, so read the file contents
        # instead of storing the open file object
        with open(url) as f:
            self.text = f.read()
class MockMessage:
"""
Mocked `Message` object from slackbot.
Passed to decorated plugin functions as first param.
"""
def __init__(self, debug=False):
self.debug = debug
self._body = {'channel': '#test_channel'}
def reply(self, text):
if self.debug:
print(text)
return text
def send(self, text):
if self.debug:
print(text)
return text
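
# Sketch of how these mocks might be wired into a test (pytest's monkeypatch
# is an assumption; the plugin module and handler names are illustrative):
#
#   def test_plugin(monkeypatch):
#       monkeypatch.setattr('requests.get', lambda url: MockRequestGET(url))
#       message = MockMessage(debug=True)
#       assert my_plugin.handler(message) == 'expected reply'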
|
11474813
|
import json
import warnings
import geopandas
from geopandas.io.arrow import (
_create_metadata,
_decode_metadata,
_encode_metadata,
_validate_dataframe,
)
from pyarrow import parquet
from .extension_types import construct_geometry_array
def _arrow_to_geopandas(table):
# NOTE this is copied and slightly adapted from geopandas
"""
Helper function with main, shared logic for read_parquet/read_feather.
"""
metadata = table.schema.metadata
if metadata is None or b"geo" not in metadata:
raise ValueError(
"""Missing geo metadata in Parquet/Feather file.
Use pandas.read_parquet/read_feather() instead."""
)
try:
metadata = _decode_metadata(metadata.get(b"geo", b""))
except (TypeError, json.decoder.JSONDecodeError):
raise ValueError("Missing or malformed geo metadata in Parquet/Feather file")
# _validate_metadata(metadata)
# Find all geometry columns that were read from the file. May
# be a subset if 'columns' parameter is used.
geometry_columns = list(set(table.column_names).intersection(metadata["columns"]))
if not len(geometry_columns):
raise ValueError(
"""No geometry columns are included in the columns read from
the Parquet/Feather file. To read this file without geometry columns,
use pandas.read_parquet/read_feather() instead."""
)
geometry = metadata["primary_column"]
# Missing geometry likely indicates a subset of columns was read;
# promote the first available geometry to the primary geometry.
if len(geometry_columns) and geometry not in geometry_columns:
geometry = geometry_columns[0]
# if there are multiple non-primary geometry columns, raise a warning
if len(geometry_columns) > 1:
warnings.warn(
"Multiple non-primary geometry columns read from Parquet/Feather "
"file. The first column read was promoted to the primary geometry."
)
# convert attributes
df = table.drop(geometry_columns).to_pandas()
# Convert the geometry columns to geopandas format
for col in geometry_columns:
df[col] = geopandas.array.GeometryArray(
table[col].chunk(0).to_numpy(), crs=metadata["columns"][col]["crs"]
)
return geopandas.GeoDataFrame(df, geometry=geometry)
def read_parquet(path, columns=None, **kwargs):
table = parquet.read_table(path, columns=columns, **kwargs)
return _arrow_to_geopandas(table)
def _geopandas_to_arrow(df, index=None):
# NOTE this is copied and slightly adapted from geopandas
"""
Helper function with main, shared logic for to_parquet/to_feather.
"""
from pyarrow import Table
_validate_dataframe(df)
# create geo metadata before altering incoming data frame
geo_metadata = _create_metadata(df)
# TODO this hard-codes "geometry" column
# convert attributes to pyarrow
df_attr = df.drop(columns=["geometry"])
table = Table.from_pandas(df_attr, preserve_index=index)
# convert geometry
geom_arr = construct_geometry_array(df.geometry.array.data)
table = table.append_column("geometry", geom_arr)
encoding = geom_arr.type.extension_name.split(".")[1]
geo_metadata["columns"]["geometry"]["encoding"] = encoding
# Store geopandas specific file-level metadata
# This must be done AFTER creating the table or it is not persisted
metadata = table.schema.metadata
metadata.update({b"geo": _encode_metadata(geo_metadata)})
return table.replace_schema_metadata(metadata)
def to_parquet(df, path, index=None, compression="snappy", **kwargs):
table = _geopandas_to_arrow(df, index=index)
parquet.write_table(table, path, compression=compression, **kwargs)
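
# Round-trip sketch (assumes a geopandas version exposing the arrow IO helpers
# imported above):
#
#   gdf = geopandas.GeoDataFrame(
#       {"name": ["a", "b"]},
#       geometry=geopandas.points_from_xy([0, 1], [0, 1]),
#       crs="EPSG:4326",
#   )
#   to_parquet(gdf, "points.parquet")
#   assert read_parquet("points.parquet").shape == gdf.shape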
|
11474818
|
import os
from django.core.management import call_command
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sample_project.settings")
import django
print("Django Version: " + django.__version__)
django.setup()
call_command('migrate')
# call_command('makemigrations')
|
11474825
|
import numpy as np
from gym.spaces import Box, Discrete
from gym import spaces
def check_homogenize_spaces(all_spaces):
assert len(all_spaces) > 0
space1 = all_spaces[0]
assert all(
isinstance(space, space1.__class__) for space in all_spaces
), "all spaces to homogenize must be of same general shape"
if isinstance(space1, spaces.Box):
for space in all_spaces:
assert isinstance(
space, spaces.Box
), "all spaces for homogenize must be either Box or Discrete, not a mix"
assert len(space1.shape) == len(
space.shape
), "all spaces to homogenize must be of same shape"
assert (
space1.dtype == space.dtype
), "all spaces to homogenize must be of same dtype"
elif isinstance(space1, spaces.Discrete):
for space in all_spaces:
assert isinstance(
space, spaces.Discrete
), "all spaces for homogenize must be either Box or Discrete, not a mix"
else:
assert False, "homogenization only supports Discrete and Box spaces"
def pad_to(arr, new_shape, pad_value):
old_shape = arr.shape
if old_shape == new_shape:
return arr
pad_size = [ns - os for ns, os in zip(new_shape, old_shape)]
pad_tuples = [(0, ps) for ps in pad_size]
return np.pad(arr, pad_tuples, constant_values=pad_value)
def homogenize_spaces(all_spaces):
space1 = all_spaces[0]
if isinstance(space1, spaces.Box):
all_dims = np.array([space.shape for space in all_spaces], dtype=np.int32)
max_dims = np.max(all_dims, axis=0)
new_shape = tuple(max_dims)
all_lows = np.stack(
[
pad_to(space.low, new_shape, np.minimum(0, np.min(space.low)))
for space in all_spaces
]
)
all_highs = np.stack(
[
pad_to(space.high, new_shape, np.maximum(1e-5, np.max(space.high)))
for space in all_spaces
]
)
new_low = np.min(all_lows, axis=0)
new_high = np.max(all_highs, axis=0)
assert new_shape == new_low.shape
return Box(low=new_low, high=new_high, dtype=space1.dtype)
elif isinstance(space1, spaces.Discrete):
max_n = max([space.n for space in all_spaces])
return Discrete(max_n)
else:
assert False
def dehomogenize_actions(orig_action_space, action):
if isinstance(orig_action_space, spaces.Box):
# choose only the relevant action values
cur_shape = action.shape
new_shape = orig_action_space.shape
if cur_shape == new_shape:
return action
else:
assert len(cur_shape) == len(new_shape)
slices = [slice(0, i) for i in new_shape]
new_action = action[tuple(slices)]
return new_action
elif isinstance(orig_action_space, spaces.Discrete):
# extra action values refer to action value 0
n = orig_action_space.n
if action > n - 1:
action = 0
return action
else:
assert False
def homogenize_observations(obs_space, obs):
if isinstance(obs_space, spaces.Box):
return pad_to(obs, obs_space.shape, 0)
    elif isinstance(obs_space, spaces.Discrete):
        # discrete observations need no padding; return the observation itself
        return obs
else:
assert False
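
# Example sketch (requires gym and numpy, both imported above): merge two Box
# spaces of different lengths, then pad an observation from the smaller space.
if __name__ == "__main__":
    s1 = Box(low=0.0, high=1.0, shape=(2,), dtype=np.float32)
    s2 = Box(low=0.0, high=1.0, shape=(4,), dtype=np.float32)
    check_homogenize_spaces([s1, s2])
    merged = homogenize_spaces([s1, s2])  # Box with shape (4,)
    obs = np.zeros((2,), dtype=np.float32)
    print(merged, homogenize_observations(merged, obs).shape)  # -> (4,)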
|
11474831
|
import sys
import uuid
import gam
from gam.var import *
from gam import controlflow
from gam import display
from gam import gapi
from gam.gapi import directory as gapi_directory
from gam import utils
def delete():
cd = gapi_directory.build()
resourceId = sys.argv[3]
gapi.call(cd.mobiledevices(),
'delete',
resourceId=resourceId,
customerId=GC_Values[GC_CUSTOMER_ID])
def info():
cd = gapi_directory.build()
resourceId = sys.argv[3]
device_info = gapi.call(cd.mobiledevices(),
'get',
customerId=GC_Values[GC_CUSTOMER_ID],
resourceId=resourceId)
if 'deviceId' in device_info:
device_info['deviceId'] = device_info['deviceId'].encode('unicode-escape').decode(
UTF8)
attrib = 'securityPatchLevel'
if attrib in device_info and int(device_info[attrib]):
device_info[attrib] = utils.formatTimestampYMDHMS(device_info[attrib])
display.print_json(device_info)
def print_():
cd = gapi_directory.build()
todrive = False
titles = []
csvRows = []
fields = None
projection = orderBy = sortOrder = None
queries = [None]
delimiter = ' '
listLimit = 1
appsLimit = -1
i = 3
while i < len(sys.argv):
myarg = sys.argv[i].lower().replace('_', '')
if myarg == 'todrive':
todrive = True
i += 1
elif myarg in ['query', 'queries']:
queries = gam.getQueries(myarg, sys.argv[i + 1])
i += 2
elif myarg == 'delimiter':
delimiter = sys.argv[i + 1]
i += 2
elif myarg == 'listlimit':
listLimit = gam.getInteger(sys.argv[i + 1], myarg, minVal=-1)
i += 2
elif myarg == 'appslimit':
appsLimit = gam.getInteger(sys.argv[i + 1], myarg, minVal=-1)
i += 2
elif myarg == 'fields':
fields = f'nextPageToken,mobiledevices({sys.argv[i+1]})'
i += 2
elif myarg == 'orderby':
orderBy = sys.argv[i + 1].lower()
validOrderBy = [
'deviceid', 'email', 'lastsync', 'model', 'name', 'os',
'status', 'type'
]
if orderBy not in validOrderBy:
controlflow.expected_argument_exit('orderby',
', '.join(validOrderBy),
orderBy)
if orderBy == 'lastsync':
orderBy = 'lastSync'
elif orderBy == 'deviceid':
orderBy = 'deviceId'
i += 2
elif myarg in SORTORDER_CHOICES_MAP:
sortOrder = SORTORDER_CHOICES_MAP[myarg]
i += 1
elif myarg in PROJECTION_CHOICES_MAP:
projection = PROJECTION_CHOICES_MAP[myarg]
i += 1
else:
controlflow.invalid_argument_exit(sys.argv[i], 'gam print mobile')
for query in queries:
gam.printGettingAllItems('Mobile Devices', query)
page_message = gapi.got_total_items_msg('Mobile Devices', '...\n')
all_mobile = gapi.get_all_pages(cd.mobiledevices(),
'list',
'mobiledevices',
page_message=page_message,
customerId=GC_Values[GC_CUSTOMER_ID],
query=query,
projection=projection,
fields=fields,
orderBy=orderBy,
sortOrder=sortOrder)
for mobile in all_mobile:
row = {}
for attrib in mobile:
if attrib in ['kind', 'etag']:
continue
if attrib in ['name', 'email', 'otherAccountsInfo']:
if attrib not in titles:
titles.append(attrib)
if listLimit > 0:
row[attrib] = delimiter.join(
mobile[attrib][0:listLimit])
elif listLimit == 0:
row[attrib] = delimiter.join(mobile[attrib])
elif attrib == 'applications':
if appsLimit >= 0:
if attrib not in titles:
titles.append(attrib)
applications = []
j = 0
for app in mobile[attrib]:
j += 1
if appsLimit and (j > appsLimit):
break
appDetails = []
for field in [
'displayName', 'packageName', 'versionName'
]:
appDetails.append(app.get(field, '<None>'))
appDetails.append(
str(app.get('versionCode', '<None>')))
permissions = app.get('permission', [])
if permissions:
appDetails.append('/'.join(permissions))
else:
appDetails.append('<None>')
applications.append('-'.join(appDetails))
row[attrib] = delimiter.join(applications)
else:
if attrib not in titles:
titles.append(attrib)
if attrib == 'deviceId':
row[attrib] = mobile[attrib].encode(
'unicode-escape').decode(UTF8)
elif attrib == 'securityPatchLevel' and int(mobile[attrib]):
row[attrib] = utils.formatTimestampYMDHMS(
mobile[attrib])
else:
row[attrib] = mobile[attrib]
csvRows.append(row)
display.sort_csv_titles(
['resourceId', 'deviceId', 'serialNumber', 'name', 'email', 'status'],
titles)
display.write_csv_file(csvRows, titles, 'Mobile', todrive)
def update():
cd = gapi_directory.build()
resourceIds = sys.argv[3]
match_users = None
doit = False
if resourceIds[:6] == 'query:':
query = resourceIds[6:]
fields = 'nextPageToken,mobiledevices(resourceId,email)'
page_message = gapi.got_total_items_msg('Mobile Devices', '...\n')
devices = gapi.get_all_pages(cd.mobiledevices(),
'list',
page_message=page_message,
customerId=GC_Values[GC_CUSTOMER_ID],
items='mobiledevices',
query=query,
fields=fields)
else:
devices = [{'resourceId': resourceIds, 'email': ['not set']}]
doit = True
i = 4
body = {}
while i < len(sys.argv):
myarg = sys.argv[i].lower().replace('_', '')
if myarg == 'action':
body['action'] = sys.argv[i + 1].lower()
validActions = [
'wipe', 'wipeaccount', 'accountwipe', 'wipe_account',
'account_wipe', 'approve', 'block',
'cancel_remote_wipe_then_activate',
'cancel_remote_wipe_then_block'
]
if body['action'] not in validActions:
controlflow.expected_argument_exit('action',
', '.join(validActions),
body['action'])
if body['action'] == 'wipe':
body['action'] = 'admin_remote_wipe'
elif body['action'].replace('_',
'') in ['accountwipe', 'wipeaccount']:
body['action'] = 'admin_account_wipe'
i += 2
elif myarg in ['ifusers', 'matchusers']:
match_users = gam.getUsersToModify(entity_type=sys.argv[i + 1].lower(),
entity=sys.argv[i + 2])
i += 3
elif myarg == 'doit':
doit = True
i += 1
else:
controlflow.invalid_argument_exit(sys.argv[i], 'gam update mobile')
if body:
if doit:
print(f'Updating {len(devices)} devices')
describe_as = 'Performing'
else:
print(
f'Showing {len(devices)} changes that would be made, not actually making changes because doit argument not specified'
)
describe_as = 'Would perform'
for device in devices:
device_user = device.get('email', [''])[0]
if match_users and device_user not in match_users:
print(
f'Skipping device for user {device_user} that did not match match_users argument'
)
else:
print(
f'{describe_as} {body["action"]} on user {device_user} device {device["resourceId"]}'
)
if doit:
gapi.call(cd.mobiledevices(),
'action',
resourceId=device['resourceId'],
body=body,
customerId=GC_Values[GC_CUSTOMER_ID])
|
11474860
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.padding import ReplicationPad2d
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1)
class BasicBlock_ss(nn.Module):
def __init__(self, inplanes, planes = None, subsamp=1):
super(BasicBlock_ss, self).__init__()
        if planes is None:
planes = inplanes * subsamp
self.conv1 = conv3x3(inplanes, planes)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.subsamp = subsamp
self.doit = planes != inplanes
if self.doit:
self.couple = nn.Conv2d(inplanes, planes, kernel_size=1)
self.bnc = nn.BatchNorm2d(planes)
def forward(self, x):
if self.doit:
residual = self.couple(x)
residual = self.bnc(residual)
else:
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
if self.subsamp > 1:
out = F.max_pool2d(out, kernel_size=self.subsamp, stride=self.subsamp)
residual = F.max_pool2d(residual, kernel_size=self.subsamp, stride=self.subsamp)
out = self.conv2(out)
out = self.bn2(out)
out += residual
out = self.relu(out)
return out
class BasicBlock_us(nn.Module):
def __init__(self, inplanes, upsamp=1):
super(BasicBlock_us, self).__init__()
planes = int(inplanes / upsamp) # assumes integer result, fix later
self.conv1 = nn.ConvTranspose2d(inplanes, planes, kernel_size=3, padding=1, stride=upsamp, output_padding=1)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.upsamp = upsamp
self.couple = nn.ConvTranspose2d(inplanes, planes, kernel_size=3, padding=1, stride=upsamp, output_padding=1)
self.bnc = nn.BatchNorm2d(planes)
def forward(self, x):
residual = self.couple(x)
residual = self.bnc(residual)
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out += residual
out = self.relu(out)
return out
class FresUNet(nn.Module):
"""FresUNet segmentation network."""
def __init__(self, input_nbr, label_nbr):
"""Init FresUNet fields."""
super(FresUNet, self).__init__()
self.input_nbr = input_nbr
cur_depth = input_nbr
base_depth = 8
# Encoding stage 1
self.encres1_1 = BasicBlock_ss(cur_depth, planes = base_depth)
cur_depth = base_depth
d1 = base_depth
self.encres1_2 = BasicBlock_ss(cur_depth, subsamp=2)
cur_depth *= 2
# Encoding stage 2
self.encres2_1 = BasicBlock_ss(cur_depth)
d2 = cur_depth
self.encres2_2 = BasicBlock_ss(cur_depth, subsamp=2)
cur_depth *= 2
# Encoding stage 3
self.encres3_1 = BasicBlock_ss(cur_depth)
d3 = cur_depth
self.encres3_2 = BasicBlock_ss(cur_depth, subsamp=2)
cur_depth *= 2
# Encoding stage 4
self.encres4_1 = BasicBlock_ss(cur_depth)
d4 = cur_depth
self.encres4_2 = BasicBlock_ss(cur_depth, subsamp=2)
cur_depth *= 2
# Decoding stage 4
self.decres4_1 = BasicBlock_ss(cur_depth)
self.decres4_2 = BasicBlock_us(cur_depth, upsamp=2)
cur_depth = int(cur_depth/2)
# Decoding stage 3
self.decres3_1 = BasicBlock_ss(cur_depth + d4, planes = cur_depth)
self.decres3_2 = BasicBlock_us(cur_depth, upsamp=2)
cur_depth = int(cur_depth/2)
# Decoding stage 2
self.decres2_1 = BasicBlock_ss(cur_depth + d3, planes = cur_depth)
self.decres2_2 = BasicBlock_us(cur_depth, upsamp=2)
cur_depth = int(cur_depth/2)
# Decoding stage 1
self.decres1_1 = BasicBlock_ss(cur_depth + d2, planes = cur_depth)
self.decres1_2 = BasicBlock_us(cur_depth, upsamp=2)
cur_depth = int(cur_depth/2)
# Output
self.coupling = nn.Conv2d(cur_depth + d1, label_nbr, kernel_size=1)
self.sm = nn.LogSoftmax(dim=1)
def forward(self, x1, x2):
x = torch.cat((x1, x2), 1)
# pad5 = ReplicationPad2d((0, x53.size(3) - x5d.size(3), 0, x53.size(2) - x5d.size(2)))
s1_1 = x.size()
x1 = self.encres1_1(x)
x = self.encres1_2(x1)
s2_1 = x.size()
x2 = self.encres2_1(x)
x = self.encres2_2(x2)
s3_1 = x.size()
x3 = self.encres3_1(x)
x = self.encres3_2(x3)
s4_1 = x.size()
x4 = self.encres4_1(x)
x = self.encres4_2(x4)
x = self.decres4_1(x)
x = self.decres4_2(x)
s4_2 = x.size()
pad4 = ReplicationPad2d((0, s4_1[3] - s4_2[3], 0, s4_1[2] - s4_2[2]))
x = pad4(x)
# x = self.decres3_1(x)
x = self.decres3_1(torch.cat((x, x4), 1))
x = self.decres3_2(x)
s3_2 = x.size()
pad3 = ReplicationPad2d((0, s3_1[3] - s3_2[3], 0, s3_1[2] - s3_2[2]))
x = pad3(x)
x = self.decres2_1(torch.cat((x, x3), 1))
x = self.decres2_2(x)
s2_2 = x.size()
pad2 = ReplicationPad2d((0, s2_1[3] - s2_2[3], 0, s2_1[2] - s2_2[2]))
x = pad2(x)
x = self.decres1_1(torch.cat((x, x2), 1))
x = self.decres1_2(x)
s1_2 = x.size()
pad1 = ReplicationPad2d((0, s1_1[3] - s1_2[3], 0, s1_1[2] - s1_2[2]))
x = pad1(x)
x = self.coupling(torch.cat((x, x1), 1))
x = self.sm(x)
return x
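
# Smoke-test sketch (shapes here are assumptions): input_nbr counts the
# channels of both inputs after concatenation, e.g. two RGB images -> 6.
if __name__ == "__main__":
    net = FresUNet(input_nbr=6, label_nbr=2)
    im1 = torch.randn(1, 3, 64, 64)
    im2 = torch.randn(1, 3, 64, 64)
    out = net(im1, im2)  # per-pixel log-probabilities, shape (1, 2, 64, 64)
    print(out.shape)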
|
11474929
|
from pathlib import Path
from typing import Optional
import numpy as np
from numpy import ndarray
import onnxruntime
def make_yukarin_s_forwarder(yukarin_s_model_dir: Path, device, convert=False):
session = onnxruntime.InferenceSession(str(yukarin_s_model_dir.joinpath("yukarin_s.onnx")))
def _dispatcher(length: int, phoneme_list: ndarray, speaker_id: Optional[ndarray]):
phoneme_list = np.asarray(phoneme_list)
if speaker_id is not None:
speaker_id = np.asarray(speaker_id)
speaker_id = speaker_id.reshape((1,)).astype(np.int64)
return session.run(["phoneme_length"], {
"phoneme_list": phoneme_list,
"speaker_id": speaker_id,
})[0]
return _dispatcher
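
# Usage sketch (the model directory, phoneme ids and speaker id below are
# placeholders):
#
#   forward = make_yukarin_s_forwarder(Path("model_dir"), device="cpu")
#   phoneme_list = np.array([0, 23, 30, 4, 0], dtype=np.int64)
#   lengths = forward(len(phoneme_list), phoneme_list, np.array(5))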
|
11474983
|
class Event(list):
def __call__(self, *args, **kwargs):
for item in self:
item(*args, **kwargs)
class PropertyObservable:
def __init__(self):
self.property_changed = Event()
class Person(PropertyObservable):
def __init__(self, age=0):
super().__init__()
self._age = age
@property
def age(self):
return self._age
@age.setter
def age(self, value):
if self._age == value:
return
self._age = value
self.property_changed('age', value)
class TrafficAuthority:
def __init__(self, person):
self.person = person
person.property_changed.append(self.person_changed)
def person_changed(self, name, value):
if name == 'age':
if value < 16:
print('Sorry, you still cannot drive')
else:
print('Okay, you can drive now')
self.person.property_changed.remove(
self.person_changed
)
if __name__ == '__main__':
p = Person()
ta = TrafficAuthority(p)
for age in range(14, 20):
print(f'Setting age to {age}')
p.age = age
|
11474988
|
pkgname = "awk"
pkgver = "20211104"
pkgrel = 0
_commit="c50ef66d119d87e06a041e5522430265ccdce148"
hostmakedepends = ["byacc"]
pkgdesc = "One true awk"
maintainer = "q66 <<EMAIL>>"
license = "SMLNJ"
url = "https://github.com/onetrueawk/awk"
source = f"https://github.com/onetrueawk/awk/archive/{_commit}.tar.gz"
sha256 = "ef0fa50b7e7e2e21eafd49bb249f7d80d0b86e9cae291408724dba77484a0c6f"
# test suite uses local tools that are not present
options = ["bootstrap", "!check"]
def init_configure(self):
from cbuild.util import make
self.make = make.Make(self)
def do_build(self):
self.make.build([
"CC=" + self.get_tool("CC"),
"HOSTCC=" + self.get_tool("CC"),
"CFLAGS=" + self.get_cflags(shell = True) + " " + \
self.get_ldflags(shell = True) + " -DHAS_ISBLANK",
"YACC=byacc -H awkgram.tab.h -o awkgram.tab.c",
])
def do_check(self):
self.make.check()
def do_install(self):
self.cp("a.out", "awk")
self.install_bin("awk")
self.install_man("awk.1")
self.install_license("LICENSE")
|
11474991
|
import nvisii
import random
opt = lambda: None  # cheap attribute container standing in for argparse options
opt.spp = 400
opt.width = 500
opt.height = 500
opt.noise = False
opt.out = '10_light_texture.png'
# # # # # # # # # # # # # # # # # # # # # # # # #
nvisii.initialize(headless = True, verbose = True)
if opt.noise is not True:
nvisii.enable_denoiser()
camera = nvisii.entity.create(
name = "camera",
transform = nvisii.transform.create("camera"),
camera = nvisii.camera.create(
name = "camera",
aspect = float(opt.width)/float(opt.height)
)
)
camera.get_transform().look_at(
nvisii.vec3(0,0,0), # look at (world coordinate)
nvisii.vec3(0,0,1), # up vector
nvisii.vec3(-2,0,1), # camera_origin
)
nvisii.set_camera_entity(camera)
# # # # # # # # # # # # # # # # # # # # # # # # #
# let's turn off the ambient lights
nvisii.set_dome_light_intensity(0)
nvisii.disable_dome_light_sampling()
tex = nvisii.texture.create_from_file("tex", "content/gradient.png")
obj_entity = nvisii.entity.create(
name="light",
mesh = nvisii.mesh.create_plane('light'),
transform = nvisii.transform.create("light"),
)
obj_entity.set_light(
nvisii.light.create('light')
)
# Intensity affects the appearance of the light in
# addition to how much light it emits.
obj_entity.get_light().set_intensity(2)
# lets set the color texture as the color of the light
obj_entity.get_light().set_color_texture(tex)
obj_entity.get_transform().set_scale((0.6,0.6,0.2))
obj_entity.get_transform().set_position((0.5,-0.4,0.7))
obj_entity.get_transform().look_at(
at = (0,0,0),
up = (0,0,1),
)
obj_entity.get_transform().add_rotation((0,1,0,0))
obj_entity = nvisii.entity.create(
name="light_2",
mesh = nvisii.mesh.create_teapotahedron('light_2'),
transform = nvisii.transform.create("light_2"),
)
# a light is simply an entity with a light component added to it.
obj_entity.set_light(
nvisii.light.create('light_2')
)
obj_entity.get_light().set_intensity(2)
# use the same color texture for this light as well
obj_entity.get_light().set_color_texture(tex)
# let's set the size and placement of the light
obj_entity.get_transform().set_scale((0.1, 0.1, 0.1))
obj_entity.get_transform().set_position((-0.5,0.4,0))
obj_entity.get_transform().set_rotation(
nvisii.angleAxis(90, (0,0,1))
)
# # # # # # # # # # # # # # # # # # # # # # # # #
# Lets set some objects in the scene
room = nvisii.entity.create(
name="room",
mesh = nvisii.mesh.create_box('room'),
transform = nvisii.transform.create("room"),
material = nvisii.material.create("room"),
)
room.get_transform().set_scale((2.0,2.0,2.0))
room.get_transform().set_position((0,0,2.0))
mat = nvisii.material.get("room")
mat.set_base_color(nvisii.vec3(0.19,0.16,0.19))
mat.set_roughness(1)
sphere = nvisii.entity.create(
name="sphere",
mesh = nvisii.mesh.create_sphere("sphere"),
transform = nvisii.transform.create("sphere"),
material = nvisii.material.create("sphere")
)
sphere.get_transform().set_position((0.4,0,0.2))
sphere.get_transform().set_scale((0.2, 0.2, 0.2))
sphere.get_material().set_base_color((0.1,0.96,0.4))
sphere.get_material().set_roughness(0.7)
sphere.get_material().set_specular(1)
sphere2 = nvisii.entity.create(
name="sphere2",
mesh = nvisii.mesh.create_sphere("sphere2"),
transform = nvisii.transform.create("sphere2"),
material = nvisii.material.create("sphere2")
)
sphere2.get_transform().set_position((-0.5,-0.1,0.1))
sphere2.get_transform().set_scale((0.1, 0.1, 0.1))
sphere2.get_material().set_base_color((0.1,0.56,1))
sphere2.get_material().set_roughness(0)
sphere2.get_material().set_specular(0)
sphere3 = nvisii.entity.create(
name="sphere3",
mesh = nvisii.mesh.create_sphere("sphere3"),
transform = nvisii.transform.create("sphere3"),
material = nvisii.material.create("sphere3")
)
sphere3.get_transform().set_position((0.6,-0.5,0.16))
sphere3.get_transform().set_scale((0.16, 0.16, 0.16))
sphere3.get_material().set_base_color((0.5,0.8,0.5))
sphere3.get_material().set_roughness(0)
sphere3.get_material().set_specular(1)
sphere3.get_material().set_metallic(1)
cone = nvisii.entity.create(
name="cone",
mesh = nvisii.mesh.create_cone("cone"),
transform = nvisii.transform.create("cone"),
material = nvisii.material.create("cone")
)
# lets set the cone up
cone.get_transform().set_position((0.08,0.35,0.2))
cone.get_transform().set_scale((0.3, 0.3, 0.3))
cone.get_material().set_base_color((245/255, 230/255, 66/255))
cone.get_material().set_roughness(1)
cone.get_material().set_specular(0)
cone.get_material().set_metallic(0)
# # # # # # # # # # # # # # # # # # # # # # # # #
nvisii.render_to_file(
width=int(opt.width),
height=int(opt.height),
samples_per_pixel=int(opt.spp),
file_path=f"{opt.out}"
)
# let's clean up the GPU
nvisii.deinitialize()
|
11475000
|
import numbers
from pathlib import Path
from typing import Tuple, Optional
import albumentations
import numpy as np
import pandas as pd
import torch
from albumentations.pytorch.functional import img_to_tensor
from torch.utils.data import Dataset
from utils.image import load_image, load_mask
class ImageClassificationDataset(Dataset):
def __init__(self,
root: Path,
df: pd.DataFrame,
transform: Optional[albumentations.Compose] = None,
use_cache: bool = True):
self.root = root
self.df = df
self.transform = transform
self.use_cache = use_cache
self.cache = [None] * len(self.df)
def __len__(self) -> int:
return len(self.df)
def __getitem__(self, index: int) -> Tuple[torch.Tensor, torch.Tensor]:
files = self.df.iloc[index, 0]
label = self.df.iloc[index, 1]
if self.cache[index] is None:
if isinstance(files, str):
image = load_image(self.root / files)
            elif isinstance(files, list):
channels = [load_mask(self.root / it) for it in files]
image = np.stack(channels, axis=-1)
else:
raise RuntimeError(f'Failed to parse image path: {files}')
if self.use_cache:
self.cache[index] = image
else:
image = self.cache[index]
if self.transform is not None:
augmented = self.transform(image=image)
image = augmented['image']
image_tensor = img_to_tensor(image)
        # Labels are converted to tensors only when they are numeric; test
        # datasets may carry non-numeric labels, which pass through as-is.
if isinstance(label, numbers.Number):
label = torch.tensor(label)
return image_tensor, label
|
11475011
|
STOP_WORDS = {'Chinese': frozenset(['的'])}
BUILD_IN_MODELS = {
'newsgroups': (
'newsgroups.tar.gz',
'https://raw.githubusercontent.com/Windsooon/cherry_datasets/master/newsgroups.tar.gz',
'25952d9167b86d96503356d8272860d38d3929a31284bbb83d2737f50d23015e',
'latin1',
),
'review': (
'review.tar.gz',
'https://raw.githubusercontent.com/Windsooon/cherry_datasets/master/review.tar.gz',
'9c46684a48054b1ffb6d74f0b0bfff7cda538c7e097f7a4f8e3d20b1f1e561db',
'latin1',
),
'email': (
'email.tar.gz',
'https://raw.githubusercontent.com/Windsooon/cherry_datasets/master/email.tar.gz',
'901d7c5721ec4f72ad39bcf245c52c214a9d96ad96604ddaac86605bf9f910e2',
'latin1',
)
}
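
# Sketch of how one of these entries could be consumed (download + checksum
# verification; this helper code is illustrative, not part of the module):
#
#   import hashlib, urllib.request
#   filename, url, sha256, encoding = BUILD_IN_MODELS['newsgroups']
#   urllib.request.urlretrieve(url, filename)
#   digest = hashlib.sha256(open(filename, 'rb').read()).hexdigest()
#   assert digest == sha256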
|
11475018
|
from django.contrib.auth.decorators import user_passes_test, login_required
from django.core.exceptions import PermissionDenied
from django.shortcuts import render, redirect, get_object_or_404
from django.urls import reverse
from django.utils.decorators import method_decorator
from django_filters.views import FilterView
from django_tables2 import SingleTableMixin
from guardian.mixins import LoginRequiredMixin
from resource_tracker.filters.resource_group_filter import ResourceGroupFilter
from resource_tracker.forms import ResourceGroupForm
from resource_tracker.models import ResourceGroup
from resource_tracker.tables.resource_group_tables import ResourceGroupTable
@method_decorator(login_required, name='dispatch')
class ResourceGroupListView(LoginRequiredMixin, SingleTableMixin, FilterView):
table_pagination = {'per_page': 10}
table_class = ResourceGroupTable
model = ResourceGroup
template_name = 'generics/list.html'
filterset_class = ResourceGroupFilter
def dispatch(self, *args, **kwargs):
if not self.request.user.is_superuser:
raise PermissionDenied
return super(ResourceGroupListView, self).dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['title'] = "Resource groups"
return context
@user_passes_test(lambda u: u.is_superuser)
def resource_group_edit(request, resource_group_id):
resource_group = get_object_or_404(ResourceGroup, id=resource_group_id)
form = ResourceGroupForm(request.POST or None, instance=resource_group)
if form.is_valid():
form.save()
return redirect("resource_tracker:resource_group_list")
breadcrumbs = [
{'text': 'Resource groups', 'url': reverse('resource_tracker:resource_group_list')},
{'text': resource_group.name, 'url': ""},
]
context = {'form': form, 'resource_group': resource_group, 'breadcrumbs': breadcrumbs, 'action': 'edit'}
return render(request,
'resource_tracking/resource_group/resource-group-edit.html', context)
@user_passes_test(lambda u: u.is_superuser)
def resource_group_create(request):
if request.method == 'POST':
form = ResourceGroupForm(request.POST)
if form.is_valid():
form.save()
return redirect("resource_tracker:resource_group_list")
else:
form = ResourceGroupForm()
breadcrumbs = [
{'text': 'Resource groups', 'url': reverse('resource_tracker:resource_group_list')},
{'text': 'Create a new resource group', 'url': ""},
]
context = {'form': form, 'breadcrumbs': breadcrumbs, 'action': 'create'}
return render(request, 'resource_tracking/resource_group/resource-group-create.html', context)
@user_passes_test(lambda u: u.is_superuser)
def resource_group_delete(request, resource_group_id):
resource_group = get_object_or_404(ResourceGroup, id=resource_group_id)
if request.method == 'POST':
        # delete all resources, then the attribute definitions,
        # and finally the resource group itself
        resource_group.resources.all().delete()
        resource_group.attribute_definitions.all().delete()
        resource_group.text_attribute_definitions.all().delete()
        resource_group.delete()
        return redirect("resource_tracker:resource_group_list")
breadcrumbs = [
{'text': 'Resource groups', 'url': reverse('resource_tracker:resource_group_list')},
{'text': resource_group.name, 'url': ""},
]
context = {'resource_group': resource_group, 'breadcrumbs': breadcrumbs}
return render(request,
'resource_tracking/resource_group/resource-group-delete.html', context)
|
11475103
|
import envexamples
from raytracing import *
nRays = 1000000 # Increase for better resolution
minHeight = -5
maxHeight = 5
minTheta = -0.5 # rad
maxTheta = +0.5
# define a list of rays with uniform distribution
inputRays = RandomUniformRays(yMin = minHeight,
yMax = maxHeight,
maxCount = nRays,
thetaMax = maxTheta,
thetaMin = minTheta)
inputRays.display()
|
11475137
|
import importlib
from pipeline import msg
from PySide import QtGui
def import_module(modname,packagename=None):
module=None
try:
module=importlib.import_module(modname, packagename)
msg.logMessage(("Imported", modname), msg.DEBUG)
except ImportError as ex:
msg.logMessage('Module could not be loaded: ' + modname)
missingpackage = ex.message.replace('No module named ', '')
import config
if missingpackage in ['qt.qtapi','pandas','tomopy','matplotlib.pyplot']:
return None
if config.settings['Ignored Modules']:
if missingpackage in config.settings['Ignored Modules']:
return None
msgBox = QtGui.QMessageBox()
msgBox.setText("A python package is missing! Xi-cam can try to install this for you.")
msgBox.setInformativeText("Would you like to install " + missingpackage + "?")
msgBox.setStandardButtons(QtGui.QMessageBox.Yes | QtGui.QMessageBox.No | QtGui.QMessageBox.Ignore)
msgBox.setDefaultButton(QtGui.QMessageBox.Yes)
response = msgBox.exec_()
if response == QtGui.QMessageBox.Yes:
import pip
if not hasattr(pip, 'main'):
import pip._internal as pip
failure=pip.main(['install', '--user', missingpackage])
if failure:
failure=pip.main(['install', missingpackage])
if not failure:
msgBox = QtGui.QMessageBox()
msgBox.setText('Success! The missing package, ' + missingpackage + ', has been installed!')
msgBox.setInformativeText('Please restart Xi-cam now.')
msgBox.setStandardButtons(QtGui.QMessageBox.Ok)
msgBox.exec_()
exit(0)
else:
if modname.strip('.') == 'MOTD':
from xicam import debugtools
debugtools.frustration()
msgBox = QtGui.QMessageBox()
msgBox.setText(
'Sorry, ' + missingpackage + ' could not be installed. This is a Xi-cam critical library.')
                msgBox.setInformativeText('Xi-cam cannot be loaded. Please install ' + missingpackage + ' manually.')
msgBox.setStandardButtons(QtGui.QMessageBox.Ok)
msgBox.exec_()
exit(1)
else:
from xicam import debugtools
debugtools.frustration()
msgBox = QtGui.QMessageBox()
msgBox.setText(
'Sorry, ' + missingpackage + ' could not be installed. Try installing this package yourself, or contact the package developer.')
msgBox.setInformativeText('Would you like to continue loading Xi-cam?')
msgBox.setStandardButtons(QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)
response = msgBox.exec_()
if response == QtGui.QMessageBox.No:
exit(1)
elif response == QtGui.QMessageBox.Ignore and modname.strip('.') != 'MOTD':
import config
if config.settings['Ignored Modules']:
config.settings['Ignored Modules']+=[missingpackage]
else:
config.settings['Ignored Modules']=[missingpackage]
msgBox = QtGui.QMessageBox()
msgBox.setText('Xi-cam will no longer prompt you to install this package, however some plugins may be disabled.')
msgBox.setStandardButtons(QtGui.QMessageBox.Ok)
msgBox.exec_()
if modname.strip('.') == 'MOTD':
from xicam import debugtools
debugtools.frustration()
msgBox = QtGui.QMessageBox()
msgBox.setText(
'Sorry, ' + missingpackage + ' is a Xi-cam critical library. This must be installed to run Xi-cam!')
        msgBox.setInformativeText('Xi-cam cannot be loaded. Please install ' + modname.strip('.') + ' manually.')
msgBox.setStandardButtons(QtGui.QMessageBox.Ok)
msgBox.exec_()
exit(1)
if not module: msg.logMessage('Failed to import '+modname,msg.CRITICAL)
return module
|
11475145
|
import math
import altair as alt
import pandas as pd
from aequitas.plot.commons.helpers import (
no_axis,
transform_ratio,
calculate_chart_size_from_elements,
to_list,
format_number,
)
from aequitas.plot.commons.tooltips import (
get_tooltip_text_group_size,
get_tooltip_text_disparity_explanation,
get_tooltip_text_parity_test_explanation,
)
from aequitas.plot.commons.style.classes import (
Title,
Subtitle,
Parity_Result,
Annotation,
Legend,
)
from aequitas.plot.commons.style.text import FONT
from aequitas.plot.commons.style.sizes import Summary_Chart
from aequitas.plot.commons import initializers as Initializer
from aequitas.plot.commons import validators as Validator
# Altair 2.4.1 requires that every chart receive a dataframe. For charts that
# don't need one (like most annotations), we pass the following dummy dataframe
# to reduce the complexity of the resulting vega spec.
DUMMY_DF = pd.DataFrame({"a": [1, 1], "b": [0, 0]})
def __get_scales(max_num_groups):
"""Creates an Altair scale for the color of the parity test result, and another
for the x axis of the group circles subplot."""
scales = dict()
# COLOR
scales["color"] = alt.Scale(
domain=["Reference", "Pass", "Fail"], range=Parity_Result.color_palette
)
# GROUP CIRCLES X AXIS
scales["circles_x"] = alt.Scale(
domain=[-max_num_groups, max_num_groups], nice=False
)
return scales
def __get_size_constants(
chart_height, chart_width, num_attributes, num_metrics, max_num_groups
):
"""Calculates the heights, widths and spacings of the components of the summary chart
based on the provided desired overall chart height and width, as well as the number of
attributes (columns) and metrics (lines)."""
size_constants = dict(
# Chart sizes
attribute_titles_height=0.15 * chart_height,
line_spacing=0.2 * chart_height / num_metrics,
line_height=Summary_Chart.line_height_ratio * chart_height / num_metrics,
metric_titles_width=0.1 * chart_width,
column_spacing=0.15 * chart_width / num_attributes,
column_width=Summary_Chart.column_width_ratio * chart_width / num_attributes,
# Circle size
## Conditional definition of the size where for each additional unit in
## max_num_groups, we subtract 25 squared pixels from the area of the
## circle, which has the base value of 350 for 0 groups. From max_num_groups
## equal to 13 or more, we keep the size at the minimum value of 25 to
## make sure the circles are visible.
group_circle_size=-25 * max_num_groups + 350 if max_num_groups < 13 else 25,
)
return size_constants
def __draw_attribute_title(attribute, width, size_constants):
"""Draws a single attribute's title."""
return (
alt.Chart(DUMMY_DF)
.mark_text(
align="center",
baseline="middle",
font=FONT,
size=Title.font_size,
color=Title.font_color,
fontWeight=Title.font_weight,
)
.encode(
text=alt.value(attribute.title()),
)
.properties(width=width, height=size_constants["attribute_titles_height"])
)
def __draw_metric_line_titles(metrics, size_constants):
"""Draws left hand side titles for metrics."""
metric_line_titles = []
for metric in metrics:
# METRIC TITLE
metric_title = (
alt.Chart(DUMMY_DF)
.transform_calculate(y_position="1.2")
.mark_text(
align="center",
baseline="middle",
font=FONT,
fontWeight=Title.font_weight,
size=Title.font_size,
color=Title.font_color,
)
.encode(
alt.Y("y_position:Q", scale=alt.Scale(domain=[3, 1]), axis=no_axis()),
text=alt.value(metric.upper()),
)
)
# GROUPS TEXT
group_circles_title = (
alt.Chart(DUMMY_DF)
.transform_calculate(y_position="2")
.mark_text(
align="center",
baseline="middle",
font=FONT,
size=Subtitle.font_size,
color=Subtitle.font_color,
)
.encode(
alt.Y("y_position:Q", scale=alt.Scale(domain=[3, 1]), axis=no_axis()),
text=alt.value("Groups"),
)
)
# PERCENT. POP TEXT
population_percentage_title = (
alt.Chart(DUMMY_DF)
.transform_calculate(y_position="2.7")
.mark_text(
align="center",
baseline="middle",
font=FONT,
size=Subtitle.font_size,
color=Subtitle.font_color,
)
.encode(
alt.Y("y_position:Q", scale=alt.Scale(domain=[3, 1]), axis=no_axis()),
text=alt.value("% Pop."),
)
)
metric_line_titles.append(
(
metric_title + group_circles_title + population_percentage_title
).properties(
height=size_constants["line_height"],
width=size_constants["metric_titles_width"],
)
)
# EMPTY CORNER SPACE
# To make sure that the attribute columns align properly with the title column, we need to create a blank
# space of the same size of the attribute titles. For this purpose, we use the same function (__draw_attribute_title)
# and pass in an empty string so that nothing is actually drawn.
top_left_corner_space = __draw_attribute_title(
"", size_constants["metric_titles_width"], size_constants
)
# CONCATENATE SUBPLOTS
metric_titles = alt.vconcat(
top_left_corner_space,
*metric_line_titles,
spacing=size_constants["line_spacing"],
bounds="flush",
)
return metric_titles
def __get_parity_result_variable(row, metric, fairness_threshold):
""" Creates parity test result variable for each provided row, separating the Reference group from the passing ones."""
if row["attribute_value"] == row["ref_group_value"]:
return "Reference"
elif abs(row[f"{metric}_disparity_scaled"]) < fairness_threshold - 1:
return "Pass"
else:
return "Fail"
def __draw_parity_result_text(parity_result, color_scale):
"""Draws the uppercased text result of the provided parity test (Pass, Fail or Reference),
color-coded according to the provided Altair scale."""
return (
alt.Chart(pd.DataFrame({"parity_result": parity_result}, index=[0]))
.transform_calculate(y_position="1")
.mark_text(
align="center",
baseline="middle",
font=FONT,
size=Parity_Result.font_size,
fontWeight=Parity_Result.font_weight,
)
.encode(
alt.Y("y_position:Q", scale=alt.Scale(domain=[3, 1]), axis=no_axis()),
alt.Color(
"parity_result:O", scale=color_scale, legend=alt.Legend(title="")
),
text=alt.value(parity_result.upper()),
)
)
def __draw_population_bar(population_bar_df, metric, color_scale):
""" Draws a stacked bar of the sum of the percentage of population of the groups that obtained each result for the parity test."""
population_bar_tooltips = [
alt.Tooltip(field=f"{metric}_parity_result", type="nominal", title="Parity"),
alt.Tooltip(
field="tooltip_group_size",
type="nominal",
title="Size",
),
alt.Tooltip(field="tooltip_groups_name_size", type="nominal", title="Groups"),
]
population_bar = (
alt.Chart(population_bar_df)
.transform_calculate(y_position="2.8")
.mark_bar(size=6, stroke="white")
.encode(
alt.X("sum(group_size):Q", stack="normalize", axis=no_axis()),
alt.Y("y_position:Q", scale=alt.Scale(domain=[3, 1]), axis=no_axis()),
alt.Color(
f"{metric}_parity_result:O",
scale=color_scale,
legend=alt.Legend(
title="Parity Test",
padding=20,
),
),
tooltip=population_bar_tooltips,
)
)
return population_bar
def __draw_group_circles(plot_df, metric, scales, size_constants):
"""Draws a circle for each group, color-coded by the result of the parity test.
The groups are spread around the central reference group according to their disparity."""
circle_tooltip_encoding = [
alt.Tooltip(field="attribute_value", type="nominal", title="Group"),
alt.Tooltip(field="tooltip_group_size", type="nominal", title="Group Size"),
alt.Tooltip(
field=f"tooltip_parity_test_explanation_{metric}",
type="nominal",
title="Parity Test",
),
alt.Tooltip(
field=f"tooltip_disparity_explanation_{metric}",
type="nominal",
title="Disparity",
),
alt.Tooltip(
field=f"{metric}",
type="quantitative",
format=".2f",
title=f"{metric}".upper(),
),
]
return (
alt.Chart(plot_df)
.transform_calculate(y_position="2")
.mark_circle(opacity=1)
.encode(
alt.X(
f"{metric}_disparity_rank:Q", scale=scales["circles_x"], axis=no_axis()
),
alt.Y("y_position:Q", scale=alt.Scale(domain=[3, 1]), axis=no_axis()),
alt.Color(
f"{metric}_parity_result:O",
scale=scales["color"],
legend=alt.Legend(title=""),
),
size=alt.value(size_constants["group_circle_size"]),
tooltip=circle_tooltip_encoding,
)
)
def __draw_parity_test_explanation(fairness_threshold, x_position):
"""Draw text that explains what does pass/fail mean in the parity test results."""
explanation_text = alt.Chart(DUMMY_DF).mark_text(
baseline="top",
align="left",
font=FONT,
fill=Annotation.font_color,
fontSize=Annotation.font_size,
fontWeight=Annotation.font_weight,
)
explanation_text_group = explanation_text.encode(
x=alt.value(x_position),
y=alt.value(0),
text=alt.value(
f"For a group to pass the parity test its disparity to the reference group cannot exceed the fairness threshold ({fairness_threshold})."
),
)
explanation_text_attribute = explanation_text.encode(
x=alt.value(x_position),
y=alt.value(Annotation.font_size * Annotation.line_spacing),
text=alt.value(
f"An attribute passes the parity test for a given metric if all its groups pass the test."
),
)
return explanation_text_group + explanation_text_attribute
def __create_population_bar_df(attribute_df, metric):
"""Creates a pandas aggregation of the attribute_df by parity result, along with the
list of groups tooltip variable."""
attribute_df["group_size_formatted"] = attribute_df.apply(
lambda row: format_number(row["group_size"]), axis=1
)
attribute_df["tooltip_groups_name_size"] = (
attribute_df["attribute_value"]
+ " ("
+ attribute_df["group_size_formatted"].astype(str)
+ ")"
)
population_bar_df = (
attribute_df.groupby(by=f"{metric}_parity_result")
.agg(
{
"attribute_name": min,
"total_entities": min,
"group_size": sum,
"tooltip_groups_name_size": lambda x: ", ".join(x),
}
)
.reset_index()
)
population_bar_df["tooltip_group_size"] = population_bar_df.apply(
lambda row: get_tooltip_text_group_size(
row["group_size"], row["total_entities"]
),
axis=1,
)
return population_bar_df
def __create_group_rank_variable(attribute_df, metric):
""" Creates the disparity rank variable for the given metric, centered around 0 (the Reference Group's value). """
# RANK
attribute_df[f"{metric}_disparity_rank"] = attribute_df[
f"{metric}_disparity_scaled"
].rank(method="first")
# REFERENCE GROUP RANK
reference_rank = attribute_df.loc[
attribute_df[f"{metric}_parity_result"] == "Reference"
][f"{metric}_disparity_rank"].iloc[0]
# CENTERED RANK
attribute_df[f"{metric}_disparity_rank"] = (
attribute_df[f"{metric}_disparity_rank"] - reference_rank
)
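# Example of the centering above (hypothetical attribute with four groups):
# raw ranks [1, 2, 3, 4] with the Reference group at rank 2 become
# [-1, 0, 1, 2], placing the Reference group at x=0 in the circle plot.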
def __create_tooltip_variables(attribute_df, metric, fairness_threshold):
""" Creates disparity explanation and formatted group size tooltip variables. """
# PARITY TEST EXPLANATION
attribute_df[f"tooltip_parity_test_explanation_{metric}"] = attribute_df.apply(
lambda row: get_tooltip_text_parity_test_explanation(
row[f"{metric}_parity_result"],
metric,
fairness_threshold,
),
axis=1,
)
# DISPARITY EXPLANATION
ref_group = attribute_df["ref_group_value"].iloc[0]
attribute_df[f"tooltip_disparity_explanation_{metric}"] = attribute_df.apply(
lambda row: get_tooltip_text_disparity_explanation(
row[f"{metric}_disparity_scaled"],
row["attribute_value"],
metric,
ref_group,
),
axis=1,
)
# FORMATTED GROUP SIZE
attribute_df["tooltip_group_size"] = attribute_df.apply(
lambda row: get_tooltip_text_group_size(
row["group_size"], row["total_entities"]
),
axis=1,
)
def __create_disparity_variables(attribute_df, metric, fairness_threshold):
""" Creates scaled disparity, parity test result & disparity explanation tooltip variables. """
# SCALED DISPARITY VALUE
attribute_df[f"{metric}_disparity_scaled"] = attribute_df.apply(
lambda row: transform_ratio(row[f"{metric}_disparity"]), axis=1
)
# PARITY RESULT
attribute_df[f"{metric}_parity_result"] = attribute_df.apply(
__get_parity_result_variable,
metric=metric,
fairness_threshold=fairness_threshold,
axis=1,
)
def __get_attribute_column(
attribute_df, metrics, scales, attribute, size_constants, fairness_threshold
):
""" Returns a vertical concatenation of all elements of all metrics for each attribute's column."""
metric_summary = []
for metric in metrics:
# CREATE VARIABLES IN DF
__create_disparity_variables(attribute_df, metric, fairness_threshold)
__create_tooltip_variables(attribute_df, metric, fairness_threshold)
__create_group_rank_variable(attribute_df, metric)
# PARITY RESULT TEXT
## The parity result is equal to the "worst" of each group's results
## If one group fails the parity test, the whole metric fails (for that attribute)
parity_result = attribute_df.loc[
attribute_df[f"{metric}_parity_result"] != "Reference"
][f"{metric}_parity_result"].min()
parity_result_text = __draw_parity_result_text(parity_result, scales["color"])
# GROUP CIRCLES
group_circles = __draw_group_circles(
attribute_df, metric, scales, size_constants
)
# POPULATION BAR
population_bar_df = __create_population_bar_df(attribute_df, metric)
population_bar = __draw_population_bar(
population_bar_df, metric, scales["color"]
)
# LAYERING
metric_summary.append(
(parity_result_text + population_bar + group_circles)
.properties(
width=size_constants["column_width"],
height=size_constants["line_height"],
)
.resolve_scale(x="independent")
)
# ATTRIBUTE TITLE
attribute_title = __draw_attribute_title(
attribute, size_constants["column_width"], size_constants
)
return alt.vconcat(
attribute_title,
*metric_summary,
bounds="flush",
spacing=size_constants["line_spacing"],
)
def plot_summary_chart(
disparity_df,
metrics_list,
attributes_list=None,
fairness_threshold=1.25,
chart_height=None,
chart_width=None,
):
"""Draws chart that summarizes the parity results for the provided metrics across the existing attributes.
This includes an overall result, the specific results by each attribute's groups as well as the percentage
of population by result.
:param disparity_df: a dataframe generated by the Aequitas Bias class
:type disparity_df: pandas.core.frame.DataFrame
:param metrics_list: a list of the metrics of interest
:type metrics_list: list
:param attributes_list: a list of the attributes of interest, defaults to using all in the dataframe
:type attributes_list: list, optional
:param fairness_threshold: a value for the maximum allowed disparity, defaults to 1.25
:type fairness_threshold: float, optional
:param chart_height: a value (in pixels) for the height of the chart
:type chart_height: int, optional
:param chart_width: a value (in pixels) for the width of the chart
:type chart_width: int, optional
:return: the full summary chart
:rtype: Altair chart object
"""
## If a specific list of attributes was not passed, use all from df
(
metrics,
attributes,
chart_height,
chart_width,
) = Initializer.prepare_summary_chart(
disparity_df,
metrics_list,
attributes_list,
fairness_threshold,
chart_height,
chart_width,
Summary_Chart,
)
num_metrics = len(metrics)
num_attributes = len(attributes)
max_num_groups = max(
disparity_df.loc[disparity_df["attribute_name"].isin(attributes)]
.groupby(by="attribute_name")["attribute_value"]
.count()
)
size_constants = __get_size_constants(
chart_height, chart_width, num_attributes, num_metrics, max_num_groups
)
Validator.chart_size_summary(size_constants, num_metrics, num_attributes)
# SCALES
scales = __get_scales(max_num_groups)
# METRIC TITLES
metric_titles = __draw_metric_line_titles(metrics, size_constants)
# RELEVANT FIELDS
viz_fields = [
"attribute_name",
"attribute_value",
"group_size",
"total_entities",
f"{metrics[0]}_ref_group_value",
*metrics,
]
viz_fields += [f"{metric}_disparity" for metric in metrics]
attribute_columns = []
for attribute in attributes:
# CREATE ATTRIBUTE DF
attribute_df = (
disparity_df[viz_fields]
.loc[disparity_df["attribute_name"] == attribute]
.copy(deep=True)
)
attribute_df.rename(
columns={f"{metrics[0]}_ref_group_value": "ref_group_value"}, inplace=True
)
# ATTRIBUTE COLUMN
attribute_column = __get_attribute_column(
attribute_df,
metrics,
scales,
attribute,
size_constants,
fairness_threshold,
)
        attribute_columns.append(attribute_column)
# CONCATENATE ATTRIBUTE COLUMNS
summary_chart_columns = alt.hconcat(
*attribute_columns,
bounds="flush",
spacing=size_constants["column_spacing"] + size_constants["column_width"],
)
# ADD METRIC TITLES
summary_chart_table = alt.hconcat(
metric_titles,
summary_chart_columns,
bounds="flush",
spacing=size_constants["metric_titles_width"]
+ size_constants["column_spacing"],
)
summary_chart_explanation = __draw_parity_test_explanation(
fairness_threshold, size_constants["column_spacing"] / 2
)
full_summary_chart = (
alt.vconcat(summary_chart_table, summary_chart_explanation)
.properties(padding=Summary_Chart.full_chart_padding)
.configure_legend(
labelFont=FONT,
labelColor=Legend.font_color,
labelFontSize=Legend.font_size,
titleFont=FONT,
titleColor=Legend.font_color,
titleFontSize=Legend.title_font_size,
titleFontWeight=Legend.title_font_weight,
titlePadding=Legend.title_margin_bottom + Legend.vertical_spacing,
)
.configure_view(strokeWidth=0)
)
return full_summary_chart
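# Usage sketch (hypothetical names; assumes disparity_df comes from the
# Aequitas Bias class as described in the docstring above):
#
#     chart = plot_summary_chart(disparity_df, ["fpr", "pprev"], fairness_threshold=1.25)
#     chart.save("summary.html")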
|
11475216
|
import collections
import dataclasses
import dis
import functools
import importlib
import inspect
import itertools
import logging
import operator
import sys
import traceback
import types
import typing
from typing import Any
from typing import Dict
from typing import List
from unittest.mock import patch
import torchdynamo.side_effects
import torchdynamo.variables.base
from torchdynamo.source import AttrSource
from torchdynamo.source import GetItemSource
from torchdynamo.source import GlobalSource
from torchdynamo.source import LocalSource
from torchdynamo.variables.builder import VariableBuilder
from . import config
from . import exc
from . import skipfiles
from .allowed_functions import is_allowed
from .allowed_functions import is_builtin
from .bytecode_analysis import livevars_analysis
from .bytecode_transformation import Instruction
from .bytecode_transformation import cleaned_instructions
from .bytecode_transformation import create_instruction
from .bytecode_transformation import is_generator
from .bytecode_transformation import unique_id
from .codegen import PyCodegen
from .exc import Unsupported
from .exc import unimplemented
from .guards import GuardBuilder
from .output_graph import OutputGraph
from .resume_execution import ContinueExecutionCache
from .resume_execution import ReenterWith
from .utils import counters
from .utils import istype
from .variables.base import MutableLocal
from .variables.base import VariableTracker
from .variables.base import typestr
from .variables.builtin import BuiltinVariable
from .variables.constant import ConstantVariable
from .variables.dicts import ConstDictVariable
from .variables.functions import BaseUserFunctionVariable
from .variables.functions import NestedUserFunctionVariable
from .variables.functions import UserFunctionVariable
from .variables.lists import BaseListVariable
from .variables.lists import ListIteratorVariable
from .variables.lists import ListVariable
from .variables.lists import SliceVariable
from .variables.lists import TupleVariable
from .variables.misc import ClosureVariable
from .variables.misc import ContextManagerVariable
from .variables.misc import GetAttrVariable
from .variables.misc import GradModeVariable
from .variables.misc import PythonModuleVariable
from .variables.misc import UnknownVariable
from .variables.misc import WithExitFunctionVariable
from .variables.nn_module import NNModuleVariable
from .variables.tensor import TensorVariable
from .variables.torch import TorchVariable
from .variables.user_defined import UserDefinedVariable
log = logging.getLogger(__name__)
@dataclasses.dataclass
class BlockStackEntry:
target: Instruction
    stack_index: typing.Optional[int] = None
    with_context: typing.Optional[ContextManagerVariable] = None
def can_restore(self):
return self.with_context is not None
def resume_fn(self):
assert self.stack_index is not None
return ReenterWith(self.stack_index)
def exit(self, tx):
return self.with_context.exit(tx)
def stack_op(fn: typing.Callable):
nargs = len(inspect.signature(fn).parameters)
fn_var = BuiltinVariable(fn)
@functools.wraps(fn)
def impl(self: "InstructionTranslatorBase", inst: Instruction):
self.push(fn_var.call_function(self, self.popn(nargs), {}))
return impl
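# e.g. BINARY_ADD = stack_op(operator.add) (see the opcode table below):
# the generated handler pops two VariableTrackers and pushes the result of
# BuiltinVariable(operator.add).call_function on them.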
def generic_jump(truth_fn: typing.Callable, push: bool):
def inner(self: "InstructionTranslatorBase", inst: Instruction):
value: VariableTracker = self.pop()
self.output.guards.update(value.guards)
if value.is_python_constant():
if truth_fn(value.as_python_constant()):
push and self.push(value)
self.jump(inst)
elif isinstance(value, TensorVariable) and self.should_compile_partial_graph():
# compile a partial subgraph prefix then jump into user code
self.push(value)
self.output.compile_subgraph(self)
self.pop()
if_next = self.create_call_resume_at(self.next_instruction)
push and self.push(value)
if_jump = self.create_call_resume_at(inst.target)
self.output.add_output_instructions(
[(create_instruction(inst.opname, target=if_jump[0]))]
+ if_next
+ if_jump
)
elif not isinstance(value, TensorVariable) and value.has_unpack_var_sequence(
self
):
if truth_fn(len(value.unpack_var_sequence(self))):
push and self.push(value)
self.jump(inst)
else:
unimplemented(f"generic_jump {typestr(value)}")
return inner
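# generic_jump instances are bound to the jump opcodes below, e.g.
# POP_JUMP_IF_FALSE = generic_jump(operator.not_, False): constant conditions
# are folded into a direct jump, while Tensor conditions trigger a graph break
# that compiles the traced prefix and resumes at both branch targets.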
def break_graph_if_unsupported(*, push):
def decorator(inner_fn):
@functools.wraps(inner_fn)
def wrapper(self: "InstructionTranslatorBase", inst: Instruction):
state = self.copy_graphstate()
try:
return inner_fn(self, inst)
except Unsupported as exc:
if not self.should_compile_partial_graph():
raise
exc.remove_from_stats()
exc.add_to_stats("graph_break")
self.restore_graphstate(state)
self.output.compile_subgraph(self)
self.popn(push - dis.stack_effect(inst.opcode, inst.arg))
for _ in range(push):
self.push(UnknownVariable())
resume_call_insts = self.create_call_resume_at(self.next_instruction)
# Check if there is a block stack entry with GradModeVariable. And
# wrap the instruction causing the graph break inside a try..finally
# block. See more details at
# https://github.com/pytorch/torchdynamo/issues/207
cleanup = []
if len(self.block_stack) == 1 and isinstance(
self.block_stack[0].with_context, GradModeVariable
):
ctx_variable = self.block_stack[0].with_context
cg = PyCodegen(self)
setup_finally, cleanup = ctx_variable.reconstruct(
cg, resume_call_insts[0]
)
self.output.add_output_instructions(setup_finally)
self.output.add_output_instructions([inst])
# Add the cleanup instructions from try..finally block
self.output.add_output_instructions(cleanup)
self.output.add_output_instructions(
resume_call_insts,
)
return wrapper
return decorator
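# Recovery path sketch: on Unsupported the wrapper rolls back to the
# checkpointed graph state, compiles the partial graph, re-emits the failing
# instruction for eager execution (pushing UnknownVariable placeholders for
# its results), then appends a call that resumes tracing afterwards.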
class InstructionTranslatorBase(object):
def cell_and_freevars(self):
if not hasattr(self, "_cell_and_freevars"):
self._cell_and_freevars = tuple(
self.code_options["co_cellvars"] or []
) + tuple(self.code_options["co_freevars"] or [])
return self._cell_and_freevars
def prune_dead_locals(self):
reads = livevars_analysis(self.instructions, self.current_instruction)
# implicit use by super()
# reads = reads | {"__class__"}
# output variables?
reads = reads | set(self.cell_and_freevars())
self.symbolic_locals = collections.OrderedDict(
[(k, v) for k, v in self.symbolic_locals.items() if k in reads]
)
self.output.side_effects.prune_dead_object_new(self)
def call_function(
self,
fn: VariableTracker,
args: List[VariableTracker],
kwargs: Dict[str, VariableTracker],
):
assert isinstance(fn, VariableTracker)
assert isinstance(args, list)
assert isinstance(kwargs, dict)
assert all(
isinstance(x, VariableTracker)
for x in itertools.chain(args, kwargs.values())
)
self.push(fn.call_function(self, args, kwargs))
def update_locals_and_stack(self, oldvar: VariableTracker, newvar: VariableTracker):
def repl(v: VariableTracker):
if v.mutable_local is oldvar.mutable_local:
return newvar
return v
cache = dict()
self.output.side_effects.apply(repl, cache)
self.stack = [VariableTracker.apply(repl, x, cache) for x in self.stack]
for k, x in self.symbolic_locals.items():
self.symbolic_locals[k] = VariableTracker.apply(repl, x, cache)
def replace_all(self, oldvar: VariableTracker, newvar: VariableTracker):
if isinstance(
oldvar.mutable_local, torchdynamo.side_effects.MutableSideEffects
):
newvar = self.output.side_effects.mutation(oldvar, newvar)
else:
assert isinstance(
oldvar.mutable_local, torchdynamo.variables.base.MutableLocal
)
newvar = newvar.clone(
mutable_local=torchdynamo.variables.base.MutableLocal()
)
self.update_locals_and_stack(oldvar, newvar)
return newvar
def inline_user_function_return(self, fn, args, kwargs):
"""
        Inline a call to a user-defined function and return its result.
"""
state = self.copy_graphstate()
try:
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
self.output.guards.update(fn.guards)
return result
except Exception:
self.restore_graphstate(state)
raise
def step(self):
"""Process exactly one instruction, return False we should exit"""
inst = self.instructions[self.instruction_pointer]
self.current_instruction = inst
self.instruction_pointer += 1
if self.instruction_pointer < len(self.instructions):
self.next_instruction = self.instructions[self.instruction_pointer]
else:
self.instruction_pointer = None
self.next_instruction = None
if inst.starts_line:
self.lineno = inst.starts_line
if len(self.stack) == 0 and self.should_compile_partial_graph():
self.checkpoint = inst, self.copy_graphstate()
if config.trace:
print("TRACE", inst.opname, inst.argval, self.stack)
try:
if not hasattr(self, inst.opname):
unimplemented(f"missing: {inst.opname}")
getattr(self, inst.opname)(inst)
return inst.opname != "RETURN_VALUE"
except Unsupported as exc:
exc.real_stack.append(self.frame_summary())
if not self.checkpoint:
raise
# generate code from checkpoint
assert not self.output.output_instructions
continue_inst, state = self.checkpoint
self.restore_graphstate(state)
self.output.compile_subgraph(self, partial_convert=True)
self.output.add_output_instructions(
[create_instruction("JUMP_ABSOLUTE", target=continue_inst)]
+ self.instructions
)
def run(self):
try:
while (
self.instruction_pointer is not None
and not self.output.should_exit
and self.step()
):
pass
except (
exc.Unsupported,
exc.RestartAnalysis,
exc.TorchRuntimeError,
exc.SkipFrame,
):
raise
except Exception as e:
sys.stderr.write(
f"ERROR FROM offset={self.current_instruction.offset} "
f"filename {self.code_options.get('co_filename')} "
f"{self.lineno} {typestr(e)}\n"
)
raise
finally:
            # Clean up the OutputGraph to release the held tensors. We perform the
# cleanup only for InstructionTranslator and not
# InliningInstructionTranslator. The InliningInstructionTranslator
# mutates the output object and is restored to original state if
# there was an exception.
if isinstance(self, InstructionTranslator):
self.output.cleanup()
def push(self, val):
assert val is None or isinstance(
val, VariableTracker
), f"push expects VariableTracker, got {typestr(val)}"
self.stack.append(val)
def push_many(self, vals: List[TensorVariable]):
for val in vals:
self.push(val)
def pop(self) -> TensorVariable:
return self.stack.pop()
def popn(self, n: int) -> List[TensorVariable]:
assert n >= 0
return list(reversed([self.pop() for _ in range(n)]))
def LOAD_FAST(self, inst):
name = inst.argval
if name.startswith(".") and name not in self.symbolic_locals:
# This happens in dict/list comprehensions
name = name.replace(".", "implicit")
assert name not in self.cell_and_freevars()
if name not in self.symbolic_locals:
unimplemented("undefined LOAD_FAST")
self.push(self.symbolic_locals[name])
if name.startswith("___stack"):
self.symbolic_locals.pop(name)
def LOAD_DEREF(self, inst):
assert inst.argval in self.cell_and_freevars()
if inst.argval not in self.symbolic_locals:
unimplemented(f"undefined LOAD_DEREF {inst.argval}")
self.push(self.symbolic_locals[inst.argval])
def STORE_FAST(self, inst):
self.symbolic_locals[inst.argval] = self.pop()
def DELETE_FAST(self, inst):
del self.symbolic_locals[inst.argval]
STORE_DEREF = STORE_FAST
def LOAD_CLOSURE(self, inst):
self.push(ClosureVariable(name=inst.argval))
def LOAD_CONST(self, inst):
self.push(ConstantVariable(value=inst.argval))
def get_global_source(self, name):
if self.output.root_globals is self.f_globals:
source = GlobalSource(name)
else:
if "__name__" in self.f_globals:
source = AttrSource(
self.import_source(self.f_globals["__name__"]), name
)
else:
mangled_name = f"___unnamed_scope_{id(self.f_globals)}"
if mangled_name not in self.output.root_globals:
self.output.install_global(mangled_name, self.f_globals)
source = GetItemSource(GlobalSource(mangled_name), name)
return source
def LOAD_GLOBAL(self, inst):
name = inst.argval
if name in self.symbolic_globals:
variable = self.output.side_effects[self.symbolic_globals[name]]
self.push(self.output.side_effects.load_global(variable, name))
return
try:
value = self.f_globals[name]
except KeyError:
return self.load_builtin(inst)
source = self.get_global_source(name)
self.push(VariableBuilder(self, source)(value))
def STORE_GLOBAL(self, inst):
value = self.pop()
name = inst.argval
source = self.get_global_source(name)
if name not in self.symbolic_globals:
self.symbolic_globals[name] = object() # sentinel object
variable = self.output.side_effects.track_global_existing(
source, self.symbolic_globals[name]
)
self.output.side_effects.store_global(variable, name, value)
def import_source(self, module_name):
"""Create an alias to a module for use in guards"""
value = importlib.import_module(module_name)
alias = f"__import_{module_name.replace('.', '_dot_')}"
f_globals = self.output.root_globals
assert alias not in f_globals or f_globals[alias] is value
f_globals[alias] = value
self.output.update_co_names(alias)
return GlobalSource(alias)
def IMPORT_NAME(self, inst):
level, fromlist = self.popn(2)
if level.as_python_constant() != 0:
unimplemented("IMPORT_NAME with level")
        # IMPORT_NAME imports the top-level package
module_name = inst.argval.split(".")[0]
value = importlib.import_module(module_name)
source = self.import_source(module_name)
if is_allowed(value):
self.push(TorchVariable(value, source=source))
elif istype(value, types.ModuleType):
self.push(PythonModuleVariable(value, source=source))
else:
unimplemented(f"IMPORT_NAME {typestr(value)}")
def IMPORT_FROM(self, inst):
self.DUP_TOP(inst)
self.LOAD_ATTR(inst)
def load_builtin(self, inst):
assert inst.argval in self.f_builtins
val = self.f_builtins[inst.argval]
assert is_builtin(val)
self.push(VariableBuilder(self, GlobalSource(inst.argval))(val))
def jump(self, inst):
self.instruction_pointer = self.indexof[id(inst.target)]
JUMP_FORWARD = jump
JUMP_ABSOLUTE = jump
POP_JUMP_IF_FALSE = generic_jump(operator.not_, False)
POP_JUMP_IF_TRUE = generic_jump(operator.truth, False)
JUMP_IF_FALSE_OR_POP = generic_jump(operator.not_, True)
JUMP_IF_TRUE_OR_POP = generic_jump(operator.truth, True)
def SETUP_LOOP(self, inst):
# only exists in python<=3.7
self.block_stack.append(BlockStackEntry(inst.target))
def SETUP_EXCEPT(self, inst):
# only exists in python<=3.7
self.block_stack.append(BlockStackEntry(inst.target))
def POP_BLOCK(self, inst):
self.block_stack.pop()
def SETUP_WITH(self, inst):
ctx = self.pop()
if not isinstance(ctx, ContextManagerVariable):
unimplemented(f"SETUP_WITH {ctx}")
self.output.guards.update(ctx.guards)
if isinstance(self, InstructionTranslator):
self.block_stack.append(BlockStackEntry(inst.target, len(self.stack), ctx))
else:
# can't restore this while inlining
self.block_stack.append(BlockStackEntry(inst.target))
self.push(
WithExitFunctionVariable(
ctx,
inst.target,
**VariableTracker.propagate(ctx),
)
)
self.push(ctx.enter(self))
def SETUP_FINALLY(self, inst):
self.block_stack.append(BlockStackEntry(inst.target))
def BEGIN_FINALLY(self, inst):
self.push(None)
def WITH_CLEANUP_START(self, inst):
exit, exc = self.popn(2)
if sys.version_info < (3, 8):
assert exc.is_python_constant()
assert exc.as_python_constant() is None
else:
assert exc is None
self.push(exc)
self.push(exit.call_function(self, [ConstantVariable(None)] * 3, {}))
def WITH_CLEANUP_FINISH(self, inst):
self.popn(2)
self.push(None)
def END_FINALLY(self, inst):
assert self.pop() is None
def FOR_ITER(self, inst):
it = self.pop()
if isinstance(it, ListIteratorVariable):
self.output.guards.update(it.guards)
try:
val, next_iter = it.next_variables()
self.replace_all(it, next_iter)
self.push(next_iter)
self.push(val)
except StopIteration:
self.jump(inst)
else:
unimplemented(f"FOR_ITER {typestr(it)}")
def COMPARE_OP(self, inst):
left, right = self.popn(2)
options = VariableTracker.propagate([left, right])
op = inst.argval
supported_is_const = {
"is": operator.is_,
"is not": operator.is_not,
"==": operator.eq,
"!=": operator.ne,
}
supported_tensors = {
">": operator.gt,
"<": operator.lt,
">=": operator.ge,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
}
supported_any = dict(
itertools.chain(supported_tensors.items(), supported_is_const.items())
)
if (
isinstance(
left,
(
TensorVariable,
NNModuleVariable,
BaseListVariable,
UserDefinedVariable,
BaseUserFunctionVariable,
ConstDictVariable,
),
)
and isinstance(right, ConstantVariable)
and right.value is None
and op in supported_is_const
):
# <non-None> is None
self.push(
ConstantVariable(
supported_is_const[op](object(), right.value), **options
)
)
elif (
isinstance(left, TensorVariable) or isinstance(right, TensorVariable)
) and op in supported_tensors:
self.push(
TensorVariable.create(
self,
supported_tensors[op](left.as_proxy(), right.as_proxy()),
**options,
)
)
elif (
left.is_python_constant()
and right.is_python_constant()
and op in supported_any
):
# constant fold
self.push(
ConstantVariable(
supported_any[op](
left.as_python_constant(), right.as_python_constant()
),
**options,
)
)
elif op in ("in", "not in"):
self.push(right.call_method(self, "__contains__", [left], {}))
if op == "not in":
self.UNARY_NOT(inst)
else:
unimplemented(f"COMPARE_OP {typestr(left)} {op} {typestr(right)}")
def GET_ITER(self, inst):
self.call_function(BuiltinVariable(iter), [self.pop()], {})
@break_graph_if_unsupported(push=1)
def CALL_FUNCTION(self, inst):
args = self.popn(inst.argval)
fn = self.pop()
self.call_function(fn, args, {})
@break_graph_if_unsupported(push=1)
def CALL_FUNCTION_EX(self, inst):
if inst.argval == 0:
kwargsvars = ConstDictVariable({}, dict)
argsvars = self.pop()
elif inst.argval == 1:
kwargsvars = self.pop()
argsvars = self.pop()
else:
unimplemented("CALL_FUNCTION_EX")
fn = self.pop()
self.output.guards.update(argsvars.guards)
self.output.guards.update(kwargsvars.guards)
if (
isinstance(fn, GetAttrVariable)
and isinstance(fn.obj, TensorVariable)
and fn.name == "view"
and isinstance(argsvars, (ConstantVariable, TensorVariable))
):
# Hack to handle special case in some bert models. Converts
# x.view(*shape) into x.view(shape), which is correct for view()
# but not generally. See test_transpose_for_scores().
argsvars = TupleVariable([argsvars])
if not isinstance(
argsvars, BaseListVariable
) and argsvars.has_unpack_var_sequence(self):
argsvars = TupleVariable(argsvars.unpack_var_sequence(self))
if not isinstance(argsvars, BaseListVariable) or not isinstance(
kwargsvars, ConstDictVariable
):
unimplemented(f"non-static call {typestr(argsvars)} {typestr(kwargsvars)}")
self.call_function(fn, argsvars.items, kwargsvars.items)
@break_graph_if_unsupported(push=1)
def CALL_FUNCTION_KW(self, inst):
argnames = self.pop()
args = self.popn(inst.argval)
fn = self.pop()
assert isinstance(argnames, ConstantVariable)
argnames = argnames.value
args, kwargs = args[: -len(argnames)], args[-len(argnames) :]
kwargs = dict(zip(argnames, kwargs))
assert len(kwargs) == len(argnames)
self.call_function(fn, args, kwargs)
def LOAD_METHOD(self, inst):
self.LOAD_ATTR(inst)
self.push(self.pop())
self.push(None)
def CALL_METHOD(self, inst):
args = self.popn(inst.argval)
dummy = self.pop()
assert dummy is None
fn = self.pop()
self.call_function(fn, args, {})
def LOAD_ATTR(self, inst):
obj = self.pop()
result = BuiltinVariable(getattr).call_function(
self, [obj, ConstantVariable(inst.argval)], {}
)
self.push(result)
def STORE_ATTR(self, inst):
prior = self.copy_graphstate()
val, obj = self.popn(2)
try:
self.output.guards.update(
BuiltinVariable(setattr)
.call_function(self, [obj, ConstantVariable(inst.argval), val], {})
.guards
)
return
except Unsupported as e:
if not self.should_compile_partial_graph():
raise
e.remove_from_stats()
e.add_to_stats("graph_break")
self.restore_graphstate(prior)
# break the graph
self.output.compile_subgraph(self)
self.output.add_output_instructions([inst])
self.popn(2)
self.output.add_output_instructions(
self.create_call_resume_at(self.next_instruction)
)
@break_graph_if_unsupported(push=0)
def STORE_SUBSCR(self, inst):
val, obj, key = self.popn(3)
result = obj.call_method(self, "__setitem__", [key, val], {})
# no result is pushed, so need to lift the guards to global
self.output.guards.update(result.guards)
def BUILD_TUPLE(self, inst):
items = self.popn(inst.argval)
options = VariableTracker.propagate(items)
self.push(TupleVariable(items, **options))
def BUILD_SLICE(self, inst):
items = self.popn(inst.argval)
options = VariableTracker.propagate(items)
self.push(SliceVariable(items, **options))
def BUILD_LIST(self, inst):
items = self.popn(inst.argval)
options = VariableTracker.propagate(items)
self.push(ListVariable(items, mutable_local=MutableLocal(), **options))
def BUILD_LIST_UNPACK(self, inst, cls=ListVariable):
seqs = self.popn(inst.argval)
options = VariableTracker.propagate(seqs)
items = list()
for seq in seqs:
try:
items.extend(seq.unpack_var_sequence(self))
except NotImplementedError:
unimplemented(f"BUILD_LIST_UNPACK {seq}")
self.push(cls(items, mutable_local=MutableLocal(), **options))
def BUILD_TUPLE_UNPACK(self, inst):
self.BUILD_LIST_UNPACK(inst, cls=TupleVariable)
BUILD_TUPLE_UNPACK_WITH_CALL = BUILD_TUPLE_UNPACK
def BUILD_MAP(self, inst):
items = self.popn(inst.argval * 2)
options = VariableTracker.propagate(items)
result = dict()
for k, v in zip(items[::2], items[1::2]):
assert isinstance(k, ConstantVariable)
result[k.value] = v
assert len(result) == len(items) / 2
self.push(
ConstDictVariable(result, dict, mutable_local=MutableLocal(), **options)
)
def BUILD_CONST_KEY_MAP(self, inst):
keys = self.pop()
values = self.popn(inst.argval)
options = VariableTracker.propagate([keys] + values)
assert isinstance(keys, ConstantVariable)
keys = keys.value
assert istype(keys, tuple)
assert len(keys) == len(values)
self.push(
ConstDictVariable(
dict(zip(keys, values)),
dict,
mutable_local=MutableLocal(),
**options,
)
)
def MAP_ADD(self, inst):
if sys.version_info < (3, 8):
v, k = self.popn(2)
else:
k, v = self.popn(2)
assert inst.argval > 0
obj = self.stack[-inst.arg]
assert isinstance(obj, ConstDictVariable)
assert obj.mutable_local
items = dict(obj.items)
items[k.as_python_constant()] = v
self.replace_all(
obj,
ConstDictVariable(
items,
obj.user_cls,
**VariableTracker.propagate([obj, k, v]),
),
)
def LIST_APPEND(self, inst):
v = self.pop()
assert inst.argval > 0
obj = self.stack[-inst.arg]
assert isinstance(obj, ListVariable)
assert obj.mutable_local
self.replace_all(
obj,
ListVariable(
obj.items + [v],
**VariableTracker.propagate([obj, v]),
),
)
def MAKE_FUNCTION(self, inst):
flags = inst.arg
old_stack = list(self.stack)
fn_name = self.pop()
code = self.pop()
defaults = None
closure = None
annotations = None
kwdefaults = None
if flags & 0x08:
closure = self.pop()
if flags & 0x04:
annotations = self.pop()
if flags & 0x02:
kwdefaults = self.pop()
if flags & 0x01:
defaults = self.pop()
options = VariableTracker.propagate(old_stack[len(self.stack) :])
self.push(
NestedUserFunctionVariable(
fn_name,
code,
self.f_globals,
defaults,
kwdefaults,
annotations,
closure,
closure_scope=self,
**options,
)
)
def UNPACK_SEQUENCE(self, inst):
# TODO(jansel): rewrite this using unpack_var_sequence
seq = self.pop()
options = VariableTracker.propagate([seq])
if isinstance(seq, BaseListVariable):
assert len(seq.items) == inst.argval
self.output.guards.update(seq.guards)
for i in reversed(seq.items):
self.push(i)
elif seq.is_python_constant() and isinstance(seq, ConstantVariable):
val = seq.as_python_constant()
assert len(val) == inst.argval
for i in reversed(val):
self.push(ConstantVariable(i, **options))
elif isinstance(seq, TensorVariable):
proxy = seq.as_proxy()
for i in reversed(range(inst.argval)):
self.push(TensorVariable.create(self, proxy[i], **options))
elif isinstance(seq, GetAttrVariable) and isinstance(seq.obj, TensorVariable):
# x, y = a.shape
proxy = getattr(seq.obj.as_proxy(), seq.name)
for i in reversed(range(inst.argval)):
self.push(TensorVariable.create(self, proxy[i], **options))
else:
unimplemented(f"UNPACK_SEQUENCE {seq}")
def UNPACK_EX(self, inst):
assert 0 <= inst.argval <= 0xFFFF
prefix = inst.argval & 0xFF # low byte
suffix = inst.argval >> 8 # high byte
seq = self.pop()
options = VariableTracker.propagate(seq)
if seq.has_unpack_var_sequence(self):
vals = list(seq.unpack_var_sequence(self))
assert len(vals) >= prefix + suffix
vals_prefix = vals[:prefix]
vals_list = vals[prefix : len(vals) - suffix]
vals_suffix = vals[len(vals) - suffix :]
for item in reversed(vals_suffix):
self.push(item.add_options(options))
self.push(TupleVariable(vals_list, **options))
for item in reversed(vals_prefix):
self.push(item.add_options(options))
else:
unimplemented(f"UNPACK_EX {seq}")
def NOP(self, inst):
pass
def POP_TOP(self, inst):
self.pop()
def ROT_TWO(self, inst):
a = self.pop()
b = self.pop()
self.push(a)
self.push(b)
def ROT_THREE(self, inst):
a = self.pop()
b = self.pop()
c = self.pop()
self.push(a)
self.push(c)
self.push(b)
def ROT_FOUR(self, inst):
a = self.pop()
b = self.pop()
c = self.pop()
d = self.pop()
self.push(a)
self.push(d)
self.push(c)
self.push(b)
def DUP_TOP(self, inst):
a = self.pop()
self.push(a)
self.push(a)
def DUP_TOP_TWO(self, inst):
a = self.pop()
b = self.pop()
self.push(b)
self.push(a)
self.push(b)
self.push(a)
def FORMAT_VALUE(self, inst):
flags = inst.arg
if (flags & 0x04) == 0x04:
fmt_spec = self.pop()
else:
fmt_spec = ConstantVariable("")
value = self.pop()
if (flags & 0x03) == 0x01:
value = BuiltinVariable(str).call_function(self, [value], {})
elif (flags & 0x03) == 0x02:
value = BuiltinVariable(repr).call_function(self, [value], {})
elif (flags & 0x03) == 0x03:
value = BuiltinVariable(ascii).call_function(self, [value], {})
fmt_var = ConstantVariable(
"{:" + fmt_spec.as_python_constant() + "}"
).add_options(fmt_spec)
self.call_function(BuiltinVariable(str.format), [fmt_var, value], {})
def BUILD_STRING(self, inst):
result = ""
for _ in range(inst.arg):
str_var = self.pop()
assert isinstance(str_var, ConstantVariable)
result = str_var.value + result
self.push(ConstantVariable(value=result))
def IS_OP(self, inst):
assert inst.argval == 0 or inst.argval == 1
if inst.argval == 0:
new_argval = "is"
else:
new_argval = "is not"
new_inst = create_instruction("COMPARE_OP", argval=new_argval)
self.COMPARE_OP(new_inst)
def CONTAINS_OP(self, inst):
assert inst.argval == 0 or inst.argval == 1
left, right = self.popn(2)
op = inst.argval
self.push(right.call_method(self, "__contains__", [left], {}))
if op == 1:
self.UNARY_NOT(inst)
def LIST_EXTEND(self, inst):
v = self.pop()
assert inst.argval > 0
obj = self.stack[-inst.arg]
assert isinstance(obj, ListVariable)
assert obj.mutable_local
obj.call_method(self, "extend", [v], {})
def LIST_TO_TUPLE(self, inst):
self.push(BuiltinVariable(tuple).call_function(self, [self.pop()], {}))
def DICT_MERGE(self, inst):
v = self.pop()
assert inst.argval > 0
obj = self.stack[-inst.arg]
assert isinstance(obj, ConstDictVariable)
assert obj.mutable_local
obj.call_method(self, "update", [v], {})
def GEN_START(self, inst):
self.pop()
def GET_LEN(self, inst):
tos = self.stack[-1]
if tos.is_python_constant():
self.push(ConstantVariable(len(tos.as_python_constant())))
else:
self.push(tos.call_method(self, "__len__", [], {}))
def MATCH_MAPPING(self, inst):
tos = self.stack[-1]
assert isinstance(tos, ConstDictVariable)
if isinstance(tos.items, collections.abc.Mapping):
self.push(ConstantVariable(True))
else:
self.push(ConstantVariable(False))
def MATCH_SEQUENCE(self, inst):
tos = self.stack[-1]
assert tos.is_python_constant()
tos_value = tos.as_python_constant()
if isinstance(tos_value, collections.abc.Sequence) and not isinstance(
tos_value, (str, bytes, bytearray)
):
self.push(ConstantVariable(True))
else:
self.push(ConstantVariable(False))
def MATCH_KEYS(self, inst):
tos = self.stack[-1]
assert tos.is_python_constant()
keys = tos.as_python_constant()
tos1 = self.stack[-2]
assert isinstance(tos1, ConstDictVariable)
match_obj = tos1.items
if all(key in match_obj for key in keys):
self.push(TupleVariable(list(match_obj[key] for key in keys)))
self.push(ConstantVariable(True))
else:
self.push(ConstantVariable(None))
self.push(ConstantVariable(False))
UNARY_POSITIVE = stack_op(operator.pos)
UNARY_NEGATIVE = stack_op(operator.neg)
UNARY_NOT = stack_op(operator.not_)
UNARY_INVERT = stack_op(operator.invert)
BINARY_POWER = stack_op(operator.pow)
BINARY_MULTIPLY = stack_op(operator.mul)
BINARY_MATRIX_MULTIPLY = stack_op(operator.matmul)
BINARY_FLOOR_DIVIDE = stack_op(operator.floordiv)
BINARY_TRUE_DIVIDE = stack_op(operator.truediv)
BINARY_MODULO = stack_op(operator.mod)
BINARY_ADD = stack_op(operator.add)
BINARY_SUBTRACT = stack_op(operator.sub)
BINARY_SUBSCR = break_graph_if_unsupported(push=1)(stack_op(operator.getitem))
BINARY_LSHIFT = stack_op(operator.lshift)
BINARY_RSHIFT = stack_op(operator.rshift)
BINARY_AND = stack_op(operator.and_)
BINARY_OR = stack_op(operator.or_)
BINARY_XOR = stack_op(operator.xor)
INPLACE_POWER = stack_op(operator.ipow)
INPLACE_MULTIPLY = stack_op(operator.imul)
INPLACE_MATRIX_MULTIPLY = stack_op(operator.imatmul)
INPLACE_FLOOR_DIVIDE = stack_op(operator.ifloordiv)
INPLACE_TRUE_DIVIDE = stack_op(operator.itruediv)
INPLACE_MODULO = stack_op(operator.imod)
INPLACE_ADD = stack_op(operator.iadd)
INPLACE_SUBTRACT = stack_op(operator.isub)
INPLACE_LSHIFT = stack_op(operator.ilshift)
INPLACE_RSHIFT = stack_op(operator.irshift)
INPLACE_AND = stack_op(operator.iand)
INPLACE_XOR = stack_op(operator.ixor)
INPLACE_OR = stack_op(operator.ior)
def copy_graphstate(self):
"""Create a checkpoint of the current state by copying everything"""
return (
self.output.copy_graphstate(),
collections.OrderedDict(self.symbolic_locals),
list(self.stack),
list(self.block_stack),
self.instruction_pointer,
self.current_instruction,
self.next_instruction,
self.lineno,
)
def restore_graphstate(self, state):
"""Restore a checkpoint created by self.copy_graphstate()"""
(
output_state,
self.symbolic_locals,
self.stack,
self.block_stack,
self.instruction_pointer,
self.current_instruction,
self.next_instruction,
self.lineno,
) = state
self.output.restore_graphstate(output_state)
def frame_summary(self):
return traceback.FrameSummary(
getattr(self.f_code, "co_filename", "<unknown>"),
self.lineno,
getattr(self.f_code, "co_name", "<unknown>"),
lookup_line=False,
)
def __init__(
self,
output: OutputGraph,
instructions: List[Instruction],
f_globals: Dict[str, Any],
f_builtins: Dict[str, Any],
code_options: Dict[str, Any],
symbolic_locals: Dict[str, VariableTracker],
symbolic_globals: Dict[str, VariableTracker],
f_code: types.CodeType,
):
super(InstructionTranslatorBase, self).__init__()
# Mutable state checkpointed by copy_graphstate()
self.output: OutputGraph = output
self.symbolic_locals: Dict[str, VariableTracker] = symbolic_locals
self.symbolic_globals: Dict[str, VariableTracker] = symbolic_globals
self.stack: List[VariableTracker] = []
self.instruction_pointer: int = 0
self.current_instruction: Instruction = create_instruction("NOP")
self.next_instruction: typing.Optional[Instruction] = None
self.block_stack: List[BlockStackEntry] = []
self.lineno: int = code_options.get("co_firstlineno")
# Properties of the input/output code
self.instructions: List[Instruction] = instructions
self.indexof: Dict[int, int] = {id(i): n for n, i in enumerate(instructions)}
self.f_globals: Dict[str, Any] = f_globals
self.f_builtins: Dict[str, Any] = f_builtins
self.code_options: Dict[str, Any] = code_options
self.f_code: types.CodeType = f_code
self.checkpoint = None
if sys.version_info >= (3, 10):
from .resume_execution import CO_ASYNC_GENERATOR
from .resume_execution import CO_COROUTINE
from .resume_execution import CO_GENERATOR
from .resume_execution import CO_ITERABLE_COROUTINE
if f_code.co_flags & (
CO_GENERATOR | CO_COROUTINE | CO_ITERABLE_COROUTINE | CO_ASYNC_GENERATOR
):
self.push(BuiltinVariable(None))
class InstructionTranslator(InstructionTranslatorBase):
def __init__(
self,
instructions: List[Instruction],
f_code,
f_locals,
f_globals,
f_builtins,
code_options,
compiler_fn,
one_graph,
):
super(InstructionTranslator, self).__init__(
output=OutputGraph(f_globals, code_options, compiler_fn, self),
instructions=instructions,
f_globals=f_globals,
f_builtins=f_builtins,
code_options=code_options,
symbolic_locals=collections.OrderedDict(), # set below
# A global var is inserted only after a STORE_GLOBAL happens to it
symbolic_globals=collections.OrderedDict(),
f_code=f_code,
)
self.one_graph: bool = one_graph
vars = list(code_options["co_varnames"])
vars.extend(x for x in self.cell_and_freevars() if x not in vars)
self.symbolic_locals = collections.OrderedDict(
(k, VariableBuilder(self, LocalSource(k))(f_locals[k]))
for k in vars
if k in f_locals
)
# symbolic_locals contains the mapping from original f_locals to the
# Variable objects. During the Variable building phase, each object also
# has its associated guards. At the end, we will accumulate these
# guards.
#
# One way of handling these guards is to just accumulate all of them
# right now. However, many f_locals might not be used in the frame and
# thus can unnecessarily increase guard execution overhead. Therefore,
# we selectively update output.guards as we run the Python Bytecode
# instruction by instruction.
#
# An exception here is list/dict variables. Guards related to these
# variables have indexed access, like Tensor_match on args[0], and if
# args is not used in this frame, we will miss a LIST_LENGTH check like
        # len(args) == 2. Missing the LIST_LENGTH check causes problems for the
        # next invocation when args is not a list, and accessing args[0] raises a
        # runtime error. Therefore, we recursively add guards for list/dict variables here.
for val in self.symbolic_locals.values():
if isinstance(
val, (ListIteratorVariable, BaseListVariable, ConstDictVariable)
):
local_guards = VariableTracker.propagate(val)["guards"]
index_guards = [
guard
for guard in local_guards
if guard.create_fn
in (
GuardBuilder.LIST_LENGTH,
GuardBuilder.DICT_KEYS,
GuardBuilder.ODICT_KEYS,
)
]
self.output.guards.update(index_guards)
self._freevars_ids = dict()
for name in self.code_options["co_freevars"]:
if name in f_locals:
self._freevars_ids[name] = id(f_locals[name])
def match_nested_cell(self, name, cell):
"""Match a cell in this method to one in a function we are inlining"""
value = cell.cell_contents
# TODO(jansel): check the id of the cell rather than the contents
if id(value) != self._freevars_ids.get(name):
return None
return self.symbolic_locals[name]
def should_compile_partial_graph(self):
return all(b.can_restore() for b in self.block_stack) and not self.one_graph
def create_call_resume_at(self, inst):
self.instruction_pointer = None
if inst.opname == "RETURN_VALUE":
return [create_instruction("RETURN_VALUE")]
reads = livevars_analysis(self.instructions, inst)
argnames = tuple(
k
for k in self.symbolic_locals.keys()
if k in reads and k not in self.cell_and_freevars()
)
nargs = len(self.stack) + len(argnames)
name = unique_id(f"__resume_at_{inst.offset}")
new_code: types.CodeType = ContinueExecutionCache.lookup(
self.f_code,
inst.offset,
len(self.stack),
argnames,
tuple(b.resume_fn() for b in self.block_stack),
)
cg = PyCodegen(self)
if new_code.co_freevars:
cg.make_function_with_closure(name, new_code, len(self.stack))
else:
self.output.install_global(
name, types.FunctionType(new_code, self.f_globals, name)
)
cg.extend_output(cg.load_function_name(name, len(self.stack)))
cg.extend_output([cg.create_load(k) for k in argnames])
cg.extend_output(
[
create_instruction("CALL_FUNCTION", nargs),
create_instruction("RETURN_VALUE"),
]
)
return cg.get_instructions()
def RETURN_VALUE(self, inst):
if self.output.count_calls() == 0:
raise exc.SkipFrame()
self.instruction_pointer = None
self.output.compile_subgraph(self)
self.output.add_output_instructions([create_instruction("RETURN_VALUE")])
class InliningInstructionTranslator(InstructionTranslatorBase):
"""Trace and inline a called method"""
@classmethod
def inline_call(cls, parent, func, args, kwargs):
with patch.dict(counters, {"unimplemented": counters["inline_call"]}):
return cls.inline_call_(parent, func, args, kwargs)
@staticmethod
def inline_call_(parent, func, args, kwargs):
assert isinstance(func, (UserFunctionVariable, NestedUserFunctionVariable))
if func.has_self():
unimplemented("inline with __self__")
if func.get_name() == "patched_init":
unimplemented("Patched init cannot be inlined.")
if skipfiles.check(
func.get_filename()
) and not skipfiles.is_torch_inline_allowed(func.get_filename()):
unimplemented(
f"inline in skipfiles: {func.get_name()} {func.get_filename()}"
)
try:
sub_locals, closure_cells = func.bind_args(parent, args, kwargs)
except TypeError as exc:
print(func.get_filename(), func.get_function(), args, kwargs, exc)
unimplemented("arg mismatch inlining")
for v in itertools.chain(sub_locals.values(), closure_cells.values()):
if not isinstance(v, VariableTracker):
unimplemented(f"unconverted arg {v}")
code: types.CodeType = func.get_code()
if code.co_name in ("__setitem__", "__setattr__"):
unimplemented(f"inline {code.co_name}")
if config.trace:
print("INLINING ", code)
dis.dis(code)
print()
if is_generator(code):
tracer = InliningGeneratorInstructionTranslator(
parent, code, sub_locals, parent.symbolic_globals, closure_cells, func
)
else:
tracer = InliningInstructionTranslator(
parent, code, sub_locals, parent.symbolic_globals, closure_cells, func
)
tracer.run()
assert tracer.symbolic_result is not None
func.export_freevars(parent, tracer)
if tracer.f_globals is parent.f_globals:
# Merge symbolic_globals back if parent and child are in the same namespace
parent.symbolic_globals.update(tracer.symbolic_globals)
if config.trace:
print("DONE INLINING", code)
if is_generator(code):
assert tracer.symbolic_result.as_python_constant() is None
return ListIteratorVariable(
tracer.generated_items,
mutable_local=MutableLocal(),
**VariableTracker.propagate(tracer.symbolic_result),
)
else:
return tracer.symbolic_result
def __init__(
self,
parent: InstructionTranslatorBase,
code: types.CodeType,
symbolic_locals: Dict[str, VariableTracker],
symbolic_globals: Dict[str, VariableTracker],
closure_cells: Dict[str, VariableTracker],
funcvar: BaseUserFunctionVariable,
):
f_globals = funcvar.get_globals()
f_builtins = f_globals["__builtins__"]
if not isinstance(f_builtins, dict):
f_builtins = f_builtins.__dict__
super(InliningInstructionTranslator, self).__init__(
output=parent.output,
f_globals=f_globals,
f_builtins=f_builtins,
symbolic_locals=symbolic_locals,
symbolic_globals=symbolic_globals,
instructions=cleaned_instructions(code),
code_options={k: getattr(code, k) for k in dir(code)},
f_code=code,
)
self.parent = parent
self.symbolic_result = None
self.closure_cells = closure_cells
def STORE_DEREF(self, inst):
if inst.argval in self.closure_cells:
cell = self.closure_cells[inst.argval]
val = self.pop()
if isinstance(cell, ClosureVariable):
self.output.root_tx.symbolic_locals[cell.name] = val
else:
self.output.side_effects.store_cell(cell, val)
else:
if isinstance(
self.symbolic_locals.get(inst.argval),
torchdynamo.variables.NewCellVariable,
):
self.output.side_effects.store_cell(
self.symbolic_locals[inst.argval], self.pop()
)
else:
unimplemented("write to __closure__ while inlining")
def LOAD_DEREF(self, inst):
if inst.argval in self.closure_cells:
cell = self.closure_cells[inst.argval]
if isinstance(cell, ClosureVariable):
self.push(self.output.root_tx.symbolic_locals[cell.name])
else:
self.push(self.output.side_effects.load_cell(cell))
else:
maybe_sym_local = self.symbolic_locals.get(inst.argval, None)
if isinstance(maybe_sym_local, torchdynamo.variables.NewCellVariable):
self.push(self.output.side_effects.load_cell(maybe_sym_local))
else:
super().LOAD_DEREF(inst)
def LOAD_CLOSURE(self, inst):
assert inst.argval in self.cell_and_freevars()
self.push(self.closure_cells[inst.argval])
def replace_all(self, oldvar: VariableTracker, newvar: VariableTracker):
newvar = super().replace_all(oldvar, newvar)
# recursively check and update parent's locals and stack in case oldvar is from parent
translator = self
while hasattr(translator, "parent"):
translator = translator.parent
translator.update_locals_and_stack(oldvar, newvar)
return newvar
def should_compile_partial_graph(self):
return False # inlining functions is all-or-nothing
def create_call_resume_at(self, offset):
unimplemented("cant resume while inlining")
def RETURN_VALUE(self, inst):
self.symbolic_result = self.pop()
self.instruction_pointer = None
class InliningGeneratorInstructionTranslator(InliningInstructionTranslator):
def __init__(self, *args, **kwargs):
super(InliningGeneratorInstructionTranslator, self).__init__(*args, **kwargs)
self.generated_items = []
def YIELD_VALUE(self, inst: Instruction):
self.generated_items.append(self.pop())
# TODO(jansel): figure out why this is needed, it isn't in the docs for YIELD_VALUE
self.push(ConstantVariable(None))
|
11475217
|
from ajax_datatable.views import AjaxDatatableView
from django.contrib.auth.models import Permission
class PermissionAjaxDatatableView(AjaxDatatableView):
model = Permission
title = 'Permissions'
initial_order = [["app_label", "asc"], ]
length_menu = [[10, 20, 50, 100, -1], [10, 20, 50, 100, 'all']]
search_values_separator = '+'
column_defs = [
AjaxDatatableView.render_row_tools_column_def(),
{'name': 'id', 'visible': False, },
{'name': 'codename', 'visible': True, },
{'name': 'name', 'visible': True, },
{'name': 'app_label', 'foreign_field': 'content_type__app_label', 'visible': True, },
{'name': 'model', 'foreign_field': 'content_type__model', 'visible': True, },
]
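# Hypothetical wiring sketch (URL path and name assumed): in urls.py,
#   path("datatable/permissions/", PermissionAjaxDatatableView.as_view(), name="permission_datatable"),
# then point the DataTable's AJAX source at that URL.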
|
11475244
|
import heapq
import itertools
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
def __eq__(self, other):
return self.val == other.val and self.next == other.next
def __repr__(self):
return '<Node {} {}>'.format(self.val, self.next)
class LinkedList(ListNode):
def __init__(self, arr):
nodes = [ListNode(v) for v in arr]
for i in range(1, len(nodes)):
nodes[i-1].next = nodes[i]
head = nodes[0]
self.val = head.val
self.next = head.next
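# NOTE: LinkedList assumes a non-empty input list; LinkedList([]) would raise
# an IndexError on nodes[0].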
class LinkedListHeap:
    def __init__(self, lists):
        self.heap = []
        # Monotonic counter used as a tie-breaker so that equal values never
        # fall through to comparing ListNode objects (which define no __lt__).
        self._counter = itertools.count()
        for list_head in lists:
            heapq.heappush(self.heap, (list_head.val, next(self._counter), list_head))
    def pop_min(self):
        """Get and remove node with min value"""
        try:
            _, _, min_node = heapq.heappop(self.heap)
        except IndexError:
            return None
        next_node = min_node.next
        if next_node:
            heapq.heappush(self.heap, (next_node.val, next(self._counter), next_node))
        return min_node
def merge_k_lists(lists):
heap = LinkedListHeap(lists)
head = ListNode('dummy')
tail = head
min_node = heap.pop_min()
while min_node:
min_node.next = None
tail.next = min_node
tail = tail.next
min_node = heap.pop_min()
return head.next
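# Complexity sketch: the heap never holds more than k entries (one per input
# list), so merging n total nodes costs O(n log k) time and O(k) extra space.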
def test_merge_k_sorted_lists():
a = LinkedList([1, 10, 20])
b = LinkedList([4, 11, 13])
c = LinkedList([3, 8, 9])
lists = [a, b, c]
merged_list = merge_k_lists(lists)
assert merged_list == LinkedList([1, 3, 4, 8, 9, 10, 11, 13, 20])
|
11475252
|
import inspect
import warnings
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Type, Union, get_type_hints
from pydantic import BaseModel
from pydantic.fields import Undefined
from typing_extensions import TypedDict
from pait.field import BaseField, Depends
from pait.model.core import PaitCoreModel
from pait.util import FuncSig, create_pydantic_model, get_func_sig, get_parameter_list_from_class
class _IgnoreField(BaseField):
pass
class FieldSchemaRawTypeDict(TypedDict):
param_name: str
schema: dict
parent_schema: dict
annotation: Type
field: BaseField
class FieldSchemaTypeDict(TypedDict):
param_name: str
description: str
default: Any
type: str
other: dict
raw: FieldSchemaRawTypeDict
FieldDictType = Dict[Type[BaseField], List[FieldSchemaTypeDict]]
class PaitBaseParse(object):
def __init__(self, pait_dict: Dict[str, PaitCoreModel], undefined: Any = Undefined):
self._undefined: Any = undefined
self._group_list: List[str] = []
self._group_pait_dict: Dict[str, List[PaitCoreModel]] = {}
self._init(pait_dict)
self.content: str = ""
self._content_type: str = ""
def _init(self, pait_dict: Dict[str, PaitCoreModel]) -> None:
"""read from `pait_id_dict` and write PaitMd attributes"""
for pait_id, pait_model in pait_dict.items():
if not pait_model.operation_id:
continue
group: str = pait_model.group
if group not in self._group_pait_dict:
self._group_pait_dict[group] = [pait_model]
else:
self._group_pait_dict[group].append(pait_model)
self._group_list = sorted(self._group_pait_dict.keys())
def _parse_schema(
self, schema_dict: dict, definition_dict: Optional[dict] = None, parent_key: str = ""
) -> List[FieldSchemaTypeDict]:
"""gen pait field dict from pydantic basemodel schema"""
field_dict_list: List[FieldSchemaTypeDict] = []
# model property openapi dict
# e.g. : {'code': {'title': 'Code', 'description': 'api code', 'default': 1, 'type': 'integer'}}
property_dict: dict = schema_dict["properties"]
# class schema in the parent schema
if not definition_dict:
definition_dict = schema_dict.get("definitions", {})
for param_name, param_dict in property_dict.items():
if parent_key:
all_param_name: str = f"{parent_key}.{param_name}"
else:
all_param_name = param_name
if "$ref" in param_dict and definition_dict:
# ref support
key: str = param_dict["$ref"].split("/")[-1]
if isinstance(definition_dict, dict):
field_dict_list.extend(self._parse_schema(definition_dict[key], definition_dict, all_param_name))
elif "items" in param_dict and "$ref" in param_dict["items"]:
                # array item ref support
key = param_dict["items"]["$ref"].split("/")[-1]
if isinstance(definition_dict, dict):
field_dict_list.extend(self._parse_schema(definition_dict[key], definition_dict, all_param_name))
elif "allOf" in param_dict:
for item in param_dict["allOf"]:
key = item["$ref"].split("/")[-1]
if not isinstance(definition_dict, dict):
continue
if "enum" in definition_dict[key]:
if len(param_dict["allOf"]) > 1:
raise RuntimeError("Not support")
default: Any = definition_dict[key].get("enum", self._undefined)
if default is not self._undefined:
default = f'Only choose from: {",".join(["`" + i + "`" for i in default])}'
_type: str = "enum"
else:
if "enum" in param_dict:
# enum support
default = param_dict.get("enum", self._undefined)
if default is not self._undefined:
default = f'Only choose from: {",".join(["`" + i + "`" for i in default])}'
_type = "enum"
else:
default = param_dict.get("default", self._undefined)
_type = param_dict.get("type", "object")
field_dict_list.append(
{
"param_name": all_param_name,
"description": param_dict.get("description", ""),
"default": default,
"type": _type,
"other": {
key: value
for key, value in param_dict.items()
if key not in {"description", "title", "type", "default"}
},
"raw": {
"param_name": param_name,
"schema": param_dict,
"parent_schema": schema_dict,
                        # the annotation and field cannot be recovered from the schema; use placeholders
"annotation": str,
"field": _IgnoreField.i(),
},
}
)
return field_dict_list
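    # Traversal sketch (hypothetical schema): a property
    # {"$ref": "#/definitions/SubModel"} recurses into definition_dict["SubModel"]
    # with parent_key extended, so a nested field is reported as "parent.child".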
def _parse_base_model_to_field_dict(
self,
field_dict: FieldDictType,
_pydantic_model: Type[BaseModel],
param_field_dict: Dict[str, BaseField],
) -> None:
"""
write field_dict from _pydantic_model or param_field_dict
:param field_dict:
e.g.
{
"Body": [
{
"param_name": "",
"description": "",
"default": "",
"type": _"",
"other": {"": ""},
"raw": {
"param_name": "",
"schema": {"": ""},
"parent_schema": {"": ""},
},
}
]
}
:param _pydantic_model: pydantic.basemodel
:param param_field_dict:
e.g.
{
'uid': Query(default=Ellipsis, description='user id', gt=10, lt=1000, extra={}),
'user_name': Query(default=Ellipsis, description='user name', min_length=2, max_length=4, extra={}),
'user_agent': Header(
default=Ellipsis, alias='user-agent', alias_priority=2, description='user agent', extra={}
),
'age': Body(default=Ellipsis, description='age', gt=1, lt=100, extra={})
}
:return:
"""
# TODO design like _parse_schema
param_name_alias_dict: Dict[str, str] = {
value.alias: key for key, value in param_field_dict.items() if isinstance(value, BaseField) and value.alias
}
property_dict: Dict[str, Any] = _pydantic_model.schema()["properties"]
for param_name, param_dict in property_dict.items():
param_python_name: str = param_name_alias_dict.get(param_name, param_name)
pait_field: BaseField = param_field_dict[param_python_name]
pait_field_class: Type[BaseField] = pait_field.__class__
if "$ref" in param_dict:
# ref support
key: str = param_dict["$ref"].split("/")[-1]
param_dict = _pydantic_model.schema()["definitions"][key]
elif "items" in param_dict and "$ref" in param_dict["items"]:
                # array item ref support
key = param_dict["items"]["$ref"].split("/")[-1]
param_dict = _pydantic_model.schema()["definitions"][key]
elif "allOf" in param_dict:
if len(param_dict["allOf"]) > 1:
                    warnings.warn(f"{param_name} only supports 1 item")
param_dict.update(param_dict["allOf"][0])
key = param_dict["$ref"].split("/")[-1]
param_dict = _pydantic_model.schema()["definitions"][key]
if "enum" in param_dict:
# enum support
default: Any = param_dict.get("enum", self._undefined)
if default is not self._undefined:
                    default = f'Only choose from: {",".join(["`" + str(i) + "`" for i in default])}'
_type: str = "enum"
description: str = param_field_dict[param_python_name].description
else:
default = param_dict.get("default", self._undefined)
_type = param_dict.get("type", self._undefined)
                description = param_dict.get("description", "")
            # NOTE: pydantic.Field(default=None) does not show up in the schema's "default", so fall back to the field's own default
if default is self._undefined and param_name not in _pydantic_model.schema().get("required", []):
default = param_field_dict[param_python_name].default
_field_dict: FieldSchemaTypeDict = {
"param_name": param_name,
"description": description,
"default": default,
"type": _type,
"other": {
key: value
for key, value in param_dict.items()
if key not in {"description", "title", "type", "default"}
},
"raw": {
"param_name": param_name,
"schema": param_dict,
"parent_schema": _pydantic_model.schema(),
"annotation": _pydantic_model.__annotations__[param_python_name],
"field": pait_field,
},
}
if pait_field_class not in field_dict:
field_dict[pait_field_class] = [_field_dict]
else:
field_dict[pait_field_class].append(_field_dict)
def parameter_list_handle(
self,
parameter_list: List["inspect.Parameter"],
field_dict: FieldDictType,
single_field_list: List[Tuple[str, "inspect.Parameter"]],
pait_model: PaitCoreModel,
) -> None:
"""parse parameter_list to field_dict and single_field_list"""
for parameter in parameter_list:
if parameter.default != parameter.empty:
annotation: type = parameter.annotation
pait_field: Union[BaseField, Depends] = parameter.default
if (
inspect.isclass(annotation)
and issubclass(annotation, BaseModel)
and not isinstance(pait_field, Depends)
):
# support def test(pait_model_route: BaseModel = Body())
# Adapt each property of pydantic.BaseModel to pait.field
# Convert Field classes of pydantic.
# Model properties to Field classes of genuine request types, such as: Body, Query, Header, etc.
                    param_field_dict: Dict[str, BaseField] = {
                        _param_name: pait_field.from_pydantic_field(
                            annotation.__fields__[_param_name].field_info  # type: ignore
                        )
                        for _param_name, _ in get_type_hints(annotation).items()
                    }
                    self._parse_base_model_to_field_dict(field_dict, annotation, param_field_dict)
else:
# def test(pait_model_route: int = Body())
if isinstance(pait_field, Depends):
field_dict.update(self._parse_func_param_to_field_dict(pait_field.func, pait_model))
else:
field_name: str = pait_field.__class__.__name__.lower()
single_field_list.append((field_name, parameter))
# parse link
# TODO mv to gen tree
if not isinstance(pait_field, Depends) and pait_field.link:
pait_field.link.register(pait_model, parameter.name, pait_field)
elif issubclass(parameter.annotation, BaseModel):
# def test(pait_model_route: PaitBaseModel)
_pait_model: Type[BaseModel] = parameter.annotation
                param_field_dict = {
                    key: model_field.field_info
                    for key, model_field in _pait_model.__fields__.items()
                    if isinstance(model_field.field_info, BaseField)
                }
                self._parse_base_model_to_field_dict(field_dict, _pait_model, param_field_dict)
def _parse_func_param_to_field_dict(self, func: Callable, pait_model: PaitCoreModel) -> FieldDictType:
"""gen filed dict from func
{
"Body": [
{
'field': {
'param_name': str,
'description': str,
'default': str,
'type': type,
'other': dict,
'raw': {
'param_name': str,
'schema': dict,
'parent_schema': pydantic base model.schema(),
'annotation': annotation,
'field': basefield,
}
}
}
]
}
"""
field_dict: FieldDictType = {}
func_sig: FuncSig = get_func_sig(func)
single_field_list: List[Tuple[str, "inspect.Parameter"]] = []
qualname = func.__qualname__.split(".<locals>", 1)[0].rsplit(".", 1)[0]
class_ = getattr(inspect.getmodule(func), qualname)
if inspect.isclass(class_):
parameter_list: List["inspect.Parameter"] = get_parameter_list_from_class(class_)
self.parameter_list_handle(parameter_list, field_dict, single_field_list, pait_model)
self.parameter_list_handle(func_sig.param_list, field_dict, single_field_list, pait_model)
if single_field_list:
annotation_dict: Dict[str, Tuple[Type, Any]] = {}
_pait_field_dict: Dict[str, BaseField] = {}
_column_name_set: Set[str] = set()
for field_name, parameter in single_field_list:
field: BaseField = parameter.default
key: str = field.alias or parameter.name
if key in _column_name_set:
# Since the same name cannot exist together in a Dict,
# it will be parsed directly when a Key exists
# fix
# class Demo(BaseModel):
# header_token: str = Header(alias="token")
# query_token: str = Query(alias="token")
_pydantic_model: Type[BaseModel] = create_pydantic_model(
{parameter.name: (parameter.annotation, field)}
)
self._parse_base_model_to_field_dict(field_dict, _pydantic_model, {parameter.name: field})
else:
_column_name_set.add(key)
annotation_dict[parameter.name] = (parameter.annotation, field)
_pait_field_dict[parameter.name] = field
_pydantic_model = create_pydantic_model(annotation_dict)
self._parse_base_model_to_field_dict(field_dict, _pydantic_model, _pait_field_dict)
return field_dict
def _parse_pait_model_to_field_dict(self, pait_model: PaitCoreModel) -> FieldDictType:
"""Extracting request and response information through routing functions"""
all_field_dict: FieldDictType = self._parse_func_param_to_field_dict(pait_model.func, pait_model)
for pre_depend in pait_model.pre_depend_list:
for field_class, field_dict_list in self._parse_func_param_to_field_dict(pre_depend, pait_model).items():
if field_class not in all_field_dict:
all_field_dict[field_class] = field_dict_list
else:
all_field_dict[field_class].extend(field_dict_list)
return all_field_dict
def output(self, filename: Optional[str], suffix: str = "") -> None:
if not suffix:
suffix = self._content_type
if not filename:
print(self.content)
else:
if not filename.endswith(suffix):
filename += suffix
with open(filename, mode="w") as f:
f.write(self.content)
|
11475270
|
import cv2
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import openpyxl
#Get pixel/distance (using ImageJ software) to output actual diameters of circles
dp = 1
accum_ratio = 1
min_dist = 5
p1 = 40
p2 = 30
minDiam = 1
maxDiam = 30
scalebar = 10
min_range = 0
max_range = 100
intervals = 10
rad_list =[]
detected_circles = []
dataForTable = {}
def clear_plt():
plt.clf()
def autoDetect(resized_img, accum_ratio, min_dist, p1, p2, minDiam, maxDiam, pixel_distance):
global result, img, table_data, rad_list, detected_circles
# Convert to grayscale.
img = resized_img
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Blur using 3 * 3 kernel.
gray_blurred = cv2.blur(gray, (3, 3))
minDist = int(min_dist*pixel_distance)
minRadius = int(minDiam*pixel_distance/2)
maxRadius = int(maxDiam*pixel_distance/2)
if minDist < 1:
minDist = 1
    if minRadius < 1:
        minRadius = 1
    if maxRadius < 1:
        maxRadius = 1
# Apply Hough transform on the blurred image.
detected_circles = cv2.HoughCircles(gray_blurred,
cv2.HOUGH_GRADIENT, dp = int(accum_ratio), minDist = minDist,
param1 = int(p1), param2 = int(p2), minRadius = minRadius, maxRadius = maxRadius)
def autoDetectBin(resized_img, threshold,accum_ratio, min_dist, p1, p2, minDiam, maxDiam, pixel_distance):
global result, img, table_data, rad_list, detected_circles
img = resized_img
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
thres,binImg = cv2.threshold(gray, threshold, 255, cv2.THRESH_BINARY)
# Blur using 3 * 3 kernel.
blurred = cv2.blur(binImg, (3, 3))
minDist = int(min_dist*pixel_distance)
minRadius = int(minDiam*pixel_distance/2)
maxRadius = int(maxDiam*pixel_distance/2)
if minDist < 1:
minDist = 1
    if minRadius < 1:
        minRadius = 1
    if maxRadius < 1:
        maxRadius = 1
# Apply Hough transform on the blurred image.
detected_circles = cv2.HoughCircles(blurred,
cv2.HOUGH_GRADIENT, dp = int(accum_ratio), minDist = minDist,
param1 = int(p1), param2 = int(p2), minRadius = minRadius, maxRadius = maxRadius)
def processCircles(state, resized_img, filename, pixel_distance, manual_list):
global detected_circles, rad_list, img, result, bottom_10percentile, top_90percentile, new_name
# Draw circles that are detected.
img = resized_img
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
rad_list=[]
    if not state:
detected_circles = None
result = '\n\n'
try:
if (detected_circles is None) and (len(manual_list) == 0):
return '\nNo circles found!\n'
elif len(manual_list) > 0 and (detected_circles is None):
manual_list.sort()
bottom_10percentile = int(len(manual_list)*0.1)
top_90percentile = int(len(manual_list)*0.9)
result += '# of circles found: ' + str(len(manual_list))
rad_list = manual_list
else:
# Convert the circle parameters a, b and r to integers.
detected_circles = np.uint16(np.around(detected_circles))
for pt in detected_circles[0, :]:
a, b, r = pt[0], pt[1], pt[2]
# Draw the circumference of the circle.
cv2.circle(img, (a, b), r, (0, 255, 0), 2)
# Draw a small circle (of radius 1) to show the center.
cv2.circle(img, (a, b), 1, (0, 0, 255), 2)
new_name = filename[:-4] + '_detected' + filename[-4:]
cv2.imwrite(new_name, img)
#Loop to convert radius (pixel) values to diameter
for x in range(detected_circles.shape[1]):
diam = detected_circles[0,x,2]*2/pixel_distance
rad_list.append(round(diam,1))
rad_list.sort()
bottom_10percentile = int(len(rad_list)*0.1)
top_90percentile = int(len(rad_list)*0.9)
result += '# of circles found: ' + str(detected_circles.shape[1])
result +='\nAvg diam. = ' + "%.1f"%np.average(rad_list) + 'um'
result +='\nD10 = '+ str(rad_list[bottom_10percentile])+'um'+'\nD50 = ' + "%.1f"%np.median(rad_list) + "um"
result +='\nD90 = '+ str(rad_list[top_90percentile])+'um'
except IndexError:
pass
return result
def tableData():
    global rad_list, row_list, dataForTable, col_list, bottom_10percentile, top_90percentile, detected_circles
col_list = []
row_list = []
    if len(rad_list) > 0:
        # Build one row per measured diameter: rec1..recN -> {'Diam_um': value}
        for idx, diam in enumerate(rad_list):
            col_list.append({'Diam_um': diam})
            row_list.append('rec' + str(idx + 1))
        dataForTable = dict(zip(row_list, col_list))
    try:
        if dataForTable:
            # Keep the first five measured diameters, then overwrite rows
            # rec1..rec5 with extra columns holding the summary statistics.
            temps = [' '] * 5
            for idx in range(min(len(dataForTable), 5)):
                temps[idx] = dataForTable['rec' + str(idx + 1)]['Diam_um']
            temp_1, temp_2, temp_3, temp_4, temp_5 = temps
            dataForTable.update({
                'rec1': {'Diam_um': str(temp_1), 'Col2': '# of Circles', 'Col3': str(len(rad_list))},
                'rec2': {'Diam_um': str(temp_2), 'Col2': 'Avg Diam (um)', 'Col3': "%.1f" % np.average(rad_list)},
                'rec3': {'Diam_um': str(temp_3), 'Col2': 'D10 (um)', 'Col3': str(rad_list[bottom_10percentile])},
                'rec4': {'Diam_um': str(temp_4), 'Col2': 'D50 (um)', 'Col3': "%.1f" % np.median(rad_list)},
                'rec5': {'Diam_um': str(temp_5), 'Col2': 'D90 (um)', 'Col3': str(rad_list[top_90percentile])},
            })
    except KeyError:
        pass
    return dataForTable
def histoPlot(filename, min_range, max_range, intervals):
global rad_list
#Plot histogram
plt.xlabel('Diameter (um)')
plt.ylabel('Frequency')
plt.title('Particle Size Distribution')
(n, bins, patch) = plt.hist([rad_list], bins=np.arange(min_range,max_range+1,intervals), rwidth=0.9)
plt.xticks(np.arange(min_range,max_range,intervals))
# plt.gca().grid(which='major', axis='y')
plt.savefig((filename[:-4] + '_histogram.png'), dpi = 500)
plt.clf()
# pd.DataFrame(rad_list).to_excel('emulsions_D50_list_1.xlsx',header=False, index=False)
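# Illustrative usage sketch (the file name and the pixels-per-um calibration
# value below are assumptions, not part of this script):
# image = cv2.imread("droplets.png")
# autoDetect(image, accum_ratio, min_dist, p1, p2, minDiam, maxDiam, pixel_distance=2.0)
# print(processCircles(True, image, "droplets.png", 2.0, manual_list=[]))
# tableData()
# histoPlot("droplets.png", min_range, max_range, intervals)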
|
11475281
|
import base64
from datetime import datetime
from django.db import IntegrityError, transaction
from django.db.models import Q
from nft_market.api.models import Operation, Asset
from nft_market.services import algorand
from nft_market.utils.constants import (
ASK_PRICE,
BID_PRICE,
SET_PRICE,
BID,
BUY_NOW,
OWNER,
SELL_NOW,
)
from nft_market.utils.transactions import decode_state
class InvalidOperation(Exception):
pass
class NotFoundError(Exception):
pass
def is_valid_bid(operation):
is_valid = True
has_future_bids = Operation.objects.filter(
asset=operation.asset,
sender=operation.sender,
op_type__in=[Operation.OperationType.BID, Operation.OperationType.BUY_NOW],
block_number__gt=operation.block_number,
is_executed=True,
).exists()
if has_future_bids:
is_valid = False
has_sold = Operation.objects.filter(
asset=operation.asset,
account=operation.sender,
op_type=Operation.OperationType.SELL_NOW,
block_number__gt=operation.block_number,
is_executed=True,
).exists()
if has_sold:
is_valid = False
return is_valid
def handle_operation(operation, tx):
if operation.op_type == Operation.OperationType.ASK:
validate_and_save_ask_op(tx, operation)
elif operation.op_type == Operation.OperationType.BID:
validate_and_save_bid_op(tx, operation)
elif operation.op_type == Operation.OperationType.BUY_NOW:
validate_and_save_buy_now_op(tx, operation)
elif operation.op_type == Operation.OperationType.SELL_NOW:
validate_and_save_sell_now_op(tx, operation)
def validate_and_save_ask_op(tx, operation):
validate_ask_op(tx)
global_state_delta = decode_state(tx["global-state-delta"])
ask_price = global_state_delta[ASK_PRICE]
save_ask_op(ask_price, operation, tx)
def validate_and_save_bid_op(tx, operation):
validate_bid_op(tx)
local_state_delta = tx["local-state-delta"]
local_state_delta = next(
filter(
lambda x: x["address"] == tx["sender"],
local_state_delta,
)
)
local_state_delta = decode_state(local_state_delta["delta"])
bid_price = local_state_delta[BID_PRICE]
save_bid_op(bid_price, operation, tx)
def validate_and_save_buy_now_op(tx, operation):
validate_buy_now_op(tx)
save_buy_now_op(operation, tx)
def validate_and_save_sell_now_op(tx, operation):
validate_sell_now_op(tx)
save_sell_now_op(operation, tx)
def validate_ask_op(tx):
if "application-transaction" not in tx:
raise InvalidOperation()
app_tx = tx["application-transaction"]
app_args = app_tx["application-args"]
correct_arg = base64.b64encode(SET_PRICE.encode("utf-8")).decode("utf-8")
if not app_args or app_args[0] != correct_arg:
raise InvalidOperation()
@transaction.atomic()
def save_ask_op(ask_price, operation, tx):
operation.op_type = Operation.OperationType.ASK
operation.value = ask_price
operation = update_op_tx_data(operation, tx)
is_valid = not Operation.objects.filter(
asset=operation.asset,
op_type__in=[
Operation.OperationType.BUY_NOW,
Operation.OperationType.ASK,
Operation.OperationType.SELL_NOW,
],
block_number__gt=tx["confirmed-round"],
is_executed=True,
).exists()
operation.is_valid = is_valid
save_operation(operation)
invalidate_prev_ops(
asset=operation.asset,
confirmed_round=tx["confirmed-round"],
op_type=Operation.OperationType.ASK,
sender=tx["sender"],
)
def validate_bid_op(tx):
if "application-transaction" not in tx:
raise InvalidOperation()
app_tx = tx["application-transaction"]
app_args = app_tx["application-args"]
correct_arg = base64.b64encode(BID.encode("utf-8")).decode("utf-8")
if not app_args or app_args[0] != correct_arg:
raise InvalidOperation()
@transaction.atomic()
def save_bid_op(bid_price, operation, tx):
operation.op_type = Operation.OperationType.BID
operation.value = bid_price
update_op_tx_data(operation, tx)
# It's more important to invalidate bids than other operations
operation.is_valid = is_valid_bid(operation)
save_operation(operation)
invalidate_prev_ops(
asset=operation.asset,
confirmed_round=tx["confirmed-round"],
op_type=Operation.OperationType.BID,
sender=tx["sender"],
)
def validate_buy_now_op(tx):
if "application-transaction" not in tx:
raise InvalidOperation()
app_tx = tx["application-transaction"]
app_args = app_tx["application-args"]
correct_arg = base64.b64encode(BUY_NOW.encode("utf-8")).decode("utf-8")
if not app_args or app_args[0] != correct_arg:
raise InvalidOperation()
global_state_delta = decode_state(tx["global-state-delta"])
ask_price = global_state_delta[ASK_PRICE]
owner = global_state_delta[OWNER]
if ask_price != 0:
raise InvalidOperation()
if owner != b"\x00" * 32:
raise InvalidOperation()
@transaction.atomic()
def save_buy_now_op(operation, tx):
previous_ask_op = Operation.objects.filter(
Q(block_number__lt=tx["confirmed-round"]) | Q(block_number=None),
op_type=Operation.OperationType.ASK,
is_valid=True,
is_executed=True,
asset=operation.asset,
).last()
operation.op_type = Operation.OperationType.BUY_NOW
operation.value = previous_ask_op.value if previous_ask_op else 0
operation = update_op_tx_data(operation, tx)
save_operation(operation)
invalidate_prev_ops(
asset=operation.asset,
confirmed_round=tx["confirmed-round"],
op_type=Operation.OperationType.BID,
sender=tx["sender"],
)
invalidate_prev_ops(
asset=operation.asset,
op_type=Operation.OperationType.ASK,
confirmed_round=tx["confirmed-round"],
)
def validate_sell_now_op(tx):
if "application-transaction" not in tx:
raise InvalidOperation()
app_tx = tx["application-transaction"]
app_args = app_tx["application-args"]
correct_arg = base64.b64encode(SELL_NOW.encode("utf-8")).decode("utf-8")
if not app_args or app_args[0] != correct_arg:
raise InvalidOperation()
@transaction.atomic()
def save_sell_now_op(operation, tx):
app_tx = tx["application-transaction"]
buyers_bid_op = Operation.objects.filter(
Q(block_number__lt=tx["confirmed-round"]) | Q(block_number=None),
op_type=Operation.OperationType.BID,
sender=app_tx["accounts"][0],
is_valid=True,
is_executed=True,
asset=operation.asset,
).last()
operation.op_type = Operation.OperationType.SELL_NOW
operation.value = buyers_bid_op.value if buyers_bid_op else 0
operation.account = app_tx["accounts"][0]
operation = update_op_tx_data(operation, tx)
save_operation(operation)
invalidate_prev_ops(
asset=operation.asset,
sender=tx["sender"],
confirmed_round=tx["confirmed-round"],
op_type=Operation.OperationType.ASK,
)
invalidate_prev_ops(
asset=operation.asset,
sender=app_tx["accounts"][0],
confirmed_round=tx["confirmed-round"],
op_type=Operation.OperationType.BID,
)
def invalidate_prev_ops(asset, confirmed_round, op_type, sender=None):
    ops = Operation.objects.filter(
        Q(block_number__lt=confirmed_round) | Q(block_number=None),
        is_valid=True,
        is_executed=True,
        asset=asset,
        op_type=op_type,
    )
    if sender:
        ops = ops.filter(sender=sender)
    ops.update(is_valid=False)
def save_operation(operation):
try:
operation.save()
except IntegrityError as exc:
if "unique constraint" in exc.message:
handle_duplicate(operation)
else:
raise exc
def update_op_tx_data(operation, tx):
operation.tx_id = tx["id"]
operation.sender = tx["sender"]
operation.is_pending = False
operation.is_valid = True
operation.is_executed = True
operation.block_number = tx["confirmed-round"]
operation.block_time = datetime.fromtimestamp(tx["round-time"])
return operation
def handle_duplicate(operation):
other_operation = Operation.objects.filter(tx_id=operation.tx_id).first()
if not other_operation:
operation.save()
return
if not other_operation.is_pending and not other_operation.is_valid:
other_operation.delete()
operation.save()
elif hasattr("operation", "indexer") and operation.indexer:
operation.delete()
else:
other_operation.delete()
operation.save()
def invalidate_operation(operation):
operation.blob = None
operation.is_valid = False
operation.is_pending = False
operation.is_executed = False
operation.save()
def validate_and_save_op(operation_pk, tx_id):
operation = Operation.objects.get(pk=operation_pk, is_pending=True)
found_txs = algorand.indexer.search_transactions(
txid=tx_id,
limit=1,
)
length = len(found_txs["transactions"])
if length < 1:
raise NotFoundError()
tx = found_txs["transactions"][0]
if "application-transaction" not in tx:
raise InvalidOperation()
app_tx = tx["application-transaction"]
asset = Asset.objects.filter(application_id=app_tx["application-id"]).first()
operation.asset = asset
operation.save()
if not asset:
raise NotFoundError()
handle_operation(operation, tx)
|
11475287
|
import sys
if (sys.version_info[0] == 2 and sys.version_info[:2] >= (2,7)) or \
(sys.version_info[0] == 3 and sys.version_info[:2] >= (3,2)):
import unittest
else:
import unittest2 as unittest
import os
import re
class TestRecipeImports (unittest.TestCase):
def test_imports(self):
import py2app.recipes as m
dirname = os.path.dirname(m.__file__)
all_imported = set()
for fn in os.listdir(dirname):
if fn.startswith('__'): continue
if fn.endswith('.py'):
with open(os.path.join(dirname, fn)) as fp:
for ln in fp:
                    mo = re.search(r'^\s*import (.*)', ln)
                    if mo is not None:
                        for nm in mo.group(1).split(','):
                            all_imported.add(nm.strip())
for fn in os.listdir(dirname):
if fn.startswith('__'): continue
mod = os.path.splitext(fn)[0]
if mod not in all_imported: continue
            try:
                __import__(mod)
            except ImportError:
                pass
            else:
                self.fail("Can import %r" % (mod,))
if __name__ == "__main__":
unittest.main()
|
11475297
|
import itertools
from base_data_curve import start_jobs
path_model = {}
# Base models
path_model["on"] = "/srv/local1/paxia/exp_logs/public_icoref/ontonotes/checkpoint.bin"
path_model["preco"] = "/srv/local1/paxia/exp_logs/public_icoref/preco/checkpoint.bin"
path_model["en"] = "/srv/local1/paxia/exp_logs/public_icoref/ontonotes_en/checkpoint.bin"
# ENCODERS
# base_encoders = ["onb"]
# qb_encoders = ["fb", "on", "da"]
# lb_encoders = ["fb", "on", "da"]
qb_encoders = ["da"]
lb_encoders = ["da"]
zh_encoders = ["xlmr", "da"]
# LAYERS
large_layers = [0, 6, 12]
base_layers = [0, 3, 6]
# DATA
qb_samples = [15, 60, 240]
lb_samples = [10, 40, 80]
zh_samples = [50, 500, 1810]
trials = [0]
GPUs = [2, 3, 4, 5, 6, 7]
def add_job(name, encoder, lang, job_list):
if encoder == "da" and lang == "en":
job_list.append(
{"name": name,
"log_location": log_location + "preco",
"load_path": path_model["preco"],
"test_set": "true",
})
# job_list.append(
# {"name": name,
# "log_location": log_location + "on",
# "load_path": path_model["on"],
# "test_set": "true",
# })
elif encoder == "da" and lang == "zh":
job_list.append(
{"name": name,
"log_location": log_location + "onen",
"load_path": path_model["en"],
"test_set": "true",
})
else:
job_list.append(
{"name": name,
"log_location": log_location + "_pretrained",
"load_path": path_model["preco"],
"test_set": "true",
})
log_location = "layer_exps_da_"
job_list = []
# QB Jobs
large_job_names = [(f"qb_curve_{encoder}_0_{layers}_{num_samples}_{trial}", encoder)
for (encoder, layers, num_samples, trial) in
itertools.product(["da"], large_layers, qb_samples, trials)]
# small_job_names = [(f"qb_curve_{encoder}_0_{layers}_{num_samples}_{trial}", encoder)
# for (encoder, layers, num_samples, trial) in
# itertools.product(base_encoders, base_layers, qb_samples, trials)]
# for job, encoder in large_job_names: #+ small_job_names:
# add_job(job, encoder, "en", job_list)
# litbank Jobs
large_job_names = [(f"litbank_curve_{encoder}_0_{layers}_{num_samples}_{trial}", encoder)
for (encoder, layers, num_samples, trial) in
itertools.product(lb_encoders, large_layers, lb_samples, trials)]
# small_job_names = [(f"litbank_curve_{encoder}_0_{layers}_{num_samples}_{trial}", encoder)
# for (encoder, layers, num_samples, trial) in
# itertools.product(base_encoders, base_layers, lb_samples, trials)]
# for job, encoder in large_job_names: # + small_job_names:
# add_job(job, encoder, "en", job_list)
# zh
tmp_layers = [6]
large_job_names = [(f"onml_curve_zh_{encoder}_{layers}_{num_samples}_{trial}", encoder)
for (encoder, layers, num_samples, trial) in
itertools.product(zh_encoders, tmp_layers, zh_samples, trials)]
for job, encoder in large_job_names:
add_job(job, encoder, "zh", job_list)
if __name__ == "__main__":
config = {
"command": "trainer.py",
"jobs": job_list,
}
start_jobs(config, GPUs)
|
11475341
|
from enum import Enum
from .. import db
from .base import BaseModel
class Disposition(Enum):
NEUTRAL = 'NEUTRAL'
POSITIVE = 'POS'
NEGATIVE = 'NEG'
EDGE = 'EDGE'
DESTRUCTIVE = 'DESTRUCTIVE'
class TestDatum(BaseModel):
__tablename__ = 'test_data'
# fields
test_id = db.Column(db.Integer, db.ForeignKey('tests.id'), nullable=False)
datum_id = db.Column(db.Integer, db.ForeignKey('data.id'), nullable=False)
label = db.Column(db.String(length=80), nullable=False)
disposition = db.Column(db.Enum(Disposition), nullable=False)
# relationships
test = db.relationship('Test', back_populates='data', foreign_keys=[test_id])
datum = db.relationship('Datum', back_populates='tests', foreign_keys=[datum_id])
@staticmethod
def ensure_uniqueness(test_id, label):
old_td = TestDatum.query.filter_by(test_id=test_id, label=label).first()
if old_td is not None:
err = (f'Each datum within a test must have a unique label, and '
f'there is already a datum labeled "{label}" within this test')
raise Exception(err)
@staticmethod
def create(*args, **kwargs):
if kwargs.get('test') is not None:
kwargs['test_id'] = kwargs['test'].id
TestDatum.ensure_uniqueness(kwargs['test_id'], kwargs['label'])
td = TestDatum(**kwargs)
db.session.add(td)
db.session.commit()
|
11475375
|
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models import Q
from django.utils.translation import gettext_lazy
from django.utils import timezone
from . import deliverytypes
from .abstract_is_admin import AbstractIsAdmin
from .abstract_is_candidate import AbstractIsCandidate
from .abstract_is_examiner import AbstractIsExaminer
from .assignment_group import AssignmentGroup
from devilry.devilry_account.models import User
class NewerDeadlineExistsError(Exception):
"""
    Exception raised by :meth:`DeadlineManager.smart_create`.
"""
class DeadlineQuerySet(models.QuerySet):
def smart_create(self, groupqueryset, deadline_datetime, text=None,
why_created=None, added_by=None,
query_created_deadlines=False):
# We do this because we want to make it easy to compare deadlines based on their datetime.
deadline_datetime = Deadline.reduce_datetime_precision(deadline_datetime)
# DB query 1 - Check that no newer deadlines exist
if groupqueryset.filter(last_deadline__deadline__gt=deadline_datetime).exists():
raise NewerDeadlineExistsError()
# DB query 2 - get all the groups
groups = groupqueryset.all()
if len(groups) == 0:
return []
# DB query 3 - create deadlines
def init_deadline(group):
return Deadline(
assignment_group=group,
deadline=deadline_datetime,
text=text,
why_created=why_created,
added_by=added_by)
deadlines_to_create = list(map(init_deadline, groups))
self.bulk_create(deadlines_to_create)
# DB query 4 - Fetch created deadlines
def get_created_deadlines():
return Deadline.objects.filter(
assignment_group__in=groups,
deadline=deadline_datetime,
text=text).select_related('assignment_group')
created_deadlines = get_created_deadlines()
def save_group(deadline):
group = deadline.assignment_group
group.is_open = True
group.delivery_status = "waiting-for-something"
group.last_deadline = deadline
group.save(update_delivery_status=False,
autocreate_first_deadline_for_nonelectronic=False)
assignment = groups[0].assignment # NOTE: We assume all groups are within the same assignment - as documented
time_of_delivery = timezone.now().replace(microsecond=0, tzinfo=None)
if assignment.delivery_types == deliverytypes.NON_ELECTRONIC:
from .delivery import Delivery
# DB query 6 - create deliveries
deliveries_to_create = [
Delivery(deadline=deadline, time_of_delivery=time_of_delivery, number=1, successful=True)
for deadline in created_deadlines]
Delivery.objects.bulk_create(deliveries_to_create)
# DB query 7 - fetch created deliveries
created_deliveries = Delivery.objects.filter(
deadline__assignment_group__in=groups,
time_of_delivery=time_of_delivery).select_related(
'deadline', 'deadline__assignment_group')
# DB query 8...N - Update groups
for delivery in created_deliveries:
save_group(delivery.deadline)
else:
# DB query 5...N - Update groups
for deadline in created_deadlines:
save_group(deadline)
if query_created_deadlines:
return get_created_deadlines()
class DeadlineManager(models.Manager):
def get_queryset(self):
return DeadlineQuerySet(self.model, using=self._db)
def smart_create(self, groupqueryset, deadline_datetime, text=None,
why_created=None, added_by=None,
query_created_deadlines=False):
"""
Creates deadlines for all groups in the given QuerySet of AssignmentGroups.
        Algorithm:
            1. Create deadlines in bulk.
            2. If assignment is ``NON_ELECTRONIC``, create a delivery for each of the created deadlines.
            3. Update all the groups in ``groupqueryset`` with ``is_open=True``,
               ``delivery_status="waiting-for-something"``, ``last_deadline=<newly created deadline>``
               and ``last_delivery=<created delivery>`` (if a delivery was created in step 2).
:param groupqueryset:
A QuerySet of AssignmentGroup objects. MUST match groups within a single assignment.
:param deadline_datetime:
The datetime of the deadline. The function runs this through
:meth:`Deadline.reduce_datetime_precision` before using it.
:param text: The deadline text. Defaults to ``None``.
:param query_created_deadlines:
Perform a query for the created deadlines at the end of the method.
:raise NewerDeadlineExistsError:
When one of the groups in the ``groupqueryset`` has a ``last_deadline``
that is newer than ``deadline_datetime``
First described in https://github.com/devilry/devilry-django/issues/514.
"""
return self.get_queryset().smart_create(
groupqueryset, deadline_datetime, text,
why_created=why_created, added_by=added_by,
query_created_deadlines=query_created_deadlines)
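# Illustrative usage sketch (the assignment variable and the timedelta value
# are assumptions, not from this module):
# Deadline.objects.smart_create(
#     AssignmentGroup.objects.filter(parentnode=assignment),
#     deadline_datetime=timezone.now() + timedelta(days=7),
#     text='Second attempt',
#     query_created_deadlines=True)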
class Deadline(models.Model, AbstractIsAdmin, AbstractIsExaminer, AbstractIsCandidate):
""" A deadline on an `AssignmentGroup`_. A deadline contains zero or more
`deliveries <Delivery>`_, the time of the deadline and an optional text.
.. attribute:: assignment_group
The `AssignmentGroup`_ where the deadline is registered.
.. attribute:: deadline
The deadline a DateTimeField.
.. attribute:: text
        An optional deadline text.
.. attribute:: deliveries
A django ``RelatedManager`` that holds the `deliveries <Delivery>`_ on this group.
        NOTE: You should normally not use this directly, but rather use :meth:`.query_successful_deliveries`.
.. attribute:: deliveries_available_before_deadline
Should deliveries on this deadline be available to examiners before the
deadline expires? This is set by students.
.. attribute:: added_by
The User that added this deadline.
        Can be ``None``, and all deadlines created before Devilry
        version ``1.4.0`` have this set to ``None``.
.. versionadded:: 1.4.0
.. attribute:: why_created
Why was this deadline created? Valid choices:
- ``None``: Why the deadline was created is unknown.
- ``"examiner-gave-another-chance"``: Created because the examiner
elected to give the student another chance to pass the assignment.
        Can be ``None``, and all deadlines created before Devilry
        version ``1.4.0`` have this set to ``None``.
.. versionadded:: 1.4.0
"""
objects = DeadlineManager()
assignment_group = models.ForeignKey(AssignmentGroup,
related_name='deadlines', on_delete=models.CASCADE)
deadline = models.DateTimeField(help_text='The time of the deadline.')
text = models.TextField(blank=True, null=True,
help_text='An optional text to show to students and examiners.')
deliveries_available_before_deadline = models.BooleanField(
default=False,
help_text='Should deliveries on this deadline be available to examiners before the'
'deadline expires? This is set by students.')
added_by = models.ForeignKey(User,
null=True, blank=True, default=None,
on_delete=models.SET_NULL)
why_created = models.CharField(
null=True, blank=True, default=None,
max_length=50,
choices=(
(None, gettext_lazy('Unknown.')),
('examiner-gave-another-chance', gettext_lazy('Examiner gave the student another chance.')),
)
)
class Meta:
app_label = 'core'
verbose_name = 'Deadline'
verbose_name_plural = 'Deadlines'
ordering = ['-deadline']
@classmethod
def reduce_datetime_precision(cls, datetimeobj):
"""
        Reduce the precision of the ``datetimeobj`` to make it easier to
        compare, and harder to create distinct deadlines that are basically
        the same time. We:
- Set seconds and microseconds to ``0``. This makes "Friday 14:59",
"Friday 14:59:00" and "Friday 14:59:59" equal. We do not allow
specifying seconds in the UI, and handling this right in the core
makes this easier to handle across the board.
- Set tzinfo to None. We do not support timezones in Devilry, so including it makes no sense.
:return: A copy of ``datetimeobj`` with second and microsecond set to ``0``, and tzinfo set to ``None``.
"""
return datetimeobj.replace(microsecond=0, second=0, tzinfo=None)
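    # Example (illustrative): datetime(2013, 5, 17, 14, 59, 59) and
    # datetime(2013, 5, 17, 14, 59, 0, 123456) both reduce to the naive
    # datetime(2013, 5, 17, 14, 59), so they compare equal as deadlines.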
@classmethod
def _clean_deadline_seconds(cls, datetimeobj):
"""Set the seconds of the deadline to '59' so deliveries can be done during the whole minute"""
return datetimeobj.replace(second=59, tzinfo=None)
def _clean_deadline(self):
        self.deadline = Deadline.reduce_datetime_precision(
            self.deadline)  # NOTE: Strip seconds so the uniqueness check below compares deadlines at minute precision.
qry = Q(deadline=self.deadline, assignment_group=self.assignment_group)
if self.id:
qry &= ~Q(id=self.id)
deadlines = Deadline.objects.filter(qry)
if deadlines.count() > 0:
raise ValidationError('Can not have more than one deadline with the same date/time on a single group.')
self.deadline = Deadline._clean_deadline_seconds(datetimeobj=self.deadline)
def clean(self):
"""Validate the deadline.
Always call this before save()! Read about validation here:
http://docs.djangoproject.com/en/dev/ref/models/instances/#id1
Raises ValidationError if:
- ``deadline`` is before ``Assignment.publishing_time``.
- ``deadline`` is not before ``Period.end_time``.
"""
if self.deadline is not None:
if self.deadline < self.assignment_group.parentnode.publishing_time:
raise ValidationError('Deadline cannot be before publishing time.')
if self.deadline > self.assignment_group.parentnode.parentnode.end_time:
                raise ValidationError(
                    "Deadline must be within its period (%(period)s)."
                    % dict(period=str(self.assignment_group.parentnode.parentnode)))
self._clean_deadline()
super(Deadline, self).clean()
def save(self, *args, **kwargs):
"""
:param autocreate_delivery_if_nonelectronic:
Autocreate a delivery if this save creates the deadline,
and the assignment is non-electronic. Defaults to ``True``.
"""
autocreate_delivery_if_nonelectronic = kwargs.pop('autocreate_delivery_if_nonelectronic', True)
created = self.id is None
super(Deadline, self).save(*args, **kwargs)
group = self.assignment_group
# Only update the AssignmentGroup if needed.
# See https://github.com/devilry/devilry-django/issues/502
groupsave_needed = False
if created:
if not group.is_open:
groupsave_needed = True
group.is_open = True
if group.delivery_status == 'no-deadlines':
groupsave_needed = True
group.delivery_status = 'waiting-for-something'
if autocreate_delivery_if_nonelectronic and group.assignment.delivery_types == deliverytypes.NON_ELECTRONIC:
from .delivery import Delivery
delivery = Delivery(
deadline=self,
successful=True,
time_of_delivery=timezone.now(),
delivery_type=deliverytypes.NON_ELECTRONIC,
number=1)
delivery.save()
groupsave_needed = True
if group.last_deadline is None or group.last_deadline.deadline < self.deadline:
group.last_deadline = self
groupsave_needed = True
if groupsave_needed:
group.save(update_delivery_status=False)
def __str__(self):
return str(self.deadline)
def __repr__(self):
return 'Deadline(id={id}, deadline={deadline})'.format(**self.__dict__)
# TODO delete this?
# def is_old(self):
# """ Return True if :attr:`deadline` expired. """
# return self.deadline < timezone.now()
@classmethod
def q_published(cls, old=True, active=True):
now = timezone.now()
q = Q(assignment_group__parentnode__publishing_time__lt=now)
if not active:
q &= ~Q(assignment_group__parentnode__parentnode__end_time__gte=now)
if not old:
q &= ~Q(assignment_group__parentnode__parentnode__end_time__lt=now)
return q
@classmethod
def q_is_candidate(cls, user_obj):
return Q(assignment_group__candidates__student=user_obj)
@classmethod
def q_is_examiner(cls, user_obj):
return Q(assignment_group__examiners__user=user_obj)
def query_successful_deliveries(self):
"""
Returns a django QuerySet that filters all the successful `deliveries
<Delivery>`_ on this group.
"""
return self.deliveries.filter(successful=True)
@property
def successful_delivery_count(self):
return self.query_successful_deliveries().count()
def is_empty(self):
"""
Returns ``True`` if this Deadline does not contain any deliveries.
"""
return self.query_successful_deliveries().count() == 0
def can_delete(self, user_obj):
"""
Check if the given user is permitted to delete this object. A user is
permitted to delete an Deadline if the user is superadmin, or if the user
is admin on the assignment. Only superusers
are allowed to delete deadlines with any deliveries.
:return: ``True`` if the user is permitted to delete this object.
"""
if self.id is None:
return False
if user_obj.is_superuser:
return True
if self.is_empty():
return self.assignment_group.parentnode.is_admin(user_obj)
else:
return False
def copy(self, newgroup):
"""
Copy this deadline into ``newgroup``, including all deliveries and
filemetas, with the actual file data.
        .. note:: Always run this in a transaction.
.. warning::
This does not autoset the latest feedback as active on the group.
You need to handle that yourself after the copy.
"""
deadlinecopy = Deadline(assignment_group=newgroup,
deadline=self.deadline,
text=self.text)
deadlinecopy.full_clean()
deadlinecopy.save()
for delivery in self.query_successful_deliveries():
delivery.copy(deadlinecopy)
return deadlinecopy
def is_in_the_future(self):
"""
Return ``True`` if this deadline is in the future.
"""
return self.deadline > timezone.now()
def is_in_the_past(self):
"""
Return ``True`` if this deadline is in the past.
"""
return self.deadline < timezone.now()
def has_text(self):
"""
Checks that the text is not ``None`` or an empty string.
"""
if self.text is None:
return False
else:
return self.text.strip() != ''
|
11475420
|
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('telegraf')
def test_telegraf_running_and_enabled(host):
telegraf = host.service("telegraf")
if host.system_info.distribution not in ['opensuse-leap']:
assert telegraf.is_enabled
assert telegraf.is_running
def test_telegraf_dot_conf(host):
telegraf = host.file("/etc/telegraf/telegraf.conf")
assert telegraf.user == "telegraf"
assert telegraf.group == "telegraf"
assert telegraf.mode == 0o640
assert telegraf.contains('interval = "10s"')
assert telegraf.contains('[[inputs.cpu]]')
assert telegraf.contains('percpu = true')
assert telegraf.contains('[[outputs.influxdb]]')
assert telegraf.contains('["http://influxdb:8086"]')
assert telegraf.contains('[[inputs.net]]')
def test_telegraf_dot_d_dir(host):
telegraf = host.file("/etc/telegraf/telegraf.d")
assert telegraf.user == "root"
assert telegraf.group == "root"
assert telegraf.mode == 0o755
assert telegraf.is_directory
def test_telegraf_dot_d(host):
telegraf = host.file("/etc/telegraf/telegraf.d/logparser.conf")
assert telegraf.user == "telegraf"
assert telegraf.group == "telegraf"
assert telegraf.mode == 0o640
assert telegraf.contains('[[inputs.logparser]]')
assert telegraf.contains('from_beginning = false')
def test_telegraf_package(host):
telegraf = host.package('telegraf')
assert telegraf.is_installed
|
11475424
|
import torch
import numpy as np
from graphics.transform import compute_tsdf
def add_noise_to_grid(grid, config):
# transform to occupancy
occ = torch.clone(grid)
occ[grid < 0] = 1
occ[grid >= 0] = 0
noise = torch.rand(grid.shape)
noise[noise < (1. - config.DATA.noise)] = 0.
noise[noise > 0] = 1.
occ_noise = occ + noise
occ_noise = occ_noise.clamp(0, 1)
grid_noise = occ_noise.detach().numpy()
dist1 = compute_tsdf(grid_noise.astype(np.float64))
dist1[dist1 > 0] -= 0.5
dist2 = compute_tsdf(np.ones(grid_noise.shape) - grid_noise)
dist2[dist2 > 0] -= 0.5
grid_noise = np.copy(dist1 - dist2)
resolution = 1./grid_noise.shape[0]
grid_noise = resolution * grid_noise
return torch.tensor(grid_noise)
def get_tsdf(grid):
dist1 = compute_tsdf(grid.astype(np.float64))
dist1[dist1 > 0] -= 0.5
dist2 = compute_tsdf(np.ones(grid.shape) - grid)
dist2[dist2 > 0] -= 0.5
tsdf_grid = np.copy(dist1 - dist2)
resolution = 1. / tsdf_grid.shape[0]
tsdf_grid = resolution * tsdf_grid
return tsdf_grid
def get_normal_field(grid):
[gradx, grady, gradz] = np.gradient(grid)
# normalize
norm = np.sqrt(np.power(gradx, 2) + np.power(grady, 2) + np.power(gradz, 2))
gradx /= (norm + 1.e-08)
grady /= (norm + 1.e-08)
gradz /= (norm + 1.e-08)
gradient = np.stack((gradx, grady, gradz))
return gradient
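# Illustrative sketch (the sphere signed distance grid below is an assumed
# example, not part of this module): for a centred sphere SDF the normal
# field points radially outwards.
# xs = np.linspace(-1., 1., 16)
# X, Y, Z = np.meshgrid(xs, xs, xs, indexing="ij")
# sphere_sdf = np.sqrt(X**2 + Y**2 + Z**2) - 0.5
# normals = get_normal_field(sphere_sdf)  # shape (3, 16, 16, 16)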
|
11475438
|
import torch, sys, random
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class AbsWeighting(nn.Module):
r"""An abstract class for weighting strategies.
"""
def __init__(self):
super(AbsWeighting, self).__init__()
def init_param(self):
r"""Define and initialize some trainable parameters required by specific weighting methods.
"""
pass
def _compute_grad_dim(self):
self.grad_index = []
for param in self.get_share_params():
self.grad_index.append(param.data.numel())
self.grad_dim = sum(self.grad_index)
def _grad2vec(self):
grad = torch.zeros(self.grad_dim)
count = 0
for param in self.get_share_params():
if param.grad is not None:
beg = 0 if count == 0 else sum(self.grad_index[:count])
end = sum(self.grad_index[:(count+1)])
grad[beg:end] = param.grad.data.view(-1)
count += 1
return grad
def _compute_grad(self, losses, mode, rep_grad=False):
'''
mode: backward, autograd
'''
if not rep_grad:
grads = torch.zeros(self.task_num, self.grad_dim).to(self.device)
for tn in range(self.task_num):
if mode == 'backward':
losses[tn].backward(retain_graph=True) if (tn+1)!=self.task_num else losses[tn].backward()
grads[tn] = self._grad2vec()
elif mode == 'autograd':
grad = list(torch.autograd.grad(losses[tn], self.get_share_params(), retain_graph=True))
grads[tn] = torch.cat([g.view(-1) for g in grad])
else:
                    raise ValueError('No support {} mode for gradient computation'.format(mode))
self.zero_grad_share_params()
else:
if not isinstance(self.rep, dict):
grads = torch.zeros(self.task_num, *self.rep.size()).to(self.device)
else:
grads = [torch.zeros(*self.rep[task].size()) for task in self.task_name]
for tn, task in enumerate(self.task_name):
if mode == 'backward':
losses[tn].backward(retain_graph=True) if (tn+1)!=self.task_num else losses[tn].backward()
grads[tn] = self.rep_tasks[task].grad.data.clone()
return grads
def _reset_grad(self, new_grads):
count = 0
for param in self.get_share_params():
if param.grad is not None:
beg = 0 if count == 0 else sum(self.grad_index[:count])
end = sum(self.grad_index[:(count+1)])
param.grad.data = new_grads[beg:end].contiguous().view(param.data.size()).data.clone()
count += 1
def _get_grads(self, losses, mode='backward'):
r"""This function is used to return the gradients of representations or shared parameters.
If ``rep_grad`` is ``True``, it returns a list with two elements. The first element is \
the gradients of the representations with the size of [task_num, batch_size, rep_size]. \
The second element is the resized gradients with size of [task_num, -1], which means \
the gradient of each task is resized as a vector.
If ``rep_grad`` is ``False``, it returns the gradients of the shared parameters with size \
of [task_num, -1], which means the gradient of each task is resized as a vector.
"""
if self.rep_grad:
per_grads = self._compute_grad(losses, mode, rep_grad=True)
if not isinstance(self.rep, dict):
grads = per_grads.reshape(self.task_num, self.rep.size()[0], -1).sum(1)
else:
                try:
                    grads = torch.stack(per_grads).sum(1).view(self.task_num, -1)
                except Exception:
                    raise ValueError('The representation dimensions of different tasks must be consistent')
return [per_grads, grads]
else:
self._compute_grad_dim()
grads = self._compute_grad(losses, mode)
return grads
def _backward_new_grads(self, batch_weight, per_grads=None, grads=None):
r"""This function is used to reset the gradients and make a backward.
Args:
batch_weight (torch.Tensor): A tensor with size of [task_num].
            per_grads (torch.Tensor): It is needed if ``rep_grad`` is True. The gradients of the representations.
grads (torch.Tensor): It is needed if ``rep_grad`` is False. The gradients of the shared parameters.
"""
if self.rep_grad:
if not isinstance(self.rep, dict):
transformed_grad = torch.einsum('i, i... -> ...', batch_weight, per_grads)
self.rep.backward(transformed_grad)
else:
for tn, task in enumerate(self.task_name):
rg = True if (tn+1)!=self.task_num else False
self.rep[task].backward(batch_weight[tn]*per_grads[tn], retain_graph=rg)
else:
new_grads = torch.einsum('i, i... -> ...', batch_weight, grads)
self._reset_grad(new_grads)
    def backward(self, losses, **kwargs):
r"""
Args:
losses (list): A list of losses of each task.
kwargs (dict): A dictionary of hyperparameters of weighting methods.
"""
pass
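# Illustrative sketch of the contract a concrete model must fulfil before the
# helpers above can run (the linear layer and attribute values below are
# assumptions, not from this file):
# class ToyWeighting(AbsWeighting):
#     def __init__(self):
#         super(ToyWeighting, self).__init__()
#         self.shared = nn.Linear(4, 4)
#         self.task_num, self.device, self.rep_grad = 2, 'cpu', False
#     def get_share_params(self):
#         return self.shared.parameters()
#     def zero_grad_share_params(self):
#         self.shared.zero_grad()
# After _compute_grad_dim() fills grad_index/grad_dim, _grad2vec() flattens
# the shared-parameter gradients into a single vector of length grad_dim.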
|
11475548
|
import pandas as pd
import sys
import os
import argparse
import time
from consts import NUM_WORKERS
def main():
# Parse command line arguments.
parser = argparse.ArgumentParser()
parser.add_argument("min", type=int, help="Minimum size to search.")
parser.add_argument("max", type=int, help="Maximum size to search.")
parser.add_argument(
"--update", action="store_const",
dest="updating", const=True, default=False,
help=(
"Search notebooks that have been added "
+ "or updated since last search, along with new "
+ "notebooks"
)
)
args = parser.parse_args()
MIN = args.min
MAX = args.max
updating = args.updating
# Load csv with approximated number of notebooks per size interval.
data_df = pd.read_csv("./nb_counts.csv")
# Calculate best size distribution among workers.
workers = distribute_query(data_df, NUM_WORKERS, MIN, MAX)
# Format commands.
query_commands = []
for _, worker in workers.iterrows():
size = worker["size"]
        size_min = size.split("..")[0]
        size_max = size.split("..")[1]
        if int(size_min) <= int(size_max):
query_commands.append(("nohup python3 -u query_git.py {0} {1}"
" --worker {2}{3}> query_{4}.log &").format(
size_min, size_max,
worker["id"],
" --update " if updating else " ",
worker["id"]
)
)
i = 0
for command in query_commands:
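        # Only worker 5's command is launched by the filter below; the other
        # prepared commands are skipped (apparently a single-worker rerun).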
if i in [5]:
os.system(command)
time.sleep(10)
i += 1
def distribute_query(data_df, num_workers, MIN, MAX):
"""
Find the best size partitions (from MIN to MAX)
to distribute among worker.
"""
num_nbs = sum(data_df.number)
expected_each = num_nbs // num_workers
workers = []
start_idx = 0
end_idx = 0
done_worker = False
done_all = False
for w in range(num_workers):
if not done_all:
done_worker = False
while not done_worker:
# If reached end of range, record partition.
if data_df.iloc[end_idx]["size"] >= MAX:
                    total = data_df[start_idx:].number.sum()
size = "{0}..{1}".format(
(data_df.iloc[start_idx-1]["size"] if w > 0 else MIN),
MAX
)
worker = {
"id": w,
"size": size,
"total": total
}
workers.append(worker)
done_worker = True
done_all = True
continue
# Notebooks in current partition vs. number expected.
total = data_df[start_idx:end_idx+1].number.sum()
diff = abs(total - expected_each)
# Notebooks in next partition vs. number expected.
total_next = data_df[start_idx:end_idx+2].number.sum()
diff_next = abs(total_next - expected_each)
# Record partition if closer than the next option.
if ((total >= expected_each or diff < diff_next)
and data_df.iloc[end_idx]["size"] > MIN):
size = "{0}..{1}".format(
(data_df.iloc[start_idx-1]["size"] if w > 0 else MIN),
data_df.iloc[end_idx]["size"]
)
worker = {
"id": w,
"size": size,
"total": total
}
workers.append(worker)
done_worker = True
continue
end_idx = end_idx+1
# Set variables for the next worker.
start_idx = end_idx+1
end_idx = end_idx+1
# If not all workers are used, split largest partitions in half.
workers_df = pd.DataFrame(workers).sort_values(by="total")\
.reset_index(drop=True)
workers_df["id"] = list(range(len(workers_df)))
while len(workers_df) < num_workers:
minimum = int(workers_df.iloc[-1]["size"].split("..")[0])
maximum = int(workers_df.iloc[-1]["size"].split("..")[1])
half = minimum + (maximum - minimum) // 2
new = {
"id": len(workers_df),
"size": ["{0}..{1}".format(minimum,half),
"{0}..{1}".format(half+1,maximum)],
"total": [workers_df.iloc[-1]["total"]//2]*2
}
        workers_df = pd.concat([workers_df[:-1], pd.DataFrame(new)])\
            .sort_values(by="total")
workers_df = workers_df.sort_values(by="size")
workers_df["id"] = list(range(len(workers_df)))
return workers_df
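# Illustrative example (made-up counts): with num_workers=2, MIN=0, MAX=30
# and nb_counts rows (size=10, n=100), (size=20, n=100), (size=30, n=100),
# each worker is expected ~150 notebooks, so the partitions come out roughly
# as "0..20" and "20..30".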
if __name__ == "__main__":
main()
|
11475551
|
import numpy as np
import pandas as pd
from defect_features.config import conf
class Idmodel:
def getdata(self, pythonProjectPath,project):
data = pd.read_csv(pythonProjectPath+"/defect_features/report/"+project+'__'+conf.modelName+".csv")
trainSet = np.array(data)+0
trainSet = np.insert(trainSet, 0, np.ones(trainSet.shape[0]), axis=1)
trainSet = trainSet[np.argsort(trainSet[:,-1])]
train = trainSet[:, :-1]
label = trainSet[:, -1]
# print(trainArr)
return train, label
def sigmoid(self, z):
return 1.0 / (1 + np.exp(-z))
# s = 1 / (1 + np.exp(-z))
# ds = s * (1 - s)
# return ds
def normalizeRows(self, x):
x = np.array(x, dtype=np.float64)
x_norm = np.linalg.norm(x, axis=1, keepdims=True)
x = x / x_norm
return x
def gradDescent(self, train, label, lr, epoch):
M, N = train.shape
label = label.reshape(-1, 1)
errlist = []
beta = np.ones((N, 1))
for t in range(epoch):
# print(np.dot(train, beta))
z = np.dot(train, beta)
z = np.array(z, dtype=np.float64)
# print(z.dtype.name)
p = self.sigmoid(z)
dBetaMat = -train * (label - p)
# shape (1,n)
dBeta = np.sum(dBetaMat, axis=0, keepdims=True, dtype=np.float64)
# print(dBeta.dtype.name)
beta -= lr * dBeta.T
pre = self.predict(beta, train)
errorRate = self.cntErrRate(pre, label)
errlist.append(errorRate)
return beta, errlist
def predict(self, beta, trainArr):
z = np.dot(trainArr, beta)
z = np.array(z, dtype=np.float64)
preArr = self.sigmoid(z)
preArr[preArr > 0.5] = 1
preArr[preArr <= 0.5] = 0
return preArr
def undersampling(self, train, label):
clean_size = sum(label == 0)
buggy_size = sum(label == 1)
clean_data = train[0:clean_size, :]
buggy_data = train[clean_size:clean_size+buggy_size, :]
if (clean_size > buggy_size):
row_rand_array = np.arange(clean_data.shape[0])
np.random.shuffle(row_rand_array)
sampled_data = clean_data[row_rand_array[0:buggy_size]]
ret_data = np.vstack((sampled_data, buggy_data))
ret_label = np.append(np.zeros(buggy_size), np.ones(buggy_size))
else:
row_rand_array = np.arange(buggy_data.shape[0])
np.random.shuffle(row_rand_array)
sampled_data = buggy_data[row_rand_array[0:clean_size]]
ret_data = np.vstack((clean_data, sampled_data))
ret_label = np.append(np.zeros(clean_size), np.ones(clean_size))
return ret_data, ret_label
def cntErrRate(self, prelabel, label):
M = len(prelabel)
if M == 0:
return 0
cnt = 0.0
for i in range(M):
if prelabel[i] != label[i]:
cnt += 1.0
return cnt / float(M)
def buildIdmodel(self, pythonProjectPath, project):
train, label = self.getdata(pythonProjectPath,project)
train, label = self.undersampling(train, label)
train = self.normalizeRows(train)
lr = 0.001
epoch = 500
beta, err = self.gradDescent(train, label, lr, epoch)
# print(err[-1])
# print(beta)
np.savetxt(pythonProjectPath+"/train/"+project+'_'+conf.modelName+"_learnedModel.txt", beta, fmt='%f', delimiter=',')
def runIdmodel(self, pythonProjectPath,project,projectName,modelName):
data = pd.read_csv(pythonProjectPath + "/defect_features/report/"+project+"_one.csv")
beta = np.loadtxt(pythonProjectPath+"/train/"+projectName+'_'+modelName+"_learnedModel.txt", delimiter=',')
testSet = np.array(data) + 0
testSet = np.insert(testSet, 0, np.ones(testSet.shape[0]), axis=1)
testSet = testSet[np.argsort(testSet[:, -1])]
test = testSet[:, :-1]
test = self.normalizeRows(test)
# print(test)
z = np.dot(test, beta)
z = np.array(z, dtype=np.float64)
# print(z)
predict = self.sigmoid(z)
# print(predict)
predict[predict > 0.5] = 1
predict[predict <= 0.5] = 0
return predict
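# Illustrative sketch (toy data is assumed, not part of the pipeline):
# m = Idmodel()
# X = m.normalizeRows(np.array([[1., 0.1], [1., 0.2], [1., 0.9], [1., 1.]]))
# beta, err = m.gradDescent(X, np.array([0., 0., 1., 1.]), lr=0.1, epoch=200)
# err[-1] should approach 0 on this linearly separable toy set.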
# if __name__ == '__main__':
# run("/Users/lifeasarain/Desktop/JITO/JIT-Identification")
|
11475565
|
import numpy as np
import math
class schur_solver:
    def __init__(self):
        self.T_v_rel = []
        self.T_c_rel = []
def set_vals(self, T_v_rel, T_c_rel):
self.T_v_rel = T_c_rel #Reversed should be fixed before we open source the code
self.T_c_rel = T_v_rel
return True
def rotation_projection(self, R):
u, s, vh = np.linalg.svd(R, full_matrices=True)
return u.dot(vh)
def calculate_rotation(self):
Q = self.construct_Q()
u, s, vh = np.linalg.svd(Q, full_matrices=True)
Rx = np.reshape(u[4:-1, -2], (3, 3))
Rx = self.rotation_projection(Rx)
Rx = Rx * np.sign(np.linalg.det(Rx))
Rx = Rx.transpose()
return Rx
def solve(self):
Rx = self.calculate_rotation()
r = np.zeros((10, 1))
r[:9] = np.reshape(Rx.transpose(), (9, 1))
r[9] = 0
Q = self.construct_Q()
Qta = Q[:4, :4]
Qtar = Q[:4, 4:]
v = np.resize(-np.linalg.inv(Qta).dot(Qtar.dot(r)), (4, 1))
tx = np.resize(v[:3], (3))
scale = v[3]
return [Rx, tx, scale] #The inverse of scale is calculated, needs to be fixed
def get_rot_matrix(self, pose_list):
rot_matrices = [pose.rot.as_matrix() for pose in pose_list]
return rot_matrices
def get_trans_vec(self, pose_list):
trans_list = [np.transpose(np.atleast_2d(pose.trans)) for pose in pose_list]
return trans_list
def construct_M_R(self):
rot_a = self.get_rot_matrix(self.T_v_rel)
rot_b = self.get_rot_matrix(self.T_c_rel)
M_R_list = []
for i in range(len(rot_a)):
kron_prod = np.kron(np.transpose(rot_a[i]), np.eye(3)) - np.kron(np.transpose(np.eye(3)), rot_b[i])
M_R_list.append(np.hstack((np.zeros((9, 4)), kron_prod, np.zeros((9, 1)))))
return M_R_list
def construct_M_t(self):
M_t_list = []
rot_a = self.get_rot_matrix(self.T_v_rel)
rot_b = self.get_rot_matrix(self.T_c_rel)
trans_a = self.get_trans_vec(self.T_v_rel)
trans_b = self.get_trans_vec(self.T_c_rel)
for i in range(len(rot_a)):
term_1 = np.eye(3) - rot_b[i]
term_2 = np.kron(np.transpose(trans_a[i]), np.eye(3))
M_t_list.append(np.hstack((term_1, -trans_b[i], term_2, np.zeros((3, 1)))))
return M_t_list
def construct_Q_i(self, M_i_list):
Q_i_i = np.empty((len(M_i_list), M_i_list[0].shape[1], M_i_list[0].shape[1]))
for i, entry in enumerate(M_i_list):
Q_i_i[i, :, :] = np.matmul(np.transpose(entry),entry)
Q_i = np.sum(Q_i_i, axis=0)
return Q_i
def construct_Q(self):
# Construct the rotation error cost
M_R_list = self.construct_M_R()
Q_R = self.construct_Q_i(M_R_list)
#Construct the translation error cost
M_t_list = self.construct_M_t()
Q_t = self.construct_Q_i(M_t_list)
Q = Q_t + Q_R
Q = (np.transpose(Q) + Q)/2 #Make sure that Q is symmetric
return Q
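# Illustrative usage sketch (the pose objects are assumptions: anything
# exposing .rot.as_matrix() and .trans works):
# solver = schur_solver()
# solver.set_vals(T_v_rel, T_c_rel)  # two equally long lists of relative poses
# R, t, scale = solver.solve()       # aligning rotation, translation and scale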
|
11475580
|
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectAI import DistributedObjectAI
class DistributedDataStoreManagerAI(DistributedObjectAI):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedDataStoreManagerAI')
|
11475582
|
from aiokafka import AIOKafkaProducer
import asyncio
loop = asyncio.get_event_loop()
async def send_one():
producer = AIOKafkaProducer(
bootstrap_servers='localhost:9092',
transactional_id="transactional_test",
loop=loop)
# Get cluster layout and topic/partition allocation
await producer.start()
try:
async with producer.transaction():
# Produce messages
res = await producer.send_and_wait(
"test-topic", b"Super transactional message")
input()
raise ValueError()
finally:
await producer.stop()
print(res)
loop.run_until_complete(send_one())
|
11475621
|
from django.db import models
from django.contrib.auth.models import User
from django.urls import reverse
from PIL import Image
class Post(models.Model):
"""
The model to store Posts created by users.
Fields-
post_content: The text of the post
posted_by: User who created the post
post_date_posted: Date of making the post
post_image: Image posted with the post
"""
post_content = models.TextField(verbose_name="content")
posted_by = models.ForeignKey(User, on_delete=models.CASCADE)
post_date_posted = models.DateTimeField(auto_now_add=True,
verbose_name="Published on")
post_image = models.ImageField(upload_to="post_images", null=True, blank=True)
def get_absolute_url(self):
return reverse('post-detail', kwargs={'pk': self.pk})
def save(self, **kwargs):
"""
This function will resize and add a pixelating effect to the images
to give them a more retro and early internet feel.
"""
        super().save(**kwargs)
try:
img = Image.open(self.post_image.path)
except ValueError as E:
print("No image was attached with the post\n", repr(E))
return None
img = img.resize((240, 240), resample=Image.BILINEAR)
output_size = (240, 240)
img = img.resize(output_size, Image.NEAREST)
img.thumbnail(output_size)
img.save(self.post_image.path)
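        # NOTE: the second 240x240 resize and the thumbnail call above are
        # effectively no-ops on an image that is already 240x240. A common
        # pixelation recipe (illustrative; the 60x60 block size is an
        # assumption) is to downscale first, then upscale with NEAREST:
        #
        #     small = img.resize((60, 60), resample=Image.BILINEAR)
        #     img = small.resize((240, 240), Image.NEAREST)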
|
11475711
|
from reactive_robot.connectors.mqtt import MqttTopic
def test_mqtt_topic_has_wildcard():
    topic1 = MqttTopic("device/+/temperature")
    assert topic1.has_wild_card() == True
    topic2 = MqttTopic("device/#")
    assert topic2.has_wild_card() == True
    topic2 = MqttTopic("device/a42b4d93/temperature")
    assert topic2.has_wild_card() == False
def test_mqtt_topics_are_equals():
    topic1 = MqttTopic("device/abcdd12/temperature")
    topic2 = MqttTopic("device/abcdd12/temperature")
    assert (topic1 == topic2) is True
    topic1 = MqttTopic("device/+/temperature/+/value")
    topic2 = MqttTopic("device/abcdd12/temperature/1/value")
    assert (topic1 == topic2) is True
    topic1 = MqttTopic("device/#")
    topic2 = MqttTopic("device/asbdf/temperature/r23r")
    topic3 = MqttTopic("device/asbdf/coordinate")
    assert (topic1 == topic2) is True
    assert (topic1 == topic3) is True
    assert (topic2 == topic3) is False
|
11475717
|
from __future__ import print_function, division, absolute_import
from collections import OrderedDict
import numpy as np
from typing import List, Union, Tuple
from time import time
import collections
try:
    from collections.abc import MutableSet  # Python 3.3+
except ImportError:
    from collections import MutableSet  # Python 2 fallback
try:
from itertools import izip_longest
except Exception:
from itertools import zip_longest as izip_longest
def vector2index_img(vector_img):
vector_img = np.array(vector_img)
height, width, nb_channels = vector_img.shape[-3:]
onehot = np.reshape(vector_img, [height, width, nb_channels])
index_img = np.zeros([height, width], dtype=np.int32)
for i in range(nb_channels):
index_img += 2**i * onehot[:,:,i].astype(np.int32)
return index_img
def render_index_img(index_img):
symbols = [" ", "!", "@", "#", "$", "%", "^", "&", "*", "(", ")", "_", "="]
symbols = symbols + [str(i) for i in range(64)]
height, width = index_img.shape
for y in range(height):
for x in range(width):
print(symbols[index_img[y,x]], end="")
print()
def join_vkb_lists(v1: Tuple[List[np.ndarray]], v2: Tuple[List[np.ndarray]]):
return [vt1+vt2 for vt1, vt2 in zip(v1, v2)]
def stack_vkb(vkb: Tuple[List[np.ndarray]]):
return tuple([np.stack(vkb_list, axis=-1) if vkb_list else [] for vkb_list in vkb])
def concat_vkb(v1: Tuple[np.ndarray], v2: Tuple[np.ndarray]):
# dim=-1 is the nb_predicates dim
vkb = []
for vt1, vt2 in zip(v1, v2):
if isinstance(vt1, np.ndarray) and isinstance(vt2, np.ndarray):
vkb.append(np.concatenate([vt1, vt2], axis=-1))
elif isinstance(vt1, np.ndarray):
vkb.append(vt1)
elif isinstance(vt2, np.ndarray):
vkb.append(vt2)
else:
vkb.append([])
return tuple(vkb)
def conv_output_size(input_size, kernel_size, stride=1, padding=0, dilation=1):
feature_map_shape = (np.array(input_size, dtype=np.int32)+2*padding-dilation*(kernel_size-1)-1)//stride+1
return feature_map_shape[0], feature_map_shape[1]
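# e.g. conv_output_size((32, 32), kernel_size=3, stride=2, padding=1) -> (16, 16)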
def rotate_vec2d(vec, degrees):
"""
rotate a vector anti-clockwise
:param vec:
:param degrees:
:return:
"""
theta = np.radians(degrees)
c, s = np.cos(theta), np.sin(theta)
R = np.array(((c, -s), (s, c)))
return R@vec
def ind_dict2list(dic):
"""
    :param dic: dictionary mapping from object to index, starting from zero
:return:
"""
l = list(range(len(dic)))
for item, index in dic.items():
l[index] = item
return l
def discount(r, discounting):
discounted_reward = np.zeros_like(r, dtype=np.float32)
G = 0.0
for i in reversed(range(0, len(r))):
G = G * discounting + r[i]
discounted_reward[i] = G
return discounted_reward
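# e.g. discount([1., 1., 1.], 0.9) -> [2.71, 1.9, 1.0]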
def normalize(scalars):
mean, std = np.mean(scalars), np.std(scalars)
return (scalars - mean)/(std+1e-8)
def unique_list(l):
if not l:
return []
return list(OrderedDict.fromkeys(l))
class TimerCollection():
def __init__(self):
self._timers = {}
def get_timer(self, name):
if name not in self._timers.keys():
self._timers[name] = Timer()
return self._timers[name]
def reset_all(self):
for timer in self._timers.values():
timer.reset()
def get_statistics(self):
statistics = {}
for name, timer in self._timers.items():
statistics[name] = timer.accumulated_time
return statistics
def get_summary_str(self):
statistics = self.get_statistics()
if "total" not in statistics:
statistics["total"] = sum(statistics.values())
summary_str = ""
for name, t in statistics.items():
            summary_str += "{} takes {} seconds ({:.1f}%);\n".format(name, t, 100.0 * t / statistics["total"])
return summary_str
class Timer():
"""
Based on context manager.
Estimate time spent in the context.
Query by property of accumulated_time
"""
def __init__(self):
self._accumulated_time = 0
@property
def accumulated_time(self):
return self._accumulated_time
def __enter__(self):
self._start_time = time()
def __exit__(self, type, value, traceback):
current_time = time()
elapsed = current_time - self._start_time
self._accumulated_time += elapsed
return False #re-raise any exceptions
def reset(self):
self._accumulated_time = 0
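# Minimal usage sketch for Timer / TimerCollection (illustrative only):
#
#     timers = TimerCollection()
#     with timers.get_timer('forward'):
#         pass  # timed work goes here
#     print(timers.get_summary_str())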
# from https://stackoverflow.com/questions/27890052
def find_shape(seq):
try:
len_ = len(seq)
except TypeError:
return ()
shapes = [find_shape(subseq) for subseq in seq]
return (len_,) + tuple(max(sizes) for sizes in izip_longest(*shapes,
fillvalue=1))
def fill_array(arr, seq):
if arr.ndim == 1:
try:
len_ = len(seq)
except TypeError:
len_ = 0
arr[:len_] = seq
arr[len_:] = 0
else:
for subarr, subseq in izip_longest(arr, seq, fillvalue=()):
fill_array(subarr, subseq)
class OrderedSet(MutableSet):
def __init__(self, iterable=None):
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.map)
def __contains__(self, key):
return key in self.map
def add(self, key):
if key not in self.map:
end = self.end
curr = end[1]
curr[2] = end[1] = self.map[key] = [key, curr, end]
def discard(self, key):
if key in self.map:
key, prev, next = self.map.pop(key)
prev[2] = next
next[1] = prev
def __iter__(self):
end = self.end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
def pop(self, last=True):
if not self:
raise KeyError('set is empty')
key = self.end[1][0] if last else self.end[2][0]
self.discard(key)
return key
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return set(self) == set(other)
|
11475721
|
import tensorflow as tf
from convLayerRelu import *
def resSkip(x,outMaps,stride):
"""
skip component for res layer for when the resolution changes during stride
number of channels also changes via linear combination
"""
with tf.variable_scope(None, default_name="resSkip"):
return convLayer(x,1,outMaps,stride)
def resLayer(x,kernelSize,outMaps):
"""
a reslayer, no striding
"""
with tf.variable_scope(None, default_name="resLayer"):
skip = x
conv1 = convLayerRelu(x,kernelSize,outMaps,1)
conv2 = convLayer(conv1,kernelSize,outMaps,1)
return leakyRelu(conv2 + skip, 0.1)
def resLayerStride(x,kernelSize,outMaps):
"""
a reslayer, stride by 2
"""
with tf.variable_scope(None, default_name="resLayerStride"):
skip = resSkip(x,outMaps,2)
conv1 = convLayerRelu(x,kernelSize,outMaps,2)
conv2 = convLayer(conv1,kernelSize,outMaps,1)
return leakyRelu(conv2 + skip, 0.1)
def resBlock(x,kernelSize,outMaps):
"""
block of resnet units, resolution halves
"""
with tf.variable_scope(None, default_name="resBlock"):
res1 = resLayerStride(x,kernelSize,outMaps)
res2 = resLayer(res1,kernelSize,outMaps)
return res2
|
11475722
|
from .base import ApibpTest
get_schema_json = """{
"type": "object",
"properties": {
"id": {
"type": "string"
},
"title": {
"type": "string"
},
"content": {
"type": "string"
},
"tags": {
"type": "array",
"items": {
"type": "string"
}
}
}
}""".replace(
'"', """
)
get_schema = (
"""
<div class="api-action-schema">
Schema:
<pre><code>"""
+ get_schema_json
+ """
</code></pre>
</div>"""
)
patch_schema_json = """{
"type": "object",
"properties": {
"title": {
"type": "string"
},
"content": {
"type": "string"
},
"tags": {
"type": "array",
"items": {
"type": "string"
}
}
},
"additionalProperties": false
}""".replace(
'"', """
)
patch_schema = (
"""
<div class="api-action-schema">
Schema:
<pre><code>"""
+ patch_schema_json
+ """
</code></pre>
</div>"""
)
class JsonSchemaTest(ApibpTest):
def test(self):
response = self.get_response(
"apiblueprint_view/tests/fixtures/14. JSON Schema.md"
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, get_schema, html=True)
self.assertContains(response, patch_schema, html=True)
|
11475724
|
from __future__ import absolute_import
import numpy as np
from keras import backend as K
from keras.layers import Layer
from keras import initializers, regularizers, constraints
def _softmax(x, dim):
"""Computes softmax along a specified dim. Keras currently lacks this feature.
"""
if K.backend() == 'tensorflow':
import tensorflow as tf
return tf.nn.softmax(x, dim)
    elif K.backend() == 'cntk':
import cntk
return cntk.softmax(x, dim)
elif K.backend() == 'theano':
# Theano cannot softmax along an arbitrary dim.
# So, we will shuffle `dim` to -1 and un-shuffle after softmax.
perm = np.arange(K.ndim(x))
perm[dim], perm[-1] = perm[-1], perm[dim]
x_perm = K.permute_dimensions(x, perm)
output = K.softmax(x_perm)
        # Permute back (a single transposition is its own inverse)
        output = K.permute_dimensions(output, perm)
return output
else:
raise ValueError("Backend '{}' not supported".format(K.backend()))
class AttentionLayer(Layer):
"""Attention layer that computes a learned attention over input sequence.
For details, see papers:
- https://www.cs.cmu.edu/~diyiy/docs/naacl16.pdf
- http://colinraffel.com/publications/iclr2016feed.pdf (fig 1)
Input:
x: Input tensor of shape `(..., time_steps, features)` where `features` must be static (known).
Output:
2D tensor of shape `(..., features)`. i.e., `time_steps` axis is attended over and reduced.
"""
def __init__(self,
kernel_initializer='he_normal',
kernel_regularizer=None,
kernel_constraint=None,
use_bias=True,
bias_initializer='zeros',
bias_regularizer=None,
bias_constraint=None,
use_context=True,
context_initializer='he_normal',
context_regularizer=None,
context_constraint=None,
attention_dims=None,
**kwargs):
"""
Args:
attention_dims: The dimensionality of the inner attention calculating neural network.
For input `(32, 10, 300)`, with `attention_dims` of 100, the output is `(32, 10, 100)`.
i.e., the attended words are 100 dimensional. This is then collapsed via summation to
`(32, 10, 1)` to indicate the attention weights for 10 words.
If set to None, `features` dims are used as `attention_dims`. (Default value: None)
"""
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
super(AttentionLayer, self).__init__(**kwargs)
self.kernel_initializer = initializers.get(kernel_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.use_bias = use_bias
self.bias_initializer = initializers.get(bias_initializer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.bias_constraint = constraints.get(bias_constraint)
self.use_context = use_context
self.context_initializer = initializers.get(context_initializer)
self.context_regularizer = regularizers.get(context_regularizer)
self.context_constraint = constraints.get(context_constraint)
self.attention_dims = attention_dims
self.supports_masking = True
def build(self, input_shape):
if len(input_shape) < 3:
raise ValueError("Expected input shape of `(..., time_steps, features)`, found `{}`".format(input_shape))
attention_dims = input_shape[-1] if self.attention_dims is None else self.attention_dims
self.kernel = self.add_weight(shape=(input_shape[-1], attention_dims),
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
if self.use_bias:
self.bias = self.add_weight(shape=(attention_dims, ),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
if self.use_context:
self.context_kernel = self.add_weight(shape=(attention_dims, ),
initializer=self.context_initializer,
name='context_kernel',
regularizer=self.context_regularizer,
constraint=self.context_constraint)
else:
self.context_kernel = None
super(AttentionLayer, self).build(input_shape)
def call(self, x, mask=None):
# x: [..., time_steps, features]
# ut = [..., time_steps, attention_dims]
ut = K.dot(x, self.kernel)
if self.use_bias:
ut = K.bias_add(ut, self.bias)
ut = K.tanh(ut)
if self.use_context:
ut = ut * self.context_kernel
# Collapse `attention_dims` to 1. This indicates the weight for each time_step.
ut = K.sum(ut, axis=-1, keepdims=True)
# Convert those weights into a distribution but along time axis.
# i.e., sum of alphas along `time_steps` axis should be 1.
self.at = _softmax(ut, dim=1)
if mask is not None:
self.at *= K.cast(K.expand_dims(mask, -1), K.floatx())
# Weighted sum along `time_steps` axis.
return K.sum(x * self.at, axis=-2)
def compute_mask(self, input, input_mask=None):
# do not pass the mask to the next layers
return None
def compute_output_shape(self, input_shape):
return input_shape[0], input_shape[-1]
def get_attention_tensor(self):
if not hasattr(self, 'at'):
            raise ValueError('Attention tensor is only available after calling this layer with an input')
return self.at
def get_config(self):
config = {
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'bias_initializer': initializers.serialize(self.bias_initializer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'bias_constraint': constraints.serialize(self.bias_constraint),
'context_initializer': initializers.serialize(self.context_initializer),
'context_regularizer': regularizers.serialize(self.context_regularizer),
'context_constraint': constraints.serialize(self.context_constraint)
}
base_config = super(AttentionLayer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class ConsumeMask(Layer):
"""Layer that prevents mask propagation.
"""
def compute_mask(self, input, input_mask=None):
# do not pass the mask to the next layers
return None
def call(self, x, mask=None):
return x
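# Minimal usage sketch (illustrative only; the embedding/LSTM sizes are assumptions):
#
#     from keras.models import Sequential
#     from keras.layers import Embedding, LSTM
#
#     model = Sequential([
#         Embedding(10000, 128, input_length=50),   # (batch, 50, 128)
#         LSTM(64, return_sequences=True),          # (batch, 50, 64)
#         AttentionLayer(),                         # attends over time -> (batch, 64)
#     ])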
|
11475732
|
from django.conf.urls import url
from rest_framework.schemas import get_schema_view
from . import views
app_name = "api"
schema_view = get_schema_view(title="Cast API")
urlpatterns = [
url(r"^schema/$", schema_view),
url(r"^$", views.api_root, name="root"),
# image
url(r"^images/?$", views.ImageListView.as_view(), name="image_list"),
url(
r"^images/(?P<pk>\d+)/?$", views.ImageDetailView.as_view(), name="image_detail"
),
url(
regex=r"^upload_image/$",
view=views.ImageCreateView.as_view(),
name="upload_image",
),
# gallery
url(r"^gallery/?$", views.GalleryListView.as_view(), name="gallery_list"),
url(
r"^gallery/(?P<pk>\d+)/?$",
views.GalleryDetailView.as_view(),
name="gallery_detail",
),
# video
url(r"^videos/?$", views.VideoListView.as_view(), name="video_list"),
url(
r"^videos/(?P<pk>\d+)/?$", views.VideoDetailView.as_view(), name="video_detail"
),
url(
regex=r"^upload_video/$",
view=views.VideoCreateView.as_view(),
name="upload_video",
),
# audio
url(r"^audio/?$", views.AudioListView.as_view(), name="audio_list"),
url(
r"^audios/(?P<pk>\d+)/?$", views.AudioDetailView.as_view(), name="audio_detail"
),
url(
r"^audios/podlove/(?P<pk>\d+)/?$",
views.AudioPodloveDetailView.as_view(),
name="audio_podlove_detail",
),
# request
url(r"^request/?$", views.RequestListView.as_view(), name="request_list"),
]
|
11475740
|
from . import add, divide, multiply, subtract
from .__about__ import __version__
__all__ = [
"__version__",
"add",
"subtract",
"multiply",
"divide",
]
|
11475774
|
import sys
sys.path.append("./lambda/helper/python")
import boto3
import unittest
from moto import mock_s3
from moto import mock_dynamodb2
from helper import S3Helper
from helper import DynamoDBHelper
BUCKET_NAME = "test-bucket"
S3_FILE_NAME = "test_file_name.txt"
TABLE_NAME = "TestsTable"
current_session = boto3.session.Session()
REGION = current_session.region_name
print(f"Test region is {REGION}")
@mock_s3
class TestS3Helper(unittest.TestCase):
def setUp(self):
self.conn = boto3.resource('s3', region_name=REGION)
# according to the documentation https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.ServiceResource.create_bucket
# buckets in region us-east-1 do not require the region to be specified and have a location constraint of null
if(REGION=='us-east-1'):
self.conn.create_bucket(Bucket=BUCKET_NAME)
else:
self.conn.create_bucket(Bucket=BUCKET_NAME, CreateBucketConfiguration={'LocationConstraint': REGION})
def test_get_s3_bucket_region(self):
bucketRegion = S3Helper.getS3BucketRegion(BUCKET_NAME)
if(REGION=='us-east-1'):
self.assertEqual(bucketRegion, None)
else:
self.assertEqual(bucketRegion,REGION)
def test_write_to_s3(self):
S3Helper.writeToS3("Hello World", BUCKET_NAME, S3_FILE_NAME, REGION)
body = self.conn.Object(BUCKET_NAME, S3_FILE_NAME).get()['Body'].read().decode('utf-8')
self.assertEqual(body, "Hello World")
def test_read_from_s3(self):
self.conn.Object(BUCKET_NAME, S3_FILE_NAME).put(Body="Test")
body = S3Helper.readFromS3(BUCKET_NAME, S3_FILE_NAME, REGION)
self.assertEqual(body,"Test")
def tearDown(self):
buckets = boto3.client('s3').list_buckets()
for bucket in buckets['Buckets']:
s3_bucket = self.conn.Bucket(bucket['Name'])
s3_bucket.objects.all().delete()
s3_bucket.delete()
@mock_dynamodb2
class TestDynamoDBHelper(unittest.TestCase):
def setUp(self):
self.conn = boto3.client('dynamodb',region_name=REGION)
self.conn.create_table(
TableName = TABLE_NAME,
KeySchema = [{"AttributeName": "forum_name","KeyType":"HASH"}],
AttributeDefinitions=[{"AttributeName": "forum_name", "AttributeType": "S"}],
ProvisionedThroughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 5},
)
self.conn.put_item(
TableName = TABLE_NAME,
Item={
"forum_name": {"S" : "Test"},
"subject": {"S" : "test subject"}
}
)
def test_get_items(self):
items = DynamoDBHelper.getItems(TABLE_NAME, "forum_name", "Test")
expected_result = [{'forum_name': 'Test', 'subject': 'test subject'}]
self.assertEqual(items, expected_result)
def test_insert_item(self):
new_item = {
"forum_name": "Test2",
"subject": "test subject2"
}
ddbResponse = DynamoDBHelper.insertItem(TABLE_NAME, new_item)
self.assertEqual(ddbResponse['ResponseMetadata']['HTTPStatusCode'],200)
def tearDown(self):
self.conn.delete_table(TableName=TABLE_NAME)
if __name__=='__main__':
unittest.main()
|
11475776
|
from dolfin import *
from xii.meshing.make_mesh_cpp import make_mesh
from xii.assembler.average_matrix import curve_average_matrix
from xii.assembler.average_shape import Square
from xii import EmbeddedMesh
import numpy as np
surface_average_matrix = lambda V, TV, bdry_curve: curve_average_matrix(V, TV, bdry_curve, which='surface')
def make_z_mesh(num_vertices, zmin=0, zmax=1):
'''{(0, 0, zmin + t*(zmax - zmin))}'''
t = zmin + np.linspace(0, 1, num_vertices)*(zmax - zmin)
coordinates = np.c_[np.zeros_like(t), np.zeros_like(t), t]
cells = np.c_[np.arange(num_vertices - 1), np.arange(1, num_vertices)]
cells.dtype = 'uintp'
mesh = Mesh(mpi_comm_world())
make_mesh(coordinates, cells, 1, 3, mesh)
return mesh
def test(f, n, P, degree=8):
'''Check integrals due to averaging operator'''
mesh = BoxMesh(Point(-1, -1, -1), Point(1, 1, 1), n, n, n)
mf = MeshFunction('size_t', mesh, 1, 0)
CompiledSubDomain('near(x[0], 0.0) && near(x[1], 0.0)').mark(mf, 1)
line_mesh = EmbeddedMesh(mf, 1)
V = FunctionSpace(mesh, 'CG', 1)
TV = FunctionSpace(line_mesh, 'DG', 1)
f = interpolate(f, V)
cylinder = Square(P, degree)
Pi = surface_average_matrix(V, TV, cylinder)
print('\t', Pi.norm('linf'), max(len(Pi.getrow(i)[0]) for i in range(TV.dim())))
Pi_f = Function(TV)
Pi.mult(f.vector(), Pi_f.vector())
return Pi_f
# --------------------------------------------------------------------
if __name__ == '__main__':
# NOTE the size for integration size!!
size = 0.125
P = lambda x0: np.array([-size, -size, x0[2]])
f = Expression('2', degree=2)
Pi_f0 = f
f = Expression('x[2]', degree=1)
Pi_f0 = f
f = Expression('x[2]*x[2]', degree=2)
Pi_f0 = f
f = Expression('x[0]', degree=2)
Pi_f0 = Constant(0)
f = Expression('x[0]+x[1]', degree=2)
Pi_f0 = Constant(0)
f = Expression('x[0]*x[0]', degree=2)
Pi_f0 = Constant(2*size**2/3.)
f = Expression('x[0]*x[0]+x[1]*x[1]', degree=2)
Pi_f0 = Constant(4*size**2/3.)
f = Expression('x[2]*(x[0]*x[0]+x[1]*x[1])', degree=2)
Pi_f0 = Expression('x[2]*4*A*A/3.', A=size, degree=1)
e0, n0 = None, None
for n in (4, 8, 16, 32):
Pi_f = test(f, n, P)
print(Pi_f(0, 0, 0.5))
assert Pi_f.vector().norm('l2') > 0
e = sqrt(abs(assemble(inner(Pi_f0 - Pi_f, Pi_f0 - Pi_f)*dx)))
if e0 is not None:
rate = ln(e/e0)/ln(float(n0)/n)
else:
rate = np.inf
print('error %g, rate=%.2f' % (e, rate))
n0, e0 = n, e
|
11475780
|
from dataclasses import dataclass
@dataclass
class RiotGameSource:
"""
Example source class for the Riot API
Use it with:
setattr(game.sources, 'riotLolApi', RiotGameSource(gameId=..., platformId=...))
"""
gameId: int = None
platformId: str = None
# Esports games field
gameHash: str = None
@dataclass
class RiotPlayerSource:
puuid: str = None
accountId: str = None
summonerId: str = None
platformId: str = None
participantId: int = None # Will usually be player.id, but it is still a key unique to this data source
|
11475829
|
import tkinter as tk
class Menubar:
def __init__(self, parent):
font_specs = ("ubuntu", 14)
menubar = tk.Menu(parent.master, font=font_specs)
parent.master.config(menu=menubar)
file_dropdown = tk.Menu(menubar, font=font_specs, tearoff=0)
file_dropdown.add_command(label="Nuovo File",
command=parent.new_file)
file_dropdown.add_command(label="Apri File",
command=parent.open_file)
file_dropdown.add_command(label="Salva",
command=parent.save)
file_dropdown.add_command(label="Salva con Nome",
command=parent.save_as)
file_dropdown.add_separator()
file_dropdown.add_command(label="Esci",
command=parent.master.destroy)
menubar.add_cascade(label="File", menu=file_dropdown)
class PyText:
def __init__(self, master):
master.title("Untitled - PyText")
master.geometry("1200x700")
font_specs = ("ubuntu", 18)
self.master = master
self.textarea = tk.Text(master, font=font_specs)
self.scroll = tk.Scrollbar(master, command=self.textarea.yview)
self.textarea.configure(yscrollcommand=self.scroll.set)
self.textarea.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)
self.scroll.pack(side=tk.RIGHT, fill=tk.Y)
self.menubar = Menubar(self)
def set_window_title(self):
pass
def new_file(self):
pass
def open_file(self):
pass
def save(self):
pass
def save_as(self):
pass
if __name__ == "__main__":
master = tk.Tk()
pt = PyText(master)
master.mainloop()
|
11475858
|
def selection_sort(l):
    # Outer loop iterating over each index in the list
    for i in range(len(l)):
        # Track index of the least item found so far
        least = i
        # Inner loop to check every remaining item for a lower number
        for j in range(i + 1, len(l)):
            # Update least if a smaller item is found
            if l[j] < l[least]:
                least = j
        # Swap least index with current index if a smaller item was found
        # ("!=" rather than "is not": identity checks on ints are unreliable)
        if least != i:
            l[i], l[least] = l[least], l[i]
    # Return sorted list
    return l
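# e.g. selection_sort([64, 25, 12, 22, 11]) -> [11, 12, 22, 25, 64]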
|
11475867
|
def parity(x):
p = 0
while x:
p ^= x&1
x >>= 1
return p
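# e.g. parity(0b1011) == 1 (three bits set); parity(0b1001) == 0 (two bits set)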
def main():
    print('uint8_t byte_parity[256] = {')
    for byte in range(256):
        print('{parity},'.format(parity=parity(byte)), end=' ')
        if (byte + 1) % 16 == 0:
            print()
    print('};')
if __name__ == '__main__':
    main()
|
11475878
|
import logging
import math
logger = logging.getLogger(__name__)
# Helps visualize the steps of Viterbi.
def log_dptable(V):
s = " " + " ".join(("%7d" % i) for i in range(len(V))) + "\n"
for y in V[0]:
s += "%.15s: " % y
s += " ".join("%.7s" % ("%f" % v[y]) for v in V)
s += "\n"
logger.debug('%s', s)
def viterbi(obs, states, start_p, trans_p, emit_p):
states = list(states)
V = [{}]
path = {}
# Initialize base cases (t == 0)
for y in states:
V[0][y] = math.log10(start_p(y)) + math.log10(emit_p(y, obs[0]))
path[y] = [y]
# alternative Python 2.7+ initialization syntax
# V = [{y:(start_p[y] * emit_p[y][obs[0]]) for y in states}]
# path = {y:[y] for y in states}
# Run Viterbi for t > 0
for t in range(1, len(obs)):
logger.info('---- %s%%', int(100.0 * (float(t) / len(obs))))
V.append({})
newpath = {}
for i, y in enumerate(states):
if i % 500 == 0:
logger.debug('%s %s', i, y)
candidates = [(V[t - 1][y0] +
math.log10(trans_p(y0, y)) +
math.log10(emit_p(y, obs[t])), y0)
for y0 in states]
(prob, state) = max(candidates)
V[t][y] = prob
newpath[y] = path[state] + [y]
# Don't need to remember the old paths
path = newpath
logger.info('---- 100%%')
log_dptable(V)
    (prob, state) = max((V[len(obs) - 1][y], y) for y in states)
return (10.0 ** prob, path[state])
def example():
states = ('Healthy', 'Fever')
observations = ('normal', 'cold', 'dizzy')
start_probability = {'Healthy': 0.6, 'Fever': 0.4}
transition_probability = {
'Healthy': {'Healthy': 0.7, 'Fever': 0.3},
'Fever': {'Healthy': 0.4, 'Fever': 0.6},
}
emission_probability = {
'Healthy': {'normal': 0.5, 'cold': 0.4, 'dizzy': 0.1},
'Fever': {'normal': 0.1, 'cold': 0.3, 'dizzy': 0.6},
}
def start_p(s):
return start_probability[s]
def transition_p(s, n):
return transition_probability[s][n]
def emission_p(s, o):
return emission_probability[s][o]
return viterbi(observations,
states,
start_p,
transition_p,
emission_p)
if __name__ == '__main__':
example()
|
11475943
|
import json
from textwrap import dedent
class TestAddHostStoragePartition:
def test_no_args(self, host):
result = host.run('stack add host storage partition')
assert result.rc == 255
assert result.stderr == dedent('''\
error - "host" argument is required
{host ...} {device=string} {size=integer} [mountpoint=string] [options=string] [partid=integer] [type=string]
''')
def test_all_params(self, host, add_host):
result = host.run(
'stack add host storage partition backend-0-0 device=sda mountpoint=/ '
'size=1024 type=ext4 options=test_options partid=1'
)
assert result.rc == 0
result = host.run('stack list host storage partition backend-0-0 output-format=json')
assert result.rc == 0
assert json.loads(result.stdout) == [{
"host": "backend-0-0",
"device": "sda",
"partid": 1,
"mountpoint": "/",
"size": 1024,
"fstype": "ext4",
"options": "test_options",
"source": "H"
}]
|
11475954
|
import shutil
import glob
import os
import inkml2img
from datetime import datetime
dataPath = 'CROHME_labeled_2016/'
dataMergedPath = 'data_merged/'
targetFolder = 'data_processed/'
logger = open('log.txt', 'w+')
def writeLog(message):
logger.write("[" + datetime.now().strftime('%Y-%m-%d %H:%M:%S') + "] " + str(message) + "\n")
def createDirectory(dirPath):
if not os.path.exists(dirPath):
os.mkdir(dirPath)
writeLog("Create " + dirPath)
if __name__ == "__main__":
writeLog("Start processing.")
filesPath = glob.glob(dataPath + '*/*.inkml')
writeLog("There are " + str(len(filesPath)) + " files in " + dataPath)
createDirectory(dataMergedPath)
cnt = 0
for fileName in filesPath:
cnt = cnt + 1
print("Copying %d/%d" % (cnt, len(filesPath)))
writeLog("Copied " + fileName + " --> " + dataMergedPath + fileName)
shutil.copy2(fileName, dataMergedPath)
createDirectory(targetFolder)
listFiles = glob.glob(dataMergedPath + '*.inkml')
numberOfFile = len(listFiles)
writeLog("There are " + str(numberOfFile) + " files in " + dataMergedPath)
cnt = 0
    for fileInkml in listFiles:
        cnt = cnt + 1
        fileName = os.path.basename(fileInkml)
        print("Processing %s [%d/%d]" % (fileName, cnt, numberOfFile))
        writeLog("[" + str(cnt) + "/" + str(numberOfFile) + "]" + "Processed " + fileInkml + " --> " + targetFolder + fileName + ".png")
        try:
            inkml2img.inkml2img(fileInkml, targetFolder + fileName + '.png')
        except Exception:
            writeLog("Failed!")
            print("An error occurred!")
            continue
        writeLog("Successful!")
|
11476003
|
from typing import Callable, Tuple
import tensorflow as tf
import tensorflow_model_optimization as tfmot
from absl.testing import parameterized
from test_efficientnet_lite.test_model import TEST_PARAMS
# Disable GPU
tf.config.set_visible_devices([], "GPU")
class TestWeightClusteringWrappers(parameterized.TestCase):
centroid_initialization = tfmot.clustering.keras.CentroidInitialization
clustering_params = {
"number_of_clusters": 3,
"cluster_centroids_init": centroid_initialization.DENSITY_BASED,
}
def setUp(self):
tf.keras.backend.clear_session()
@parameterized.named_parameters(TEST_PARAMS)
def test_tfmot_weight_clustering_wrap(
self, model_fn: Callable, input_shape: Tuple[int, int]
):
model = model_fn(weights=None, input_shape=input_shape + (3,))
tfmot.clustering.keras.cluster_weights(model, **self.clustering_params)
|
11476006
|
from tests.unit import unittest
import boto.swf.layer1_decisions
class TestDecisions(unittest.TestCase):
def setUp(self):
self.decisions = boto.swf.layer1_decisions.Layer1Decisions()
def assert_data(self, *data):
        self.assertEqual(self.decisions._data, list(data))
def test_continue_as_new_workflow_execution(self):
self.decisions.continue_as_new_workflow_execution(
child_policy='TERMINATE',
execution_start_to_close_timeout='10',
input='input',
tag_list=['t1', 't2'],
task_list='tasklist',
start_to_close_timeout='20',
workflow_type_version='v2'
)
self.assert_data({
'decisionType': 'ContinueAsNewWorkflowExecution',
'continueAsNewWorkflowExecutionDecisionAttributes': {
'childPolicy': 'TERMINATE',
'executionStartToCloseTimeout': '10',
'input': 'input',
'tagList': ['t1', 't2'],
'taskList': {'name': 'tasklist'},
'taskStartToCloseTimeout': '20',
'workflowTypeVersion': 'v2',
}
})
|
11476014
|
from pylongigetestcase import PylonTestCase
from pypylon import pylon
import unittest
class LoadAndSaveTestSuite(PylonTestCase):
def test_load_and_save(self):
nodeFile = "NodeMap.pfs"
# Create an instant camera object with the camera device found first.
camera = self.create_first()
camera.Open()
# Print the model name of the camera.
print("Using device ", camera.GetDeviceInfo().GetModelName())
# featurePersistence = pylon.FeaturePersistence()
print("Saving camera's node map to file...")
print(nodeFile)
# Save the content of the camera's node map into the file.
pylon.FeaturePersistence.Save(nodeFile, camera.GetNodeMap())
# Just for demonstration, read the content of the file back to the camera's node map with enabled validation.
print("Reading file back to camera's node map...")
pylon.FeaturePersistence.Load(nodeFile, camera.GetNodeMap(), True)
# Close the camera.
camera.Close()
if __name__ == "__main__":
unittest.main()
|
11476039
|
import difflib
file1 = "precheck.txt"
file2 = "postcheck.txt"
with open(file1) as f1, open(file2) as f2:
    diff = difflib.ndiff(f1.readlines(), f2.readlines())
print(''.join(diff))
|
11476104
|
from .universal_datamodule import UniversalDataModule
from .universal_sampler import PretrainingSampler, PretrainingRandomSampler
__all__ = ['UniversalDataModule', 'PretrainingSampler', 'PretrainingRandomSampler']
|
11476108
|
from random import random
# This function takes
# - v: value in register
# - a: a scaling value for the logarithm based on Morris's paper
# It returns n(v,a), the approximate_count
def n(v, a):
return a*((1 + 1/a)**v - 1)
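# e.g. with a = 30: n(0, 30) = 0 and n(1, 30) = 30*((31/30) - 1) = 1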
# This function takes
# - v: value in register
# - a: a scaling value for the logarithm based on Morris's paper
# It returns a new value for v
def increment(v, a):
delta = 1/(n(v + 1, a) - n(v, a))
if random() <= delta:
return v + 1
else:
return v
#This simulates counting and takes
# - n_items: number of items to count and loop over
# - a: a scaling value for the logarithm based on Morris's paper
# It returns n(v,a), the approximate count
def approximate_count(n_items, a):
v = 0
for i in range(1, n_items + 1):
v = increment(v, a)
return n(v, a)
# This function takes
# - n_trials: the number of counting trials
# - n_items: the number of items to count to
# - a: a scaling value for the logarithm based on Morris's paper
# - threshold: the maximum percent error allowed
# It returns a true / false test value
def test_approximate_count(n_trials, n_items, a, threshold):
samples = [approximate_count(n_items, a) for i in range(1, n_trials + 1)]
avg = sum(samples)/n_trials
if abs((avg - n_items)/n_items) < threshold:
print("passed")
else:
print("failed")
print("[#]\nCounting Tests, 100 trials")
print("[#]\ntesting 1,000, a = 30, 10% error")
test_approximate_count(100, 1000, 30, 0.1)
print("[#]\ntesting 12,345, a = 10, 10% error")
test_approximate_count(100, 12345, 10, 0.1)
print("[#]\ntesting 222,222, a = 0.5, 20% error")
test_approximate_count(100, 222222, 0.5, 0.2)
|
11476115
|
import argparse
import numpy as np
import os
import pandas as pd
from tqdm import tqdm
import yaml
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SequentialSampler, RandomSampler
from mean_average_precision import MetricBuilder
from models import SiimCovidAuxModel
from dataset import PseudoAuxDataset, SiimCovidAuxDataset, classes
from dataset import chest14_classes
from utils import seed_everything, collate_fn
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (8192, rlimit[1]))
import warnings
warnings.filterwarnings("ignore")
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--cfg", default='configs/resnet200d.yaml', type=str)
parser.add_argument("--frac", default=1.0, type=float)
parser.add_argument("--epochs", default=30, type=int)
parser.add_argument("--patience", default=8, type=int)
args = parser.parse_args()
print(args)
SEED = 123
seed_everything(SEED)
def make_pseudo_df():
output = []
for source in ['padchest', 'pneumothorax', 'vin', 'test']:
ext_df = pd.read_csv('../../dataset/pseudo_csv_det/{}.csv'.format(source))
output.append(ext_df)
pseudo_df = pd.concat(output, ignore_index=True)
pseudo_df = pseudo_df.loc[pseudo_df['label'] != 'none 1 0 0 1 1'].reset_index(drop=True)
return pseudo_df
def make_siim_df(in_df):
image_paths = []
for _, row in in_df.iterrows():
image_path = '../../dataset/siim-covid19-detection/images/train/{}.png'.format(row['imageid'])
image_paths.append(image_path)
in_df['image_path'] = np.array(image_paths, dtype=str)
return in_df
if __name__ == "__main__":
with open(args.cfg) as f:
cfg = yaml.load(f, Loader=yaml.FullLoader)
cfg['aux_epochs'] = args.epochs
print(cfg)
ckpt_dir = 'warmup'
os.makedirs(ckpt_dir, exist_ok = True)
pseudo_df = make_pseudo_df()
rsna_df = pd.read_csv('../../dataset/external_dataset/ext_csv/rsna_pneumonia.csv')
rsna_df = rsna_df.loc[rsna_df['label'] != 'none 1 0 0 1 1'].reset_index(drop=True)
train_df = pd.concat([rsna_df, pseudo_df], ignore_index=True)
valid_df = pd.read_csv('../../dataset/siim-covid19-detection/train_kfold.csv')
valid_df = valid_df.loc[valid_df['label'] != 'none 1 0 0 1 1'].reset_index(drop=True)
valid_df = make_siim_df(valid_df)
if args.frac != 1:
print('Quick training')
train_df = train_df.sample(frac=args.frac).reset_index(drop=True)
valid_df = valid_df.sample(frac=args.frac).reset_index(drop=True)
train_dataset = PseudoAuxDataset(
df=train_df,
image_size=cfg['aux_image_size'], mode='train')
valid_dataset = SiimCovidAuxDataset(
df=valid_df,
images_dir='../../dataset/siim-covid19-detection/images/train',
image_size=cfg['aux_image_size'], mode='valid')
train_loader = DataLoader(train_dataset, batch_size=cfg['aux_batch_size'], sampler=RandomSampler(train_dataset), num_workers=cfg['workers'], pin_memory=False, drop_last=True, collate_fn=collate_fn)
valid_loader = DataLoader(valid_dataset, batch_size=cfg['aux_batch_size'], sampler=SequentialSampler(valid_dataset), num_workers=cfg['workers'], pin_memory=False, drop_last=False, collate_fn=collate_fn)
print('TRAIN: {} | VALID: {}'.format(len(train_loader.dataset), len(valid_loader.dataset)))
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
pretrained_path = 'pretrain/{}_{}_pretrain_step1.pth'.format(cfg['backbone_name'], cfg['chest14_image_size'])
pretrained_num_classes = len(chest14_classes)
model = SiimCovidAuxModel(
backbone_name=cfg['backbone_name'],
imagenet_pretrained=False,
num_classes=len(classes),
in_features=cfg['in_features'],
backbone_pretrained_path=pretrained_path,
backbone_pretrained_cls_num_classes=pretrained_num_classes,
model_pretrained_path=None,
model_pretrained_cls_num_classes=None)
model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=cfg['aux_init_lr'])
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, cfg['aux_epochs']-1)
scaler = torch.cuda.amp.GradScaler()
LOG = '{}/{}_{}_warmup.log'.format(ckpt_dir, cfg['backbone_name'], cfg['aux_image_size'])
CHECKPOINT = '{}/{}_{}_warmup.pth'.format(ckpt_dir, cfg['backbone_name'], cfg['aux_image_size'])
val_map_max = 0
if os.path.isfile(LOG):
os.remove(LOG)
log_file = open(LOG, 'a')
log_file.write('epoch, lr, train_loss, val_map\n')
log_file.close()
count = 0
best_epoch = 0
for epoch in range(cfg['aux_epochs']):
scheduler.step()
model.train()
train_loss = []
loop = tqdm(train_loader)
for images, targets in loop:
images = list(image.to(device) for image in images)
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
optimizer.zero_grad()
with torch.cuda.amp.autocast():
det_loss_dict = model(images, targets)
loss = sum(l for l in det_loss_dict.values())
train_loss.append(loss.item())
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
loop.set_description('Epoch {:02d}/{:02d} | LR: {:.5f}'.format(epoch, cfg['aux_epochs']-1, optimizer.param_groups[0]['lr']))
loop.set_postfix(loss=np.mean(train_loss))
train_loss = np.mean(train_loss)
model.eval()
metric_fn = MetricBuilder.build_evaluation_metric("map_2d", async_mode=True, num_classes=1)
for images, targets in tqdm(valid_loader):
images = list(image.to(device) for image in images)
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
with torch.cuda.amp.autocast(), torch.no_grad():
det_outputs = model(images, targets)
for t, d in zip(targets, det_outputs):
gt_boxes = t['boxes'].data.cpu().numpy()
gt_boxes = np.hstack((gt_boxes, np.zeros((gt_boxes.shape[0], 3), dtype=gt_boxes.dtype)))
det_boxes = d['boxes'].data.cpu().numpy()
det_scores = d['scores'].data.cpu().numpy()
det_scores = det_scores.reshape(det_scores.shape[0], 1)
det_pred = np.hstack((det_boxes, np.zeros((det_boxes.shape[0], 1), dtype=det_boxes.dtype), det_scores))
metric_fn.add(det_pred, gt_boxes)
val_map = metric_fn.value(iou_thresholds=0.5, recall_thresholds=np.arange(0., 1.1, 0.1), mpolicy='soft')['mAP']
print('train loss: {:.5f} | val_map: {:.5f}'.format(train_loss, val_map))
log_file = open(LOG, 'a')
log_file.write('{}, {:.5f}, {:.5f}, {:.5f}\n'.format(
epoch, optimizer.param_groups[0]['lr'], train_loss, val_map))
log_file.close()
if val_map > val_map_max:
print('Valid map improved from {:.5f} to {:.5f} saving model to {}'.format(val_map_max, val_map, CHECKPOINT))
val_map_max = val_map
best_epoch = epoch
count = 0
torch.save(model.state_dict(), CHECKPOINT)
else:
count += 1
if count > args.patience:
break
    log_file = open(LOG, 'a')
    log_file.write('Best epoch {} | val map max: {}\n'.format(best_epoch, val_map_max))
    log_file.close()
    print('Best epoch {} | val map max: {}'.format(best_epoch, val_map_max))
|
11476125
|
from collections import OrderedDict
import torch as T
import torch.nn as nn
from torch._six import container_abcs
import sympy as sp
from .. import utils
__all__ = ['wrapper', 'Sequential', 'Lambda', 'Module', 'MultiSingleInputModule', 'MultiMultiInputModule',
'SingleMultiInputModule']
class _LayerMethod:
"""
This mixin class contains various attributes to extend :mod:`torch` modules.
"""
@property
def input_shape(self):
return self._input_shape
@input_shape.setter
def input_shape(self, input_shape):
if input_shape is None or isinstance(input_shape, (int, sp.Symbol)):
shape = input_shape
elif isinstance(input_shape, str):
shape = sp.symbols(input_shape, integer=True)
elif isinstance(input_shape, container_abcs.Iterable):
shape = [sp.symbols(s, integer=True) if isinstance(s, str)
else sp.symbols('x{}'.format(i), integer=True) if s is None
else s for i, s in enumerate(input_shape)]
shape = tuple(shape)
self._input_shape = shape
@property
@utils.validate
def output_shape(self):
"""
Returns the output shape of the module.
"""
raise NotImplementedError
@property
def params(self):
"""
Return a tuple of all the parameters in the module.
"""
assert not hasattr(super(), 'params')
return tuple(self.state_dict().values())
@property
def trainable(self):
"""
Return a tuple of all parameters with :attr:`requires_grad` set to `True`.
"""
assert not hasattr(super(), 'trainable')
params = []
if hasattr(self, 'parameters'):
params = [p for p in self.parameters() if p.requires_grad]
return tuple(params)
@property
def regularizable(self):
"""
Returns a tuple of parameters to be regularized.
"""
assert not hasattr(super(), 'regularizable')
params = []
if hasattr(self, 'weight'):
if self.weight.requires_grad:
params += [self.weight]
for m in list(self.children()):
if hasattr(m, 'regularizable'):
params.extend(m.regularizable)
return tuple(params)
def save(self, param_file):
"""
        Save the weights of the model in :class:`numpy.ndarray` format.
:param param_file:
path to the weight file.
"""
assert not hasattr(super(), 'save')
params_np = utils.bulk_to_numpy(self.params)
params_dict = OrderedDict(zip(list(self.state_dict().keys()), params_np))
T.save(params_dict, param_file)
print('Model weights dumped to %s' % param_file)
def load(self, param_file, eval=True):
"""
Load the `numpy.ndarray` weights from file.
:param param_file:
path to the weight file.
:param eval:
whether to use evaluation mode or not.
"""
assert not hasattr(super(), 'load')
params_dict = T.load(param_file)
self.load_state_dict(params_dict)
if eval:
self.eval()
print('Model weights loaded from %s' % param_file)
def reset_parameters(self):
"""
This overloads the :meth:`torch.Module.reset_parameters` of the module.
Used for custom weight initialization.
"""
assert not hasattr(super(), 'reset_parameters')
pass
@utils.add_simple_repr
class Module(nn.Module, _LayerMethod):
"""
Similar to :class:`torch.nn.Module`, but extended by
:class:`~neuralnet_pytorch.layers.layers._LayerMethod`.
All the usages in native Pytorch are preserved.
Parameters
----------
input_shape
shape of the tensor to be input to the modules.
Can be a list, tuple, nested list/tuple or an integer.
"""
def __init__(self, input_shape=None):
super().__init__()
self.input_shape = input_shape
@utils.add_simple_repr
class MultiSingleInputModule(Module):
"""
This is an abstract class.
This class computes the results of multiple modules given an input tensor,
then fuses the results.
Parameters
----------
modules_or_tensors
a list of modules or tensors whose results are fused together.
Attributes
----------
input_shape
a list of input shapes of the incoming modules and tensors.
"""
def __init__(self, *modules_or_tensors):
assert all(isinstance(item, (nn.Module, T.Tensor)) for item in modules_or_tensors), \
'All items in modules_or_tensors should be Pytorch modules or tensors'
super().__init__()
input_shapes = []
def foo(item):
idx = len(list(self.children()))
if isinstance(item, nn.Module):
self.add_module('module%d' % idx, item)
input_shapes.append(item.output_shape)
else:
self.add_module('tensor%d' % idx, Lambda(lambda *args, **kwargs: item, input_shape=item.shape,
output_shape=item.shape))
input_shapes.append(item.shape)
list(map(foo, modules_or_tensors))
self.input_shape = tuple(input_shapes)
def forward(self, input, *args, **kwargs):
outputs = [module(input, *args, **kwargs) for name, module in self.named_children()]
return tuple(outputs)
@property
def trainable(self):
return tuple()
@property
def params(self):
return tuple()
@property
def regularizable(self):
return tuple()
class MultiMultiInputModule(MultiSingleInputModule):
"""
Similar to :class:`MultiSingleInputModule`, but each module has its own input tensor.
"""
def __init__(self, *modules_or_tensors):
super().__init__(*modules_or_tensors)
def forward(self, *input, **kwargs):
input_it = iter(input)
outputs = [module(next(input_it), **kwargs) if name.startswith('module') else module()
for name, module in self.named_children()]
return tuple(outputs)
class SingleMultiInputModule(Module):
def __init__(self, module):
super().__init__(module.output_shape)
self.module = module
@property
@utils.validate
def output_shape(self):
return self.module.output_shape
def forward(self, *input, **kwargs):
return tuple([self.module(inp, **kwargs) for inp in input])
@property
def trainable(self):
return tuple()
@property
def params(self):
return tuple()
@property
def regularizable(self):
return tuple()
@utils.add_simple_repr
class Sequential(nn.Sequential, _LayerMethod):
"""
Similar to :class:`torch.nn.Sequential`, but extended by
:class:`~neuralnet_pytorch.layers.layers._LayerMethod`.
All the usages in native Pytorch are preserved.
Parameters
----------
args
a list of modules as in :class:`torch.nn.Sequential`.
input_shape
shape of the input tensor. If ``None``, the functionality is
the same as :class:`torch.nn.Sequential`.
"""
def __init__(self, *args, input_shape=None):
self.input_shape = input_shape
super().__init__(*args)
def __getitem__(self, idx):
if isinstance(idx, slice):
start = idx.start if idx.start else 0
modules = list(self._modules.items())
return Sequential(OrderedDict(modules[idx]), input_shape=modules[start][1].input_shape)
else:
return self._get_item_by_idx(self._modules.values(), idx)
def add_module(self, name: str, module: T.nn.Module) -> None:
r"""Adds a child module to the current module.
The module can be accessed as an attribute using the given name.
Args:
name (string): name of the child module. The child module can be
accessed from this module using the given name
module (Module): child module to be added to the module.
"""
if not isinstance(module, T.nn.Module) and module is not None:
raise TypeError("{} is not a Module subclass".format(
T.typename(module)))
elif not isinstance(name, T._six.string_classes):
raise TypeError("module name should be a string. Got {}".format(
T.typename(name)))
elif hasattr(self, name) and name not in self._modules:
raise KeyError("attribute '{}' already exists".format(name))
elif '.' in name:
raise KeyError("module name can't contain \".\"")
elif name == '':
raise KeyError("module name can't be empty string \"\"")
if not hasattr(module, 'input_shape'):
self.input_shape = None
if len(self._modules) == 0 and hasattr(module, 'input_shape') and self.input_shape is None:
self.input_shape = module.input_shape
if len(self._modules) > 0 and hasattr(module, 'input_shape') and self.output_shape is not None:
module.input_shape = self.output_shape
self._modules[name] = module
def forward(self, input, *args, **kwargs):
for module in self._modules.values():
input = module(input, *args, **kwargs)
return input
@property
@utils.validate
def output_shape(self):
layers = list(self.children())
if not layers or self.input_shape is None:
return self.input_shape
else:
return layers[-1].output_shape if hasattr(layers[-1], 'output_shape') else None
def reset_parameters(self):
for m in self.children():
m.reset_parameters()
def wrapper(input_shape=None, output_shape=None, *args, **kwargs):
"""
A class decorator to wrap any :mod:`torch` module.
:param input_shape:
shape of the input to the module.
Can be ``None``.
:param output_shape:
shape of the output tensor.
If ``None``, the output shape is calculated by performing a forward pass.
:param args:
extra arguments needed by the module.
:param kwargs:
extra keyword arguments needed by the module.
:return:
The input module extended by :class:`~neuralnet_pytorch.layers.layers._LayerMethod`.
Examples
--------
You can use this function directly on any :mod:`torch` module
>>> import torch.nn as nn
>>> import neuralnet_pytorch as nnt
>>> dropout = nnt.wrapper(p=.2)(nn.Dropout2d)() # because wrapper returns a class!
Alternatively, you can use it as a decorator
.. code-block:: python
import torch.nn as nn
import neuralnet_pytorch as nnt
@nnt.wrapper(# optional arguments for input and output shapes)
class Foo(nn.Module):
...
foo = Foo()
"""
assert input_shape is None or isinstance(input_shape, (int, container_abcs.Iterable)), 'Unknown type of input_shape'
if isinstance(input_shape, int):
input_shape = (input_shape,)
def decorator(module: nn.Module):
assert issubclass(module, nn.Module), 'module must be a subclass of Pytorch\'s Module'
@utils.add_simple_repr
class _Wrapper(module, _LayerMethod):
def __init__(self):
self.input_shape = input_shape
self.output_shape_tmp = output_shape
super().__init__(*args, **kwargs)
def forward(self, input, *args, **kwargs):
return super().forward(input, *args, **kwargs)
@property
@utils.validate
def output_shape(self):
if self.input_shape is None and self.output_shape_tmp is None:
return None
if self.output_shape_tmp is not None:
return self.output_shape_tmp
else:
none_indices = [k for k in range(len(self.input_shape)) if self.input_shape[k] is None]
shape = [1 if s is None else s for s in self.input_shape]
dummy = T.zeros(*shape)
try:
dummy = dummy.to(next(self.parameters()).device)
except StopIteration:
pass
dummy = self(dummy)
output_shape = list(dummy.shape)
for k in none_indices:
output_shape[k] = None
return tuple(output_shape)
_Wrapper.__name__ = module.__name__
_Wrapper.__doc__ = module.__doc__
_Wrapper.__module__ = module.__module__
return _Wrapper
return decorator
class Lambda(Module):
"""
Wraps a function as a :class:`~neuralnet_pytorch.layers.Module`.
Parameters
----------
func
a callable function.
input_shape
shape of the input tensor.
output_shape
shape of the output tensor.
If ``None``, the output shape is calculated by performing a forward pass.
kwargs
keyword arguments required by `func`.
Examples
--------
You can easily wrap a :mod:`torch` function
.. code-block:: python
import torch as T
import neuralnet_pytorch as nnt
a, b = T.rand(3, 1), T.rand(3, 2)
cat = nnt.Lambda(T.cat, dim=1)
c = cat((a, b))
print(c.shape)
Also, it works for any self-defined function as well
.. code-block:: python
import neuralnet_pytorch as nnt
def foo(x, y):
return x + y
a = T.rand(3, 3)
print(a)
foo_sum = nnt.Lambda(foo, y=1.)
res = foo_sum(a)
print(res)
"""
def __init__(self, func, input_shape=None, output_shape=None, **kwargs):
assert callable(func), 'The provided function must be callable'
super().__init__(input_shape)
self.output_shape_tmp = output_shape
self.func = func
self.kwargs = kwargs
def forward(self, *input):
return self.func(*input, **self.kwargs)
@property
@utils.validate
def output_shape(self):
if self.input_shape is None and self.output_shape_tmp is None:
return None
if self.output_shape_tmp is not None:
return self.output_shape_tmp
else:
none_indices = [k for k in range(len(self.input_shape)) if self.input_shape[k] is None]
shape = [1 if s is None else s for s in self.input_shape]
dummy = T.zeros(*shape)
try:
dummy = dummy.to(next(self.parameters()).device)
except StopIteration:
pass
dummy = self.forward(dummy)
output_shape = list(dummy.shape)
for k in none_indices:
output_shape[k] = None
return tuple(output_shape)
def extra_repr(self):
s = '{}'.format(self.func.__name__)
return s
|
11476143
|
from setuptools import setup
setup(
name='devops-pipeline',
packages=['devops_pipeline'],
version='0.1',
description='infrastructure as code, pipeline tool',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/samsquire/devops-pipeline',
include_package_data=True,
install_requires=[
'flask',
'pydotplus',
'psutil',
'networkx',
'boto3',
'ansible',
'websockets',
'flask-socketio',
'eventlet',
'python-socketio',
'SQLAlchemy',
'parallel-ssh'
],
entry_points = {
"console_scripts": ['devops-pipeline=devops_pipeline.pipeline:main']
},
package_data={'web': ['devops_pipeline/web/*']},
classifiers=[
'Development Status :: 3 - Alpha',
'Topic :: Software Development :: Build Tools',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6'
]
)
|
11476159
|
import datetime
import logging
import shlex
import subprocess
import sys
import time
from typing import Sequence, Dict
import click
from lib.amazon import as_client, target_group_arn_for, get_autoscaling_group
from lib.ce_utils import describe_current_release, are_you_sure, \
    wait_for_autoscale_state, set_update_message
from lib.cli import cli
from lib.env import Config, Environment
from lib.instance import print_instances, Instance
from lib.ssh import exec_remote_all, run_remote_shell, exec_remote
logger = logging.getLogger(__name__)
@cli.group()
def instances():
"""Instance management commands."""
@instances.command(name='exec_all')
@click.pass_obj
@click.argument('remote_cmd', required=True, nargs=-1)
def instances_exec_all(cfg: Config, remote_cmd: Sequence[str]):
"""Execute REMOTE_CMD on all the instances."""
escaped = shlex.join(remote_cmd)
if not are_you_sure(f'exec command {escaped} in all instances', cfg):
return
print("Running '{}' on all instances".format(escaped))
exec_remote_all(pick_instances(cfg), remote_cmd)
@instances.command(name='login')
@click.pass_obj
def instances_login(cfg: Config):
"""Log in to one of the instances."""
instance = pick_instance(cfg)
run_remote_shell(instance)
@instances.command(name='restart_one')
@click.pass_obj
def instances_restart_one(cfg: Config):
"""Restart one of the instances."""
instance = pick_instance(cfg)
as_instance_status = instance.describe_autoscale()
if not as_instance_status:
logger.error("Failed restarting %s - was not in ASG", instance)
return
as_group_name = as_instance_status['AutoScalingGroupName']
modified_groups: Dict[str, int] = {}
try:
restart_one_instance(as_group_name, instance, modified_groups)
except RuntimeError as e:
logger.error("Failed restarting %s - skipping: %s", instance, e)
@instances.command(name='start')
@click.pass_obj
def instances_start(cfg: Config):
"""Start up the instances."""
print("Starting version %s", describe_current_release(cfg))
exec_remote_all(pick_instances(cfg), ['sudo', 'systemctl', 'start', 'compiler-explorer'])
@instances.command(name='stop')
@click.pass_obj
def instances_stop(cfg: Config):
"""Stop the instances."""
if cfg.env == Environment.PROD:
print('Operation aborted. This would bring down the site')
print('If you know what you are doing, edit the code in bin/lib/ce.py, function instances_stop_cmd')
elif are_you_sure('stop all instances', cfg):
exec_remote_all(pick_instances(cfg), ['sudo', 'systemctl', 'stop', 'compiler-explorer'])
@instances.command(name='restart')
@click.option('--motd', type=str, default='Site is being updated',
help='Set the message of the day used during update', show_default=True)
@click.pass_obj
def instances_restart(cfg: Config, motd: str):
"""Restart the instances, picking up new code."""
if not are_you_sure('restart all instances with version {}'.format(describe_current_release(cfg)), cfg):
return
begin_time = datetime.datetime.now()
# Store old motd
set_update_message(cfg, motd)
modified_groups: Dict[str, int] = {}
failed = False
to_restart = pick_instances(cfg)
for index, instance in enumerate(to_restart):
logger.info("Restarting %s (%d of %d)...", instance, index + 1, len(to_restart))
as_instance_status = instance.describe_autoscale()
if not as_instance_status:
logger.warning("Skipping %s as it is no longer in the ASG", instance)
continue
as_group_name = as_instance_status['AutoScalingGroupName']
if as_instance_status['LifecycleState'] != 'InService':
logger.warning("Skipping %s as it is not InService (%s)", instance, as_instance_status)
continue
try:
restart_one_instance(as_group_name, instance, modified_groups)
except RuntimeError as e:
logger.error("Failed restarting %s - skipping: %s", instance, e)
failed = True
# TODO, what here?
for group, desired in modified_groups.items():
logger.info("Putting desired instances for %s back to %d", group, desired)
as_client.update_auto_scaling_group(AutoScalingGroupName=group, DesiredCapacity=desired)
set_update_message(cfg, '')
end_time = datetime.datetime.now()
delta_time = end_time - begin_time
print(f'Instances restarted in {delta_time.total_seconds()} seconds')
sys.exit(1 if failed else 0)
@instances.command(name='status')
@click.pass_obj
def instances_status(cfg: Config):
"""Get the status of the instances."""
print_instances(Instance.elb_instances(target_group_arn_for(cfg)), number=False)
def pick_instance(cfg: Config):
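"""Pick a single instance; prompt when more than one is behind the load balancer."""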
elb_instances = Instance.elb_instances(target_group_arn_for(cfg))
if len(elb_instances) == 1:
return elb_instances[0]
while True:
print_instances(elb_instances, number=True)
inst = input('Which instance? ')
try:
return elb_instances[int(inst)]
except (ValueError, IndexError):
pass
def pick_instances(cfg: Config):
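"""Return all instances currently registered with the environment's target group."""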
return Instance.elb_instances(target_group_arn_for(cfg))
def restart_one_instance(as_group_name: str, instance: Instance, modified_groups: Dict[str, int]):
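# Restart sequence: protect the instance from scale-in, move it to Standby
# (keeping the ASG's desired capacity when the group is at its minimum size,
# so overall capacity is not lost), restart the service, wait until the
# healthcheck and the ELB report healthy, then move the instance back
# InService and remove the scale-in protection.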
instance_id = instance.instance.instance_id
logger.info("Enabling instance protection for %s", instance)
as_client.set_instance_protection(AutoScalingGroupName=as_group_name,
InstanceIds=[instance_id],
ProtectedFromScaleIn=True)
as_group = get_autoscaling_group(as_group_name)
adjustment_required = as_group['DesiredCapacity'] == as_group['MinSize']
if adjustment_required:
logger.info("Group '%s' needs to be adjusted to keep enough nodes", as_group_name)
modified_groups[as_group['AutoScalingGroupName']] = as_group['DesiredCapacity']
logger.info("Putting %s into standby", instance)
as_client.enter_standby(
InstanceIds=[instance_id],
AutoScalingGroupName=as_group_name,
ShouldDecrementDesiredCapacity=not adjustment_required)
wait_for_autoscale_state(instance, 'Standby')
logger.info("Restarting service on %s", instance)
restart_response = exec_remote(instance, ['sudo', 'systemctl', 'restart', 'compiler-explorer'])
if restart_response:
logger.warning("Restart gave some output: %s", restart_response)
wait_for_healthok(instance)
logger.info("Moving %s out of standby", instance)
as_client.exit_standby(
InstanceIds=[instance_id],
AutoScalingGroupName=as_group_name)
wait_for_autoscale_state(instance, 'InService')
wait_for_elb_state(instance, 'healthy')
logger.info("Disabling instance protection for %s", instance)
as_client.set_instance_protection(AutoScalingGroupName=as_group_name,
InstanceIds=[instance_id],
ProtectedFromScaleIn=False)
logger.info("Instance restarted ok")
def wait_for_elb_state(instance, state):
logger.info("Waiting for %s to reach ELB state '%s'...", instance, state)
while True:
instance.update()
instance_state = instance.instance.state['Name']
if instance_state != 'running':
raise RuntimeError('Instance no longer running (state {})'.format(instance_state))
logger.debug("State is %s", instance.elb_health)
if instance.elb_health == state:
logger.info("...done")
return
time.sleep(5)
def is_everything_awesome(instance):
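"""Return True if the instance's local healthcheck answers with the expected body."""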
try:
response = exec_remote(instance, ['curl', '-s', '--max-time', '2', 'http://127.0.0.1/healthcheck'])
return response.strip() == "Everything is awesome"
except subprocess.CalledProcessError:
return False
def wait_for_healthok(instance):
logger.info("Waiting for instance to be Online %s", instance)
sys.stdout.write('Waiting')
while not is_everything_awesome(instance):
sys.stdout.write('.')
# Flush stdout so tmux updates
sys.stdout.flush()
time.sleep(10)
print("Ok, Everything is awesome!")
|
11476209
|
from spotipy.oauth2 import SpotifyClientCredentials
import spotipy
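# NOTE: with no arguments, SpotifyClientCredentials picks up the client ID and
# secret from the SPOTIPY_CLIENT_ID and SPOTIPY_CLIENT_SECRET environment
# variables.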
client_credentials_manager = SpotifyClientCredentials()
sp = spotipy.Spotify(client_credentials_manager=client_credentials_manager)
results = sp.search(q='weezer', limit=20)
for i, t in enumerate(results['tracks']['items']):
print(' ', i, t['name'])
|
11476261
|
import pandas as pd
from sqlalchemy import create_engine
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.orm.session import Session
from tqdm.notebook import tqdm
import datetime as dt
from couchers.config import config
from couchers.db import session_scope
from couchers.models import (
Cluster,
ClusterRole,
ClusterSubscription,
Discussion,
Node,
User,
)
def create_session():
engine = create_engine(config["DATABASE_CONNECTION_STRING"])
return Session(engine)
def get_table_columns(table):
with session_scope() as session:
query = session.query(table).limit(0)
df = pd.read_sql(query.statement, query.session.bind)
return list(df.columns)
def get_dataframe(table):
with session_scope() as session:
query = session.query(table)
return pd.read_sql(query.statement, query.session.bind)
def update_community_description(node_id, description, override_length_constraint=False):
if len(description) > 500 and not override_length_constraint:
print(
f"The description length is {len(description)}. The limit is 500 characters."
)
return
with session_scope() as session:
community = (
session.query(Cluster).filter(Cluster.parent_node_id == node_id).one()
)
community.description = description
name = community.name
new_description = community.description
print(f"The {name} community description has been updated to:\n{new_description}")
def delete_discussion(discussion_id):
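# Delete bottom-up (replies, then comments, then the thread, then the
# discussion itself) so no foreign-key references are left dangling.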
with session_scope() as session:
discussion = (
session.query(Discussion).filter(Discussion.id == discussion_id).one()
)
thread = discussion.thread
comments = thread.comments
for comment in comments:
for reply in comment.replies:
session.delete(reply)
session.delete(comment)
session.delete(thread)
session.delete(discussion)
session.commit()
def new_admin(community_node_id, username):
with session_scope() as session:
user = session.query(User).filter(User.username == username).one()
node = session.query(Node).filter(Node.id == community_node_id).one()
cluster = node.official_cluster
# if they are already a member change their role
try:
community_subscription = (
session.query(ClusterSubscription)
.filter(
(ClusterSubscription.user_id == user.id)
& (ClusterSubscription.cluster_id == cluster.id)
)
.one()
)
community_subscription.role = ClusterRole.admin
# else create new subscription
except NoResultFound:
cluster.cluster_subscriptions.append(
ClusterSubscription(
user=user,
role=ClusterRole.admin,
)
)
cluster_name = cluster.name
print(f"{username} is now an admin of {cluster_name}")
def remove_admin(community_node_id, username):
with session_scope() as session:
user = session.query(User).filter(User.username == username).one()
node = session.query(Node).filter(Node.id == community_node_id).one()
cluster = node.official_cluster
try:
community_subscription = (
session.query(ClusterSubscription)
.filter(
(ClusterSubscription.user_id == user.id)
& (ClusterSubscription.cluster_id == cluster.id)
)
.one()
)
except NoResultFound:
print(f"{username} is not an admin of the {cluster.name} community.")
return
if community_subscription.role == ClusterRole.member:
print(f"{username} is not an admin of the {cluster.name} community.")
return
community_subscription.role = ClusterRole.member
print(
f"{username} has been removed as an admin from the {cluster.name} community."
)
def get_incomplete_communities_df():
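"""Return a DataFrame of official communities that are missing at least
one of: a discussion, a description longer than 200 characters, a main
page longer than 200 characters, or an admin who is not a man."""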
with session_scope() as session:
print("getting communities...")
community_df = get_dataframe(Cluster).query("is_official_cluster == True")
community_df["url"] = community_df.apply(
lambda row: f"app.couchers.org/community/{row.parent_node_id}/{row.slugify_1}",
axis=1,
)
result_df = community_df[
["id", "parent_node_id", "name", "url", "created"]
].copy()
print("getting discussions...")
discussion_df = get_dataframe(Discussion)
result_df["has_discussions"] = result_df.id.apply(
lambda x: _has_discussions(x, discussion_df)
)
tqdm.pandas(desc="getting properties for communities")
(
result_df["has_description_length"],
result_df["has_main_page_length"],
result_df["has_non_man_admin"],
) = zip(
*result_df.parent_node_id.progress_apply(
lambda x: _complete_community_properties(session, x)
)
)
return result_df[
~(
result_df.has_discussions
& result_df.has_description_length
& result_df.has_main_page_length
& result_df.has_non_man_admin
)
].sort_values("id")
def _has_discussions(community_id, discussion_df):
num_discussions = discussion_df.query(
f"owner_cluster_id == {str(community_id)}"
).shape[0]
return num_discussions > 0
def _complete_community_properties(session, community_node_id):
community = (
session.query(Cluster).filter(Cluster.parent_node_id == community_node_id).one()
)
return (
len(community.description) > 200,
len(community.main_page.versions[-1].content) > 200,
_has_non_man_admin(community),
)
def _has_non_man_admin(community):
admins = community.admins.all()
for admin in admins:
if admin.gender not in ["Man", "Male"]:
return True
return False
def users_per_day_plot(average_over_days=7):
df = get_dataframe(User)
df = (
df.sort_values("joined")
.reset_index(drop=True)
.reset_index()
.set_index("joined")
)
print(f"Average new users per day over the last {average_over_days} days")
return (
df.apply(
lambda row: df[
row.name - dt.timedelta(days=average_over_days) : row.name
].shape[0]
/ average_over_days,
axis=1,
)
.plot()
.grid()
)
def users_over_time_plot():
df = get_dataframe(User)
df = df.sort_values("joined").reset_index(drop=True).reset_index()
return df.plot("joined", "index", logy=True).grid()
|
11476289
|
import logging
from functools import reduce
from operator import mul as operator_mul
import cfdm
import numpy as np
from . import mixin
from .constructs import Constructs
from .data import Data
from .decorators import _inplace_enabled, _inplace_enabled_define_and_cleanup
from .functions import _DEPRECATION_ERROR_ARG, parse_indices
logger = logging.getLogger(__name__)
_empty_set = set()
class Domain(mixin.FieldDomain, mixin.Properties, cfdm.Domain):
"""A domain construct of the CF data model.
The domain represents a set of discrete "locations" in what
generally would be a multi-dimensional space, either in the real
world or in a model's simulated world. The data array elements of
a field construct correspond to individual locations of a domain.
The domain construct is defined collectively by the following
constructs of the CF data model: domain axis, dimension
coordinate, auxiliary coordinate, cell measure, coordinate
reference, and domain ancillary constructs; as well as properties
to describe the domain.
"""
def __new__(cls, *args, **kwargs):
"""Creates a new Domain instance."""
instance = super().__new__(cls)
instance._Data = Data
instance._Constructs = Constructs
return instance
def __repr__(self):
"""Called by the `repr` built-in function.
x.__repr__() <==> repr(x)
"""
return super().__repr__().replace("<", "<CF ", 1)
@property
def _cyclic(self):
"""Storage for axis cyclicity.
Do not change the value in-place.
"""
return self._custom.get("_cyclic", _empty_set)
@_cyclic.setter
def _cyclic(self, value):
"""value must be a set.
Do not change the value in-place.
"""
self._custom["_cyclic"] = value
@_cyclic.deleter
def _cyclic(self):
self._custom["_cyclic"] = _empty_set
@property
def size(self):
"""The number of locations in the domain.
If there are no domain axis constructs, or any domain axis
construct has a size of 0, then the size is 0.
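**Examples:**
For instance, for the domain of ``cf.example_field(0)`` (axis sizes
5, 8 and 1, as in the `flip` examples below):
>>> d = cf.example_field(0).domain
>>> d.size
40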
"""
domain_axes = self.domain_axes(todict=True)
if not domain_axes:
return 0
return reduce(
operator_mul,
[domain_axis.get_size(0) for domain_axis in domain_axes.values()],
1,
)
def close(self):
"""Close all files referenced by the domain construct.
Note that a closed file will be automatically reopened if its
contents are subsequently required.
:Returns:
`None`
**Examples:**
>>> d.close()
"""
# TODODASK - is this still needed?
self.constructs.close()
# def cyclic(
# self, *identity, iscyclic=True, period=None, config={}, **filter_kwargs
# ):
# """Set the cyclicity of an axis.
#
# .. versionadded:: 3.11.0
#
# .. seealso:: `autocyclic`, `domain_axis`, `iscyclic`
#
# :Parameters:
#
# identity, filter_kwargs: optional
# Select the unique domain axis construct returned by
# ``f.domain_axis(*identity, **filter_kwargs)``. See
# `domain_axis` for details.
#
# iscyclic: `bool`, optional
# If False then the axis is set to be non-cyclic. By
# default the selected axis is set to be cyclic.
#
# period: optional
# The period for a dimension coordinate construct which
# spans the selected axis. May be any numeric scalar
# object that can be converted to a `Data` object (which
# includes numpy array and `Data` objects). The absolute
# value of *period* is used. If *period* has units then
# they must be compatible with those of the dimension
# coordinates, otherwise it is assumed to have the same
# units as the dimension coordinates.
#
# config: `dict`
# Additional parameters for optimizing the
# operation. See the code for details.
#
# :Returns:
#
# `set`
# The construct keys of the domain axes which were cyclic
# prior to the new setting, or the current cyclic domain
# axes if no axis was specified.
#
# **Examples:**
#
# >>> f.cyclic()
# set()
# >>> f.cyclic('X', period=360)
# set()
# >>> f.cyclic()
# {'domainaxis2'}
# >>> f.cyclic('X', iscyclic=False)
# {'domainaxis2'}
# >>> f.cyclic()
# set()
#
# """
# cyclic = self._cyclic
# old = cyclic.copy()
#
# if identity is None:
# return old
#
# axis = self.domain_axis(identity, key=True)
#
# if iscyclic:
# dim = self.dimension_coordinate(axis, default=None)
# if dim is not None:
# if period is not None:
# dim.period(period)
# elif dim.period() is None:
# raise ValueError(
# "A cyclic dimension coordinate must have a period"
# )
#
# # Never change _cyclic in-place
# self._cyclic = cyclic.union((axis,))
#
# return old
#
# def domain_axis(self, identity=None, key=False, item=False,
# default=ValueError()):
# """Return a domain axis construct, or its key.
#
# .. versionadded:: 3.11.0
#
# .. seealso:: `construct`, `auxiliary_coordinate`, `cell_measure`,
# `cell_method`, `coordinate`, `coordinate_reference`,
# `dimension_coordinate`, `domain_ancillary`,
# `domain_axes`, `field_ancillary`
#
# :Parameters:
#
# identity: optional
# Select the domain axis construct.
#
# {{domain axis selection}}
#
# If *identity is `None` (the default) then the unique
# domain axis construct is selected when there is only one
# of them.
#
# *Parameter example:*
# ``identity='time'``
#
# *Parameter example:*
# ``identity='domainaxis2'``
#
# *Parameter example:*
# ``identity='ncdim%y'``
#
# key: `bool`, optional
# If True then return the selected construct key. By
# default the construct itself is returned.
#
# default: optional
# Return the value of the *default* parameter if a construct
# can not be found.
#
# {{default Exception}}
#
# :Returns:
#
# `DomainAxis` or `str`
# The selected domain axis construct, or its key.
#
# **Examples:**
#
# """
# c = self.domain_axes(identity)
#
# n = len(c)
# if n == 1:
# k, construct = c.popitem()
# if key:
# return k
#
# if item:
# return k, construct
#
# return construct
# elif n > 1:
# if default is None:
# return default
#
# return self._default(
# default,
# f"{self.__class__.__name__}.{_method}() can't return {n} "
# "constructs",
# )
#
# # identity is not a unique domain axis construct identity
# da_key = self.domain_axis_key(identity, default=None)
# if da_key is None:
# if default is None:
# return default
#
# return self._default(
# default,
# message=f"No domain axis found from identity {identity!r}",
# )
#
# if key:
# return da_key
#
# return self.constructs[da_key]
@_inplace_enabled(default=False)
def flip(self, axes=None, inplace=False):
"""Flip (reverse the direction of) domain axes.
.. seealso:: `domain_axis`, `transpose`
:Parameters:
axes: (sequence of) `str` , optional
Select the domain axes to flip.
A domain axis is identified by that which would be
selected by passing a given axis description to a call of
the `domain_axis` method. For example, a value of ``'X'``
would select the domain axis construct returned by
``f.domain_axis('X')``.
If no axes are provided then all axes are flipped.
{{inplace: `bool`, optional}}
:Returns:
`Domain` or `None`
The domain with flipped axes, or `None` if the operation
was in-place.
**Examples:**
>>> d = cf.example_field(0).domain
>>> print(d)
Dimension coords: latitude(5) = [-75.0, ..., 75.0] degrees_north
: longitude(8) = [22.5, ..., 337.5] degrees_east
: time(1) = [2019-01-01 00:00:00]
>>> print(d.flip('X'))
Dimension coords: latitude(5) = [-75.0, ..., 75.0] degrees_north
: longitude(8) = [337.5, ..., 22.5] degrees_east
: time(1) = [2019-01-01 00:00:00]
>>> print(d.flip(['T', 'Y']))
Dimension coords: latitude(5) = [75.0, ..., -75.0] degrees_north
: longitude(8) = [22.5, ..., 337.5] degrees_east
: time(1) = [2019-01-01 00:00:00]
>>> print(d.flip())
Dimension coords: latitude(5) = [75.0, ..., -75.0] degrees_north
: longitude(8) = [337.5, ..., 22.5] degrees_east
: time(1) = [2019-01-01 00:00:00]
"""
d = _inplace_enabled_define_and_cleanup(self)
if axes is None:
# Flip all the axes
axes = self.domain_axes(todict=True)
else:
axes = self._parse_axes(axes)
axes = set(axes)
# Flip constructs with data
d.constructs._flip(axes)
return d
def get_data(self, default=ValueError(), _units=None, _fill_value=True):
"""Return a default value when data is requested.
A `Domain` instance can never have data, so a default value
must be returned if data is requested. This is useful for
cases when it is not known in advance if a `Field` or `Domain`
instance is in use.
.. versionadded:: 3.11.0
.. seealso:: `has_data`
:Parameters:
default: optional
Return the value of the *default* parameter.
{{default Exception}}
_units: optional
Ignored.
_fill_value: optional
Ignored.
:Returns:
The value of the *default* parameter, if an exception
has not been raised.
**Examples:**
>>> d = cf.example_domain(0)
>>> print(d.get_data(None))
None
>>> d.get_data()
Traceback (most recent call last):
...
ValueError: Domain has no data
"""
if default is None:
return
return self._default(
default, message=f"{self.__class__.__name__} has no data"
)
def get_data_axes(self, identity, default=ValueError()):
"""Return the keys of the domain axis constructs spanned by the
data of a metadata construct.
.. versionadded:: 3.11.0
.. seealso:: `del_data_axes`, `has_data_axes`, `set_data_axes`
:Parameters:
identity: optional
Select the construct by one of
* A metadata construct identity.
{{construct selection identity}}
* The key of a metadata construct
* `None`. This is the default, which selects the metadata
construct when there is only one of them.
*Parameter example:*
``identity='latitude'``
*Parameter example:*
``identity='T'``
*Parameter example:*
``identity='long_name=Cell Area'``
*Parameter example:*
``identity='cellmeasure1'``
*Parameter example:*
``identity='measure:area'``
*Parameter example:*
``identity=cf.eq('time')``
*Parameter example:*
``identity=re.compile('^lat')``
default: optional
Return the value of the *default* parameter if the data
axes have not been set.
{{default Exception}}
:Returns:
`tuple`
The keys of the domain axis constructs spanned by the data.
**Examples:**
>>> d = cf.example_field(7).domain
>>> print(d)
Dimension coords: time(3) = [1979-05-01 12:00:00, 1979-05-02 12:00:00, 1979-05-03 12:00:00] gregorian
: air_pressure(1) = [850.0] hPa
: grid_latitude(4) = [0.44, ..., -0.88] degrees
: grid_longitude(5) = [-1.18, ..., 0.58] degrees
Auxiliary coords: latitude(grid_latitude(4), grid_longitude(5)) = [[52.4243, ..., 51.1163]] degrees_north
: longitude(grid_latitude(4), grid_longitude(5)) = [[8.0648, ..., 10.9238]] degrees_east
Coord references: grid_mapping_name:rotated_latitude_longitude
>>> print(d.constructs)
Constructs:
{'auxiliarycoordinate0': <CF AuxiliaryCoordinate: latitude(4, 5) degrees_north>,
'auxiliarycoordinate1': <CF AuxiliaryCoordinate: longitude(4, 5) degrees_east>,
'coordinatereference0': <CF CoordinateReference: grid_mapping_name:rotated_latitude_longitude>,
'dimensioncoordinate0': <CF DimensionCoordinate: time(3) days since 1979-1-1 gregorian>,
'dimensioncoordinate1': <CF DimensionCoordinate: air_pressure(1) hPa>,
'dimensioncoordinate2': <CF DimensionCoordinate: grid_latitude(4) degrees>,
'dimensioncoordinate3': <CF DimensionCoordinate: grid_longitude(5) degrees>,
'domainaxis0': <CF DomainAxis: size(3)>,
'domainaxis1': <CF DomainAxis: size(1)>,
'domainaxis2': <CF DomainAxis: size(4)>,
'domainaxis3': <CF DomainAxis: size(5)>}
>>> d.get_data_axes('grid_latitude')
('domainaxis2',)
>>> d.get_data_axes('latitude')
('domainaxis2', 'domainaxis3')
"""
key = self.construct(identity, key=True, default=None)
if key is None:
return self.construct_key(identity, default=default)
return super().get_data_axes(key=key, default=default)
def identity(self, default="", strict=False, relaxed=False, nc_only=False):
"""Return the canonical identity.
By default the identity is the first found of the following:
* The "id" attribute, preceded by ``'id%'``.
* The "cf_role" property, preceded by ``'cf_role='``.
* The "long_name" property, preceded by ``'long_name='``.
* The netCDF variable name, preceded by ``'ncvar%'``.
* The value of the *default* parameter.
.. versionadded:: 3.11.0
.. seealso:: `id`, `identities`
:Parameters:
default: optional
If no identity can be found then return the value of the
default parameter.
strict: `bool`, optional
If True then the identity is the first found of only the
"standard_name" property or the "id" attribute.
relaxed: `bool`, optional
If True then the identity is the first found of only the
"standard_name" property, the "id" attribute, the
"long_name" property or the netCDF variable name.
nc_only: `bool`, optional
If True then only take the identity from the netCDF
variable name.
:Returns:
The identity.
**Examples:**
>>> f.properties()
{'foo': 'bar',
'long_name': 'Air Temperature',
'standard_name': 'air_temperature'}
>>> f.nc_get_variable()
'tas'
>>> f.identity()
'air_temperature'
>>> f.del_property('standard_name')
'air_temperature'
>>> f.identity(default='no identity')
'long_name=Air Temperature'
>>> f.identity()
'long_name=Air Temperature'
>>> f.del_property('long_name')
>>> f.identity()
'ncvar%tas'
>>> f.nc_del_variable()
'tas'
>>> f.identity()
''
>>> f.identity(default='no identity')
'no identity'
"""
if nc_only:
if strict:
raise ValueError(
"'strict' and 'nc_only' parameters cannot both be True"
)
if relaxed:
raise ValueError(
"'relaxed' and 'nc_only' parameters cannot both be True"
)
n = self.nc_get_variable(None)
if n is not None:
return f"ncvar%{n}"
return default
n = getattr(self, "id", None)
if n is not None:
return f"id%{n}"
if relaxed:
n = self.get_property("long_name", None)
if n is not None:
return f"long_name={n}"
n = self.nc_get_variable(None)
if n is not None:
return f"ncvar%{n}"
return default
if strict:
return default
for prop in ("cf_role", "long_name"):
n = self.get_property(prop, None)
if n is not None:
return f"{prop}={n}"
n = self.nc_get_variable(None)
if n is not None:
return f"ncvar%{n}"
return default
def identities(self):
"""Return all possible identities.
The identities comprise:
* The "id" attribute, preceded by ``'id%'``.
* The ``cf_role`` property, preceded by ``'cf_role='``.
* The ``long_name`` property, preceded by ``'long_name='``.
* All other properties, preceded by the property name and an
equals sign, e.g. ``'foo=bar'``.
* The netCDF variable name, preceded by ``'ncvar%'``.
.. versionadded:: (cfdm) 1.9.0.0
.. seealso:: `identity`
:Returns:
`list`
The identities.
**Examples:**
>>> d = {{package}}.Domain()
>>> d.set_properties({'foo': 'bar',
... 'long_name': 'Domain for model'})
>>> d.nc_set_variable('dom1')
>>> d.identities()
['long_name=Domain for model', 'foo=bar', 'ncvar%dom1']
"""
out = super().identities()
i = getattr(self, "id", None)
if i is not None:
# Insert id attribute
i = f"id%{i}"
if not out:
out = [i]
else:
out.insert(0, i)
return out
def indices(self, *mode, **kwargs):
"""Create indices that define a subspace of the domain
construct.
The indices returned by this method may be used to create the subspace
by passing them to the `subspace` method of the original domain
construct.
The subspace is defined by identifying indices based on the
metadata constructs.
Metadata constructs are selected by specifying conditions on their
data. Indices for subspacing are then automatically inferred from
where the conditions are met.
Metadata constructs and the conditions on their data are defined
by keyword parameters.
* Any domain axes that have not been identified remain unchanged.
* Multiple domain axes may be subspaced simultaneously, and it
doesn't matter which order they are specified in.
* Explicit indices may also be assigned to a domain axis
identified by a metadata construct, with either a Python `slice`
object, or a sequence of integers or booleans.
* For a dimension that is cyclic, a subspace defined by a slice or
by a `Query` instance is assumed to "wrap" around the edges of
the data.
* Conditions may also be applied to multi-dimensional metadata
constructs. The "compress" mode is still the default mode (see
the positional arguments), but because the indices may not be
acting along orthogonal dimensions, some missing data may still
need to be inserted into the field construct's data.
.. versionadded:: 3.11.0
.. seealso:: `subspace`, `where`, `__getitem__`, `__setitem__`
:Parameters:
mode: `str`, *optional*
There are two modes of operation, each of which provides
indices for a different type of subspace:
============== ==========================================
*mode* Description
============== ==========================================
``'compress'`` Return indices that identify only the
requested locations.
This is the default mode.
Note that if a multi-dimensional metadata
construct is being used to define the
indices then some unrequested locations
may also be selected.
``'envelope'`` The returned subspace is the smallest that
contains all of the requested locations.
============== ==========================================
kwargs: *optional*
A keyword name is an identity of a metadata construct, and
the keyword value provides a condition for inferring
indices that apply to the dimension (or dimensions)
spanned by the metadata construct's data. Indices are
created that select every location for which the metadata
construct's data satisfies the condition.
:Returns:
`dict`
A dictionary of indices, keyed by the domain axis
construct identifiers to which they apply.
**Examples:**
>>> d = cf.example_field(0).domain
>>> print(d)
Dimension coords: latitude(5) = [-75.0, ..., 75.0] degrees_north
: longitude(8) = [22.5, ..., 337.5] degrees_east
: time(1) = [2019-01-01 00:00:00]
>>> indices = d.indices(X=112.5)
>>> indices
{'domainaxis0': slice(0, 5, 1),
'domainaxis1': slice(2, 3, 1),
'domainaxis2': slice(0, 1, 1)}
>>> print(d.subspace(**indices))
Dimension coords: latitude(5) = [-75.0, ..., 75.0] degrees_north
: longitude(1) = [112.5] degrees_east
: time(1) = [2019-01-01 00:00:00]
>>> indices = d.indices(X=112.5, Y=cf.wi(-60, 30))
>>> indices
{'domainaxis0': slice(1, 3, 1),
'domainaxis1': slice(2, 3, 1),
'domainaxis2': slice(0, 1, 1)}
>>> print(d.subspace(**indices))
Dimension coords: latitude(2) = [-45.0, 0.0] degrees_north
: longitude(1) = [112.5] degrees_east
: time(1) = [2019-01-01 00:00:00]
>>> d.indices(X=[-1, 0], Y=slice(1, -1))
{'domainaxis0': slice(1, 4, 1),
'domainaxis1': slice(7, None, -7),
'domainaxis2': slice(0, 1, 1)}
>>> print(d.subspace(**indices))
Dimension coords: latitude(3) = [-45.0, 0.0, 45.0] degrees_north
: longitude(2) = [337.5, 22.5] degrees_east
: time(1) = [2019-01-01 00:00:00]
"""
if len(mode) > 1:
raise ValueError(
"Can't provide more than one positional argument. "
f"Got: {', '.join(repr(x) for x in mode)}"
)
if not mode or "compress" in mode:
mode = "compress"
elif "envelope" in mode:
mode = "envelope"
else:
raise ValueError(f"Invalid value for 'mode' argument: {mode[0]!r}")
# ------------------------------------------------------------
# Get the indices for every domain axis in the domain, without
# any auxiliary masks.
# ------------------------------------------------------------
domain_indices = self._indices(mode, None, False, **kwargs)
# ------------------------------------------------------------
# Return the indices
# ------------------------------------------------------------
return domain_indices["indices"]
def match_by_construct(self, *identities, OR=False, **conditions):
"""Whether or not there are particular metadata constructs.
.. versionadded:: 3.11.0
.. seealso:: `match`, `match_by_property`, `match_by_rank`,
`match_by_identity`, `match_by_ncvar`
:Parameters:
identities: optional
Identify the metadata constructs by one or more of
* A metadata construct identity.
{{construct selection identity}}
* The key of a metadata construct
If a cell method construct identity is given (such as
``'method:mean'``) then it will only be compared with the
most recently applied cell method operation.
Alternatively, one or more cell method constructs may be
identified in a single string with a CF-netCDF cell
methods-like syntax for describing both the collapse
dimensions, the collapse method, and any cell method
construct qualifiers. If N cell methods are described in
this way then they will collectively identify the N most
recently applied cell method operations. For example,
``'T: maximum within years T: mean over years'`` will be
compared with the two most recently applied cell
method operations.
*Parameter example:*
``identity='latitude'``
*Parameter example:*
``'T'``
*Parameter example:*
``'latitude'``
*Parameter example:*
``'long_name=Cell Area'``
*Parameter example:*
``'cellmeasure1'``
*Parameter example:*
``'measure:area'``
*Parameter example:*
``cf.eq('time')``
*Parameter example:*
``re.compile('^lat')``
*Parameter example:*
``'domainancillary2', 'longitude'``
*Parameter example:*
``'area: mean T: maximum'``
*Parameter example:*
``'grid_latitude', 'area: mean T: maximum'``
conditions: optional
Identify the metadata constructs that have any of the
given identities or construct keys, and whose data satisfy
conditions.
A construct identity or construct key (as defined by the
*identities* parameter) is given as a keyword name and a
condition on its data is given as the keyword value.
The condition is satisfied if any of its data values
equals the value provided.
*Parameter example:*
``longitude=180.0``
*Parameter example:*
``time=cf.dt('1959-12-16')``
*Parameter example:*
``latitude=cf.ge(0)``
*Parameter example:*
``latitude=cf.ge(0), air_pressure=500``
*Parameter example:*
``**{'latitude': cf.ge(0), 'long_name=soil_level': 4}``
OR: `bool`, optional
If True then return `True` if at least one metadata
construct matches at least one of the criteria given by
the *identities* or *conditions* arguments. By default
`True` is only returned if the domain construct matches
each of the given criteria.
:Returns:
`bool`
Whether or not the domain construct contains the specified
metadata constructs.
**Examples:**
>>> d = cf.example_field(0).domain
>>> print(d)
Dimension coords: latitude(5) = [-75.0, ..., 75.0] degrees_north
: longitude(8) = [22.5, ..., 337.5] degrees_east
: time(1) = [2019-01-01 00:00:00]
>>> d.match_by_construct("latitude")
True
>>> d.match_by_construct("air_pressure")
False
>>> d.match_by_construct("longitude", "time")
True
>>> d.match_by_construct(longitude=22.5)
True
>>> d.match_by_construct(longitude=15.5)
False
>>> d.match_by_construct(longitude=cf.gt(340))
False
>>> d.match_by_construct(longitude=cf.gt(240))
True
>>> d.match_by_construct(time=cf.dt("2019-01-01"))
True
>>> d.match_by_construct(time=cf.dt("2020-01-01"))
False
"""
if identities:
if identities[0] == "or":
_DEPRECATION_ERROR_ARG(
self,
"match_by_construct",
"or",
message="Use 'OR=True' instead.",
version="3.1.0",
) # pragma: no cover
if identities[0] == "and":
_DEPRECATION_ERROR_ARG(
self,
"match_by_construct",
"and",
message="Use 'OR=False' instead.",
version="3.1.0",
) # pragma: no cover
if not identities and not conditions:
return True
constructs = self.constructs
if not constructs:
return False
n = 0
for identity in identities:
filtered = constructs(identity)
if filtered:
n += 1
elif not OR:
return False
if conditions:
for identity, value in conditions.items():
if self.subspace("test", **{identity: value}):
n += 1
elif not OR:
return False
if OR:
return bool(n)
return True
@_inplace_enabled(default=False)
def roll(self, axis, shift, inplace=False):
"""Roll the field along a cyclic axis.
A unique axis is selected with the axes and kwargs parameters.
.. versionadded:: 1.0
.. seealso:: `anchor`, `axis`, `cyclic`, `iscyclic`, `period`
:Parameters:
axis:
The cyclic axis to be rolled, defined by that which would
be selected by passing the given axis description to a
call of the field construct's `domain_axis` method. For
example, for a value of ``'X'``, the domain axis construct
returned by ``f.domain_axis('X')`` is selected.
shift: `int`
The number of places by which the selected cyclic axis is
to be rolled.
{{inplace: `bool`, optional}}
:Returns:
`Domain` or `None`
The rolled domain, or `None` if the operation was in-place.
**Examples:**
Roll the data of the "X" axis one elements to the right:
>>> f.roll('X', 1)
Roll the data of the "X" axis three elements to the left:
>>> f.roll('X', -3)
"""
# TODODASK - allow multiple roll axes
axis = self.domain_axis(
axis,
key=True,
default=ValueError(
f"Can't roll {self.__class__.__name__}. "
f"Bad axis specification: {axis!r}"
),
)
d = _inplace_enabled_define_and_cleanup(self)
# Roll the metadata constructs in-place
axes = d._parse_axes(axis)
d._roll_constructs(axes, shift)
return d
def subspace(self, *mode, **kwargs):
"""Create indices that define a subspace of the domain
construct.
The indices returned by this method be used to create the subspace
by passing them to the `subspace` method of the original domain
construct.
The subspace is defined by identifying indices based on the
metadata constructs.
Metadata constructs are selected conditions are specified on their
data. Indices for subspacing are then automatically inferred from
where the conditions are met.
Metadata constructs and the conditions on their data are defined
by keyword parameters.
* Any domain axes that have not been identified remain unchanged.
* Multiple domain axes may be subspaced simultaneously, and it
doesn't matter which order they are specified in.
* Explicit indices may also be assigned to a domain axis
identified by a metadata construct, with either a Python `slice`
object, or a sequence of integers or booleans.
* For a dimension that is cyclic, a subspace defined by a slice or
by a `Query` instance is assumed to "wrap" around the edges of
the data.
* Conditions may also be applied to multi-dimensional metadata
constructs. The "compress" mode is still the default mode (see
the positional arguments), but because the indices may not be
acting along orthogonal dimensions, some missing data may still
need to be inserted into the field construct's data.
.. versionadded:: 3.11.0
.. seealso:: `indices`
:Parameters:
mode: `str`, *optional*
There are two modes of operation, each of which provides
indices for a different type of subspace:
============== ==========================================
*mode* Description
============== ==========================================
``'compress'`` Return indices that identify only the
requested locations.
This is the default mode.
Note that if a multi-dimensional metadata
construct is being used to define the
indices then some unrequested locations
may also be selected.
``'envelope'`` The returned subspace is the smallest that
contains all of the requested locations.
``'test'`` May be used on its own or in addition to
one of the other positional arguments. Do
not create a subspace, but return `True`
or `False` depending on whether or not it
is possible to create the specified
subspace.
============== ==========================================
kwargs: *optional*
A keyword name is an identity of a metadata construct, and
the keyword value provides a condition for inferring
indices that apply to the dimension (or dimensions)
spanned by the metadata construct's data. Indices are
created that select every location for which the metadata
construct's data satisfies the condition.
:Returns:
`Domain` or `bool`
An independent domain construct containing the subspace of
the original domain. If the ``'test'`` positional argument
has been set then return `True` or `False` depending on
whether or not it is possible to create specified
subspace.
**Examples:**
>>> d = cf.example_field(0).domain
>>> print(d)
Dimension coords: latitude(5) = [-75.0, ..., 75.0] degrees_north
: longitude(8) = [22.5, ..., 337.5] degrees_east
: time(1) = [2019-01-01 00:00:00]
>>> print(d.subspace(X=112.5))
Dimension coords: latitude(5) = [-75.0, ..., 75.0] degrees_north
: longitude(1) = [112.5] degrees_east
: time(1) = [2019-01-01 00:00:00]
>>> print(d.subspace(X=112.5, Y=cf.wi(-60, 30)))
Dimension coords: latitude(2) = [-45.0, 0.0] degrees_north
: longitude(1) = [112.5] degrees_east
: time(1) = [2019-01-01 00:00:00]
>>> print(d.subspace(X=[-1, 0], Y=slice(1, -1)))
Dimension coords: latitude(3) = [-45.0, 0.0, 45.0] degrees_north
: longitude(2) = [337.5, 22.5] degrees_east
: time(1) = [2019-01-01 00:00:00]
"""
logger.debug(
f"{self.__class__.__name__}.subspace\n"
f" input kwargs = {kwargs}"
) # pragma: no cover
test = False
if "test" in mode:
mode = list(mode)
mode.remove("test")
test = True
if not mode and not kwargs:
if test:
return True
return self.copy()
try:
indices = self.indices(*mode, **kwargs)
except ValueError as error:
if test:
return False
raise ValueError(error)
if test:
return True
domain_axes = self.domain_axes(todict=True)
axes = []
shape = []
indices2 = []
for a, b in indices.items():
axes.append(a)
shape.append(domain_axes[a].get_size())
indices2.append(b)
indices, roll = parse_indices(
tuple(shape), tuple(indices2), cyclic=True
)
logger.debug(
f" axes = {axes!r}\n"
f" parsed indices = {indices!r}\n"
f" roll = {roll!r}"
) # pragma: no cover
if roll:
new = self
cyclic_axes = self.cyclic()
for iaxis, shift in roll.items():
axis = axes[iaxis]
if axis not in cyclic_axes:
raise IndexError(
"Can't take a cyclic slice from non-cyclic "
f"{self.constructs.domain_axis_identity(axis)!r} "
"axis"
)
new = new.roll(axis, shift)
else:
new = self.copy()
# ------------------------------------------------------------
# Set sizes of domain axes
# ------------------------------------------------------------
domain_axes = new.domain_axes(todict=True)
for axis, index in zip(axes, indices):
if isinstance(index, slice):
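# The new axis size is the number of elements the slice selects,
# i.e. the ceiling of (stop - start) / step after normalising the
# slice against the old axis size.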
old_size = domain_axes[axis].get_size()
start, stop, step = index.indices(old_size)
size = abs((stop - start) / step)
int_size = round(size)
if size > int_size:
size = int_size + 1
else:
size = int_size
else:
size = np.size(index)
domain_axes[axis].set_size(size)
# ------------------------------------------------------------
# Subspace constructs that have data
# ------------------------------------------------------------
construct_data_axes = new.constructs.data_axes()
for key, construct in new.constructs.filter_by_data().items():
construct_axes = construct_data_axes[key]
dice = [indices[axes.index(axis)] for axis in construct_axes]
# Replace existing construct with its subspace
new.set_construct(
construct[tuple(dice)],
key=key,
axes=construct_axes,
copy=False,
)
return new
@_inplace_enabled(default=False)
def transpose(self, axes, inplace=False):
"""Permute the data axes of the metadata constructs.
Each metadata construct has its data axis order changed to the
relative ordering defined by the *axes* parameter. For instance,
if the given *axes* are ``['X', 'Z', 'Y']`` then a metadata
construct whose data axis order is ('Y', 'X') will be transposed to
have data order ('X', 'Y').
.. versionadded:: 3.11.0
.. seealso:: `domain_axis`, `flip`
:Parameters:
axes: sequence of `str`
Define the new domain axis order.
A domain axis is identified by that which would be
selected by passing a given axis description to a call of
the `domain_axis` method. For example, a value of ``'X'``
would select the domain axis construct returned by
``f.domain_axis('X')``.
Each domain axis of the domain construct data must be
specified.
{{inplace: `bool`, optional}}
:Returns:
`Domain` or `None`
The domain construct with transposed constructs, or `None`
if the operation was in-place.
**Examples:**
>>> d = cf.example_field(7).domain
>>> print(d)
Dimension coords: time(3) = [1979-05-01 12:00:00, 1979-05-02 12:00:00, 1979-05-03 12:00:00] gregorian
: air_pressure(1) = [850.0] hPa
: grid_latitude(4) = [0.44, ..., -0.88] degrees
: grid_longitude(5) = [-1.18, ..., 0.58] degrees
Auxiliary coords: latitude(grid_latitude(4), grid_longitude(5)) = [[52.4243, ..., 51.1163]] degrees_north
: longitude(grid_latitude(4), grid_longitude(5)) = [[8.0648, ..., 10.9238]] degrees_east
Coord references: grid_mapping_name:rotated_latitude_longitude
>>> print(d.transpose(['X', 'T', 'Y', 'Z']))
Dimension coords: time(3) = [1979-05-01 12:00:00, 1979-05-02 12:00:00, 1979-05-03 12:00:00] gregorian
: air_pressure(1) = [850.0] hPa
: grid_latitude(4) = [0.44, ..., -0.88] degrees
: grid_longitude(5) = [-1.18, ..., 0.58] degrees
Auxiliary coords: latitude(grid_longitude(5), grid_latitude(4)) = [[52.4243, ..., 51.1163]] degrees_north
: longitude(grid_longitude(5), grid_latitude(4)) = [[8.0648, ..., 10.9238]] degrees_east
Coord references: grid_mapping_name:rotated_latitude_longitude
"""
d = _inplace_enabled_define_and_cleanup(self)
# Parse the axes
if axes is None:
raise ValueError(
f"Can't transpose {self.__class__.__name__}. "
f"Must provide an order for all axes. Got: {axes}"
)
axes = d._parse_axes(axes)
rank = self.rank
if len(set(axes)) != rank:
raise ValueError(
f"Can't transpose {self.__class__.__name__}. "
f"Must provide an unambiguous order for all "
f"{rank} domain axes. Got: {axes}"
)
data_axes = d.constructs.data_axes()
for key, construct in d.constructs.filter_by_data().items():
construct_axes = data_axes[key]
if len(construct_axes) < 2:
# No need to transpose 1-d constructs
continue
# Transpose the construct
iaxes = [
construct_axes.index(a) for a in axes if a in construct_axes
]
construct.transpose(iaxes, inplace=True)
# Update the axis order
new_axes = [construct_axes[i] for i in iaxes]
d.set_data_axes(axes=new_axes, key=key)
return d
|
11476296
|
import tensorflow as tf
k = int(tf.__version__.split('.')[0])
if k >= 2:
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import tensornets as nets
import cv2
import numpy as np
import time
import argparse
import sys
from create_folder import createFolder
class YoloObjectDetection():
def __init__(self):
self.url1 = []
self.inputs = tf.placeholder(tf.float32, [None, 416, 416, 3])
self.model = nets.YOLOv3COCO(self.inputs, nets.Darknet19)
self.cap = []
self.query_obj_type = []
# model = nets.YOLOv2(inputs, nets.Darknet19)
# frame=cv2.imread("D://pyworks//yolo//truck.jpg",1)
self.count = 0
self.classes = {'0': 'person', '1': 'bicycle', '2': 'car', '3': 'bike', '5': 'bus', '7': 'truck', '8': 'chair'}
self.list_of_classes = [0, 1, 2, 3, 5, 7, 8]
def init_tf_session(self):
with tf.Session().as_default() as self.sess:
self.sess.run(self.model.pretrained())
def get_cropped_image(self):
ret, frame = self.cap.read()
if not ret:
return [], [], None
img = cv2.resize(frame, (416, 416))
copy_img = img.copy()
imge = np.array(img).reshape(-1, 416, 416, 3)
start_time = time.time()
preds = self.sess.run(self.model.preds, {self.inputs: self.model.preprocess(imge)})
#print("--- %s seconds ---" % (time.time() - start_time))
boxes = self.model.get_boxes(preds, imge.shape[1:3])
# cv2.namedWindow('image', cv2.WINDOW_NORMAL)
#
# cv2.resizeWindow('image', 700, 700)
# print("--- %s seconds ---" % (time.time() - start_time))
boxes1 = np.array(boxes)
img_list = []
box_list = []
for j in self.list_of_classes:
count = 0
if str(j) in self.classes:
lab = self.classes[str(j)]
if lab == self.query_obj_type:
if len(boxes1) != 0:
for i in range(len(boxes1[j])):
box = boxes1[j][i]
if boxes1[j][i][4] >= 0.5:
count += 1
crop_img = copy_img[int(box[1]):int(box[3]), int(box[0]):int(box[2])]
# cv2.rectangle(img, (box[0], box[1]), (box[2], box[3]), (0, 255, 0), 1)
# cv2.putText(img, lab, (box[0], box[1]), cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 0, 255),
# lineType=cv2.LINE_AA)
img_list.append(crop_img)
box_list.append(box)
return box_list, img_list, img
def crop_and_save(self):
with tf.Session() as sess:
sess.run(self.model.pretrained())
# "D://pyworks//yolo//videoplayback.mp4"
cap = cv2.VideoCapture(self.url1)
while cap.isOpened():
ret, frame = cap.read()
if not ret:
break
img = cv2.resize(frame, (416, 416))
copy_img = img.copy()
imge = np.array(img).reshape(-1, 416, 416, 3)
start_time = time.time()
preds = sess.run(self.model.preds, {self.inputs: self.model.preprocess(imge)})
print("--- %s seconds ---" % (time.time() - start_time))
boxes = self.model.get_boxes(preds, imge.shape[1:3])
cv2.namedWindow('image', cv2.WINDOW_NORMAL)
cv2.resizeWindow('image', 700, 700)
# print("--- %s seconds ---" % (time.time() - start_time))
boxes1 = np.array(boxes)
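# boxes1 is indexed by COCO class id; as used below, each row holds
# [x1, y1, x2, y2, confidence], so box[:4] gives the corners for
# cropping/drawing and box[4] is the detection score.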
for j in self.list_of_classes:
count = 0
if str(j) in self.classes:
lab = self.classes[str(j)]
if len(boxes1) != 0:
for i in range(len(boxes1[j])):
box = boxes1[j][i]
if boxes1[j][i][4] >= 0.5:
count += 1
crop_img = copy_img[int(box[1]):int(box[3] ), int(box[0]):int(box[2])]
cv2.rectangle(img, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (0, 255, 0), 1)
cv2.putText(img, lab, (int(box[0]), int(box[1])), cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 0, 255),
lineType=cv2.LINE_AA)
cv2.imshow("cropped_image", crop_img)
cv2.waitKey(1)
dir = "temp/" + lab + "/"
createFolder(dir)
s1 = dir + '{}.jpg'.format(self.count)
self.count = self.count + 1
print("\n object_count :", self.count)
cv2.imwrite(s1, crop_img)
#print(lab, ": ", count)
cv2.imshow("image", img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
def main(args):
objectDetection = YoloObjectDetection()
objectDetection.url1 = args.video_dir
objectDetection.cap = cv2.VideoCapture(objectDetection.url1)
objectDetection.crop_and_save()
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--video_dir', type=str,
help='Path to the input video file.')
return parser.parse_args(argv)
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
#objectDetection.init_tf_session()
# for i in range(1000):
# print("Press Enter ")
# imgs = objectDetection.get_cropped_image()
#
# for index, img in enumerate(imgs) :
# cv2.waitKey(0)
#
# if index == 3:
# break
#
# cv2.imshow(str(index), img)
#
# print(len(imgs))
|
11476300
|
from __future__ import print_function
import time
import numpy as np
from keras.callbacks import Callback
import keras.backend as K
import tensorflow as tf
# NOTE: So far we observed asynchronous feeding for StagingAreaCallback.
# There's a simplified implementation, StagingAreaCallbackFeedDict, which uses
# feed_dict instead of an intermediate tf.Variable, but it's still synchronous.
class StagingAreaCallback(Callback):
"""
It allows prefetching of input batches to the GPU using a TensorFlow
StagingArea, making a simple asynchronous pipeline.
The classic mechanism of copying input data to GPU in Keras with TensorFlow
is `feed_dict`: a numpy array is synchronously copied from Python to TF memory
and then copied to GPU memory using a host-to-device memcpy. The computation,
however, has to wait, which is wasteful.
This class makes the HtoD memcpy asynchronous using a GPU-resident queue
of size two (implemented by StagingArea). The mechanism is as follows:
- at the beginning of an epoch one batch is `put()` into the queue
- during each training step another one is `put()` into the queue and in
parallel the batch already present on the GPU is `get()` from the queue
and provided as tensor input to the Keras model (this runs within a single
`tf.Session.run()`)
The input numpy arrays (features and targets) are provided via this
callback and sliced into batches inside it. The last batch might be of
smaller size without any problem (the StagingArea supports variable-sized
batches and allows enforcing a constant data sample shape). For the last
batch a zero-length slice is still put into the queue to keep the get+put
operation uniform across all batches.
Since it's hard to modify Keras to add more data to `feed_dict`, the data
from numpy is fed into StagingArea in another `tf.Session.run()` before each
training step via an intermediate `tf.Variable` and `feed_dict`. It is still
synchronous. A better, though more complicated way would be to use TF queues
(deprecated) or the Dataset API.
In order to provide extra put() operation to `fetches`, we depend on a fork
of Keras (https://github.com/bzamecnik/keras/tree/tf-function-session-run-args).
A pull request to upstream will be made soon.
Example usage:
```
staging_area_callback = StagingAreaCallback(x_train, y_train, batch_size)
image = Input(tensor=staging_area_callback.input_tensor)
x = Dense(512, activation='relu')(image)
digit = Dense(num_classes, activation='softmax')(x)
model = Model(inputs=image, outputs=digit)
model.compile(optimizer='sgd', loss='categorical_crossentropy',
target_tensors=[staging_area_callback.target_tensor],
fetches=staging_area_callback.extra_ops)
model.fit(steps_per_epoch=steps_per_epoch, epochs=2,
callbacks=[staging_area_callback])
```
Full example: https://gist.github.com/bzamecnik/b520e2b1e199b193b715477929e39b22
"""
def __init__(self, x, y, batch_size, prefetch_count=1):
self.x = x
self.y = y
self.batch_size = batch_size
self.prefetch_count = prefetch_count
features_shape = (None,) + x.shape[1:]
labels_shape = (None,) + y.shape[1:]
with tf.device('/cpu:0'):
# for feeding inputs to the StagingArea
# Let's try to decouple feeding data to StagingArea.put()
# from the training batch session.run()
# https://www.tensorflow.org/api_guides/python/reading_data#Preloaded_data
self.features_batch_next_value = tf.placeholder(dtype=x.dtype, shape=features_shape)
# - prevent the variable to be used as a model parameter: trainable=False, collections=[]
# - allow dynamic variable shape (for the last batch): validate_shape=False
features_batch_next = tf.Variable(self.features_batch_next_value, trainable=False, collections=[], validate_shape=False)
self.labels_batch_next_value = tf.placeholder(dtype=y.dtype, shape=labels_shape)
labels_batch_next = tf.Variable(self.labels_batch_next_value, trainable=False, collections=[], validate_shape=False)
self.assign_next_batch = tf.group(features_batch_next.initializer, labels_batch_next.initializer)
# will be used for prefetching to GPU
area = tf.contrib.staging.StagingArea(
dtypes=[x.dtype, y.dtype],
shapes=[features_shape, labels_shape])
self.area_put = area.put([features_batch_next.value(), labels_batch_next.value()])
area_get_features, area_get_labels = area.get()
self.area_size = area.size()
self.area_clear = area.clear()
self.input_tensor = area_get_features
self.target_tensor = area_get_labels
self.extra_ops = [self.area_put]
def set_params(self, params):
super().set_params(params)
self.steps_per_epoch = self.params['steps']
def _slice_batch(self, i):
start = i * self.batch_size
end = start + self.batch_size
return (self.x[start:end], self.y[start:end])
def _assign_batch(self, session, data):
x_batch, y_batch = data
session.run(self.assign_next_batch, feed_dict={
self.features_batch_next_value: x_batch,
self.labels_batch_next_value: y_batch})
def on_epoch_begin(self, epoch, logs=None):
sess = K.get_session()
for i in range(self.prefetch_count):
self._assign_batch(sess, self._slice_batch(i))
sess.run(self.area_put)
def on_batch_begin(self, batch, logs=None):
sess = K.get_session()
# Slice for `prefetch_count` last batches is empty.
# It serves as a dummy value which is put into StagingArea
# but never read.
data = self._slice_batch(batch + self.prefetch_count)
self._assign_batch(sess, data)
def on_epoch_end(self, epoch, logs=None):
sess = K.get_session()
sess.run(self.area_clear)
class StagingAreaCallbackFeedDict(Callback):
"""
It allows prefetching of input batches to the GPU using a TensorFlow
StagingArea, making a simple asynchronous pipeline.
The classic mechanism of copying input data to GPU in Keras with TensorFlow
is `feed_dict`: a numpy array is synchronously copied from Python to TF memory
and then copied to GPU memory using a host-to-device memcpy. The computation,
however, has to wait, which is wasteful.
This class makes the HtoD memcpy asynchronous using a GPU-resident queue
of size two (implemented by StagingArea). The mechanism is as follows:
- at the beginning of an epoch one batch is `put()` into the queue
- during each training step another one is `put()` into the queue and in
parallel the batch already present on the GPU is `get()` from the queue
and provided as tensor input to the Keras model (this runs within a single
`tf.Session.run()`)
The input numpy arrays (features and targets) are provided via this
callback and sliced into batches inside it. The last batch might be of
smaller size without any problem (the StagingArea supports variable-sized
batches and allows enforcing a constant data sample shape). For the last
batch a zero-length slice is still put into the queue to keep the get+put
operation uniform across all batches.
We feed input data to StagingArea via `feed_dict` as an additional input
besides Keras inputs. Note that the `feed_dict` dictionary is passed as a
reference and its values are updated inside the callback. It is still
synchronous. A better, though more complicated way would be to use TF queues
(deprecated) or the Dataset API.
It seems to help on GPUs with low host-device bandwidth, such as desktop
machines with many GPUs sharing a limited number of PCIe channels.
In order to provide extra put() operation to `fetches`, we depend on a fork
of Keras (https://github.com/bzamecnik/keras/tree/tf-function-session-run-args).
A pull request to upstream will be made soon.
Example usage:
```
staging_area_callback = StagingAreaCallback(x_train, y_train, batch_size)
image = Input(tensor=staging_area_callback.input_tensor)
x = Dense(512, activation='relu')(image)
digit = Dense(num_classes, activation='softmax')(x)
model = Model(inputs=image, outputs=digit)
model.compile(optimizer='sgd', loss='categorical_crossentropy',
target_tensors=[staging_area_callback.target_tensor],
feed_dict=staging_area_callback.feed_dict,
fetches=staging_area_callback.extra_ops)
model.fit(steps_per_epoch=steps_per_epoch, epochs=2,
callbacks=[staging_area_callback])
```
Full example: https://gist.github.com/bzamecnik/b520e2b1e199b193b715477929e39b22
"""
def __init__(self, x, y, batch_size, prefetch_count=1):
self.x = x
self.y = y
self.batch_size = batch_size
self.prefetch_count = prefetch_count
features_shape = (None,) + x.shape[1:]
labels_shape = (None,) + y.shape[1:]
# placeholders for feeding inputs to the StagingArea
self.features_batch_next = tf.placeholder(dtype=x.dtype, shape=features_shape)
self.labels_batch_next = tf.placeholder(dtype=y.dtype, shape=labels_shape)
# We'll assign self.features_batch_next, self.labels_batch_next before
# each StagingArea.put() - feed_dict is passed by reference and updated
# from outside.
self.feed_dict = {}
# will be used for prefetching to GPU
area = tf.contrib.staging.StagingArea(
dtypes=[x.dtype, y.dtype],
shapes=[features_shape, labels_shape])
self.area_put = area.put([self.features_batch_next, self.labels_batch_next])
area_get_features, area_get_labels = area.get()
self.area_size = area.size()
self.area_clear = area.clear()
self.input_tensor = area_get_features
self.target_tensor = area_get_labels
self.extra_ops = [self.area_put]
def set_params(self, params):
super().set_params(params)
self.steps_per_epoch = self.params['steps']
def _slice_batch(self, i):
start = i * self.batch_size
end = start + self.batch_size
return (self.x[start:end], self.y[start:end])
def _update_feed_dict(self, data):
x_batch, y_batch = data
self.feed_dict[self.features_batch_next] = x_batch
self.feed_dict[self.labels_batch_next] = y_batch
def on_epoch_begin(self, epoch, logs=None):
sess = K.get_session()
# initially fill the StagingArea
for i in range(self.prefetch_count):
self._update_feed_dict(self._slice_batch(i))
sess.run(feed_dict=self.feed_dict, fetches=[self.area_put])
def on_batch_begin(self, batch, logs=None):
sess = K.get_session()
# Slice for `prefetch_count` last batches is empty.
# It serves as a dummy value which is put into StagingArea
# but never read.
self._update_feed_dict(self._slice_batch(batch + self.prefetch_count))
def on_epoch_end(self, epoch, logs=None):
sess = K.get_session()
sess.run(self.area_clear)
class BatchTiming(Callback):
"""
    It measures robust stats for the timing of batches and epochs.
    Useful for benchmarking the training process.
For each epoch it prints median batch time and total epoch time.
After training it prints overall median batch time and median epoch time.
Usage: model.fit(X_train, Y_train, callbacks=[BatchTiming()])
All times are in seconds.
More info: https://keras.io/callbacks/
"""
def on_train_begin(self, logs={}):
self.all_batch_times = []
self.all_epoch_times = []
def on_epoch_begin(self, epoch, logs={}):
self.epoch_batch_times = []
def on_batch_begin(self, batch, logs={}):
self.start_time = time.time()
def on_batch_end(self, batch, logs={}):
end_time = time.time()
elapsed_time = end_time - self.start_time
self.epoch_batch_times.append(elapsed_time)
self.all_batch_times.append(elapsed_time)
def on_epoch_end(self, epoch, logs={}):
epoch_time = np.sum(self.epoch_batch_times)
self.all_epoch_times.append(epoch_time)
median_batch_time = np.median(self.epoch_batch_times)
print('Epoch timing - batch (median): %0.5f, epoch: %0.5f (sec)' % \
(median_batch_time, epoch_time))
def on_train_end(self, logs={}):
median_batch_time = np.median(self.all_batch_times)
median_epoch_time = np.median(self.all_epoch_times)
print('Overall - batch (median): %0.5f, epoch (median): %0.5f (sec)' % \
(median_batch_time, median_epoch_time))
class SamplesPerSec(Callback):
def __init__(self, batch_size):
self.batch_size = batch_size
def on_train_begin(self, logs={}):
self.all_samples_per_sec = []
def on_batch_begin(self, batch, logs={}):
self.start_time = time.time()
# self.batch_size = logs['size']
def on_batch_end(self, batch, logs={}):
end_time = time.time()
elapsed_time = end_time - self.start_time
samples_per_sec = self.batch_size / elapsed_time
self.all_samples_per_sec.append(samples_per_sec)
def on_epoch_end(self, epoch, logs={}):
self.print_results()
def print_results(self):
print('Samples/sec: %0.2f' % np.median(self.all_samples_per_sec))
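# A minimal usage sketch (assuming a compiled Keras `model` and the same
# x_train/y_train/batch_size as above); throughput is reported each epoch
# as the median over all batches seen so far:
#
# model.fit(x_train, y_train, batch_size=batch_size, epochs=2,
#           callbacks=[BatchTiming(), SamplesPerSec(batch_size)])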
"""
Enables CUDA profiling (for usage in nvprof) just for a few batches.
The reasons are:
- profiling outputs are big (easily hundreds of MBs to GBs) and repetitive
- without a proper stop the outputs sometimes fail to save
Since the TensorFlow runtime may initially take time to optimize the graph,
we skip a few epochs and then enable profiling for a few batches within the
next epoch.
It requires the `cudaprofile` package.
"""
class CudaProfile(Callback):
def __init__(self, warmup_epochs=0, batches_to_profile=None):
self.warmup_epochs = warmup_epochs
self.batches_to_profile = batches_to_profile
self.enabled = False
def set_params(self, params):
self.params = params
def on_epoch_begin(self, epoch, logs={}):
import cudaprofile
if epoch == self.warmup_epochs:
cudaprofile.start()
self.enabled = True
def on_batch_end(self, batch, logs={}):
import cudaprofile
        if self.enabled and batch >= self.batches_to_profile:
cudaprofile.stop()
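# A minimal usage sketch (an assumption, not from the original source): run
# the training script under nvprof, skip 2 warmup epochs, then profile 10
# batches of the next epoch:
#
#   nvprof -o profile.nvvp python train.py
#
# model.fit(x_train, y_train, epochs=5,
#           callbacks=[CudaProfile(warmup_epochs=2, batches_to_profile=10)])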
|
11476320
|
import os
from preprocessing.caselaw_stat_corpus import preprocess_label_file, count_words
from analysis.caselaw_compare_bm25_dpr import read_in_run_from_pickle, remove_query_from_ranked_list, evaluate_weight
from analysis.diff_bm25_dpr import first_diff_analysis, write_case_lines, get_diff_query_ids, compare_overlap_rel
def read_in_qrels():
# read in the label files
label_file = '/mnt/c/Users/salthamm/Documents/coding/ussc-caselaw-collection/airs2017-collection/qrel.txt'
qrels = preprocess_label_file(label_file)
qrels_updated = {}
for key, value in qrels.items():
qrels_updated.update({key: {}})
for val in value:
qrels_updated.get(str(key)).update({str(val): 1})
return qrels_updated
def write_diff_cases(query_diff_length_parm, query_diff_parm, output_dir, corpus_dir, query_file):
diff_query_ids = get_diff_query_ids(query_diff_length_parm)
query_dict = read_in_queries(query_file)
for id in diff_query_ids:
query_text = query_dict.get(id)
bm25_text = {}
dpr_text = {}
intersect_text = {}
if query_diff_parm.get(id).get('only_bm25'):
for id_bm25 in query_diff_parm.get(id).get('only_bm25'):
bm25_text.update({id_bm25: read_in_file(corpus_dir, id_bm25)})
if query_diff_parm.get(id).get('only_dpr'):
for id_dpr in query_diff_parm.get(id).get('only_dpr'):
dpr_text.update({id_dpr: read_in_file(corpus_dir, id_dpr)})
if query_diff_parm.get(id).get('intersect'):
for id_int in query_diff_parm.get(id).get('intersect'):
intersect_text.update({id_int: read_in_file(corpus_dir, id_int)})
os.makedirs(os.path.join(output_dir, 'query_{}'.format(id)))
with open(os.path.join(output_dir, 'query_{}'.format(id), 'query_{}.txt'.format(id)), 'w') as f:
write_case_lines(query_text, f)
for id_bm25, text in bm25_text.items():
with open(os.path.join(output_dir, 'query_{}'.format(id), 'bm25_{}.txt'.format(id_bm25)), 'w') as f:
write_case_lines(text, f)
for id_dpr, text in dpr_text.items():
with open(os.path.join(output_dir, 'query_{}'.format(id), 'dpr_{}.txt'.format(id_dpr)), 'w') as f:
write_case_lines(text, f)
for id_int, text in intersect_text.items():
with open(os.path.join(output_dir, 'query_{}'.format(id), 'int_{}.txt'.format(id_int)), 'w') as f:
write_case_lines(text, f)
def read_in_file(corpus_dir, id):
dict_paragraphs = {}
file = '{}.txt'.format(id)
with open(os.path.join(corpus_dir, file), 'r') as f:
lines = f.readlines()
        lines = [line.strip() for line in lines if line.strip('\n') != ' ' and line.strip() != '']
paragraphs = {}
paragraph = ''
key = 'intro'
for line in lines:
if not line.split('.')[0].isdigit():
paragraph = paragraph + ' ' + line
else:
# if paragraph is multiple times in document (for example in different languages)
if key in paragraphs.keys():
para = paragraphs.get(key)
para.append(paragraph)
paragraphs.update({key: para})
else:
paragraphs.update({key: [paragraph]})
key = line.split('.')[0]
paragraph = line
if paragraphs:
paragraphs = only_string_in_dict(paragraphs)
dict_paragraphs.update({file.split('.')[0]: paragraphs})
# print('lengths for file {} done'.format(file))
else:
            print('reading in file {} did not work'.format(file))
return dict_paragraphs.get(id)
def read_in_queries(query_file):
dict_paragraphs = {}
with open(query_file, 'r') as f:
lines = f.read().splitlines()
for case in lines:
query_id = case.split('||')[0]
case = ' '.join(case.split('||')[1:])
splitted_sentences = case.split('. ')
paragraphs = {}
line = ''
i = 0
lengths_docs = []
for sentence in splitted_sentences:
line = line + ' ' + sentence
if len(line.split(' ')) > 200:
line_length = count_words(line)
lengths_docs.append(line_length)
paragraphs.update({str(i): line})
line = ''
i += 1
        # if the document never exceeds 200 words, keep it as a single paragraph
if not paragraphs:
line_length = count_words(line)
lengths_docs.append(line_length)
paragraphs.update({'0': line})
if paragraphs:
dict_paragraphs.update({query_id: paragraphs})
else:
print(query_id)
return dict_paragraphs
if __name__ == "__main__":
mode = ['train', 'separate_para', 'overlap_ranks', 'legal_task1']
# legalbert para
#dpr_file_parm = '/mnt/c/Users/salthamm/Documents/phd/data/caselaw/dpr/legalbert/eval/run_dpr_aggregate_legalbert_parm_overlap_ranks.pickle'
#dpr_file_doc = '/mnt/c/Users/salthamm/Documents/phd/data/caselaw/dpr/legalbert/eval/run_dpr_aggregate_legalbert_doc.pickle'
# legalbert doc
dpr_file_parm = '/mnt/c/Users/salthamm/Documents/phd/data/caselaw/dpr/legalbert_doc/eval/run_aggregated_train_vrrf.pickle'
dpr_file_doc = '/mnt/c/Users/salthamm/Documents/phd/data/caselaw/dpr/legalbert_doc/eval/run_dpr_aggregate_firstp.pickle'
bm25_file_parm = '/mnt/c/Users/salthamm/Documents/phd/data/caselaw/bm25/eval/run_bm25_aggregate2_parm_overlap_ranks.pickle'
bm25_file_doc = '/mnt/c/Users/salthamm/Documents/phd/data/caselaw/bm25/eval/run_bm25_aggregate2_doc_overlap_ranks.pickle'
output_dir = '/mnt/c/Users/salthamm/Documents/phd/data/caselaw/bm25_dpr/legalbert_doc'
# read in files for parm and doc
dpr_dict_parm = read_in_run_from_pickle(dpr_file_parm)
dpr_dict_parm = remove_query_from_ranked_list(dpr_dict_parm)
dpr_dict_parm_new = {}
for key, value in dpr_dict_parm.items():
dpr_dict_parm_new.update({key.strip('id'):{}})
for key2, value2 in value.items():
dpr_dict_parm_new.get(key.strip('id')).update({key2.strip('id'): value2})
dpr_dict_parm = dpr_dict_parm_new
bm25_dict_parm = read_in_run_from_pickle(bm25_file_parm)
bm25_dict_parm = remove_query_from_ranked_list(bm25_dict_parm)
dpr_dict_doc = read_in_run_from_pickle(dpr_file_doc)
dpr_dict_doc = remove_query_from_ranked_list(dpr_dict_doc)
dpr_dict_parm_new = {}
for key, value in dpr_dict_doc.items():
dpr_dict_parm_new.update({key.strip('id'): {}})
for key2, value2 in value.items():
dpr_dict_parm_new.get(key.strip('id')).update({key2.strip('id'): value2})
dpr_dict_doc = dpr_dict_parm_new
bm25_dict_doc = read_in_run_from_pickle(bm25_file_doc)
bm25_dict_doc = remove_query_from_ranked_list(bm25_dict_doc)
# read in qrels
qrels = read_in_qrels()
qrels_len = []
for key, value in qrels.items():
qrels_len.append(len(value))
print('number of total relevant docs is {}'.format(sum(qrels_len)))
dpr_dict_parm_rel, bm25_dict_parm_rel, query_diff_parm, query_diff_length_parm = first_diff_analysis(dpr_dict_parm,
bm25_dict_parm,
qrels)
compare_overlap_rel(dpr_dict_parm, bm25_dict_parm, qrels)
dpr_dict_doc_rel, bm25_dict_doc_rel, query_diff_doc, query_diff_length_doc = first_diff_analysis(dpr_dict_doc,
bm25_dict_doc,
qrels)
compare_overlap_rel(dpr_dict_doc, bm25_dict_doc, qrels)
# compare doc and parm for bm25
bm25_dict_rel_doc, bm25_dict_rel_parm, query_diff_bm25, query_diff_length_bm25 = first_diff_analysis(bm25_dict_doc,
bm25_dict_parm,
qrels)
compare_overlap_rel(bm25_dict_rel_doc, bm25_dict_rel_parm, qrels)
# compare doc and parm for dpr
dpr_dict_rel_doc, dpr_dict_rel_parm, query_diff_dpr, query_diff_length_dpr = first_diff_analysis(dpr_dict_doc,
dpr_dict_parm,
qrels)
compare_overlap_rel(dpr_dict_rel_doc, dpr_dict_rel_parm, qrels)
|
11476347
|
import re
from collections import defaultdict
p5 = re.compile(r"(\S+)", re.MULTILINE|re.I)
def length_in_words(sub_element):
tmp2 = p5.findall(sub_element)
return len(tmp2)
f = open("756109104-10-K-19960321.txt")
#f = open("test.txt", 'r')
x = ""
for line in f:
x += line
p1 = re.compile(r"([^\"])(item\s+7[^0-9a-z\"]*management(?:[^0-9a-z]{0,3}s)?\s+discussions?\s+and\s+analysis\s+of\s+(?:financial\s+conditions?\s+|results\s+of\s+operations?)(?:\s+and\s+results\s+of\s+operations?|\s+and\s+financial\s+conditions?)?)", re.I|re.DOTALL|re.MULTILINE)
p2 = re.compile(r"([^\"])(item\s+7[^0-9a-z\"]*a[^0-9a-z\"]*(?:quantitative\s+and\s+(?:qualitative|qualification)\s+disclosures?\s+about\s+)?market\s+risk)", re.I|re.DOTALL|re.MULTILINE)
p3 = re.compile(r"([^\"])(item\s+8[^0-9a-z\"]*.{0,40}financial\s+statements[^\.])", re.I|re.DOTALL|re.MULTILINE)
# raw strings are required here: in a plain string "\1" is the octal escape
# chr(1), not the regex backreference
x = p1.sub(r"\1#######ITEM7:\2#######", x)
x = p2.sub(r"\1#######ITEM7A:\2#######", x)
x = p3.sub(r"\1#######ITEM8:\2#######", x)
#x = "####### abc ####### abc #########"
x_list = x.split("#######") # <== we cannot use a regular expression here!
y_list = []
z_list = []
p4 = re.compile("^(ITEM(?:7|7A|8)):(.*)$", re.MULTILINE)
for i in range(len(x_list)):
tmp = p4.findall(x_list[i])
if(len(tmp) != 0):
z_list.append(tmp[0][1])
y_list.append(str(i) + ':' + tmp[0][0])
else:
z_list.append(x_list[i])
y_list.append(str(i) + ':' + str(length_in_words(x_list[i])))
#print(y_list)
y = ' '.join(y_list)
#print y
p6 = re.compile(r"((?:\d+:ITEM7 \d+:\d+ )+(?:\d+:ITEM7A \d+:\d+ )*)(?:\d+:ITEM8 \d+:\d+\s*)+")
M_list = p6.findall(y)
p7 = re.compile(r"\d+:")
p8 = re.compile(r"^\d+$")
best = 0
bestseq = ""
for i in range(len(M_list)):
m = M_list[i]
m = p7.sub("", m)
m_list = m.split(" ")
v = 0
for j in range(len(m_list)):
q = m_list[j]
t_list = p8.findall(q)
if (len(t_list) != 0):
v += int(q)
if (v > best):
best = v
bestseq = M_list[i]
# v, best, bestseq
# 1071 57633 3895:ITEM7 3896:57628 3897:ITEM7 3898:5
p9 = re.compile(r":\S+")
p10 = re.compile(r"\s*$")
kept = defaultdict(lambda: 0)
if bestseq != "":
    bestseq = p9.sub("", bestseq)
    mm_list = bestseq.split(" ")
    print(mm_list)
    for i in range(len(mm_list)):
        mm = mm_list[i]
        if mm == '':
            continue
        z_list[int(mm)] = p10.sub("\n", z_list[int(mm)])
        print(z_list[int(mm)])
        kept[int(mm)] = 1
else:
    print("no match")
# raw string needed: in a plain string "\b" is a backspace character,
# not the regex word boundary
p11 = re.compile(r"\b\d+:", re.MULTILINE)
y = p11.sub("", y )
#print y
yy_list = y.split(" ")
print(yy_list)
for i in range(len(yy_list)):
if kept[i] != 0:
print "*"
print yy_list[i]+" "
print "\n"
#print(m_list)
#print(x)
#print y
#item_list = p.findall("hello")
#for item in item_list[:]:
# print(item)
|
11476379
|
from typing import List
class Solution:
def numIdenticalPairs(self, nums: List[int]) -> int:
return sum(a == b for i, a in enumerate(nums) for b in nums[i + 1 :])
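# An equivalent O(n) variant (a sketch, not part of the original solution):
# a value occurring v times contributes v * (v - 1) / 2 identical pairs.
from collections import Counter

def num_identical_pairs_linear(nums: List[int]) -> int:
    return sum(v * (v - 1) // 2 for v in Counter(nums).values())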
|
11476407
|
import sqlite3
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
import numpy as np
import pandas as pd
from dash.dependencies import Input, Output
from components import card, tweet
from utils import human_format, get_color_from_score
from pathlib import Path
import logging
UPDATE_INTERVAL = 30
ROOT_DIR = Path(__file__).resolve().parents[1]
DATA_DIR = ROOT_DIR / "data"
DATABASE_PATH = DATA_DIR / "tweets.db"
TARGETS_DF = pd.read_csv(DATA_DIR / "accounts.csv")
LOGS_PATH = Path(__file__).parent / "logs" / "dash_app.log"
external_stylesheets = [
dbc.themes.BOOTSTRAP,
"assets/style.css",
{
"href": "https://fonts.googleapis.com/css2?"
"family=Lato:wght@400;500;600;700;800&display=swap",
"rel": "stylesheet",
},
]
app = dash.Dash(
__name__,
external_stylesheets=external_stylesheets,
meta_tags=[{"name": "viewport", "content": "width=device-width, initial-scale=1"}],
)
server = app.server
app.title = "Tweets Scorer: Analyze sentiment of tweets in real-time"
app.layout = html.Div(
[
dbc.Jumbotron(
[
dbc.Container(
[
html.H1("Tweets Scorer", className="title-jumbotron"),
html.P(
"Analyze the sentiment of tweets in real-time",
className="text-jumbotron",
),
]
),
],
className="jumbotron-fluid",
),
dbc.Container(
[
dbc.Card(
html.Div(
[
html.Div(
[
html.P(
"0",
id="total-responses",
className="total-responses",
),
html.P("INTERACTIONS", className="total-subtitle",),
],
className="order-0 text-center",
),
html.Div(
[
html.P(
"0%",
id="total-approval",
className="total-score",
),
html.P("AVG. APPROVAL", className="total-subtitle",),
],
className="order-1 text-center",
),
html.Div(
[
dcc.Checklist(
options=[
{"label": " Exclude Retweets", "value": 1},
],
value=[],
id="exclude-rt-checkbox",
className="exclude-rt-checkbox text-center",
),
dcc.Dropdown(
id="total-dropdown",
options=[
{"label": "Last 15 minutes", "value": 15},
{"label": "Last hour", "value": 60},
{"label": "Last 24 hours", "value": 1440},
],
value=15,
clearable=False,
searchable=False,
),
],
className="d-flex flex-column justify-content-between "
"total-dropdowns order-2 my-md-auto",
),
],
className="d-flex flex-column flex-sm-row align-items-center "
"justify-content-around flex-wrap",
),
className="top-card mx-auto",
),
html.Div(
[
html.Div(
[
html.P(
"Summary",
className="title-summary text-center text-sm-left mt-1",
),
html.Div(
id="summary-cards",
className="d-flex flex-wrap flex-column flex-sm-row "
"justify-content-sm-center justify-content-md-end",
),
]
),
html.Div(
[
html.P(
"Latest Interactions",
className="title-summary text-center text-sm-left mt-4 mt-sm-1",
),
html.Div(id="summary-tweets"),
]
),
],
id="summary-container",
className="""d-flex flex-column align-items-center align-items-md-start
flex-md-row justify-content-sm-around mt-4""",
),
dcc.Interval(
id="overview-interval",
interval=UPDATE_INTERVAL * 1000, # in milliseconds
n_intervals=0,
),
],
id="cards-container",
),
]
)
@app.callback(
Output("summary-tweets", "children"),
[
Input("overview-interval", "n_intervals"),
Input("total-dropdown", "value"),
Input("exclude-rt-checkbox", "value"),
],
)
def update_tweets(n, time_range, exclude_rt):
conn = sqlite3.connect(DATABASE_PATH)
time_range = 15 if time_range not in (15, 60, 1440) else time_range
filter_rt = True if exclude_rt == [1] else False
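    # Note: the f-string SQL below is safe from injection only because
    # time_range is coerced to a whitelisted value above and filter_rt
    # merely toggles a fixed clause.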
query = f"""
select
target as target,
tweet_timestamp as tweet_timestamp,
full_text as full_text,
sentiment as score
from tweets
where
datetime(tweet_timestamp) >= datetime('now', '-{time_range} minutes')
{"and IS_RT = 0" if filter_rt else ""}
order by datetime(tweet_timestamp) desc
limit 5;
"""
df = pd.read_sql_query(query, conn)
df["tweet_timestamp"] = pd.to_datetime(df.tweet_timestamp.values)
tweets = []
for _, row in df.iterrows():
time = row["tweet_timestamp"].strftime("%T - %b %-d, %Y")
img = TARGETS_DF.loc[TARGETS_DF.id == row["target"]]["image"].item()
if row["score"] < 0.5:
color = "hsl(360, 67%, 44%)"
sentiment = "NEGATIVE"
else:
color = "hsl(184, 77%, 34%)"
sentiment = "POSITIVE"
tweets.append(tweet(time, img, row["full_text"], sentiment, color))
return html.Div(tweets)
@app.callback(
[
Output("summary-cards", "children"),
Output("total-responses", "children"),
Output("total-approval", "children"),
Output("total-approval", "style"),
],
[
Input("overview-interval", "n_intervals"),
Input("total-dropdown", "value"),
Input("exclude-rt-checkbox", "value"),
],
)
def update_cards(n, time_range, exclude_rt):
conn = sqlite3.connect(DATABASE_PATH)
time_range = 15 if time_range not in (15, 60, 1440) else time_range
filter_rt = True if exclude_rt == [1] else False
query = f"""
select
target as target,
count(*) as responses,
avg(case when sentiment < 0.5 then 0 else 1 end) * 100 as sentiment
from tweets
where
datetime(tweet_timestamp) >= datetime('now', '-{time_range} minutes')
{"and IS_RT = 0" if filter_rt else ""}
group by target;
"""
df = pd.read_sql_query(query, conn)
cards = []
for target in TARGETS_DF.itertuples():
try:
responses = df.loc[df.target == target.id, "responses"].item()
sentiment_score = df.loc[df.target == target.id, "sentiment"].item()
cards.append(card(target, responses, sentiment_score))
except Exception as e:
logging.debug(e)
pass
total_responses_num = df.responses.sum()
total_responses = human_format(total_responses_num)
total_approval_num = 0
try:
total_approval_num = np.nanmean(df.sentiment)
except Exception as e:
logging.debug(e)
pass
total_approval = f"{total_approval_num:.0f}%"
approval_style = {"color": get_color_from_score(total_approval_num)}
return cards, total_responses, total_approval, approval_style
if __name__ == "__main__":
logging.basicConfig(filename=LOGS_PATH, filemode="w", level=logging.DEBUG)
app.run_server(host="0.0.0.0", debug=True, port=8050)
|
11476411
|
import pytest
import numpy as np
from . import full_path, remove
@pytest.fixture
def dummy_metadata():
import zarr
from ..metadata import MetaData
fn = full_path("dummy_metadata.zarr")
remove(fn)
g = zarr.open(fn)
data = np.array([1, 1, 1, 1, 0, 0, 1, 1, 1]).astype(bool)
g.create_dataset(
"I", data=data, chunks=(100000,), shape=len(data), dtype=data.dtype
)
yield MetaData(g)
remove(fn)
def test_metadata_attrs(dummy_metadata):
assert dummy_metadata.N == 9
assert np.all(dummy_metadata.index == np.array(range(9)))
def test_metadata_fetch(dummy_metadata):
assert len(dummy_metadata.fetch("I")) == 7
assert len(dummy_metadata.fetch_all("I")) == 9
def test_metadata_verify_bool(dummy_metadata):
assert dummy_metadata._verify_bool("I") is True
def test_metadata_active_index(dummy_metadata):
a = np.array([0, 1, 2, 3, 6, 7, 8])
assert np.all(dummy_metadata.active_index(key="I") == a)
|
11476418
|
from __future__ import print_function
import numpy as np
import tensorflow as tf
in_channels = 3 # 3 for RGB, 32, 64, 128, ...
out_channels = 6 # 128, 256, ...
input = np.ones((5,5,in_channels)) # input is 3D: height x width x in_channels
# filter must have 3D shape x number of filters = 4D
weight_4d = np.ones((3,3,in_channels, out_channels))
strides_2d = [1, 1, 1, 1]
in_3d = tf.constant(input, dtype=tf.float32)
filter_4d = tf.constant(weight_4d, dtype=tf.float32)
in_width = int(in_3d.shape[0])
in_height = int(in_3d.shape[1])
filter_width = int(filter_4d.shape[0])
filter_height = int(filter_4d.shape[1])
input_3d = tf.reshape(in_3d, [1, in_height, in_width, in_channels])
kernel_4d = tf.reshape(filter_4d, [filter_height, filter_width, in_channels, out_channels])
#output stacked shape is 3D = 2D x N matrix
output_3d = tf.nn.conv2d(input_3d, kernel_4d, strides=strides_2d, padding='SAME')
with tf.Session() as sess:
print(sess.run(output_3d))
|
11476430
|
from maestro_agent.app_state import ApplicationState
from maestro_agent.services.maestro_api.run import RunApi
from maestro_agent.logging import Logger
from maestro_agent.services.agent.hooks import AgentHooks
from maestro_agent.services.running_test import (
RunningTestThreadsManager,
prepare_for_running,
)
class EventHandlerBase:
event = None
agent = None
agent_hooks = None
def __init__(self, event, agent):
agent_hooks = AgentHooks(event.run_id, agent.id)
self.event = event
self.agent = agent
self.agent_hooks = agent_hooks
def process(self):
try:
self.run = RunApi.get(self.event.run_id)
self.event_type_process()
except Exception as e:
# Log errors with specific event type based logic
self.agent_hooks.error(str(e))
raise e
def event_type_process(self):
        raise NotImplementedError(
            "`event_type_process` must be implemented by a subclass"
        )
class StartRunEventHandler(EventHandlerBase):
def event_type_process(self):
self.agent_hooks.preparation_started()
Logger.info("Preparing prerequisites to start a test")
prepare_for_running(self.run)
Logger.info("Starting a test")
running_test_threads = RunningTestThreadsManager.instance()
running_test_threads.start_test(run=self.run, agent=self.agent)
self.agent_hooks.running()
Logger.info("Test is running")
class StopRunEventHandler(EventHandlerBase):
def event_type_process(self):
Logger.info(f"Stop test execution. run_id={self.run.id}")
running_test_threads = RunningTestThreadsManager.instance()
running_test_threads.stop_test()
Logger.info("Test stopped")
ApplicationState.available()
|
11476436
|
import pytest
from qtpy import PYQT5, PYSIDE2
@pytest.mark.skipif(not (PYQT5 or PYSIDE2), reason="Only available in Qt5 bindings")
def test_qtwebchannel():
"""Test the qtpy.QtWebChannel namespace"""
from qtpy import QtWebChannel
assert QtWebChannel.QWebChannel is not None
assert QtWebChannel.QWebChannelAbstractTransport is not None
|
11476443
|
import discograph
from abjad.tools import stringtools
class Test(discograph.DiscographTestCase):
def test_01(self):
entity = discograph.PostgresEntity.get(entity_type=1, entity_id=32550)
roles = ['Alias', 'Member Of']
relations = entity.structural_roles_to_relations(roles)
relations = [v for k, v in sorted(relations.items())]
actual = '\n'.join(repr(_) for _ in relations)
expected = stringtools.normalize('''
PostgresRelation(
entity_one_id=100600,
entity_one_type=1,
entity_two_id=32550,
entity_two_type=1,
release_id=-1,
role='Member Of',
year=-1
)
PostgresRelation(
entity_one_id=113965,
entity_one_type=1,
entity_two_id=32550,
entity_two_type=1,
release_id=-1,
role='Member Of',
year=-1
)
PostgresRelation(
entity_one_id=152882,
entity_one_type=1,
entity_two_id=32550,
entity_two_type=1,
release_id=-1,
role='Member Of',
year=-1
)
PostgresRelation(
entity_one_id=23446,
entity_one_type=1,
entity_two_id=32550,
entity_two_type=1,
release_id=-1,
role='Member Of',
year=-1
)
PostgresRelation(
entity_one_id=241356,
entity_one_type=1,
entity_two_id=32550,
entity_two_type=1,
release_id=-1,
role='Member Of',
year=-1
)
PostgresRelation(
entity_one_id=32550,
entity_one_type=1,
entity_two_id=2561672,
entity_two_type=1,
release_id=-1,
role='Alias',
year=-1
)
PostgresRelation(
entity_one_id=354129,
entity_one_type=1,
entity_two_id=32550,
entity_two_type=1,
release_id=-1,
role='Member Of',
year=-1
)
PostgresRelation(
entity_one_id=37806,
entity_one_type=1,
entity_two_id=32550,
entity_two_type=1,
release_id=-1,
role='Member Of',
year=-1
)
PostgresRelation(
entity_one_id=409502,
entity_one_type=1,
entity_two_id=32550,
entity_two_type=1,
release_id=-1,
role='Member Of',
year=-1
)
PostgresRelation(
entity_one_id=453969,
entity_one_type=1,
entity_two_id=32550,
entity_two_type=1,
release_id=-1,
role='Member Of',
year=-1
)
PostgresRelation(
entity_one_id=53261,
entity_one_type=1,
entity_two_id=32550,
entity_two_type=1,
release_id=-1,
role='Member Of',
year=-1
)
PostgresRelation(
entity_one_id=55449,
entity_one_type=1,
entity_two_id=32550,
entity_two_type=1,
release_id=-1,
role='Member Of',
year=-1
)
''')
assert actual == expected
|
11476458
|
import os
import itertools
import numpy as np
import tensorflow as tf
from utils import *
from collections import Counter
from nltk.tokenize import TreebankWordTokenizer
EOS_TOKEN = "_eos_"
class TextReader(object):
def __init__(self, data_path):
train_path = os.path.join(data_path, "train.txt")
valid_path = os.path.join(data_path, "valid.txt")
test_path = os.path.join(data_path, "test.txt")
vocab_path = os.path.join(data_path, "vocab.pkl")
if os.path.exists(vocab_path):
self._load(vocab_path, train_path, valid_path, test_path)
else:
self._build_vocab(train_path, vocab_path)
self.train_data = self._file_to_data(train_path)
self.valid_data = self._file_to_data(valid_path)
self.test_data = self._file_to_data(test_path)
self.idx2word = {v:k for k, v in self.vocab.items()}
self.vocab_size = len(self.vocab)
def _read_text(self, file_path):
with open(file_path) as f:
return f.read().replace("\n", " %s " % EOS_TOKEN)
def _build_vocab(self, file_path, vocab_path):
counter = Counter(self._read_text(file_path).split())
count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
words, _ = list(zip(*count_pairs))
self.vocab = dict(zip(words, range(len(words))))
save_pkl(vocab_path, self.vocab)
def _file_to_data(self, file_path):
texts = self._read_text(file_path).split(EOS_TOKEN)
data = []
for text in texts:
            data.append(np.array(list(map(self.vocab.get, text.split()))))
save_npy(file_path + ".npy", data)
return data
def _load(self, vocab_path, train_path, valid_path, test_path):
self.vocab = load_pkl(vocab_path)
self.train_data = load_npy(train_path + ".npy")
self.valid_data = load_npy(valid_path + ".npy")
self.test_data = load_npy(test_path + ".npy")
def get_data_from_type(self, data_type):
if data_type == "train":
raw_data = self.train_data
elif data_type == "valid":
raw_data = self.valid_data
elif data_type == "test":
raw_data = self.test_data
else:
            raise Exception(" [!] Unknown data type: %s" % data_type)
return raw_data
def onehot(self, data, min_length=None):
        if min_length is None:
min_length = self.vocab_size
return np.bincount(data, minlength=min_length)
def iterator(self, data_type="train"):
raw_data = self.get_data_from_type(data_type)
        return itertools.cycle(([self.onehot(data), data] for data in raw_data if len(data) > 0))
def get(self, text=["medical"]):
if type(text) == str:
text = text.lower()
text = TreebankWordTokenizer().tokenize(text)
try:
            data = np.array(list(map(self.vocab.get, text)))
return self.onehot(data), data
        except Exception:
unknowns = []
for word in text:
if self.vocab.get(word) == None:
unknowns.append(word)
raise Exception(" [!] unknown words: %s" % ",".join(unknowns))
def random(self, data_type="train"):
raw_data = self.get_data_from_type(data_type)
idx = np.random.randint(len(raw_data))
data = raw_data[idx]
return self.onehot(data), data
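# A minimal usage sketch (hypothetical data directory; assumes train.txt,
# valid.txt and test.txt exist under ./data):
#
# reader = TextReader("./data")
# bow, ids = reader.random("train")        # random document as (bag-of-words, ids)
# bow, ids = reader.get("medical records")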
|
11476477
|
c = 0
j = 0
i = 0
while(j < 10):
i = 0
while (i < 1000000):
b = 1
c = c + b
i = i + 1
j = j + 1
print(c)
|
11476490
|
import collections
import logging
import math
import sys
import copy
import torch
import torch.distributed as dist
import functools
def flatten_tensors(tensors):
"""
Reference: https://github.com/facebookresearch/stochastic_gradient_push
Flatten dense tensors into a contiguous 1D buffer. Assume tensors are of
same dense type.
Since inputs are dense, the resulting tensor will be a concatenated 1D
buffer. Element-wise operation on this buffer will be equivalent to
operating individually.
Arguments:
tensors (Iterable[Tensor]): dense tensors to flatten.
Returns:
A 1D buffer containing input tensors.
"""
if len(tensors) == 1:
return tensors[0].view(-1).clone()
flat = torch.cat([t.view(-1) for t in tensors], dim=0)
return flat
def unflatten_tensors(flat, tensors):
"""
Reference: https://github.com/facebookresearch/stochastic_gradient_push
View a flat buffer using the sizes of tensors. Assume that tensors are of
same dense type, and that flat is given by flatten_dense_tensors.
Arguments:
flat (Tensor): flattened dense tensors to unflatten.
tensors (Iterable[Tensor]): dense tensors whose sizes will be used to
unflatten flat.
Returns:
Unflattened dense tensors with sizes same as tensors and values from
flat.
"""
outputs = []
offset = 0
for tensor in tensors:
numel = tensor.numel()
outputs.append(flat.narrow(0, offset, numel).view_as(tensor))
offset += numel
return tuple(outputs)
def communicate(tensors, communication_op):
"""
Reference: https://github.com/facebookresearch/stochastic_gradient_push
Communicate a list of tensors.
Arguments:
tensors (Iterable[Tensor]): list of tensors.
communication_op: a method or partial object which takes a tensor as
input and communicates it. It can be a partial object around
something like torch.distributed.all_reduce.
"""
flat_tensor = flatten_tensors(tensors)
communication_op(tensor=flat_tensor)
for f, t in zip(unflatten_tensors(flat_tensor, tensors), tensors):
t.set_(f)
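# A minimal usage sketch (an assumption, not part of the original module):
# average model gradients across workers with a single fused all-reduce over
# the flattened buffer. Assumes torch.distributed has been initialized via
# dist.init_process_group.
def average_gradients(model, world_size):
    communication_op = functools.partial(dist.all_reduce, op=dist.ReduceOp.SUM)
    grads = [p.grad.data for p in model.parameters() if p.grad is not None]
    communicate(grads, communication_op)
    for grad in grads:
        grad.div_(world_size)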
|
11476508
|
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
import os
import secrets
import encrypted_secrets.conf as secrets_conf
from encrypted_secrets.util import write_secrets
DEFAULT_YAML_PATH = f'{secrets_conf.SECRETS_ROOT}/secrets.yml.enc'
DEFAULT_ENV_PATH = f'{secrets_conf.SECRETS_ROOT}/secrets.env.enc'
class Command(BaseCommand):
help = 'Initialize django-encrypted-secrets install by generating a master key file.'
def add_arguments(self, parser):
parser.add_argument('--mode', type=str, help='Maintain secrets in YAML or env-file format. Options are "env" or "yaml" (default is yaml).')
def handle(self, *args, **options):
        # argparse stores None for an omitted --mode, so `or` supplies the default
        self.mode = options.get('mode') or 'yaml'
self.key = secrets.token_urlsafe(256)
path = f'{settings.BASE_DIR}/master.key'
file = open(path, 'w')
file.write(self.key)
file.close()
if self.mode == 'env':
encrypted_secrets_path = DEFAULT_ENV_PATH
else:
encrypted_secrets_path = DEFAULT_YAML_PATH
encrypted_file_exists = os.path.isfile(encrypted_secrets_path)
if not encrypted_file_exists:
self.write_default_encrypted_secrets_file(encrypted_secrets_path)
def new_yaml_file_template(self):
message = "# Write the credentials that you want to encrypt in YAML format below.\n" \
"# for example:\n" \
"#\n" \
"# aws:\n" \
"# access_key_id: 123\n" \
"# secret_access_key: 345"
return message
def new_env_file_template(self):
message = "# Write the credentials that you want to encrypt in key=value format below.\n" \
"# for example:\n" \
"#\n" \
"KEY_1=\"value 1\"\n" \
"KEY_2=123"
return message
def write_default_encrypted_secrets_file(self, encrypted_secrets_path):
if self.mode == 'env':
write_secrets(self.new_env_file_template(), self.key, encrypted_secrets_path)
else:
write_secrets(self.new_yaml_file_template(), self.key, encrypted_secrets_path)
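# Hypothetical invocation (the actual command name is this module's filename,
# which is not shown here):
#
#   python manage.py init_encrypted_secrets --mode env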
|
11476521
|
from .convolution import CFConv
from .dense import Dense
from .embedding import Embedding
from .pooling import PoolSegments
from .rbf import RBFExpansion
from .module import Module
from .distances import EuclideanDistances
|
11476541
|
import os
# Directories where data and output will be saved.
SETTINGS_DIR = os.path.abspath(os.path.dirname(__file__))
DATA_DIR = os.path.expandvars('$HOME/stormtracks_data/data')
OUTPUT_DIR = os.path.expandvars('$HOME/stormtracks_data/output')
SECOND_OUTPUT_DIR = os.path.expandvars('$HOME/stormtracks_data/output')
LOGGING_DIR = 'logs'
FIGURE_OUTPUT_DIR = os.path.expandvars('$HOME/stormtracks_data/figures')
# 20th C Reanalysis project version.
C20_VERSION = 'v2'
# Lat/Lon range.
MIN_LON = 260
MAX_LON = 340
MIN_LAT = 0
MAX_LAT = 60
RESULTS = 'prod_release_1'
CONSOLE_LOG_LEVEL = 'info'
FILE_LOG_LEVEL = 'debug'
CHUNK_SIZE = 1024*1000
MINIMUM_DOWNLOAD_RATE_1 = 300000 # B/s - 0.3 MB/s.
MINIMUM_DOWNLOAD_RATE_2 = 1000000 # B/s - 1 MB/s.
|
11476623
|
from xml.dom import minidom
import numpy as np
from pathlib import Path
import sys
import os
sys.path.append('../')
from utils import plot_stroke
# for flask app
def path_string_to_stroke(path, str_len, down_sample=False):
path_data = path.split(" ")[:-1]
print(len(path_data))
stroke = np.zeros((len(path_data), 3))
i = 0
while i < len(path_data):
command = path_data[i][0]
coord = path_data[i][1:].split(',')
if command == 'M':
stroke[i,0] = 1.0
elif command == 'L':
stroke[i,0] = 0.0
stroke[i,1] = float(coord[0])
stroke[i,2] = -float(coord[1])
i += 1
stroke[0,0] = 0.0
stroke[-1, 0] = 1.
print("initial shape of data: ", stroke.shape)
cuts = np.where(stroke[:, 0] == 1.)[0]
print("EOS index:",cuts)
k = 1
ratio = len(stroke) // str_len
print("LPC: ", ratio)
if ratio > 30 or down_sample:
k = 2
print("downsampling by 2")
start = 0
down_sample_data = []
for eos in cuts:
down_sample_data.append(stroke[start:eos:k])
down_sample_data.append(stroke[eos])
start = eos + 1
down_sample_stroke = np.vstack(down_sample_data)
# convert absolute coordinates into offset
down_sample_stroke[1:,1:] = down_sample_stroke[1:,1:] - down_sample_stroke[:-1,1:]
print("After downsampling shape of data: ",len(down_sample_stroke))
return down_sample_stroke
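# A minimal usage sketch (hypothetical path string; note the function expects
# a trailing space, since it drops the last element of path.split(" ")):
#
# stroke = path_string_to_stroke("M10,20 L12,22 L15,25 ", str_len=3)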
def svg_xml_parser(svg_path="./mobile/writing_8.svg"):
"""
Extract path data from svg xml file
return:
path_data: list of points in path
"""
doc = minidom.parse(svg_path)
path_strings = [path.getAttribute('d') for path
in doc.getElementsByTagName('path')]
doc.unlink()
path = path_strings[0]
# print(len(path))
path_data = path.split(" ")[:-1]
print(len(path_data))
return path_data
def path_to_stroke(path_data, k=1, save_path="./mobile/"):
"""
Convert svg path data into stroke data with offset coordinates
args:
path_data: list of svg path points
k: downsample factor, default 1 means no downsampling
save_path: directory path to save stroke.npy file
"""
save_path = Path(save_path)
stroke = np.zeros((len(path_data), 3))
i = 0
while i < len(path_data):
command = path_data[i][0]
coord = path_data[i][1:].split(',')
if command == 'M':
stroke[i,0] = 1.0
elif command == 'L':
stroke[i,0] = 0.0
stroke[i,1] = float(coord[0])
stroke[i,2] = -float(coord[1])
i += 1
stroke[0,0] = 0.0
stroke[-1, 0] = 1.
print("initial shape of data: ", stroke.shape)
cuts = np.where(stroke[:, 0] == 1.)[0]
print("EOS index:",cuts)
start = 0
down_sample_data = []
for eos in cuts:
down_sample_data.append(stroke[start:eos:k])
down_sample_data.append(stroke[eos])
start = eos + 1
down_sample_stroke = np.vstack(down_sample_data)
# convert absolute coordinates into offset
down_sample_stroke[1:,1:] = down_sample_stroke[1:,1:] - down_sample_stroke[:-1,1:]
print("final shape of data: ", down_sample_stroke.shape)
plot_stroke(down_sample_stroke, "img.png")
np.save(save_path, down_sample_stroke, allow_pickle=True)
if __name__ == '__main__':
path_data = svg_xml_parser(svg_path="./static/mobile/writing_10.svg")
path_to_stroke(path_data, k=1, save_path="./static/mobile/style_10.npy")
with open('./static/mobile/inpText_10.txt') as file:
texts = file.read().splitlines()
real_text = texts[0]
print(len(list(real_text)))
|
11476628
|
import sys
import cv2
import numpy as np
def get_all_contours(img):
ref_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(ref_gray, 127, 255, 0)
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
return contours
if __name__=='__main__':
#img = cv2.imread('../images/input_nike_logo_shapes.png')
img = cv2.imread(sys.argv[1])
input_contours = get_all_contours(img)
factor = 0.00001
while True:
output_img = np.zeros(img.shape, np.uint8) + 255
for contour in input_contours:
epsilon = factor * cv2.arcLength(contour, True)
approx = cv2.approxPolyDP(contour, epsilon, True)
cv2.drawContours(output_img, [approx], -1, (0,0,0), 3)
cv2.imshow('Output', output_img)
c = cv2.waitKey()
if c == 27:
break
factor *= 0.75
|
11476629
|
load("@bazel_skylib//lib:paths.bzl", "paths")
BUILD_PRELUDE = """
package(default_visibility = ["//visibility:public"])
load("@bazel_tools//tools/build_defs/pkg:pkg.bzl", "pkg_tar")
"""
BUILD_TAR_TEMPLATE = """
pkg_tar(
name = "{}",
deps = [":{}"],
)
"""
def _archive_url(folder, version, archive):
return paths.join("https://dl.k8s.io", folder, version, archive)
def _fetch_kube_release(ctx):
build_file_contents = BUILD_PRELUDE
for archive in ctx.attr.archives:
ctx.download(
url = _archive_url(ctx.attr.folder, ctx.attr.version, archive),
output = archive,
sha256 = ctx.attr.archives[archive],
)
build_file_contents += BUILD_TAR_TEMPLATE.format(
paths.basename(archive).split(".")[0],
archive,
)
ctx.file("BUILD", content = build_file_contents)
fetch_kube_release = repository_rule(
implementation = _fetch_kube_release,
attrs = {
"folder": attr.string(default = "release"),
"version": attr.string(mandatory = True),
"archives": attr.string_dict(mandatory = True),
},
)
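# A hypothetical WORKSPACE usage sketch (the load label, version and sha256
# are placeholders, not values from the original source):
#
# load("//:fetch_kube_release.bzl", "fetch_kube_release")
# fetch_kube_release(
#     name = "kube_release",
#     version = "v1.18.0",
#     archives = {
#         "kubernetes-server-linux-amd64.tar.gz": "<sha256 of the archive>",
#     },
# )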
|
11476639
|
from django.shortcuts import render
# Create your views here.
from .serializers import UserRegSerializer, SmsSerializer, UserDetailSerializer, LogSerializer, EmailSerializer, \
UserUpdateSerializer
from .models import UserProfile, VerifyCode, UserLoginLog
from rest_framework import mixins, generics, permissions
from rest_framework import viewsets
from rest_framework.authentication import BaseAuthentication # base authentication class; its methods must be overridden
from rest_framework.permissions import IsAuthenticated, IsAdminUser # used directly
from .permissions import UserPermission
from .filters import UserFilter
from rest_framework import filters
from rest_framework.pagination import PageNumberPagination
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from rest_framework.authentication import SessionAuthentication
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.generics import ListAPIView, RetrieveAPIView
# from .serializers import
from random import choice
from utils.SMS import SendSMS
from rest_framework import status
from rest_framework.response import Response
import platform
import user_agents
class MyAuth(BaseAuthentication):
    '''
    Custom authentication
    '''
def authenticate(self, request):
pass
def authenticate_header(self, request):
pass
class UserProfilePagination(PageNumberPagination):
page_size = 10
page_size_query_param = 'page_size'
page_query_param = 'page'
max_page_size = 50
from django.contrib.auth import get_user_model
User = get_user_model() # get the User model specified by AUTH_USER_MODEL in settings.py
class UserViewset(mixins.UpdateModelMixin, mixins.CreateModelMixin, mixins.RetrieveModelMixin, mixins.ListModelMixin,
viewsets.GenericViewSet):
    '''
    User
    create:
    delete:
    update:
    retrieve:
    '''
# queryset = UserProfile.objects.all()
# serializer_class = UserRegSerializer
    pagination_class = UserProfilePagination # fix warning #20
authentication_classes = (JSONWebTokenAuthentication, SessionAuthentication)
# permission_classes = (UserPermission,)
    # lookup_field = 'id'  # customize which field to look up; filtering happens after get_queryset
filter_backends = (DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter)
    filter_class = UserFilter # custom filter
ordering_fields = ('id',)
    search_fields = ('=username', '=id') # search the given fields; multiple match modes supported, fuzzy search by default
def get_queryset(self):
        '''
        list: only returns the current user's information.
        # With this method the `queryset` attribute above is not needed.
        # URL query parameters can be read here for further filtering, e.g.
        # return UserProfile.objects.filter(id__gt=0)
        :return:
        '''
return UserProfile.objects.filter(username=self.request.user)
def get_permissions(self):
if self.action == 'create':
return []
elif self.action == 'update':
return [permissions.IsAuthenticated()]
return [permissions.IsAuthenticated()]
def get_serializer_class(self):
        if self.action == 'update': # possibly a password reset
return UserUpdateSerializer
elif self.action == 'retrieve':
return UserDetailSerializer
elif self.action == 'create':
return UserRegSerializer
return UserDetailSerializer
def get_object(self):
        '''
        Called by Retrieve and Delete.
        All retrievals and deletions operate only on the current user.
        :return:
        '''
return self.request.user
class SmsCodeViewset(mixins.CreateModelMixin, viewsets.GenericViewSet):
"""
发送短信验证码
"""
serializer_class = SmsSerializer
def generate_code(self):
"""
生成5位数字的验证码字符串
"""
seeds = "1234567890"
random_str = []
for i in range(6):
random_str.append(choice(seeds))
return "".join(random_str)
from celery_tasks.SendCode import tasks as SendCode
def create(self, request, *args, **kwargs):
        '''
        Overrides the create method of CreateModelMixin: sends the
        verification code and saves it to the database.
        :param request:
        :param args:
        :param kwargs:
        :return:
        '''
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
mobile = serializer.validated_data["mobile"]
code = self.generate_code()
        data = [code, '5'] # the code expires after five minutes
SendCode.SendSMS.delay(to=mobile, data=data, tempId=1)
code_record = VerifyCode(code=code, mobile=mobile, type='mobile')
code_record.save()
return Response({
"mobile": mobile
}, status=status.HTTP_201_CREATED)
from utils.Email import SendMail
from celery_tasks.SendCode import tasks as SendCode
class EmailCodeViewSet(mixins.CreateModelMixin, viewsets.GenericViewSet):
serializer_class = EmailSerializer
def generate_code(self):
"""
生成5位数字的验证码字符串
"""
seeds = "1234567890"
random_str = []
for i in range(6):
random_str.append(choice(seeds))
return "".join(random_str)
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
mobile = serializer.validated_data['mobile']
        recipients = [mobile]  # avoid shadowing the builtin `list`
        code = self.generate_code()
        SendCode.SendMail.delay(code, recipients)
code_record = VerifyCode(code=code, mobile=mobile, type='email')
code_record.save()
return Response({
"mobile": mobile
}, status=status.HTTP_201_CREATED)
# class UserLogViewSet(mixins.CreateModelMixin, mixins.ListModelMixin, mixins.RetrieveModelMixin, viewsets.GenericViewSet):
class UserLogViewSet(viewsets.ModelViewSet):
    '''
    create: API exposed
    delete: API not exposed
    update: API not exposed
    retrieve: only the current user's own records
    '''
queryset = UserLoginLog.objects.all()
permission_classes = (IsAuthenticated,)
authentication_classes = (JSONWebTokenAuthentication, SessionAuthentication)
serializer_class = LogSerializer
def get_queryset(self):
return UserLoginLog.objects.filter(user=self.request.user)
    # get the OS
    def get_os(self, request):
        # platform.platform() takes no request argument; call it without one
        return platform.platform()
    # get the IP address
def get_ip(self, request):
if 'HTTP_X_FORWARDED_FOR' in request.META:
ip = request.META['HTTP_X_FORWARDED_FOR']
else:
ip = request.META['REMOTE_ADDR']
return ip
    # get the user agent
def get_ua(self, request):
ua_string = request.META.get('HTTP_USER_AGENT', '')
if len(ua_string) > 200:
ua_string = ua_string[:200]
        # parse into a user_agents object
        user_agent = user_agents.parse(ua_string)
        # browser family
        bw = user_agent.browser.family
        # OS family (currently unused)
        s = user_agent.os.family
        # return the browser family
        return bw
def perform_create(self, serializer):
UserLogin = UserLoginLog()
UserLogin.user = self.request.user
UserLogin.user_login_os = self.get_os(request=self.request)
UserLogin.user_login_ip = self.get_ip(request=self.request)
UserLogin.user_login_agent = self.get_ua(request=self.request)
UserLogin.save()
|