11462402
import locale
import logging
import os
import re
import subprocess
from io import open
import click
import sqlparse
from .favoritequeries import favoritequeries
from .main import NO_QUERY, PARSED_QUERY, special_command
from .utils import handle_cd_command
TIMING_ENABLED = False
use_expanded_output = False
PAGER_ENABLED = True
tee_file = None
once_file = written_to_once_file = None
def set_timing_enabled(val):
global TIMING_ENABLED
TIMING_ENABLED = val
def set_pager_enabled(val):
global PAGER_ENABLED
PAGER_ENABLED = val
def is_pager_enabled():
return PAGER_ENABLED
@special_command('pager', '\\P [command]',
'Set PAGER. Print the query results via PAGER.',
arg_type=PARSED_QUERY, aliases=('\\P', ), case_sensitive=True)
def set_pager(arg, **_):
if arg:
os.environ['PAGER'] = arg
msg = 'PAGER set to %s.' % arg
set_pager_enabled(True)
else:
if 'PAGER' in os.environ:
msg = 'PAGER set to %s.' % os.environ['PAGER']
else:
# This uses click's default per echo_via_pager.
msg = 'Pager enabled.'
set_pager_enabled(True)
return [(None, None, None, msg)]
@special_command('nopager', '\\n', 'Disable pager, print to stdout.',
arg_type=NO_QUERY, aliases=('\\n', ), case_sensitive=True)
def disable_pager():
set_pager_enabled(False)
return [(None, None, None, 'Pager disabled.')]
@special_command('\\timing', '\\t', 'Toggle timing of commands.', arg_type=NO_QUERY, aliases=('\\t', ), case_sensitive=True)
def toggle_timing():
global TIMING_ENABLED
TIMING_ENABLED = not TIMING_ENABLED
message = "Timing is "
message += "on." if TIMING_ENABLED else "off."
return [(None, None, None, message)]
def is_timing_enabled():
return TIMING_ENABLED
def set_expanded_output(val):
global use_expanded_output
use_expanded_output = val
def is_expanded_output():
return use_expanded_output
_logger = logging.getLogger(__name__)
def editor_command(command):
"""
Is this an external editor command?
:param command: string
"""
# It is possible to have `\e filename` or `SELECT * FROM \e`. So we check
# for both conditions.
    return command.strip().endswith('\\e') or command.strip().startswith('\\e')
def get_filename(sql):
    if sql.strip().startswith('\\e'):
command, _, filename = sql.partition(' ')
return filename.strip() or None
def get_editor_query(sql):
"""Get the query part of an editor command."""
sql = sql.strip()
    pattern = re.compile(r'(^\\e|\\e$)')
    return pattern.sub('', sql)
def open_external_editor(filename=None, sql=None):
"""Open external editor, wait for the user to type in their query, return
the query.
:return: list with one tuple, query as first element.
"""
message = None
filename = filename.strip().split(' ', 1)[0] if filename else None
sql = sql or ''
MARKER = '# Type your query above this line.\n'
# Populate the editor buffer with the partial sql (if available) and a
# placeholder comment.
query = click.edit('{sql}\n\n{marker}'.format(sql=sql, marker=MARKER),
filename=filename, extension='.sql')
if filename:
try:
with open(filename, encoding='utf-8') as f:
query = f.read()
except IOError:
message = 'Error reading file: %s.' % filename
if query is not None:
query = query.split(MARKER, 1)[0].rstrip('\n')
else:
# Don't return None for the caller to deal with.
# Empty string is ok.
query = sql
return (query, message)
@special_command('\\f', '\\f [name]', 'List or execute favorite queries.', arg_type=PARSED_QUERY, case_sensitive=True)
def execute_favorite_query(cur, arg, **_):
"""Returns (title, rows, headers, status)"""
    if arg == '':
        for result in list_favorite_queries():
            yield result
        return
query = favoritequeries.get(arg)
if query is None:
message = "No favorite query: %s" % (arg)
yield (None, None, None, message)
else:
for sql in sqlparse.split(query):
sql = sql.rstrip(';')
title = '> %s' % (sql)
cur.execute(sql)
if cur.description:
headers = [x[0] for x in cur.description]
yield (title, cur, headers, None)
else:
yield (title, None, None, None)
def list_favorite_queries():
"""List of all favorite queries.
Returns (title, rows, headers, status)"""
headers = ["Name", "Query"]
rows = [(r, favoritequeries.get(r)) for r in favoritequeries.list()]
if not rows:
status = '\nNo favorite queries found.' + favoritequeries.usage
else:
status = ''
return [('', rows, headers, status)]
@special_command('\\fs', '\\fs name query', 'Save a favorite query.')
def save_favorite_query(arg, **_):
"""Save a new favorite query.
Returns (title, rows, headers, status)"""
usage = 'Syntax: \\fs name query.\n\n' + favoritequeries.usage
if not arg:
return [(None, None, None, usage)]
name, _, query = arg.partition(' ')
# If either name or query is missing then print the usage and complain.
if (not name) or (not query):
return [(None, None, None,
usage + 'Err: Both name and query are required.')]
favoritequeries.save(name, query)
return [(None, None, None, "Saved.")]
@special_command('\\fd', '\\fd [name]', 'Delete a favorite query.')
def delete_favorite_query(arg, **_):
"""Delete an existing favorite query.
"""
usage = 'Syntax: \\fd name.\n\n' + favoritequeries.usage
if not arg:
return [(None, None, None, usage)]
status = favoritequeries.delete(arg)
return [(None, None, None, status)]
@special_command('!', '! [command]', 'Execute a system shell command.', aliases=['system'], case_sensitive=False)
def execute_system_command(arg, **_):
"""Execute a system shell command."""
usage = "Syntax: system [command].\n"
if not arg:
return [(None, None, None, usage)]
try:
command = arg.strip()
if command.startswith('cd'):
ok, error_message = handle_cd_command(arg)
if not ok:
return [(None, None, None, error_message)]
return [(None, None, None, '')]
args = arg.split(' ')
process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = process.communicate()
response = output if not error else error
# Python 3 returns bytes. This needs to be decoded to a string.
if isinstance(response, bytes):
encoding = locale.getpreferredencoding(False)
response = response.decode(encoding)
return [(None, None, None, response)]
except OSError as e:
return [(None, None, None, 'OSError: %s' % e.strerror)]
def parseargfile(arg):
if arg.startswith('-o '):
mode = "w"
filename = arg[3:]
else:
mode = 'a'
filename = arg
if not filename:
raise TypeError('You must provide a filename.')
return {'file': filename, 'mode': mode}
@special_command('spool', 'spool [-o] [filename]',
'Append all results to an output file (overwrite using -o).',
aliases=['spo', 'tee'], case_sensitive=False)
def set_tee(arg, **_):
global tee_file
try:
tee_file = open(**parseargfile(arg))
except (IOError, OSError) as e:
raise OSError("Cannot write to file '{}': {}".format(e.filename, e.strerror))
return [(None, None, None, "")]
def close_tee():
global tee_file
if tee_file:
tee_file.close()
tee_file = None
@special_command('nospool', 'nospool', 'Stop writing results to an output file.',
case_sensitive=False)
def no_tee(arg, **_):
close_tee()
return [(None, None, None, "")]
def write_tee(output):
global tee_file
if tee_file:
click.echo(output, file=tee_file, nl=False)
@special_command('\\once', '\\o [-o] filename',
'Append next result to an output file (overwrite using -o).',
aliases=('\\o', ))
def set_once(arg, **_):
global once_file
once_file = parseargfile(arg)
return [(None, None, None, "")]
def write_once(output):
global once_file, written_to_once_file
if output and once_file:
try:
f = open(**once_file)
except (IOError, OSError) as e:
once_file = None
raise OSError("Cannot write to file '{}': {}".format(
e.filename, e.strerror))
with f:
f.write(output)
f.write(u"\n")
written_to_once_file = True
def unset_once_if_written():
    """Unset the once file, if it has been written to."""
    global once_file, written_to_once_file
    if written_to_once_file:
        once_file = None
        written_to_once_file = False
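# A minimal sketch of the spool argument contract implemented by parseargfile
# above: a leading "-o " selects overwrite mode, anything else appends.
if __name__ == '__main__':
    assert parseargfile('-o results.txt') == {'file': 'results.txt', 'mode': 'w'}
    assert parseargfile('results.txt') == {'file': 'results.txt', 'mode': 'a'}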
11462408
import sys
from io import BytesIO
from collections import OrderedDict
import urllib
import shlex
import subprocess
import logging
from .. import argparser
log = logging.getLogger('curlbomb.share')
def add_parser(subparsers):
share_parser = subparsers.add_parser(
'share', help="Share a resource URL with any number of people.")
share_args = share_parser.add_argument_group("share args", "Share a resource URL with any number of people")
share_args.add_argument('resource', metavar="FILE", help="path of file to share, or - to read from STDIN",
nargs='?', default=sys.stdin)
argparser.add_inheritible_args(share_parser, "share")
share_parser.set_defaults(subcommand="share", prepare_command=prepare)
def prepare(args, settings, parser):
settings['survey'] = True
settings['receive_postbacks'] = False
settings['num_gets'] = argparser.get_arg_value(args, "num_gets", 0)
if settings['num_gets'] == 0:
        log.warning("server set to serve resource an unlimited number of times (-n 0)")
# Share command outputs just a URL to share rather than a curlbomb:
def get_share_command(settings, unwrapped=None):
params = OrderedDict()
if settings['require_knock']:
params['knock'] = settings['knock']
return "http{ssl}://{host}:{port}/r{query_params}".format(
ssl="s" if settings['ssl'] is not False else "",
host=settings['display_host'],
port=settings['display_port'],
query_params="?"+urllib.parse.urlencode(
params) if len(params)>0 else ""
)
settings['get_curlbomb_command'] = get_share_command
if args.resource == sys.stdin:
args.resource = settings['stdin']
if args.resource == settings['stdin'] and settings['stdin'].isatty():
parser.print_help()
sys.stderr.write("\nYou must specify a file or pipe one to this command's stdin\n")
sys.exit(1)
if settings.get('resource', None) is None:
if args.resource == settings['stdin'] or args.resource == '-':
# Read resource from stdin:
settings['resource'] = BytesIO(settings['stdin'].buffer.read())
else:
# Read resource from disk:
settings['resource'] = open(args.resource, 'br')
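# For reference, the URL produced by get_share_command has this shape
# (illustrative values, not taken from a real run): with ssl off,
# display_host "example.com", display_port 8080 and require_knock set with
# knock "secret", the shared URL is "http://example.com:8080/r?knock=secret".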
11462413
import os
import tensorflow as tf
from gans.callbacks import callback
from gans.utils import constants
from gans.utils import logging
log = logging.get_logger(__name__)
class GANCheckpointManager(callback.Callback):
def __init__(
self,
components_to_save,
root_checkpoint_path,
continue_training,
):
self.root_checkpoint_path = root_checkpoint_path
self.continue_training = continue_training
self.training_checkpoint_path = os.path.join(
self.root_checkpoint_path,
constants.CHECKPOINT_DIR,
)
self.checkpoint = tf.train.Checkpoint(
**components_to_save,
)
self.checkpoint_manager = tf.train.CheckpointManager(
checkpoint=self.checkpoint,
directory=self.training_checkpoint_path,
max_to_keep=3,
)
def load_for_predict(self):
pass
def load_for_train(self):
pass
def regenerate_training(self):
latest_checkpoint_epoch = 0
if self.continue_training:
latest_checkpoint = self.checkpoint_manager.latest_checkpoint
if latest_checkpoint is not None:
                # Checkpoint paths end with "-<number>"; parse from the right
                # so dashes elsewhere in the path don't break the extraction.
                latest_checkpoint_epoch = int(latest_checkpoint.rsplit("-", 1)[-1])
self.checkpoint.restore(self.checkpoint_manager.latest_checkpoint)
log.info(f'Training regeneration from checkpoint: {self.root_checkpoint_path}.')
else:
log.info('No checkpoints found. Starting training from scratch.')
return latest_checkpoint_epoch
def save(self, checkpoint_number):
self.checkpoint_manager.save(checkpoint_number=checkpoint_number)
def on_training_step_end(self, trainer):
if trainer.global_step % trainer.save_model_every_n_step == 0:
self.save(checkpoint_number=trainer.epoch)
log.info(f'Saved model for {trainer.global_step} step and {trainer.epoch} epoch.')
def on_epoch_end(self, trainer):
self.save(checkpoint_number=trainer.epoch)
        log.info(f'Saved model at the end of epoch {trainer.epoch}.')
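# A minimal wiring sketch (stand-in models; all names are illustrative):
#
#   generator = tf.keras.Sequential([tf.keras.layers.Dense(1)])
#   discriminator = tf.keras.Sequential([tf.keras.layers.Dense(1)])
#   manager = GANCheckpointManager(
#       components_to_save={'generator': generator, 'discriminator': discriminator},
#       root_checkpoint_path='./runs/exp1',
#       continue_training=True,
#   )
#   start_epoch = manager.regenerate_training()  # 0 if no checkpoint was found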
11462457
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from wireframe.utils import argsort2d
DX = [0, 0, 1, -1, 1, 1, -1, -1]
DY = [1, -1, 0, 0, 1, -1, 1, -1]
def ap(tp, fp, npos):
recall = tp / npos
precision = tp / np.maximum(tp + fp, 1e-9)
recall = np.concatenate(([0.0], recall, [1.0]))
precision = np.concatenate(([0.0], precision, [0.0]))
for i in range(precision.size - 1, 0, -1):
precision[i - 1] = max(precision[i - 1], precision[i])
i = np.where(recall[1:] != recall[:-1])[0]
return np.sum((recall[i + 1] - recall[i]) * precision[i + 1])
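# Worked example (sketch): with cumulative tp = [1, 2], fp = [0, 1] and
# npos = 2, recall = [0.5, 1.0] and precision = [1.0, 0.667]; after the
# backward max-envelope pass, AP = 0.5 * 1.0 + 0.5 * 0.667 ~= 0.83.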
def eval_depth(pred, pred_depth, gt, gt_depth, max_distance):
confidence = pred[:, -1]
sorted_ind = np.argsort(-confidence)
nd = len(pred)
pred = pred[sorted_ind, :-1]
pred_depth = pred_depth[sorted_ind]
d = np.sqrt(np.sum(pred ** 2, 1)[:, None] + np.sum(gt ** 2, 1)[None, :] - 2 * pred @ gt.T)
choice = np.argmin(d, 1)
    hit = np.zeros(len(gt), bool)  # np.bool is removed in modern NumPy
dist = np.min(d, 1)
depth_diff = np.zeros(len(pred))
for i in range(nd):
if dist[i] < max_distance and not hit[choice[i]]:
hit[choice[i]] = True
a = np.maximum(-pred_depth[i], 1e-5)
b = -gt_depth[choice[i]]
depth_diff[i] = np.log(a) - np.log(b)
n = np.maximum(np.sum(hit), 1)
rst = np.sum(depth_diff @ depth_diff.T) / n - np.sum(depth_diff) * np.sum(depth_diff) / (n * n)
return rst
def mAP_jlist(v0, v1, max_distance, im_ids, weight,
pred_dirs=None, gt_dirs=None, weight_dirs=None):
if len(v0) == 0:
return 0
# whether simultaneously evaluate direction prediction
eval_dir = False
if pred_dirs is not None:
assert (gt_dirs is not None) and (weight_dirs is not None)
eval_dir = True
weight_dir_sum = sum([np.sum(j) for j in weight_dirs])
        gt_num = sum(len(j) for j in weight_dirs)
weight_dirs = [_ / weight_dir_sum * gt_num for _ in weight_dirs]
v0 = np.array(v0)
v1 = np.array(v1)
weight_sum = sum([np.sum(j) for j in weight])
    gt_num = sum(len(j) for j in weight)
weight = [_ / weight_sum * gt_num for _ in weight]
confidence = v0[:, -1]
# sort by confidence
sorted_ind = np.argsort(-confidence)
v0 = v0[sorted_ind, :]
im_ids = im_ids[sorted_ind]
nd = len(im_ids)
    tp, fp = np.zeros(nd, dtype=float), np.zeros(nd, dtype=float)
hit = [[False for _ in j] for j in v1]
if eval_dir:
pred_dirs = pred_dirs[sorted_ind]
        tp_dir, fp_dir = np.zeros(nd, dtype=float), np.zeros(nd, dtype=float)
hit_dir = [[False for _ in j] for j in v1]
# go down dets and mark TPs and FPs
for i in range(nd):
gt_juns = v1[im_ids[i]]
pred_juns = v0[i][:-1]
if len(gt_juns) > 0:
# compute overlaps
dists = np.linalg.norm((pred_juns[None, :] - gt_juns), axis=1)
choice = np.argmin(dists)
dist = np.min(dists)
if dist < max_distance and not hit[im_ids[i]][choice]:
tp[i] = weight[im_ids[i]][choice]
hit[im_ids[i]][choice] = True
# theta is correct only when junction is correct first
if eval_dir:
gt_dir = gt_dirs[im_ids[i]][choice]
pred_dir = pred_dirs[i]
d_theta = np.fmod(gt_dir - pred_dir, 2 * np.pi)
d_theta = d_theta + 2 * np.pi if d_theta < 0 else d_theta
d_theta = np.minimum(np.abs(d_theta),
np.abs(2 * np.pi - d_theta))
if d_theta < 2 * np.pi / 48.0 and \
not hit_dir[im_ids[i]][choice]:
tp_dir[i] = weight_dirs[im_ids[i]][choice]
hit_dir[im_ids[i]][choice] = True
else:
fp_dir[i] = 1
else:
fp[i] = 1
if eval_dir:
fp_dir[i] = 1
tp = np.cumsum(tp)
fp = np.cumsum(fp)
if eval_dir:
tp_dir = np.cumsum(tp_dir)
fp_dir = np.cumsum(fp_dir)
return ap(tp, fp, gt_num), ap(tp_dir, fp_dir, gt_num)
else:
return ap(tp, fp, gt_num)
def nms_junction(heatmap, delta=1):
heatmap = heatmap.copy()
    disable = np.zeros_like(heatmap, dtype=bool)
for x, y in argsort2d(heatmap):
for dx, dy in zip(DX, DY):
xp, yp = x + dx, y + dy
if not (0 <= xp < heatmap.shape[0] and 0 <= yp < heatmap.shape[1]):
continue
if heatmap[x, y] >= heatmap[xp, yp]:
disable[xp, yp] = True
heatmap[disable] = 0
return heatmap
def ap_jheatmap(pred, truth, distances, im_ids, weight,
pred_dir=None, gt_dir=None, weight_dir=None):
# note the distance is junction prediction requirement
# theta requirement is always fixed for now
if pred_dir is not None:
assert (gt_dir is not None) and (weight_dir is not None)
ap_jt, ap_dirt = [], []
for d in distances:
        ap_j, ap_d = mAP_jlist(pred, truth, d, im_ids, weight,
                               pred_dir, gt_dir, weight_dir)
        ap_jt.append(ap_j)
        ap_dirt.append(ap_d)
return sum(ap_jt) / len(ap_jt) * 100, \
sum(ap_dirt) / len(ap_dirt) * 100
else:
return sum(mAP_jlist(pred, truth, d, im_ids, weight)
for d in distances) / len(distances) * 100
def post_jheatmap(heatmap, offset=None, delta=1, dir_map=None, jdep_map=None):
# heatmap = nms_junction(heatmap, delta=delta)
# only select the best 1000 junctions for efficiency
v0 = argsort2d(-heatmap)[:1000]
confidence = -np.sort(-heatmap.ravel())[:1000]
keep_id = np.where(confidence >= 1e-2)[0]
if len(keep_id) == 0:
return np.zeros((0, 3))
v0 = v0[keep_id]
confidence = confidence[keep_id]
if offset is not None:
v0 = np.array([v + offset[:, v[0], v[1]] for v in v0])
v0 = np.hstack((v0, confidence[:, np.newaxis]))
if dir_map is not None:
assert offset is None
# take the theta corresponding to v0
# currently only support T direction so
if len(dir_map.shape) == 2:
dir = np.array([dir_map[int(v[0]), int(v[1])] for v in v0])
else:
raise NotImplementedError
return v0, dir
if jdep_map is not None:
if len(jdep_map.shape) == 2:
jdep = np.array([jdep_map[int(v[0]), int(v[1])] for v in v0])
else:
raise NotImplementedError
return v0, jdep
return v0
def get_confusion_mat(pred, gt):
index = gt * 2 + pred
label_count = np.bincount(index.reshape(-1).astype(np.int32))
confusion_mat = np.zeros((2, 2))
for i_label in range(2):
for j_label in range(2):
cur_index = i_label * 2 + j_label
if cur_index < len(label_count):
confusion_mat[i_label, j_label] = label_count[cur_index]
return confusion_mat
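# Example (sketch): pred = [0, 1], gt = [1, 1] gives index = [2, 3],
# label_count = [0, 0, 1, 1] and confusion_mat = [[0, 0], [1, 1]]
# (rows are ground-truth labels, columns are predictions).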
def iou_line(confusion_mat, target_cls):
pos = confusion_mat.sum(1)
res = confusion_mat.sum(0)
tp = np.diag(confusion_mat)
iou = (tp / np.maximum(1.0, pos + res - tp))
line_iou = iou[target_cls] * 100
return line_iou
# def main():
# a = np.random.randn(100, 2)
# b = np.random.randn(100, 2)
# c = np.concatenate([a, b], axis=0)
#
# print("total match", mAP_jlist(a, a, 0.01))
# print("half match", mAP_jlist(a[:50], a, 0.01))
# print("ordered", mAP_jlist(c, a, 0.01))
# np.random.shuffle(c)
# print("disordered", mAP_jlist(c, a, 0.01))
# print("no match", mAP_jlist(b, a, 0.01))
# if __name__ == "__main__":
# main()
11462499
import os
import torch
from torch.utils.data import Dataset
import torchvision.transforms as transforms
from PIL import Image
class FontData():
    def __init__(self, font_name, font_path, image=None):
        self.font_name = font_name
        self.font_path = font_path
        self.image = image
def load_data(self, loader):
        if self.image is None:
self.image = loader(self.font_path)
return self.image
def __repr__(self):
return "<FontData font_name: %s>" % self.font_name
class FontDataset(Dataset):
"""The Font Dataset."""
def __init__(self, root_dir, glyph_size=(64, 64), glyphs_per_image=26):
self.fonts = self.load_font_filenames(root_dir)
self.root_dir = root_dir
self.glyph_size = glyph_size
self.glyphs_per_image = glyphs_per_image
def __len__(self):
return len(self.fonts)
def __getitem__(self, index):
_index = index
if torch.is_tensor(_index):
_index = _index.tolist()
font = self.fonts[_index]
font_data = font.load_data(image_loader)
transform = transforms.Compose([
transforms.Resize(self.glyph_size[0]),
transforms.Grayscale(num_output_channels=1), # Drop to 1 channel
transforms.ToTensor()
])
return transform(font_data)
def load_font_filenames(self, root_dir):
font_images = []
assert os.path.isdir(root_dir), '%s is not a valid directory!' % root_dir
for root, _, filenames in sorted(os.walk(root_dir)):
for filename in filenames:
font_images.append(FontData(filename, os.path.join(root, filename)))
return font_images
# Helper Functions
def image_loader(path):
return Image.open(path).convert('RGB')
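# A minimal usage sketch, assuming ./fonts holds one rendered glyph image per
# font. Note that Resize(64) only fixes the shorter edge, so images with
# differing aspect ratios will not batch together without extra cropping.
if __name__ == '__main__':
    from torch.utils.data import DataLoader
    dataset = FontDataset('./fonts')
    loader = DataLoader(dataset, batch_size=8, shuffle=True)
    images = next(iter(loader))
    print(images.shape)  # e.g. torch.Size([8, 1, 64, 64]) for square sources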
11462508
import tensorflow as tf
class LoggerHook(tf.train.SessionRunHook):
def __init__(self, display_step):
self._display_step = display_step
def begin(self):
self._step = -1
def before_run(self, run_context):
self._step += 1
if self._step % self._display_step == 0:
return self.before_display_step_run(run_context)
def after_run(self, run_context, run_values):
if self._step % self._display_step == 0:
self.after_display_step_run(run_context, run_values)
def before_display_step_run(self, run_context):
pass
def after_display_step_run(self, run_context, run_values):
pass
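# A sketch of how the hook template above is meant to be specialized,
# assuming a TF1-style graph that exposes a `loss` tensor:
class LossLoggerHook(LoggerHook):
    """Illustrative subclass that logs a loss tensor every display step."""
    def __init__(self, loss_op, display_step=100):
        super(LossLoggerHook, self).__init__(display_step)
        self._loss_op = loss_op

    def before_display_step_run(self, run_context):
        # Ask the session to additionally fetch the loss on display steps.
        return tf.train.SessionRunArgs(self._loss_op)

    def after_display_step_run(self, run_context, run_values):
        print('step %d: loss = %.4f' % (self._step, run_values.results))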
11462530
import clify
import argparse
from dps import cfg
from dps.config import DEFAULT_CONFIG
from dps.projects.nips_2018.envs import air_testing_config as env_config
# from dps.projects.nips_2018.envs import grid_fullsize_config as env_config
from dps.projects.nips_2018.algs import air_config as alg_config
distributions = [
dict(cnn=True),
dict(cnn=True, vae_likelihood_std=0.0),
dict(),
dict(vae_likelihood_std=0.0),
]
config = DEFAULT_CONFIG.copy()
config.update(alg_config)
config.update(env_config)
config.update(
per_process_gpu_memory_fraction=0.45,
render_step=5000,
eval_step=1000,
max_experiences=10000000,
patience=10000000,
max_steps=1000000,
z_pres_prior_log_odds=10.0,
)
config.log_name = "{}_VERSUS_{}".format(alg_config.log_name, env_config.log_name)
print("Forcing creation of first dataset.")
with config.copy():
cfg.build_env()
print("Forcing creation of second dataset.")
with config.copy(config.curriculum[-1]):
cfg.build_env()
run_kwargs = dict(
n_repeats=1,
kind="slurm",
pmem=5000,
ignore_gpu=False,
)
parser = argparse.ArgumentParser()
parser.add_argument("kind", choices="long_cedar long_graham short_graham short_cedar other short_other".split())
args, _ = parser.parse_known_args()
kind = args.kind
if kind == "long_cedar":
kind_args = dict(
max_hosts=1, ppn=4, cpp=2, gpu_set="0,1", wall_time="24hours",
cleanup_time="30mins", slack_time="30mins")
elif kind == "long_graham":
kind_args = dict(
max_hosts=2, ppn=8, cpp=1, gpu_set="0,1", wall_time="6hours", project="def-jpineau",
cleanup_time="30mins", slack_time="30mins", n_param_settings=16)
elif kind == "short_cedar":
kind_args = dict(
max_hosts=1, ppn=4, cpp=1, gpu_set="0", wall_time="20mins",
cleanup_time="2mins", slack_time="2mins", n_param_settings=4)
elif kind == "short_graham":
kind_args = dict(
max_hosts=1, ppn=4, cpp=1, gpu_set="0", wall_time="20mins", project="def-jpineau",
cleanup_time="2mins", slack_time="2mins", n_param_settings=4)
elif kind == "other":
kind_args = dict(
max_hosts=1, ppn=4, cpp=2, gpu_set="0,1", wall_time="6hours", project="def-jpineau",
cleanup_time="30mins", slack_time="30mins", n_param_settings=4)
elif kind == "short_other":
kind_args = dict(
max_hosts=1, ppn=4, cpp=2, gpu_set="0,1", wall_time="30mins", project="def-jpineau",
cleanup_time="3mins", slack_time="3mins", n_param_settings=4)
else:
raise Exception("Unknown kind: {}".format(kind))
run_kwargs.update(kind_args)
from dps.hyper import build_and_submit
clify.wrap_function(build_and_submit)(
name="AIR_v_grid_task_param_search_{}".format(kind), config=config,
distributions=distributions, **run_kwargs)
11462560
from trakt.core.helpers import from_iso8601_datetime, to_iso8601_datetime, deprecated
from trakt.objects.core.helpers import update_attributes
from trakt.objects.video import Video
class Episode(Video):
def __init__(self, client, keys=None, index=None):
super(Episode, self).__init__(client, keys, index)
self.show = None
"""
:type: :class:`trakt.objects.show.Show`
Show
"""
self.season = None
"""
:type: :class:`trakt.objects.season.Season`
Season
"""
self.title = None
"""
:type: :class:`~python:str`
Title
"""
self.first_aired = None
"""
:type: :class:`~python:datetime.datetime`
First air date
"""
self.updated_at = None
"""
:type: :class:`~python:datetime.datetime`
Updated date/time
"""
self.available_translations = None
"""
:type: :class:`~python:list`
Available translations (for title, overview, etc..)
"""
def to_identifier(self):
"""Returns the episode identifier which is compatible with requests that require
episode definitions.
:return: Episode identifier/definition
:rtype: :class:`~python:dict`
"""
_, number = self.pk
return {
'number': number
}
@deprecated('Episode.to_info() has been moved to Episode.to_dict()')
def to_info(self):
"""**Deprecated:** use the :code:`to_dict()` method instead"""
return self.to_dict()
def to_dict(self):
"""Dump episode to a dictionary
:return: Episode dictionary
:rtype: :class:`~python:dict`
"""
result = self.to_identifier()
result.update({
'title': self.title,
'watched': 1 if self.is_watched else 0,
'collected': 1 if self.is_collected else 0,
'plays': self.plays if self.plays is not None else 0,
'in_watchlist': self.in_watchlist if self.in_watchlist is not None else 0,
'progress': self.progress,
'last_watched_at': to_iso8601_datetime(self.last_watched_at),
'collected_at': to_iso8601_datetime(self.collected_at),
'paused_at': to_iso8601_datetime(self.paused_at),
'ids': dict([
(key, value) for (key, value) in self.keys[1:] # NOTE: keys[0] is the (<season>, <episode>) identifier
])
})
if self.rating:
result['rating'] = self.rating.value
result['rated_at'] = to_iso8601_datetime(self.rating.timestamp)
# Extended Info
if self.first_aired:
result['first_aired'] = to_iso8601_datetime(self.first_aired)
if self.updated_at:
result['updated_at'] = to_iso8601_datetime(self.updated_at)
if self.overview:
result['overview'] = self.overview
if self.available_translations:
result['available_translations'] = self.available_translations
return result
def _update(self, info=None, **kwargs):
if not info:
return
super(Episode, self)._update(info, **kwargs)
update_attributes(self, info, [
'title',
# Extended Info
'available_translations'
])
# Extended Info
if 'first_aired' in info:
self.first_aired = from_iso8601_datetime(info.get('first_aired'))
if 'updated_at' in info:
self.updated_at = from_iso8601_datetime(info.get('updated_at'))
@classmethod
def _construct(cls, client, keys, info=None, index=None, **kwargs):
episode = cls(client, keys, index=index)
episode._update(info, **kwargs)
return episode
def __repr__(self):
if self.show and self.title:
return '<Episode %r - S%02dE%02d - %r>' % (self.show.title, self.pk[0], self.pk[1], self.title)
if self.show:
return '<Episode %r - S%02dE%02d>' % (self.show.title, self.pk[0], self.pk[1])
if self.title:
return '<Episode S%02dE%02d - %r>' % (self.pk[0], self.pk[1], self.title)
return '<Episode S%02dE%02d>' % self.pk
11462591
import itertools
import sys
from difflib import SequenceMatcher as SM
def uprint(*objects, sep=' ', end='\n', file=sys.stdout):
enc = file.encoding
if enc == 'UTF-8':
print(*objects, sep=sep, end=end, file=file)
else:
def f(obj):
return str(obj) \
.encode(enc, errors='backslashreplace') \
.decode(enc)
print(*map(f, objects), sep=sep, end=end, file=file)
def grouper(iterable, n):
it = iter(iterable)
while True:
chunk = tuple(itertools.islice(it, n))
if not chunk:
return
yield chunk
def get_similarity(s1, s2):
"""
Return similarity of both strings as a float between 0 and 1
"""
return SM(None, s1, s2).ratio()
def find_closest_match(target_track, tracks):
"""
Return closest match to target track
"""
track = None
# Get a list of (track, artist match ratio, name match ratio)
tracks_with_match_ratio = [(
track,
get_similarity(target_track.artist, track.artist),
get_similarity(target_track.name, track.name),
) for track in tracks]
# Sort by artist then by title
sorted_tracks = sorted(
tracks_with_match_ratio,
key=lambda t: (t[1], t[2]),
reverse=True # Descending, highest match ratio first
)
if sorted_tracks:
track = sorted_tracks[0][0] # Closest match to query
return track
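# Usage sketch with stand-in track objects (anything exposing .artist and
# .name works):
if __name__ == '__main__':
    from collections import namedtuple
    Track = namedtuple('Track', 'artist name')
    target = Track('Daft Punk', 'One More Time')
    candidates = [Track('Daft Punk', 'One More Time (radio edit)'),
                  Track('Draft Punk', 'One More Time')]
    print(find_closest_match(target, candidates))  # the exact-artist match wins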
11462612
from collections import defaultdict
from typing import List
class Solution:
def mostVisitedPattern(self, username: List[str], timestamp: List[int], website: List[str]) -> List[str]:
data = [[username[i], timestamp[i], website[i]] for i in range(len(username))]
data.sort()
dic = defaultdict(list)
for u, t, w in data:
dic[u].append(w)
        seq = defaultdict(set)
        for u, w in dic.items():
            if len(w) >= 3:
                for i in range(len(w) - 2):
                    for j in range(i + 1, len(w) - 1):
                        for k in range(j + 1, len(w)):
                            webs = (w[i], w[j], w[k])
                            seq[webs].add(u)
        return sorted(seq.items(), key=lambda x: (-len(x[1]), x[0]))[0][0]
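# Quick check against the example from the problem statement (note the
# returned pattern is a tuple of the three page names):
#
#   s = Solution()
#   s.mostVisitedPattern(
#       ["joe", "joe", "joe", "james", "james", "james", "james", "mary", "mary", "mary"],
#       [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
#       ["home", "about", "career", "home", "cart", "maps", "home", "home", "about", "career"])
#   # -> ('home', 'about', 'career')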
11462632
from __future__ import print_function
import cw
import cw.slurm
import bluelet
import threading
import concurrent.futures
import os
import sys
class Client(object):
def __init__(self, host=None, port=cw.PORT):
# if no host specified, then auto-detect if slurm should be used
if host is None:
if cw.is_slurm_available():
host = cw.slurm.master_host()
else:
host = 'localhost'
self.host = host
self.port = port
def connection_ready(self):
pass
def handle_results(self, callback):
self.conn = yield bluelet.connect(self.host, self.port)
self.connection_ready()
while True:
result = yield cw._readmsg(self.conn)
if result is None:
print('server connection closed')
return
assert isinstance(result, cw.ResultMessage)
callback(result.jobid, result.success,
cw.slow_deser(result.result_blob))
def send_job(self, jobid, func, *args, **kwargs):
task = cw.TaskMessage(
jobid,
cw.func_ser(func), cw.slow_ser(args), cw.slow_ser(kwargs),
os.getcwd(),
sys.path,
)
yield cw._sendmsg(self.conn, task)
class BaseClientThread(threading.Thread, Client):
def __init__(self, callback, host=None, port=cw.PORT):
threading.Thread.__init__(self)
Client.__init__(self, host, port)
self.callback = callback
self.daemon = True
self.ready_condition = threading.Condition()
self.ready = False
self.shutdown = False
self.shutdown_lock = threading.Lock()
def connection_ready(self):
with self.ready_condition:
self.ready = True
self.ready_condition.notify_all()
def main_coro(self):
handler = self.handle_results(self.callback)
yield bluelet.spawn(handler)
# Poll for thread shutdown.
while True:
yield bluelet.sleep(1)
with self.shutdown_lock:
if self.shutdown:
break
# Halt the handler thread.
yield bluelet.kill(handler)
def stop(self):
with self.shutdown_lock:
self.shutdown = True
def run(self):
# Receive on the socket in this thread.
bluelet.run(self.main_coro())
def start_job(self, jobid, func, *args, **kwargs):
# Synchronously send on the socket in the *calling* thread.
with self.ready_condition:
while not self.ready:
self.ready_condition.wait()
bluelet.run(self.send_job(jobid, func, *args, **kwargs))
class RemoteException(Exception):
def __init__(self, error):
self.error = error
def __str__(self):
return '\n' + self.error.strip()
class ClientThread(BaseClientThread):
"""A slightly nicer ClientThread that generates job IDs for you and
raises exceptions when things go wrong on the remote side.
"""
def __init__(self, callback, host=None, port=cw.PORT):
super(ClientThread, self).__init__(self._completion, host, port)
self.app_callback = callback
self.active_jobs = 0
self.remote_exception = None
self.jobs_cond = threading.Condition()
def submit(self, jobid, func, *args, **kwargs):
with self.jobs_cond:
self.active_jobs += 1
self.start_job(jobid, func, *args, **kwargs)
def _completion(self, jobid, success, result):
with self.jobs_cond:
if success:
self.app_callback(jobid, result)
self.active_jobs -= 1
else:
self.remote_exception = RemoteException(result)
self.active_jobs = 0
self.jobs_cond.notify_all()
def wait(self):
"""Block until all outstanding jobs have finished.
"""
with self.jobs_cond:
while self.active_jobs:
self.jobs_cond.wait()
# Raise worker exception on main thread.
exc = self.remote_exception
if exc:
self.remote_exception = None
raise exc
def __enter__(self):
"""Start the context by spinning up a thread.
"""
self.start()
return self
def __exit__(self, exc_type, exc_value, traceback):
"""Stop the context (i.e., call `wait`).
"""
self.wait()
class ClusterExecutor(concurrent.futures.Executor):
def __init__(self, host=None, port=cw.PORT):
self.thread = BaseClientThread(self._completion, host, port)
self.thread.start()
self.futures = {}
self.jobs_lock = threading.Lock()
self.jobs_empty_cond = threading.Condition(self.jobs_lock)
def _completion(self, jobid, success, result):
with self.jobs_lock:
future = self.futures.pop(jobid)
if not self.futures:
self.jobs_empty_cond.notify_all()
if success:
future.set_result(result)
else:
future.set_exception(RemoteException(result))
def submit(self, func, *args, **kwargs):
future = concurrent.futures.Future()
jobid = cw.randid()
with self.jobs_lock:
self.futures[jobid] = future
self.thread.start_job(jobid, func, *args, **kwargs)
return future
def shutdown(self, wait=True):
if wait:
with self.jobs_lock:
if self.futures:
self.jobs_empty_cond.wait()
self.thread.stop()
self.thread.join()
class SlurmExecutor(ClusterExecutor):
def __init__(self):
super(SlurmExecutor, self).__init__(cw.slurm.master_host())
def test():
def square(n):
return n * n
with ClusterExecutor() as executor:
for res in executor.map(square, range(1000)):
print(res)
if __name__ == '__main__':
test()
11462680
import types
from collections.abc import Iterable
from typing import Optional, Tuple, Union
import astromodels
import numba as nb
import numpy as np
from threeML.io.logging import setup_logger
from threeML.plugin_prototype import PluginPrototype
__instrument_name = "n.a."
log = setup_logger(__name__)
_tiny = np.float64(np.finfo(1.).tiny)
class EventObservation(object):
def __init__(
self,
events: np.ndarray,
exposure: float,
start: Union[float, np.ndarray],
stop: Union[float, np.ndarray],
):
self._events = np.array(events)
self._exposure: float = exposure
if isinstance(start, Iterable) or isinstance(stop, Iterable):
assert isinstance(start, Iterable)
assert isinstance(stop, Iterable)
assert len(start) == len(stop)
for i, v in enumerate(start):
assert v < stop[i]
self._start: np.ndarray = start
self._stop: np.ndarray = stop
self._is_multi_interval: bool = True
else:
assert start < stop
self._start: float = float(start)
self._stop: float = float(stop)
self._is_multi_interval: bool = False
self._n_events: int = len(self._events)
log.debug(f"created event observation with")
log.debug(f"{self._start} {self._stop}")
@property
def events(self) -> np.ndarray:
return self._events
@property
def n_events(self) -> int:
return self._n_events
@property
def exposure(self) -> float:
return self._exposure
@property
def start(self) -> Union[float, np.ndarray]:
return self._start
@property
def stop(self) -> Union[float, np.ndarray]:
return self._stop
@property
def is_multi_interval(self) -> bool:
return self._is_multi_interval
class UnbinnedPoissonLike(PluginPrototype):
def __init__(
self,
name: str,
observation: EventObservation,
source_name: Optional[str] = None,
) -> None:
"""
This is a generic likelihood for unbinned Poisson data.
It is very slow for many events.
:param name: the plugin name
:param observation: and EventObservation container
:param source_name: option source name to apply to the source
"""
assert isinstance(observation, EventObservation)
self._observation: EventObservation = observation
        self._source_name: Optional[str] = source_name
self._n_events: int = self._observation.n_events
super(UnbinnedPoissonLike, self).__init__(
name=name, nuisance_parameters={})
def set_model(self, model: astromodels.Model) -> None:
"""
Set the model to be used in the joint minimization. Must be a LikelihoodModel instance.
"""
self._like_model: astromodels.Model = model
# We assume there are no extended sources, since we cannot handle them here
assert self._like_model.get_number_of_extended_sources() == 0, (
"SpectrumLike plugins do not support " "extended sources"
)
# check if we set a source name that the source is in the model
if self._source_name is not None:
assert self._source_name in self._like_model.sources, (
"Source %s is not contained in "
"the likelihood model" % self._source_name
)
differential, integral = self._get_diff_and_integral(self._like_model)
self._integral_model = integral
self._model = differential
def _get_diff_and_integral(
self, likelihood_model: astromodels.Model
) -> Tuple[types.FunctionType, types.FunctionType]:
if self._source_name is None:
n_point_sources = likelihood_model.get_number_of_point_sources()
# Make a function which will stack all point sources (OGIP do not support spatial dimension)
def differential(energies):
fluxes = likelihood_model.get_point_source_fluxes(
0, energies, tag=self._tag
)
# If we have only one point source, this will never be executed
for i in range(1, n_point_sources):
fluxes += likelihood_model.get_point_source_fluxes(
i, energies, tag=self._tag
)
return fluxes
else:
# This SpectrumLike dataset refers to a specific source
# Note that we checked that self._source_name is in the model when the model was set
try:
                def differential(energies):
return likelihood_model.sources[self._source_name](
energies, tag=self._tag
)
except KeyError:
raise KeyError(
"This plugin has been assigned to source %s, "
"which does not exist in the current model" % self._source_name
)
# New way with simpson rule.
# Make sure to not calculate the model twice for the same energies
def integral(e1, e2):
# Simpson's rule
# single energy values given
return (
(e2 - e1)
/ 6.0
* (
differential(e1)
+ 4 * differential((e2 + e1) / 2.0)
+ differential(e2)
)
)
return differential, integral
def get_log_like(self) -> float:
"""
Return the value of the log-likelihood with the current values for the
parameters
"""
n_expected_counts: float = 0.
if self._observation.is_multi_interval:
for start, stop in zip(self._observation.start, self._observation.stop):
n_expected_counts += self._integral_model(start, stop)
else:
n_expected_counts += self._integral_model(
self._observation.start, self._observation.stop
)
M = self._model(self._observation.events) * self._observation.exposure
negative_mask = M < 0
if negative_mask.sum() > 0:
M[negative_mask] = 0.0
# use numba to sum the events
sum_logM = _evaluate_logM_sum(M, self._n_events)
        # Unbinned Poisson log-likelihood: -(expected counts) + sum(log M)
        log_like = -n_expected_counts + sum_logM
        return log_like
def inner_fit(self) -> float:
"""
This is used for the profile likelihood. Keeping fixed all parameters in the
LikelihoodModel, this method minimize the logLike over the remaining nuisance
parameters, i.e., the parameters belonging only to the model for this
particular detector. If there are no nuisance parameters, simply return the
logLike value.
"""
return self.get_log_like()
def get_number_of_data_points(self):
return self._n_events
@nb.njit(fastmath=True)
def _evaluate_logM_sum(M, size):
# Evaluate the logarithm with protection for negative or small
# numbers, using a smooth linear extrapolation (better than just a sharp
# cutoff)
non_tiny_mask = M > 2.0 * _tiny
    tiny_mask = np.logical_not(non_tiny_mask)
    if tiny_mask.sum() > 0:
        logM = np.zeros(size)
        logM[tiny_mask] = (np.abs(M[tiny_mask]) / _tiny) + np.log(_tiny) - 1
        logM[non_tiny_mask] = np.log(M[non_tiny_mask])
else:
logM = np.log(M)
return logM.sum()
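# Minimal construction sketch (hypothetical event times; a real analysis
# would attach an astromodels.Model via set_model() before calling
# get_log_like()):
#
#   obs = EventObservation(events=np.array([1.2, 3.4, 5.6]),
#                          exposure=100.0, start=0.0, stop=10.0)
#   plugin = UnbinnedPoissonLike("events", obs)
#   plugin.get_number_of_data_points()  # -> 3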
11462694
import torch
import torch.nn as nn
def extract(a, t, x_shape):
b, *_ = t.shape
out = a.gather(-1, t).float()
return out.reshape(b, *((1,) * (len(x_shape) - 1)))
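# e.g. with a of shape [T], t of shape [B] and x_shape == (B, C, H, W), this
# returns a [B, 1, 1, 1] tensor that broadcasts over the image batch.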
class DDPMv2(nn.Module):
def __init__(
self,
decoder,
beta_1=1e-4,
beta_2=0.02,
T=1000,
var_type="fixedlarge",
ddpm_latents=None,
):
super().__init__()
self.decoder = decoder
self.T = T
self.beta_1 = beta_1
self.beta_2 = beta_2
self.var_type = var_type
self.ddpm_latents = ddpm_latents
# Main constants
self.register_buffer(
"betas", torch.linspace(self.beta_1, self.beta_2, steps=self.T).double()
)
dev = self.betas.device
alphas = 1.0 - self.betas
alpha_bar = torch.cumprod(alphas, dim=0)
alpha_bar_shifted = torch.cat([torch.tensor([1.0], device=dev), alpha_bar[:-1]])
assert alpha_bar_shifted.shape == torch.Size(
[
self.T,
]
)
        # Auxiliary constants
self.register_buffer("sqrt_alpha_bar", torch.sqrt(alpha_bar))
self.register_buffer("minus_sqrt_alpha_bar", torch.sqrt(1.0 - alpha_bar))
self.register_buffer("sqrt_recip_alphas_cumprod", torch.sqrt(1.0 / alpha_bar))
self.register_buffer(
"sqrt_recipm1_alphas_cumprod", torch.sqrt(1.0 / alpha_bar - 1)
)
# Posterior q(x_t-1|x_t,x_0,t) covariance of the forward process
self.register_buffer(
"post_variance", self.betas * (1.0 - alpha_bar_shifted) / (1.0 - alpha_bar)
)
# Clipping because post_variance is 0 before the chain starts
self.register_buffer(
"post_log_variance_clipped",
torch.log(
torch.cat(
[
torch.tensor([self.post_variance[1]], device=dev),
self.post_variance[1:],
]
)
),
)
# q(x_t-1 | x_t, x_0) mean coefficients
self.register_buffer(
"post_coeff_1",
self.betas * torch.sqrt(alpha_bar_shifted) / (1.0 - alpha_bar),
)
self.register_buffer(
"post_coeff_2",
torch.sqrt(alphas) * (1 - alpha_bar_shifted) / (1 - alpha_bar),
)
self.register_buffer(
"post_coeff_3",
1 - self.post_coeff_2,
)
    def _predict_xstart_from_eps(self, x_t, t, eps, cond=None):
        assert x_t.shape == eps.shape
        # When cond is None, x_hat = 0 broadcasts; only check shapes otherwise.
        x_hat = 0 if cond is None else cond
        if cond is not None:
            assert cond.shape == x_t.shape
return (
extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
- extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_hat
- extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps
)
def get_posterior_mean_covariance(self, x_t, t, clip_denoised=True, cond=None):
B = x_t.size(0)
t_ = torch.full((x_t.size(0),), t, device=x_t.device, dtype=torch.long)
assert t_.shape == torch.Size(
[
B,
]
)
x_hat = 0 if cond is None else cond
# Generate the reconstruction from x_t
x_recons = self._predict_xstart_from_eps(
x_t, t_, self.decoder(x_t, t_, low_res=cond), cond=cond
)
# Clip
if clip_denoised:
x_recons.clamp_(-1.0, 1.0)
# Compute posterior mean from the reconstruction
post_mean = (
extract(self.post_coeff_1, t_, x_t.shape) * x_recons
+ extract(self.post_coeff_2, t_, x_t.shape) * x_t
+ extract(self.post_coeff_3, t_, x_t.shape) * x_hat
)
# Extract posterior variance
p_variance, p_log_variance = {
# for fixedlarge, we set the initial (log-)variance like so
# to get a better decoder log likelihood.
"fixedlarge": (
self.betas,
torch.log(
torch.cat(
[
torch.tensor([self.post_variance[1]], device=x_t.device),
self.betas[1:],
]
)
),
),
"fixedsmall": (
self.post_variance,
self.post_log_variance_clipped,
),
}[self.var_type]
post_variance = extract(p_variance, t_, x_t.shape)
post_log_variance = extract(p_log_variance, t_, x_t.shape)
return post_mean, post_variance, post_log_variance
def sample(self, x_t, cond=None, n_steps=None, checkpoints=[]):
# The sampling process goes here!
x = x_t
B, *_ = x_t.shape
sample_dict = {}
if self.ddpm_latents is not None:
self.ddpm_latents = self.ddpm_latents.to(x_t.device)
num_steps = self.T if n_steps is None else n_steps
checkpoints = [num_steps] if checkpoints == [] else checkpoints
for idx, t in enumerate(reversed(range(0, num_steps))):
z = (
torch.randn_like(x_t)
if self.ddpm_latents is None
else torch.stack([self.ddpm_latents[idx]] * B)
)
(
post_mean,
post_variance,
post_log_variance,
) = self.get_posterior_mean_covariance(
x,
t,
cond=cond,
)
nonzero_mask = (
torch.tensor(t != 0, device=x.device)
.float()
.view(-1, *([1] * (len(x_t.shape) - 1)))
) # no noise when t == 0
# Langevin step!
x = post_mean + nonzero_mask * torch.exp(0.5 * post_log_variance) * z
            if t == 0 and cond is not None:
                # NOTE: In the final step we remove the vae reconstruction bias
                # added to the images as it degrades quality
                x -= cond
# Add results
if idx + 1 in checkpoints:
sample_dict[str(idx + 1)] = x
return sample_dict
def compute_noisy_input(self, x_start, eps, t, low_res=None):
assert eps.shape == x_start.shape
x_hat = 0 if low_res is None else low_res
# Samples the noisy input x_t ~ N(x_t|x_0) in the forward process
return (
x_start * extract(self.sqrt_alpha_bar, t, x_start.shape)
+ x_hat
+ eps * extract(self.minus_sqrt_alpha_bar, t, x_start.shape)
)
def forward(self, x, eps, t, low_res=None):
# Predict noise
x_t = self.compute_noisy_input(x, eps, t, low_res=low_res)
return self.decoder(x_t, t, low_res=low_res)
11462695
import string
from utils import strings
def print_results(file_bin, signature_range_list, output_file):
out = open(output_file, "w")
out.write("=== AVSignSeek ===\n")
for signature_range in signature_range_list:
start = signature_range[0]
end = signature_range[1]
print("[+] Signature between bytes %d and %d" % (start, end))
out.write("[+] Signature between bytes %d and %d\n" % (start, end))
print("[+] Bytes:")
out.write("[+] Bytes:\n")
b = file_bin[start:end]
while len(b) > 0:
row = b[:16]
output_line = "".join(["{:02x} ".format(c) for c in row]).ljust(60)
output_line += "".join([chr(c) if chr(c) in string.printable[:-5] else "." for c in row])
print(output_line)
out.write(output_line + "\n")
b = b[16:]
b = file_bin[start:end]
print("[+] Strings:")
out.write("[+] Strings:\n")
for s in strings(b):
print("> %s" % s)
out.write("> %s\n" % s)
out.close()
11462713
import pynes
from pynes.game import Game
from pynes.bitbag import *
from pynes.nes_types import *
game = Game()
palette = game.assign('palette',
    NesArray([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
              0x0F, 48, 49, 50, 51, 53, 54, 55, 56, 57, 58, 59, 60, 61,
              62, 63])
)
sprite = game.assign('sprite', game.call('define_sprite', [128, 128, 0, 3]))
game.assign('chr_asset', NesChrFile('player.chr'))
game.asmFunction("reset")
game.call('wait_vblank')
game.call('clearmem')
game.call('wait_vblank')
game.call('load_palette', [palette])
game.call('load_sprite', [sprite, 0])
game.asmFunction("joypad1_up")
game.minusAssign(game.call('get_sprite', [0]).y, 1)
#game.asmFunction("joypad1_up")
#game.call(load_sprite(sprite, 0))
#game.asmFunction("reset")
#game.call(wait_vblank())
#game.call(clearmem())
#game.call(wait_vblank())
#game.call(load_palette(palette))
game.press_start()
'''
def waitvblank()
asm.bit(0x2002)
asm.bpl(waitvblank)
sprite = define_sprite(128, 128, 0, 3)
def reset():
global palette, sprite
wait_vblank()
clearmem()
wait_vblank()
load_palette(palette)
load_sprite(sprite, 0)
def joypad1_up():
get_sprite(0).y -= 1
def joypad1_down():
get_sprite(0).y += 1
def joypad1_left():
get_sprite(0).x -=1
def joypad1_right():
get_sprite(0).x +=1
'''
11462738
import numpy as np
import os
import shutil
import tensorflow as tf
import tempfile
from gdmix.io.input_data_pipeline import per_record_input_fn
from gdmix.util import constants
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
num_records = 10
np.random.seed(0)
labels = np.random.randint(2, size=num_records, dtype=np.int32)
weight_shape = 5
weight_length = np.random.randint(weight_shape-1, size=num_records)+1
weight_values = [np.random.random(l).astype(np.float32) for l in weight_length]
weight_indices = [np.sort(np.random.choice(
weight_shape, l, replace=False).astype(np.int64)) for l in weight_length]
f1 = np.arange(num_records, dtype=np.float32) + 10.0
json_string = """
{
"numberOfTrainingSamples": 20,
"features": [
{
"name": "weight",
"dtype": "float",
"shape": [5],
"isSparse": true
},
{
"name": "f1",
"dtype": "float",
"shape": [],
"isSparse": false
}
],
"labels": [
{
"name": "response",
"dtype": "int",
"shape": [],
"isSparse": false
}
]
}
"""
class TestPerRecordInputFn(tf.test.TestCase):
"""Test per_record_input_fn."""
def setUp(self):
self.test_data_dir = tempfile.mkdtemp()
self.fd, self.test_metadata_file = tempfile.mkstemp()
self.num_shards = 2
self.shard_index = 1
self.batch_size = 2
# generate tf record files
self.generate_tfrecords(labels, weight_indices, weight_values, f1,
self.num_shards,
self.test_data_dir)
        # generate metadata file
self.generate_metadata(json_string, self.test_metadata_file)
def tearDown(self):
shutil.rmtree(self.test_data_dir)
os.close(self.fd)
os.remove(self.test_metadata_file)
@staticmethod
def generate_tfrecords(label_tensor, weight_indices_tensor,
weight_value_tensor, f1_tensor,
num_shards, output_dir):
"""
Create tfrecords from a few tensors
:param label_tensor: The tensor representing labels
:param weight_indices_tensor: The indices for the weight (sparse) feature
:param weight_value_tensor: The values for the weight (sparse) feature
:param f1_tensor: A feature tensor
:param num_shards: The number of shards
:param output_dir: The output directory where the tfrecord files are saved.
:return: None
"""
def get_example(w_i, w_v, f, l):
features = tf.train.Features(feature={
'weight_indices': tf.train.Feature(int64_list=tf.train.Int64List(
value=w_i)),
'weight_values': tf.train.Feature(float_list=tf.train.FloatList(
value=w_v)),
'f1': tf.train.Feature(float_list=tf.train.FloatList(
value=[f])),
'response': tf.train.Feature(int64_list=tf.train.Int64List(
value=[l]))
})
return tf.train.Example(features=features)
for s in range(num_shards):
output_filename = os.path.join(output_dir, 'data_{}.tfrecord'.format(s))
with tf.io.TFRecordWriter(output_filename) as writer:
for i in range(len(label_tensor)):
example = get_example(weight_indices_tensor[i],
weight_value_tensor[i] + s,
f1_tensor[i] + s,
label_tensor[i] + s)
writer.write(example.SerializeToString())
@staticmethod
def generate_metadata(metadata, output_file):
"""
Create metadata file from a Json string.
:param metadata:
:param output_file:
:return:
"""
with open(output_file, 'w') as f:
f.write(metadata)
@staticmethod
def generate_sparse_tensors(indices, values, shape, batch_size, shard_idx):
"""
Generate sparse tensor from indices and values.
This is used to check the ones generated from dataset
:param indices: The tensor for all the indices
:param values: The tensor for all the values
:param shape: The dense shape of the sparse tensor
:param batch_size: batch size
:param shard_idx: shard index, added to the value
:return: A list of sparse tensors
"""
length = len(indices)
assert(length % batch_size == 0)
sparse_tensors = []
for i in range(length // batch_size):
sparse_indices = []
sparse_values = []
for j in range(batch_size):
row_idx = i * batch_size + j
curr_indices = indices[row_idx]
curr_values = values[row_idx] + shard_idx
for k in range(len(curr_indices)):
sparse_indices.append([j, curr_indices[k]])
sparse_values += curr_values.tolist()
sparse_tensors.append(tf.sparse.SparseTensor(indices=sparse_indices,
values=sparse_values,
dense_shape=[batch_size, shape]))
return sparse_tensors
def test_input_fn(self):
"""
Test training dataset.
:return: None
"""
batch_size = self.batch_size
d = per_record_input_fn(self.test_data_dir,
self.test_metadata_file,
self.num_shards,
self.shard_index,
batch_size,
constants.TFRECORD)
d_iter = tf.compat.v1.data.make_one_shot_iterator(d)
item = d_iter.get_next()
i = 0
sparse_tensors = self.generate_sparse_tensors(weight_indices,
weight_values, 5, batch_size, self.shard_index)
with self.session() as sess:
sparse_tensors_val = sess.run(sparse_tensors)
try:
while True:
features, response = sess.run(item)
self.assertAllEqual(features['weight'].values, sparse_tensors_val[i].values)
self.assertAllEqual(features['weight'].indices, sparse_tensors_val[i].indices)
self.assertAllEqual(features['f1'], f1[i*batch_size:(i+1)*batch_size]
+ self.shard_index)
self.assertAllEqual(response['response'], labels[i*batch_size:(i+1)*batch_size]
+ self.shard_index)
i += 1
except tf.errors.OutOfRangeError:
pass
self.assertEqual(i, num_records // self.batch_size)
if __name__ == '__main__':
tf.test.main()
11462742
import json
class TestListSwitchHost:
def test_no_args(self, host, add_host, add_switch):
# Add interfaces to our switch and backend
result = host.run('stack add host interface switch-0-0 interface=eth0 network=private')
assert result.rc == 0
result = host.run('stack add host interface backend-0-0 interface=eth0 network=private')
assert result.rc == 0
# Add our backend to the test switch
result = host.run('stack add switch host switch-0-0 host=backend-0-0 interface=eth0 port=1')
assert result.rc == 0
# List the switch hosts
result = host.run('stack list switch host output-format=json')
assert result.rc == 0
assert json.loads(result.stdout) == [{
'host': 'backend-0-0',
'interface': 'eth0',
'mac': None,
'port': 1,
'switch': 'switch-0-0',
'vlan': None
}]
def test_one_arg(self, host, add_host, add_switch):
# Add interfaces to our switch and backend
result = host.run('stack add host interface switch-0-0 interface=eth0 network=private')
assert result.rc == 0
result = host.run('stack add host interface backend-0-0 interface=eth0 network=private')
assert result.rc == 0
# Add our backend to the test switch
result = host.run('stack add switch host switch-0-0 host=backend-0-0 interface=eth0 port=1')
assert result.rc == 0
# List the switch hosts
result = host.run('stack list switch host switch-0-0 output-format=json')
assert result.rc == 0
assert json.loads(result.stdout) == [{
'host': 'backend-0-0',
'interface': 'eth0',
'mac': None,
'port': 1,
'switch': 'switch-0-0',
'vlan': None
}]
def test_skip_interface(self, host, add_host, add_switch):
# Add interfaces to our switch and backend
result = host.run('stack add host interface switch-0-0 interface=eth0 network=private')
assert result.rc == 0
result = host.run('stack add host interface backend-0-0 interface=eth0 network=private')
assert result.rc == 0
# Add our backend to the test switch
result = host.run('stack add switch host switch-0-0 host=backend-0-0 interface=eth0 port=1')
assert result.rc == 0
# Now remove the backend interface, which should now get skipped
result = host.run('stack remove host interface backend-0-0 interface=eth0')
assert result.rc == 0
# List the switch hosts
result = host.run('stack list switch host switch-0-0 output-format=json')
assert result.rc == 0
assert result.stdout == ''
11462746
from umongo import Document, validate
from umongo.fields import StringField
from app import app
instance = app.config["LAZY_UMONGO"]
@instance.register
class Permission(Document):
codename = StringField(
unique=True,
allow_none=False,
required=True,
validate=validate.Regexp(
r'^[a-z\-\.]+$',
error="Field value can contain only 'a'-'z', '.', '-' characters."
)
)
description = StringField(allow_none=True)
class Meta:
indexes = ['$codename', ]
11462764
class DataGridColumn(DependencyObject):
""" Represents a System.Windows.Controls.DataGrid column. """
def CancelCellEdit(self,*args):
"""
CancelCellEdit(self: DataGridColumn,editingElement: FrameworkElement,uneditedValue: object)
Causes the cell being edited to revert to the original,unedited value.
editingElement: The element that the column displays for a cell in editing mode.
uneditedValue: The original,unedited value in the cell being edited.
"""
pass
def CommitCellEdit(self,*args):
"""
CommitCellEdit(self: DataGridColumn,editingElement: FrameworkElement) -> bool
Performs any required validation before exiting cell editing mode.
editingElement: The element that the column displays for a cell in editing mode.
Returns: true if no validation errors are found; otherwise,false.
"""
pass
def GenerateEditingElement(self,*args):
"""
GenerateEditingElement(self: DataGridColumn,cell: DataGridCell,dataItem: object) -> FrameworkElement
When overridden in a derived class,gets an editing element that is bound to the
System.Windows.Controls.DataGridBoundColumn.Binding property value of the column.
cell: The cell that will contain the generated element.
dataItem: The data item that is represented by the row that contains the intended cell.
Returns: A new editing element that is bound to the System.Windows.Controls.DataGridBoundColumn.Binding
property value of the column.
"""
pass
def GenerateElement(self,*args):
"""
GenerateElement(self: DataGridColumn,cell: DataGridCell,dataItem: object) -> FrameworkElement
When overridden in a derived class,gets a read-only element that is bound to the
System.Windows.Controls.DataGridBoundColumn.Binding property value of the column.
cell: The cell that will contain the generated element.
dataItem: The data item that is represented by the row that contains the intended cell.
Returns: A new read-only element that is bound to the System.Windows.Controls.DataGridBoundColumn.Binding
property value of the column.
"""
pass
def GetCellContent(self,*__args):
"""
GetCellContent(self: DataGridColumn,dataGridRow: DataGridRow) -> FrameworkElement
Retrieves the System.Windows.Controls.ContentControl.Content property value for the cell at the
intersection of this column and the specified row.
dataGridRow: The row that contains the intended cell.
Returns: The cell content; or null,if the cell is not found.
GetCellContent(self: DataGridColumn,dataItem: object) -> FrameworkElement
Gets the System.Windows.Controls.ContentControl.Content property value for the cell at the
intersection of this column and the row that represents the specified data item.
dataItem: The data item that is represented by the row that contains the intended cell.
Returns: The cell content; or null,if the cell is not found.
"""
pass
def NotifyPropertyChanged(self,*args):
"""
NotifyPropertyChanged(self: DataGridColumn,propertyName: str)
Notifies the System.Windows.Controls.DataGrid that contains this column that a column property
has changed.
propertyName: The name of the column property that changed.
"""
pass
def OnCoerceIsReadOnly(self,*args):
"""
OnCoerceIsReadOnly(self: DataGridColumn,baseValue: bool) -> bool
Determines the value of the System.Windows.Controls.DataGridColumn.IsReadOnly property based on
the property rules of the System.Windows.Controls.DataGrid that contains this column.
baseValue: The value that was passed to the delegate.
Returns: true if cells in the column cannot be edited based on rules from the
System.Windows.Controls.DataGrid; otherwise,false.
"""
pass
def OnCopyingCellClipboardContent(self,item):
"""
OnCopyingCellClipboardContent(self: DataGridColumn,item: object) -> object
Raises the System.Windows.Controls.DataGridColumn.CopyingCellClipboardContent event.
item: The data context for the selected element.
Returns: An object that represents the content of the cell.
"""
pass
def OnPastingCellClipboardContent(self,item,cellContent):
"""
OnPastingCellClipboardContent(self: DataGridColumn,item: object,cellContent: object)
Raises the System.Windows.Controls.DataGridColumn.PastingCellClipboardContent event.
item: The data context for the selected element.
cellContent: The content to paste into the cell.
"""
pass
def PrepareCellForEdit(self,*args):
"""
PrepareCellForEdit(self: DataGridColumn,editingElement: FrameworkElement,editingEventArgs: RoutedEventArgs) -> object
When overridden in a derived class,sets cell content as needed for editing.
editingElement: The element that the column displays for a cell in editing mode.
editingEventArgs: Information about the user gesture that is causing a cell to enter editing mode.
Returns: When returned by a derived class,the unedited cell value. This implementation returns null in
all cases.
"""
pass
def RefreshCellContent(self,*args):
"""
RefreshCellContent(self: DataGridColumn,element: FrameworkElement,propertyName: str)
When overridden in a derived class,updates the contents of a cell in the column in response to
a column property value that changed.
element: The cell to update.
propertyName: The name of the column property that changed.
"""
pass
ActualWidth=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the current width of the column,in device-independent units (1/96th inch per unit).
Get: ActualWidth(self: DataGridColumn) -> float
"""
CanUserReorder=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value that indicates whether the user can change the column display position by dragging the column header.
Get: CanUserReorder(self: DataGridColumn) -> bool
Set: CanUserReorder(self: DataGridColumn)=value
"""
CanUserResize=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value that indicates whether the user can adjust the column width by using the mouse.
Get: CanUserResize(self: DataGridColumn) -> bool
Set: CanUserResize(self: DataGridColumn)=value
"""
CanUserSort=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value that indicates whether the user can sort the column by clicking the column header.
Get: CanUserSort(self: DataGridColumn) -> bool
Set: CanUserSort(self: DataGridColumn)=value
"""
CellStyle=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the style that is used to render cells in the column.
Get: CellStyle(self: DataGridColumn) -> Style
Set: CellStyle(self: DataGridColumn)=value
"""
ClipboardContentBinding=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the binding object to use when getting or setting cell content for the clipboard.
Get: ClipboardContentBinding(self: DataGridColumn) -> BindingBase
Set: ClipboardContentBinding(self: DataGridColumn)=value
"""
DataGridOwner=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the System.Windows.Controls.DataGrid control that contains this column.
"""
DisplayIndex=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the display position of the column relative to the other columns in the System.Windows.Controls.DataGrid.
Get: DisplayIndex(self: DataGridColumn) -> int
Set: DisplayIndex(self: DataGridColumn)=value
"""
DragIndicatorStyle=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the style object to apply to the column header during a drag operation.
Get: DragIndicatorStyle(self: DataGridColumn) -> Style
Set: DragIndicatorStyle(self: DataGridColumn)=value
"""
Header=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the content of the column header.
Get: Header(self: DataGridColumn) -> object
Set: Header(self: DataGridColumn)=value
"""
HeaderStringFormat=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the format pattern to apply to the content of the column header.
Get: HeaderStringFormat(self: DataGridColumn) -> str
Set: HeaderStringFormat(self: DataGridColumn)=value
"""
HeaderStyle=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the style that is used when rendering the column header.
Get: HeaderStyle(self: DataGridColumn) -> Style
Set: HeaderStyle(self: DataGridColumn)=value
"""
HeaderTemplate=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the template that defines the visual representation of the column header.
Get: HeaderTemplate(self: DataGridColumn) -> DataTemplate
Set: HeaderTemplate(self: DataGridColumn)=value
"""
HeaderTemplateSelector=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the object that selects which template to use for the column header.
Get: HeaderTemplateSelector(self: DataGridColumn) -> DataTemplateSelector
Set: HeaderTemplateSelector(self: DataGridColumn)=value
"""
IsAutoGenerated=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value that indicates whether the column is auto-generated.
Get: IsAutoGenerated(self: DataGridColumn) -> bool
"""
IsFrozen=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value that indicates whether the column is prevented from scrolling horizontally.
Get: IsFrozen(self: DataGridColumn) -> bool
"""
IsReadOnly=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value that indicates whether cells in the column can be edited.
Get: IsReadOnly(self: DataGridColumn) -> bool
Set: IsReadOnly(self: DataGridColumn)=value
"""
MaxWidth=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the maximum width constraint of the column.
Get: MaxWidth(self: DataGridColumn) -> float
Set: MaxWidth(self: DataGridColumn)=value
"""
MinWidth=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the minimum width constraint of the column.
Get: MinWidth(self: DataGridColumn) -> float
Set: MinWidth(self: DataGridColumn)=value
"""
SortDirection=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the sort direction (ascending or descending) of the column.
Get: SortDirection(self: DataGridColumn) -> Nullable[ListSortDirection]
Set: SortDirection(self: DataGridColumn)=value
"""
SortMemberPath=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a property name,or a period-delimited hierarchy of property names,that indicates the member to sort by.
Get: SortMemberPath(self: DataGridColumn) -> str
Set: SortMemberPath(self: DataGridColumn)=value
"""
Visibility=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the visibility of the column.
Get: Visibility(self: DataGridColumn) -> Visibility
Set: Visibility(self: DataGridColumn)=value
"""
Width=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the column width or automatic sizing mode.
Get: Width(self: DataGridColumn) -> DataGridLength
Set: Width(self: DataGridColumn)=value
"""
ActualWidthProperty=None
CanUserReorderProperty=None
CanUserResizeProperty=None
CanUserSortProperty=None
CellStyleProperty=None
CopyingCellClipboardContent=None
DisplayIndexProperty=None
DragIndicatorStyleProperty=None
HeaderProperty=None
HeaderStringFormatProperty=None
HeaderStyleProperty=None
HeaderTemplateProperty=None
HeaderTemplateSelectorProperty=None
IsAutoGeneratedProperty=None
IsFrozenProperty=None
IsReadOnlyProperty=None
MaxWidthProperty=None
MinWidthProperty=None
PastingCellClipboardContent=None
SortDirectionProperty=None
SortMemberPathProperty=None
VisibilityProperty=None
WidthProperty=None
|
11462770
|
import tensorflow as tf
import numpy as np
def fc_op(input_op, name, n_out, layer_collector, act_func=tf.nn.leaky_relu):
n_in = input_op.get_shape()[-1].value
with tf.name_scope(name) as scope:
kernel = tf.Variable(tf.contrib.layers.xavier_initializer()([n_in, n_out]), dtype=tf.float32, name=scope + "w")
# kernel = tf.Variable(tf.random_normal([n_in, n_out]))
biases = tf.Variable(tf.constant(0, shape=[1, n_out], dtype=tf.float32), name=scope + 'b')
fc = tf.add(tf.matmul(input_op, kernel), biases)
activation = act_func(fc, name=scope + 'act')
layer_collector.append([kernel, biases])
return activation
class SDNE(object):
def __init__(self, graph, encoder_layer_list, alpha=1e-6, beta=5., nu1=1e-5, nu2=1e-4,
batch_size=200, epoch=100, learning_rate=None):
"""
        encoder_layer_list: a list with the number of neurons at each encoder layer; the last number is
        the dimension of the output node representation.
        E.g.:
        if the node size is 2000 and encoder_layer_list=[1000, 128], the whole neural network is
        2000(input)->1000->128->1000->2000, and SDNE extracts the middle layer as the node representation.
"""
self.g = graph
self.node_size = self.g.G.number_of_nodes()
self.dim = encoder_layer_list[-1]
self.encoder_layer_list = [self.node_size]
self.encoder_layer_list.extend(encoder_layer_list)
self.encoder_layer_num = len(encoder_layer_list)+1
self.alpha = alpha
self.beta = beta
self.nu1 = nu1
self.nu2 = nu2
self.bs = batch_size
self.epoch = epoch
self.max_iter = (epoch * self.node_size) // batch_size
self.lr = learning_rate
if self.lr is None:
self.lr = tf.train.inverse_time_decay(0.03, self.max_iter, decay_steps=1, decay_rate=0.9999)
self.sess = tf.Session()
self.vectors = {}
self.adj_mat = self.getAdj()
self.embeddings = self.train()
look_back = self.g.look_back_list
for i, embedding in enumerate(self.embeddings):
self.vectors[look_back[i]] = embedding
def getAdj(self):
node_size = self.g.node_size
look_up = self.g.look_up_dict
adj = np.zeros((node_size, node_size))
for edge in self.g.G.edges():
adj[look_up[edge[0]]][look_up[edge[1]]] = self.g.G[edge[0]][edge[1]]['weight']
return adj
def train(self):
adj_mat = self.adj_mat
AdjBatch = tf.placeholder(tf.float32, [None, self.node_size], name='adj_batch')
Adj = tf.placeholder(tf.float32, [None, None], name='adj_mat')
B = tf.placeholder(tf.float32, [None, self.node_size], name='b_mat')
fc = AdjBatch
scope_name = 'encoder'
layer_collector = []
with tf.name_scope(scope_name):
for i in range(1, self.encoder_layer_num):
fc = fc_op(fc,
name=scope_name+str(i),
n_out=self.encoder_layer_list[i],
layer_collector=layer_collector)
_embeddings = fc
scope_name = 'decoder'
with tf.name_scope(scope_name):
for i in range(self.encoder_layer_num-2, 0, -1):
fc = fc_op(fc,
name=scope_name+str(i),
n_out=self.encoder_layer_list[i],
layer_collector=layer_collector)
fc = fc_op(fc,
name=scope_name+str(0),
n_out=self.encoder_layer_list[0],
layer_collector=layer_collector,)
_embeddings_norm = tf.reduce_sum(tf.square(_embeddings), 1, keepdims=True)
L_1st = tf.reduce_sum(
Adj * (
_embeddings_norm - 2 * tf.matmul(
_embeddings, tf.transpose(_embeddings)
) + tf.transpose(_embeddings_norm)
)
)
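        # L_1st uses the identity ||e_i - e_j||^2 = ||e_i||^2 - 2 e_i.e_j + ||e_j||^2
        # to form all pairwise squared embedding distances, weighted by the adjacency matrix.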
L_2nd = tf.reduce_sum(tf.square((AdjBatch - fc) * B))
L = L_2nd + self.alpha * L_1st
for param in layer_collector:
L += self.nu1 * tf.reduce_sum(tf.abs(param[0])) + self.nu2 * tf.reduce_sum(tf.square(param[0]))
optimizer = tf.train.AdamOptimizer(self.lr)
train_op = optimizer.minimize(L)
init = tf.global_variables_initializer()
self.sess.run(init)
print("total iter: %i" % self.max_iter)
for step in range(self.max_iter):
index = np.random.randint(self.node_size, size=self.bs)
adj_batch_train = adj_mat[index, :]
adj_mat_train = adj_batch_train[:, index]
b_mat_train = np.ones_like(adj_batch_train)
b_mat_train[adj_batch_train != 0] = self.beta
self.sess.run(train_op, feed_dict={AdjBatch: adj_batch_train,
Adj: adj_mat_train,
B: b_mat_train})
if step % 50 == 0:
l, l1, l2 = self.sess.run((L, L_1st, L_2nd),
feed_dict={AdjBatch: adj_batch_train,
Adj: adj_mat_train,
B: b_mat_train})
print("step %i: total loss: %s, l1 loss: %s, l2 loss: %s" % (step, l, l1, l2))
return self.sess.run(_embeddings, feed_dict={AdjBatch: adj_mat})
def save_embeddings(self, filename):
fout = open(filename, 'w')
node_num = len(self.vectors)
fout.write("{} {}\n".format(node_num, self.dim))
for node, vec in self.vectors.items():
fout.write("{} {}\n".format(node, ' '.join([str(x) for x in vec])))
fout.close()
class SDNE2(object):
def __init__(self, graph, encoder_layer_list, alpha=1e-6, beta=5., nu1=1e-5, nu2=1e-5,
batch_size=100, max_iter=2000, learning_rate=None):
self.g = graph
self.node_size = self.g.G.number_of_nodes()
self.rep_size = encoder_layer_list[-1]
self.encoder_layer_list = [self.node_size] + encoder_layer_list
self.encoder_layer_num = len(encoder_layer_list)+1
self.alpha = alpha
self.beta = beta
self.nu1 = nu1
self.nu2 = nu2
self.bs = batch_size
self.max_iter = max_iter
self.lr = learning_rate
if self.lr is None:
self.lr = tf.train.inverse_time_decay(0.1, self.max_iter, decay_steps=1, decay_rate=0.9999)
self.sess = tf.Session()
self.vectors = {}
self.adj_mat = self.getAdj()
self.deg_vec = np.sum(self.adj_mat, axis=1)
self.embeddings = self.get_train()
look_back = self.g.look_back_list
for i, embedding in enumerate(self.embeddings):
self.vectors[look_back[i]] = embedding
def getAdj(self):
node_size = self.g.node_size
look_up = self.g.look_up_dict
adj = np.zeros((node_size, node_size))
for edge in self.g.G.edges():
adj[look_up[edge[0]]][look_up[edge[1]]] = self.g.G[edge[0]][edge[1]]['weight']
return adj
def model(self, node, layer_collector, scope_name):
fc = node
with tf.name_scope(scope_name + 'encoder'):
for i in range(1, self.encoder_layer_num):
fc = fc_op(fc,
name=scope_name+str(i),
n_out=self.encoder_layer_list[i],
layer_collector=layer_collector)
_embeddings = fc
with tf.name_scope(scope_name + 'decoder'):
for i in range(self.encoder_layer_num-2, -1, -1):
fc = fc_op(fc,
name=scope_name+str(i),
n_out=self.encoder_layer_list[i],
layer_collector=layer_collector)
return _embeddings, fc
def generate_batch(self, shuffle=True):
adj = self.adj_mat
row_indices, col_indices = adj.nonzero()
sample_index = np.arange(row_indices.shape[0])
num_of_batches = row_indices.shape[0] // self.bs
counter = 0
if shuffle:
np.random.shuffle(sample_index)
while True:
batch_index = sample_index[self.bs * counter:self.bs * (counter + 1)]
nodes_a = adj[row_indices[batch_index], :]
nodes_b = adj[col_indices[batch_index], :]
weights = adj[row_indices[batch_index], col_indices[batch_index]]
weights = np.reshape(weights, [-1, 1])
beta_mask_a = np.ones_like(nodes_a)
beta_mask_a[nodes_a != 0] = self.beta
beta_mask_b = np.ones_like(nodes_b)
beta_mask_b[nodes_b != 0] = self.beta
if counter == num_of_batches:
counter = 0
np.random.shuffle(sample_index)
else:
counter += 1
yield (nodes_a, nodes_b, beta_mask_a, beta_mask_b, weights)
def get_train(self):
NodeA = tf.placeholder(tf.float32, [None, self.node_size], name='node_a')
BmaskA = tf.placeholder(tf.float32, [None, self.node_size], name='beta_mask_a')
NodeB = tf.placeholder(tf.float32, [None, self.node_size], name='node_b')
BmaskB = tf.placeholder(tf.float32, [None, self.node_size], name='beta_mask_b')
Weights = tf.placeholder(tf.float32, [None, 1], name='adj_weights')
layer_collector = []
nodes = tf.concat([NodeA, NodeB], axis=0)
bmasks = tf.concat([BmaskA, BmaskB], axis=0)
emb, recons = self.model(nodes, layer_collector, 'reconstructor')
embs = tf.split(emb, num_or_size_splits=2, axis=0)
L_1st = tf.reduce_sum(Weights * (tf.reduce_sum(tf.square(embs[0] - embs[1]), axis=1)))
L_2nd = tf.reduce_sum(tf.square((nodes - recons) * bmasks))
L = L_2nd + self.alpha * L_1st
for param in layer_collector:
L += self.nu1 * tf.reduce_sum(tf.abs(param[0])) + self.nu2 * tf.reduce_sum(tf.square(param[0]))
# lr = tf.train.exponential_decay(1e-6, self.max_iter, decay_steps=1, decay_rate=0.9999)
# optimizer = tf.train.MomentumOptimizer(lr, 0.99, use_nesterov=True)
optimizer = tf.train.AdamOptimizer(self.lr)
train_op = optimizer.minimize(L)
init = tf.global_variables_initializer()
self.sess.run(init)
generator = self.generate_batch()
for step in range(self.max_iter+1):
nodes_a, nodes_b, beta_mask_a, beta_mask_b, weights = generator.__next__()
feed_dict = {NodeA: nodes_a,
NodeB: nodes_b,
BmaskA: beta_mask_a,
BmaskB: beta_mask_b,
Weights: weights}
self.sess.run(train_op, feed_dict=feed_dict)
if step % 50 == 0:
print("step %i: %s" % (step, self.sess.run([L, L_1st, L_2nd], feed_dict=feed_dict)))
return self.sess.run(emb, feed_dict={NodeA: self.adj_mat[0:1, :], NodeB: self.adj_mat[1:, :]})
def save_embeddings(self, filename):
fout = open(filename, 'w')
node_num = len(self.vectors)
fout.write("{} {}\n".format(node_num, self.rep_size))
for node, vec in self.vectors.items():
fout.write("{} {}\n".format(node, ' '.join([str(x) for x in vec])))
fout.close()
|
11462799
|
import RPi.GPIO as GPIO
import time
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(17,GPIO.OUT)
servo=GPIO.PWM(17,50)
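# At 50 Hz, duty cycles of roughly 2-12% map to about 0-180 degrees on a
# typical hobby servo (an assumption; calibrate the range for your hardware).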
servo.start(0)
time.sleep(1)
duty =2
while duty<=12:
servo.ChangeDutyCycle(duty)
time.sleep(1)
duty=duty+1
time.sleep(2)
while duty>=0:
servo.ChangeDutyCycle(duty)
time.sleep(0.05)
duty=duty-1
servo.stop()
GPIO.cleanup()
print('done')
|
11462805
|
import inspect
class Player(object):
"""A class for a player in the tournament.
This is an abstract base class, not intended to be used directly.
"""
name = "Player"
def __init__(self):
"""Initiates an empty history and 0 score for a player."""
self.history = []
self.stochastic = "random" in inspect.getsource(self.__class__)
if self.name == "Player":
self.stochastic = False
def __repr__(self):
"""The string method for the strategy."""
return self.name
def strategy(self, opponent):
"""This is a placeholder strategy."""
return None
def play(self, opponent):
"""This pits two players against each other."""
s1, s2 = self.strategy(opponent), opponent.strategy(self)
self.history.append(s1)
opponent.history.append(s2)
def reset(self):
"""Resets history.
When creating strategies that create new attributes then this method should be
re-written (in the inherited class) and should not only reset history but also
rest all other attributes.
"""
self.history = []
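# A minimal subclass sketch (hypothetical Cooperator strategy for illustration):
# class Cooperator(Player):
#     name = "Cooperator"
#     def strategy(self, opponent):
#         return "C"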
|
11462874
|
def _get_default_options():
"""
Returns a dictionary with the available compiler options and the default values
Returns:
default_options (Dict)
"""
return {
'library_folders': [],
'verbose': False,
'check_balanced': True,
'mtime_check': True,
'cache': False,
'codegen': False,
'expand_mx': False,
'unroll_loops': True,
'inline_functions': True,
'expand_vectors': False,
'resolve_parameter_values': False,
'replace_parameter_expressions': False,
'replace_constant_expressions': False,
'eliminate_constant_assignments': False,
'replace_parameter_values': False,
'replace_constant_values': False,
'eliminable_variable_expression': None,
'factor_and_simplify_equations': False,
'detect_aliases': False,
'allow_derivative_aliases': True,
'reduce_affine_expression': False,
}
def _merge_default_options(options):
if options is None:
return _get_default_options()
elif isinstance(options, dict):
default_options = _get_default_options()
default_options.update(options)
return default_options
else:
raise TypeError('options must be of type dict')
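# A hedged usage sketch: override a few defaults while keeping the rest.
# options = _merge_default_options({'verbose': True, 'cache': True})
# assert options['verbose'] and options['unroll_loops']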
|
11462929
|
import os
from importlib import import_module
from uploader import AbstractUploader
class Uploader:
@staticmethod
def get(server: str, path: str) -> AbstractUploader:
return import_module(f'uploader.{server}').Uploader(path)
    @staticmethod
    def server() -> tuple:
        uploader_dir = os.path.dirname(__file__) + '/uploader'
        return tuple(os.path.splitext(x)[0] for x in os.listdir(uploader_dir)
                     if os.path.splitext(x)[1].lower() == '.py' and x != '__init__.py')
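# A hedged usage sketch ('s3' is a hypothetical server module name):
# up = Uploader.get('s3', '/tmp/file.bin')
# print(Uploader.server())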
|
11462955
|
import tsensor
import numpy as np
def f():
# Currently can't handle double assign
a = b = np.ones(1) @ np.ones(2)
def A():
with tsensor.clarify():
f()
def test_nested():
msg = ""
try:
A()
except BaseException as e:
msg = e.args[0]
expected = "matmul: Input operand 1 has a mismatch in its core dimension 0, with gufunc signature (n?,k),(k,m?)->(n?,m?) (size 2 is different from 1)"
assert msg==expected
|
11462981
|
import numpy as np
import pdb
class Resampling:
"""
References: Thrun, Sebastian, <NAME>, and <NAME>. Probabilistic robotics. MIT press, 2005.
[Chapter 4.3]
"""
def __init__(self):
"""
TODO : Initialize resampling process parameters here
"""
    def multinomial_sampler(self, X_bar):
        """
        param[in] X_bar : [num_particles x 4] sized array containing [x, y, theta, wt] values for all particles
        param[out] X_bar_resampled : [num_particles x 4] sized array containing [x, y, theta, wt] values for resampled set of particles
        """
        # Minimal sketch (an assumption: the weights in column 3 are non-negative):
        # draw particle indices in proportion to their normalized weights.
        num_particles = X_bar.shape[0]
        weights = X_bar[:, 3] / np.sum(X_bar[:, 3])
        indices = np.random.choice(num_particles, size=num_particles, p=weights)
        X_bar_resampled = X_bar[indices]
        return X_bar_resampled
    def low_variance_sampler(self, X_bar):
        """
        param[in] X_bar : [num_particles x 4] sized array containing [x, y, theta, wt] values for all particles
        param[out] X_bar_resampled : [num_particles x 4] sized array containing [x, y, theta, wt] values for resampled set of particles
        """
        # Minimal sketch of the low-variance sampler from [Chapter 4.3]: one random offset, then systematic strides through the weight CDF.
        num_particles = X_bar.shape[0]
        weights = X_bar[:, 3] / np.sum(X_bar[:, 3])
        X_bar_resampled = np.zeros_like(X_bar)
        r = np.random.uniform(0, 1.0 / num_particles)
        c, i = weights[0], 0
        for m in range(num_particles):
            u = r + m * (1.0 / num_particles)
            while u > c and i < num_particles - 1:
                i += 1
                c += weights[i]
            X_bar_resampled[m] = X_bar[i]
        return X_bar_resampled
if __name__ == "__main__":
pass
|
11462986
|
r"""
Diametrically point loaded 2-D disk. See :ref:`sec-primer`.
Find :math:`\ul{u}` such that:
.. math::
\int_{\Omega} D_{ijkl}\ e_{ij}(\ul{v}) e_{kl}(\ul{u})
= 0
\;, \quad \forall \ul{v} \;,
where
.. math::
D_{ijkl} = \mu (\delta_{ik} \delta_{jl}+\delta_{il} \delta_{jk}) +
\lambda \ \delta_{ij} \delta_{kl}
\;.
"""
from __future__ import absolute_import
from sfepy.mechanics.matcoefs import stiffness_from_youngpoisson
from sfepy.discrete.fem.utils import refine_mesh
from sfepy import data_dir
# Fix the mesh file name if you run this file outside the SfePy directory.
filename_mesh = data_dir + '/meshes/2d/its2D.mesh'
refinement_level = 0
filename_mesh = refine_mesh(filename_mesh, refinement_level)
output_dir = '.' # set this to a valid directory you have write access to
young = 2000.0 # Young's modulus [MPa]
poisson = 0.4 # Poisson's ratio
options = {
'output_dir' : output_dir,
}
regions = {
'Omega' : 'all',
'Left' : ('vertices in (x < 0.001)', 'facet'),
'Bottom' : ('vertices in (y < 0.001)', 'facet'),
'Top' : ('vertex 2', 'vertex'),
}
materials = {
'Asphalt' : ({'D': stiffness_from_youngpoisson(2, young, poisson)},),
'Load' : ({'.val' : [0.0, -1000.0]},),
}
fields = {
'displacement': ('real', 'vector', 'Omega', 1),
}
equations = {
'balance_of_forces' :
"""dw_lin_elastic.2.Omega(Asphalt.D, v, u)
= dw_point_load.0.Top(Load.val, v)""",
}
variables = {
'u' : ('unknown field', 'displacement', 0),
'v' : ('test field', 'displacement', 'u'),
}
ebcs = {
'XSym' : ('Bottom', {'u.1' : 0.0}),
'YSym' : ('Left', {'u.0' : 0.0}),
}
solvers = {
'ls' : ('ls.scipy_direct', {}),
'newton' : ('nls.newton', {
'i_max' : 1,
'eps_a' : 1e-6,
}),
}
|
11463005
|
from html.parser import HTMLParser
class MyHTMLParser(HTMLParser):
def handle_starttag(self, tag, attrs):
print(tag)
for li in attrs:
print('->', li[0], '>', li[1])
if __name__ == '__main__':
n = int(input())
s = ''
for _ in range(n):
t = input()
s += t
obj = MyHTMLParser()
obj.feed(s)
|
11463011
|
import sys
import sqlite
# The shared connection object
cx = None
def getCon():
# All code gets the connection object via this function
global cx
return cx
def createSchema():
# Create the schema and make sure we're not accessing an old, incompatible schema
cu = getCon().cursor()
cu.execute("select tbl_name from sqlite_master where type='table' order by tbl_name")
tables = []
for row in cu.fetchall():
tables.append(row.tbl_name)
if tables != ["customer", "orders"]:
if tables == []:
# ok, database is empty
cu.execute("""
create table customer (
cust_id integer primary key,
cust_firstname text not null,
cust_lastname text not null,
cust_no text not null
)
""")
cu.execute("""
create table orders (
ord_id integer primary key,
ord_customer int,
ord_item text not null,
ord_quantity integer
)
""")
getCon().commit()
else:
print "We have an unknown schema here. Please fix manually."
sys.exit(1)
def createCustomer(firstname, lastname, customerNo):
# Create a new customer and return the primary key id.
cu = getCon().cursor()
cu.execute("""
insert into customer(cust_firstname, cust_lastname, cust_no)
values (%s, %s, %s)
""", (firstname, lastname, customerNo))
getCon().commit()
return cu.lastrowid
def createOrder(cust_id, ord_item, ord_quantity):
# Create a new order for the customer identified by cust_id and return the
# primary key of the created order row.
cu = getCon().cursor()
cu.execute("""
insert into orders (ord_customer, ord_item, ord_quantity)
values (%s, %s, %s)
""", (cust_id, ord_item, ord_quantity))
getCon().commit()
return cu.lastrowid
def deleteOrder(ord_id):
# Delete an order.
cu = getCon().cursor()
cu.execute("delete from order where ord_id=%s", (ord_id,))
getCon().commit()
def deleteCustomer(cust_id):
# Delete the customer identified by cust_id and all its orders (recursive
# delete).
# So now, finally, here we have an example where you *really* need
# transactions. We either want this to happen all or not at all. So all of
# these SQL statements need to be atomic, i. e. we need a transaction here.
# This will send the BEGIN to SQLite, as soon as the first non-SELECT is
# sent.
cu = getCon().cursor()
# So, before the next 'delete' statement, a 'BEGIN' is sent
cu.execute("delete from orders where ord_customer=%s", (cust_id,))
cu.execute("delete from customer where cust_id=%s", (cust_id,))
# This will send the "COMMIT" statement to the library.
getCon().commit()
def main():
global cx
cx = sqlite.connect("customerdb")
createSchema()
# Create a customer
cust_id = createCustomer("Jane", "Doe", "JD0001")
# Create two orders for the customer
ord_id = createOrder(cust_id, "White Towel", 2)
ord_id = createOrder(cust_id, "Blue Cup", 5)
# Delete the customer, and all her orders.
deleteCustomer(cust_id)
cx.close()
if __name__ == "__main__":
main()
|
11463014
|
from typing import Optional
from platypush.message.event import Event
from platypush.plugins.mail import Mail
class MailEvent(Event):
def __init__(self, mailbox: str, message: Optional[Mail] = None, *args, **kwargs):
super().__init__(*args, mailbox=mailbox, message=message or {}, **kwargs)
class MailReceivedEvent(MailEvent):
"""
Triggered when a new email is received.
"""
pass
class MailSeenEvent(MailEvent):
"""
Triggered when a previously unseen email is seen.
"""
pass
class MailFlaggedEvent(MailEvent):
"""
Triggered when a message is marked as flagged/starred.
"""
pass
class MailUnflaggedEvent(MailEvent):
"""
Triggered when a message previously marked as flagged/starred is unflagged.
"""
pass
# vim:sw=4:ts=4:et:
|
11463039
|
import numpy as np
from gym_flock.envs.flocking.flocking_relative import FlockingRelativeEnv
from gym_flock.envs.flocking.utils import grid
class FlockingTwoFlocksEnv(FlockingRelativeEnv):
def reset(self):
self.x = np.zeros((self.n_agents, self.nx_system))
# grids, vels = twoflocks(self.n_agents, delta=self.n_agents/10*0.8+0.25, side=5)
# self.x[:, 0:2] = grids
# self.x[:, 2:4] = vels * 0.25
# self.x[:, 2] = np.random.uniform(low=-self.v_max*0.25, high=self.v_max*0.25, size=(self.n_agents,))
bias = np.random.uniform(low=-self.v_bias/2.0, high=self.v_bias/2.0, size=(2,))
scale = 0.1
grids = grid(self.n_agents, side=int(self.n_agents/10))
self.x[:, 0:2] = grids
self.x[:, 2:4] = -grids
self.x[:, 2] = self.x[:, 2] + bias[0]
self.x[:, 3] = self.x[:, 3] + bias[1]
self.mean_vel = np.mean(self.x[:, 2:4], axis=0)
self.init_vel = self.x[:, 2:4]
self.compute_helpers()
return (self.state_values, self.state_network)
|
11463043
|
import numpy as np
import networkx as nx
import cPickle as cp
import random
import ctypes
import os
import sys
from tqdm import tqdm
sys.path.append( '%s/tsp2d_lib' % os.path.dirname(os.path.realpath(__file__)) )
from tsp2d_lib import Tsp2dLib
n_valid = 100
def find_model_file(opt):
max_n = int(opt['max_n'])
min_n = int(opt['min_n'])
log_file = None
if max_n < 100:
return None
if min_n == 100 and max_n == 200:
n1 = 50
n2 = 100
else:
n1 = min_n - 100
n2 = max_n - 100
log_file = '%s/log-%d-%d.txt' % (opt['save_dir'], n1, n2)
if not os.path.isfile(log_file):
return None
best_r = 1000000
best_it = -1
with open(log_file, 'r') as f:
for line in f:
if 'average' in line:
line = line.split(' ')
it = int(line[1].strip())
r = float(line[-1].strip())
if r < best_r:
best_r = r
best_it = it
if best_it < 0:
return None
return '%s/nrange_%d_%d_iter_%d.model' % (opt['save_dir'], n1, n2, best_it)
def PrepareGraphs(isValid):
if isValid:
n_graphs = 100
prefix = 'validation_tsp2d'
else:
n_graphs = 10000
prefix = 'train_tsp2d'
folder = '%s/%s/tsp_min-n=%s_max-n=%s_num-graph=%d_type=%s' % (opt['data_root'], prefix, opt['min_n'], opt['max_n'], n_graphs, opt['g_type'])
with open('%s/paths.txt' % folder, 'r') as f:
for line in tqdm(f):
fname = '%s/%s' % (folder, line.split('/')[-1].strip())
coors = {}
in_sec = False
n_nodes = -1
with open(fname, 'r') as f_tsp:
for l in f_tsp:
if 'DIMENSION' in l:
n_nodes = int(l.split(' ')[-1].strip())
if in_sec:
idx, x, y = [int(w.strip()) for w in l.split(' ')]
coors[idx - 1] = [float(x) / 1000000.0, float(y) / 1000000.0]
assert len(coors) == idx
elif 'NODE_COORD_SECTION' in l:
in_sec = True
assert len(coors) == n_nodes
g = nx.Graph()
g.add_nodes_from(range(n_nodes))
nx.set_node_attributes(g, 'pos', coors)
api.InsertGraph(g, is_test=isValid)
if __name__ == '__main__':
api = Tsp2dLib(sys.argv)
opt = {}
for i in range(1, len(sys.argv), 2):
opt[sys.argv[i][1:]] = sys.argv[i + 1]
model_file = find_model_file(opt)
if model_file is not None:
print 'loading', model_file
sys.stdout.flush()
api.LoadModel(model_file)
PrepareGraphs(isValid=True)
PrepareGraphs(isValid=False)
# startup
for i in range(10):
api.lib.PlayGame(100, ctypes.c_double(1.0))
api.TakeSnapshot()
eps_start = 1.0
eps_end = 1.0
eps_step = 10000.0
api.lib.SetSign(1)
lr = float(opt['learning_rate'])
for iter in range(int(opt['max_iter'])):
eps = eps_end + max(0., (eps_start - eps_end) * (eps_step - iter) / eps_step)
if iter % 10 == 0:
api.lib.PlayGame(10, ctypes.c_double(eps))
if iter % 100 == 0:
frac = 0.0
for idx in range(n_valid):
frac += api.lib.Test(idx)
print 'iter', iter, 'lr', lr, 'eps', eps, 'average tour length: ', frac / n_valid
sys.stdout.flush()
model_path = '%s/nrange_%d_%d_iter_%d.model' % (opt['save_dir'], int(opt['min_n']), int(opt['max_n']), iter)
api.SaveModel(model_path)
if iter % 1000 == 0:
api.TakeSnapshot()
lr = lr * 0.95
api.lib.Fit(ctypes.c_double(lr))
|
11463108
|
import numpy as np
a = np.array([0, 0, 30, 10, 10, 20])
print(a)
# [ 0 0 30 10 10 20]
print(np.unique(a))
# [ 0 10 20 30]
print(type(np.unique(a)))
# <class 'numpy.ndarray'>
l = [0, 0, 30, 10, 10, 20]
print(l)
# [0, 0, 30, 10, 10, 20]
print(np.unique(l))
# [ 0 10 20 30]
print(type(np.unique(l)))
# <class 'numpy.ndarray'>
print(np.unique(a).size)
# 4
print(len(np.unique(a)))
# 4
u, counts = np.unique(a, return_counts=True)
print(u)
# [ 0 10 20 30]
print(counts)
# [2 2 1 1]
print(u[counts == 1])
# [20 30]
print(u[counts != 1])
# [ 0 10]
print(np.unique(a, return_counts=True))
# (array([ 0, 10, 20, 30]), array([2, 2, 1, 1]))
print(type(np.unique(a, return_counts=True)))
# <class 'tuple'>
u, indices = np.unique(a, return_index=True)
print(u)
# [ 0 10 20 30]
print(indices)
# [0 3 5 2]
print(a)
# [ 0 0 30 10 10 20]
print(a[indices])
# [ 0 10 20 30]
u, inverse = np.unique(a, return_inverse=True)
print(u)
# [ 0 10 20 30]
print(inverse)
# [0 0 3 1 1 2]
print(a)
# [ 0 0 30 10 10 20]
print(u[inverse])
# [ 0 0 30 10 10 20]
u, indices, inverse, counts = np.unique(a, return_index=True, return_inverse=True, return_counts=True)
print(u)
# [ 0 10 20 30]
print(indices)
# [0 3 5 2]
print(inverse)
# [0 0 3 1 1 2]
print(counts)
# [2 2 1 1]
print(np.unique(a, return_counts=True, return_index=True, return_inverse=True))
# (array([ 0, 10, 20, 30]), array([0, 3, 5, 2]), array([0, 0, 3, 1, 1, 2]), array([2, 2, 1, 1]))
a_2d = np.array([[20, 20, 10, 10], [0, 0, 10, 30], [20, 20, 10, 10]])
print(a_2d)
# [[20 20 10 10]
# [ 0 0 10 30]
# [20 20 10 10]]
print(np.unique(a_2d))
# [ 0 10 20 30]
print(np.unique(a_2d, axis=0))
# [[ 0 0 10 30]
# [20 20 10 10]]
print(np.unique(a_2d, axis=1))
# [[10 10 20]
# [10 30 0]
# [10 10 20]]
print(a_2d[0])
# [20 20 10 10]
print(np.unique(a_2d[0]))
# [10 20]
print(a_2d[:, 2])
# [10 10 10]
print(np.unique(a_2d[:, 2]))
# [10]
print([np.unique(row) for row in a_2d])
# [array([10, 20]), array([ 0, 10, 30]), array([10, 20])]
print([np.unique(row).tolist() for row in a_2d])
# [[10, 20], [0, 10, 30], [10, 20]]
print([np.unique(row).size for row in a_2d])
# [2, 3, 2]
print(a_2d.T)
# [[20 0 20]
# [20 0 20]
# [10 10 10]
# [10 30 10]]
print([np.unique(row) for row in a_2d.T])
# [array([ 0, 20]), array([ 0, 20]), array([10]), array([10, 30])]
print(a_2d.shape)
# (3, 4)
print([np.unique(a_2d[:, i]) for i in range(a_2d.shape[1])])
# [array([ 0, 20]), array([ 0, 20]), array([10]), array([10, 30])]
u, indices, inverse, counts = np.unique(a_2d, return_index=True, return_inverse=True, return_counts=True)
print(u)
# [ 0 10 20 30]
print(indices)
# [4 2 0 7]
print(a_2d.flatten())
# [20 20 10 10 0 0 10 30 20 20 10 10]
print(a_2d.flatten()[indices])
# [ 0 10 20 30]
print(inverse)
# [2 2 1 1 0 0 1 3 2 2 1 1]
print(u[inverse])
# [20 20 10 10 0 0 10 30 20 20 10 10]
print(u[inverse].reshape(a_2d.shape))
# [[20 20 10 10]
# [ 0 0 10 30]
# [20 20 10 10]]
print(counts)
# [2 5 4 1]
u, indices, inverse, counts = np.unique(a_2d, axis=0, return_index=True, return_inverse=True, return_counts=True)
print(u)
# [[ 0 0 10 30]
# [20 20 10 10]]
print(indices)
# [1 0]
print(a_2d[indices])
# [[ 0 0 10 30]
# [20 20 10 10]]
print(inverse)
# [1 0 1]
print(u[inverse])
# [[20 20 10 10]
# [ 0 0 10 30]
# [20 20 10 10]]
print(counts)
# [1 2]
print(a_2d)
# [[20 20 10 10]
# [ 0 0 10 30]
# [20 20 10 10]]
u, indices = np.unique(a_2d, return_index=True)
print(u)
# [ 0 10 20 30]
print(a_2d.flatten())
# [20 20 10 10 0 0 10 30 20 20 10 10]
print(indices)
# [4 2 0 7]
print(list(zip(*np.where(a_2d == 0))))
# [(1, 0), (1, 1)]
d = {u: list(zip(*np.where(a_2d == u))) for u in np.unique(a_2d)}
print(d)
# {0: [(1, 0), (1, 1)], 10: [(0, 2), (0, 3), (1, 2), (2, 2), (2, 3)], 20: [(0, 0), (0, 1), (2, 0), (2, 1)], 30: [(1, 3)]}
print(d[0])
# [(1, 0), (1, 1)]
print(d[10])
# [(0, 2), (0, 3), (1, 2), (2, 2), (2, 3)]
print(d[20])
# [(0, 0), (0, 1), (2, 0), (2, 1)]
print(d[30])
# [(1, 3)]
d = {u: list(zip(*np.where(a_2d == u)))
for u, c in zip(*np.unique(a_2d, return_counts=True)) if c == 1}
print(d)
# {30: [(1, 3)]}
d = {u: list(zip(*np.where(a_2d == u)))
for u, c in zip(*np.unique(a_2d, return_counts=True)) if c <= 2}
print(d)
# {0: [(1, 0), (1, 1)], 30: [(1, 3)]}
|
11463167
|
from unittest.mock import patch
from cicada2.engine import runners
def test_config_to_runner_env():
config = {"foo": "bar", "fizz": "buzz"}
env_config = runners.config_to_runner_env(config)
assert env_config == {"RUNNER_FOO": "bar", "RUNNER_FIZZ": "buzz"}
@patch("cicada2.engine.runners.runner_healthcheck")
def test_runner_healthcheck_success(runner_healthcheck_mock):
runner_healthcheck_mock.side_effect = [False, False, True]
assert runners.container_is_healthy(
hostname="alpha", initial_wait_time=0, max_retries=3
)
@patch("cicada2.engine.runners.runner_healthcheck")
def test_runner_healthcheck_failure(runner_healthcheck_mock):
runner_healthcheck_mock.return_value = False
assert not runners.container_is_healthy(
hostname="alpha", initial_wait_time=0, max_retries=3
)
|
11463188
|
from __future__ import print_function
import os
import sys
import imp
import inspect
class TestFunction(object):
is_class = False
def __init__(self, target, name=None, is_method=False):
self.target = target
if not name:
name = target.__name__
self.name = '{}:{}'.format(target.__module__, name)
self.is_method = is_method
def shortname(self):
return self.target.__name__
class TestClass(object):
is_class = True
def __init__(self, cls, names):
self.methnames = names
self.clsname = cls.__name__
self.cls = cls
self.name = '{}:{}'.format(cls.__module__, self.clsname)
def create_instance(self, *args, **kwargs):
return self.cls(*args, **kwargs)
def get_functions(self, instance):
fns = []
for mname in self.methnames:
bound = getattr(instance, mname)
if not callable(bound):
continue
fns.append(TestFunction(bound,
name='{}.{}'.format(self.clsname, mname),
is_method=True))
return fns
class TestLoader(object):
def __init__(self, filter=None):
self.tests = []
self.toplevel_filter = filter
self.subfilter = None
    def load_spec(self, arg):
        """
        Load tests from the single-argument form, e.g. foo.py:BarBaz
        """
        # See what kind of spec this is!
if ':' in arg:
filename, varname = arg.split(':')
else:
filename = arg
varname = None
if os.path.isdir(filename):
sys.path.append(filename)
self.scan_dir(filename)
return
# Ensure the path is in sys.path
dirname = os.path.abspath(os.path.dirname(filename))
if dirname not in sys.path:
sys.path.append(dirname)
module_name, _ = os.path.splitext(os.path.basename(filename))
if varname:
if '.' in varname:
self.toplevel_filter, self.subfilter = varname.split('.')
else:
self.toplevel_filter = varname
self.load_files(dirname, module_name)
def load_files(self, module_dir, module_name):
filename = '%s/%s.py' % (module_dir, module_name)
module_file = open(filename, 'r')
module = imp.load_module(module_name, module_file, filename,
('.py', 'r', imp.PY_SOURCE))
for symbol in dir(module):
if not self.filter_modulevar(symbol):
continue
obj = getattr(module, symbol)
if inspect.isclass(obj):
methnames = [mname for mname in dir(obj)
if self.filter_method(mname)]
self.tests.append(TestClass(obj, methnames))
elif inspect.isfunction(obj):
self.tests.append(TestFunction(obj))
def scan_dir(self, testdir):
for filename in os.listdir(testdir):
if filename.startswith('test') and filename.endswith('.py'):
module_name, ext = os.path.splitext(filename)
self.load_files(testdir, module_name)
def filter_modulevar(self, candidate):
if not candidate.lower().startswith('test'):
return False
if self.toplevel_filter and candidate != self.toplevel_filter:
return False
return True
def filter_method(self, candidate):
if not candidate.lower().startswith('test'):
return False
if self.subfilter and candidate != self.subfilter:
return False
return True
def __iter__(self):
return iter(self.tests)
def print_tests(self):
for t in self.tests:
print("Test: ", t.name)
if t.is_class:
print("\tClass")
print("\tFunctions")
for m in t.methnames:
print("\t\t", m)
else:
print("\tFunction")
|
11463194
|
import logging
import ibmsecurity.utilities.tools
logger = logging.getLogger(__name__)
def get_all(isamAppliance, check_mode=False, force=False):
"""
Get all rsyslog objects
"""
return isamAppliance.invoke_get("Get all rsyslog objects",
"/core/rsp_rsyslog_objs")
def get(isamAppliance, uuid, check_mode=False, force=False):
"""
Get a specific rsyslog object
"""
return isamAppliance.invoke_get("Get a specific rsyslog object",
"/core/rsp_rsyslog_objs/{0}".format(uuid))
def add(isamAppliance, name, collector, collectorPort=514, collectorLeef=False, objType='rsyslog', comment='',
check_mode=False, force=False):
"""
Add a rsyslog object
"""
if force is True or _check(isamAppliance, None, name, objType, comment, collector, collectorPort,
collectorLeef) is False:
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
return isamAppliance.invoke_post(
"Add a rsyslog object",
"/core/rsp_rsyslog_objs/",
{
'name': name,
'objType': objType,
'comment': comment,
'collector': collector,
'collectorPort': collectorPort,
'collectorLeef': collectorLeef
})
return isamAppliance.create_return_object()
def update(isamAppliance, uuid, name, collector, collectorPort=514, collectorLeef=False, objType='rsyslog', comment='',
check_mode=False, force=False):
"""
Update a specific rsyslog object
"""
if force is True or (
_exists(isamAppliance, uuid) is True and _check(isamAppliance, uuid, name, objType, comment,
collector, collectorPort, collectorLeef) is False):
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
return isamAppliance.invoke_put(
"Update a specific rsyslog object",
"/core/rsp_rsyslog_objs/{0}".format(uuid),
{
'name': name,
'uuid': uuid,
'objType': objType,
'comment': comment,
'collector': collector,
'collectorPort': collectorPort,
'collectorLeef': collectorLeef
})
return isamAppliance.create_return_object()
def delete(isamAppliance, uuid, check_mode=False, force=False):
"""
Delete a rsyslog object
"""
if force is True or _exists(isamAppliance, uuid) is True:
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
return isamAppliance.invoke_delete(
"Delete a rsyslog object",
"/core/rsp_rsyslog_objs/{0}".format(uuid))
return isamAppliance.create_return_object()
def _exists(isamAppliance, uuid):
"""
    Check if an object with the given uuid exists
:param isamAppliance:
:param uuid:
:return:
"""
exists = False
ret_obj = get_all(isamAppliance)
for rsyslog in ret_obj['data']['rsyslogObjects']:
if rsyslog['uuid'] == uuid:
exists = True
break
return exists
def _check(isamAppliance, uuid, name, objType, comment, collector, collectorPort, collectorLeef):
"""
    Check if the rsyslog object exists and is the same - uuid=None means an add rather than an update
    NOTE: if the UUID is not found, that is treated the same as no match!
"""
set_value = {
'name': name,
'uuid': uuid,
'objType': objType,
'comment': comment,
'collector': collector,
'collectorPort': collectorPort,
'collectorLeef': collectorLeef
}
set_value = ibmsecurity.utilities.tools.json_sort(set_value)
ret_obj = get_all(isamAppliance)
for obj in ret_obj['data']['rsyslogObjects']:
if uuid is None and obj['name'] == name:
return True
elif ibmsecurity.utilities.tools.json_sort(obj) == set_value:
return True
return False
def compare(isamAppliance1, isamAppliance2):
"""
Compare rsyslog objects between two appliances
"""
ret_obj1 = get_all(isamAppliance1)
ret_obj2 = get_all(isamAppliance2)
for obj in ret_obj1['data']['rsyslogObjects']:
del obj['uuid']
for obj in ret_obj2['data']['rsyslogObjects']:
del obj['uuid']
return ibmsecurity.utilities.tools.json_compare(ret_obj1, ret_obj2, deleted_keys=['uuid'])
|
11463242
|
import h5py
import numpy
###############################################################################
# This file writes all of the materials data (multi-group nuclear
# cross-sections) for the LRA diffusion
# benchmark problem to an HDF5 file. The script uses the h5py Python package
# to interact with the HDF5 file format. This may be a good example for those
# wishing ot write their nuclear data to an HDF5 file to import using the
# OpenMOC 'materialize' Python module.
###############################################################################
# Create the file to store LRA multi-group cross-sections
f = h5py.File('LRA-mgxs.h5', 'w')
f.attrs["# groups"] = 2
# Create a group to specify that MGXS are split by material (vs. cell)
material_group = f.create_group('material')
# The neutron multiplication factor for all fissionable materials
nu = 2.43
###############################################################################
################################ region 1 ################################
###############################################################################
# Create a subgroup for region 1 materials data
region_1 = material_group.create_group('Region 1')
sigma_t = numpy.array([0.265604, 1.579779])
sigma_s = numpy.array([0.232022, 0.02533, 0.00, 1.479479])
sigma_f = numpy.array([0.004602, 0.1091]) / nu
nu_sigma_f = numpy.array([0.004602, 0.1091])
chi = numpy.array([1.0, 0.0])
region_1.create_dataset('total', data=sigma_t)
region_1.create_dataset('scatter matrix', data=sigma_s)
region_1.create_dataset('fission', data=sigma_f)
region_1.create_dataset('nu-fission', data=nu_sigma_f)
region_1.create_dataset('chi', data=chi)
###############################################################################
################################ region 2 ################################
###############################################################################
# Create a subgroup for region 2 materials data
region_2 = material_group.create_group('Region 2')
sigma_t = numpy.array([0.262881, 1.752541])
sigma_s = numpy.array([0.228030, 0.02767, 0.00, 1.682071])
sigma_f = numpy.array([0.004609, 0.08675]) / nu
nu_sigma_f = numpy.array([0.004609, 0.08675])
chi = numpy.array([1.0, 0.0])
region_2.create_dataset('total', data=sigma_t)
region_2.create_dataset('scatter matrix', data=sigma_s)
region_2.create_dataset('fission', data=sigma_f)
region_2.create_dataset('nu-fission', data=nu_sigma_f)
region_2.create_dataset('chi', data=chi)
###############################################################################
################################ region 3 ################################
###############################################################################
# Create a subgroup for region 3 materials data
region_3 = material_group.create_group('Region 3')
sigma_t = numpy.array([0.26476, 1.594134])
sigma_s = numpy.array([0.230588, 0.02617, 0.00, 1.510694])
sigma_f = numpy.array([0.004663, 0.1021]) / nu
nu_sigma_f = numpy.array([0.004663, 0.1021])
chi = numpy.array([1.0, 0.0])
region_3.create_dataset('total', data=sigma_t)
region_3.create_dataset('scatter matrix', data=sigma_s)
region_3.create_dataset('fission', data=sigma_f)
region_3.create_dataset('nu-fission', data=nu_sigma_f)
region_3.create_dataset('chi', data=chi)
###############################################################################
################################ region 4 ################################
###############################################################################
# Create a subgroup for region 4 materials data
region_4 = material_group.create_group('Region 4')
sigma_t = numpy.array([0.26476, 1.594134])
sigma_s = numpy.array([0.230588, 0.02617, 0.00, 1.52081])
sigma_f = numpy.array([0.004663, 0.1021]) / nu
nu_sigma_f = numpy.array([0.004663, 0.1021])
chi = numpy.array([1.0, 0.0])
region_4.create_dataset('total', data=sigma_t)
region_4.create_dataset('scatter matrix', data=sigma_s)
region_4.create_dataset('fission', data=sigma_f)
region_4.create_dataset('nu-fission', data=nu_sigma_f)
region_4.create_dataset('chi', data=chi)
###############################################################################
################################ region 5 ################################
###############################################################################
# Create a subgroup for region 5 materials data
region_5 = material_group.create_group('Region 5')
sigma_t = numpy.array([0.265182, 2.093802])
sigma_s = numpy.array([0.217039, 0.04754, 0.00, 2.074692])
sigma_f = numpy.array([0.0, 0.0])
nu_sigma_f = numpy.array([0.0, 0.0])
chi = numpy.array([1.0, 0.0])
region_5.create_dataset('total', data=sigma_t)
region_5.create_dataset('scatter matrix', data=sigma_s)
region_5.create_dataset('fission', data=sigma_f)
region_5.create_dataset('nu-fission', data=nu_sigma_f)
region_5.create_dataset('chi', data=chi)
# Close the hdf5 data file
f.close()
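# A hedged read-back sketch (assumes the layout written above):
# with h5py.File('LRA-mgxs.h5', 'r') as f:
#     print(f.attrs['# groups'])              # 2
#     print(f['material/Region 1/total'][:])  # [0.265604 1.579779]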
|
11463251
|
from typing import Dict, List
from uuid import uuid4
from ee.clickhouse.models.event import create_event
from ee.clickhouse.models.session_recording_event import create_session_recording_event
def bulk_create_events(events: List[Dict], **kw):
for event_data in events:
create_event(**event_data, **kw, event_uuid=uuid4())
def bulk_create_session_recording_events(events: List[Dict], **kw):
for data in events:
create_session_recording_event(**data, **kw, uuid=uuid4())
|
11463253
|
import logging
import os
from overrides import overrides
from typing import Dict, List
from repro.common import TemporaryDirectory
from repro.common.docker import make_volume_map, run_command
from repro.common.io import write_to_text_file
from repro.models import Model, TruecasingModel
from repro.models.susanto2016 import DEFAULT_IMAGE, MODEL_NAME
logger = logging.getLogger(__name__)
@Model.register(f"{MODEL_NAME}-truecaser")
class RNNTruecaser(TruecasingModel):
def __init__(
self,
model: str,
image: str = DEFAULT_IMAGE,
device: int = 0,
) -> None:
"""
Parameters
----------
model : str
The name of the model, currently either "wiki-truecaser-model-en.tar.gz" for English,
"wmt-truecaser-model-de.tar.gz" for German, "wmt-truecaser-model-es.tar.gz" for Spanish,
or "lrl-truecaser-model-ru.tar.gz" for Russian.
image : str, default="susanto2016"
The name of the Docker image
device : int, default=0
The ID of the GPU, -1 if CPU
"""
if model not in [
"lrl-truecaser-model-ru.tar.gz",
"wiki-truecaser-model-en.tar.gz",
"wmt-truecaser-model-de.tar.gz",
"wmt-truecaser-model-es.tar.gz",
]:
raise Exception(f"Unknown model: {model}")
self.model = model
self.image = image
self.device = device
@overrides
def predict_batch(self, inputs: List[Dict[str, str]], *args, **kwargs) -> List[str]:
input_texts = [inp["text"] for inp in inputs]
logger.info(f"Running truecasing on {len(inputs)} using image {self.image}")
with TemporaryDirectory() as temp:
host_input_dir = f"{temp}/input"
host_output_dir = f"{temp}/output"
volume_map = make_volume_map(host_input_dir, host_output_dir)
container_input_dir = volume_map[host_input_dir]
container_output_dir = volume_map[host_output_dir]
host_input_file = f"{host_input_dir}/input.txt"
container_input_file = f"{container_input_dir}/input.txt"
write_to_text_file(input_texts, host_input_file)
# Run inference. The output_dir must exist before running
# the docker command
os.makedirs(host_output_dir)
host_output_file = f"{host_output_dir}/output.txt"
container_output_file = f"{container_output_dir}/output.txt"
commands = []
commands.append("cd pytorch-truecaser")
cuda = self.device != -1
if cuda:
commands.append(f"export CUDA_VISIBLE_DEVICES={self.device}")
process_device = 0
else:
process_device = -1
commands.append(
f"allennlp predict"
f" ../{self.model}"
f" {container_input_file}"
f" --output-file {container_output_file}"
f" --include-package mylib "
f" --use-dataset-reader "
f" --predictor truecaser-predictor"
f" --cuda-device {process_device}"
f" --silent"
)
command = " && ".join(commands)
run_command(
self.image,
command,
volume_map=volume_map,
network_disabled=True,
cuda=cuda,
)
outputs = open(host_output_file, "r").read().splitlines()
return outputs
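# A hedged usage sketch (assumes the Docker image and model archive are available):
# truecaser = RNNTruecaser(model="wiki-truecaser-model-en.tar.gz", device=-1)
# print(truecaser.predict_batch([{"text": "hello world"}]))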
|
11463278
|
from django.conf.urls import url
from . import views
urlpatterns = [
url('', views.TableView.as_view(), name="table_api"),
]
|
11463319
|
import pygame_sdl2
import os
import argparse
import json
def smoothscale(surf, size):
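    # Progressively halve the surface until it reaches the target size;
    # stepwise halving reduces the aliasing of a single large downscale
    # (an assumption about intent, based on the max(w // 2, ...) steps below).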
while True:
w, h = surf.get_size()
if (w == size[0]) and (h == size[1]):
break
w = max(w // 2, size[0])
h = max(h // 2, size[1])
surf = pygame_sdl2.transform.smoothscale(surf, (w, h))
return surf
def generate(source, destination, scale):
if not os.path.exists(source):
return
src = pygame_sdl2.image.load(source).convert_alpha()
sw, sh = src.get_size()
with open(os.path.join(destination, "Contents.json"), "r") as f:
contents = json.load(f)
for i in contents["images"]:
if "filename" not in i:
continue
dfn = os.path.join(destination, i['filename'])
dst = pygame_sdl2.image.load(dfn)
dst.convert_alpha()
w, h = dst.get_size()
if scale:
dst = smoothscale(src, (w, h))
else:
dst.fill(dst.get_at((0, 0)))
xo = int(w / 2) - int(sw / 2)
yo = int(h / 2) - int(sh / 2)
dst.blit(src, (xo, yo))
pygame_sdl2.image.save(dst, dfn)
def main():
ap = argparse.ArgumentParser()
ap.add_argument("source")
ap.add_argument("destination")
ap.add_argument("--scale", action="store_true")
args = ap.parse_args()
pygame_sdl2.display.init()
pygame_sdl2.display.set_mode((640, 480))
pygame_sdl2.event.pump()
generate(args.source, args.destination, args.scale)
if __name__ == "__main__":
main()
|
11463347
|
def upper(word: str) -> str:
"""
Will convert the entire string to uppercase letters
>>> upper("wow")
'WOW'
>>> upper("Hello")
'HELLO'
>>> upper("WHAT")
'WHAT'
>>> upper("wh[]32")
'WH[]32'
"""
    # Convert each character to its ASCII integer value and check whether it is a
    # lowercase letter; if so, shift it by 32, which turns it into the corresponding
    # uppercase letter.
return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)
if __name__ == "__main__":
from doctest import testmod
testmod()
|
11463358
|
api_key: str = "api-key-string"
bearer_token: str = "bearer-token-string"
mock_data = {"testkey": "testval"}
hooli_id = "9676868b-60d2-5ebe-aa66-c1de8162ff9d"
from gremlinapi.attack_helpers import (
GremlinAttackTargetHelper,
GremlinAttackCommandHelper,
)
mock_access_token = "<PASSWORD>"
mock_bearer_token = "<PASSWORD>"
def access_token_json():
return {"access_token": mock_access_token}
def bearer_token_json():
return {"header": mock_bearer_token}
def mock_json():
return mock_data
mock_team_id = "1234567890a"
mock_body = {"body": mock_data}
mock_guid = {"guid": mock_data}
mock_scenario_guid = {
"guid": mock_data,
"body": mock_data,
"startDate": "1/1/1900",
"endDate": "1/1/2000",
"runNumber": 1,
"staticEndpointName": "not-a-website.comorg",
}
mock_users = {
"role": "mock user role",
"email": "<EMAIL>",
"password": "<PASSWORD>",
"orgId": "102928384756z",
"renewToken": "42",
"companyId": "c0mp4ny",
"companyName": "Mocking Co, A Mockery Company",
"provider": "MacinoogleSoft",
"teamId": "h4x0r734m",
"accessToken": "1q2w3e4r5t6y7u8i9o90p",
"token": "<PASSWORD>ca3f41fb4d1cb4cd4bfcd14c",
}
mock_identifier = {
"identifier": mock_data,
"email": "<EMAIL>",
"body": mock_data,
"name": "Gremlin",
}
mock_payload = {
"body": mock_data,
"headers": "1234567890",
"data": mock_data,
}
mock_uid = {"body": mock_data, "uid": "1234567890z"}
mock_metrics = {
"attackId": "1234567890",
"scenarioId": "1234567890",
"scenarioRunNumber": "1",
}
mock_report = {
"start": "mock_start",
"end": "mock_end",
"period": "4",
"startDate": "1/1/1900",
"endDate": "1/1/2000",
"trackingPeriod": "6",
}
mock_saml = {
"SAMLResponse": "mock_response",
"RelayState": "mock_state",
"companyName": "Gremlin Mocks",
"destination": "earth",
"acsHandler": "mock_handler",
"code": "12567890",
}
mock_scenario = {
"description": "A mock scenario",
"hypothesis": "to prove test status",
"name": "mock_scenario",
}
mock_scenario_step = {
"delay": 65536,
"command": GremlinAttackCommandHelper(),
"target": GremlinAttackTargetHelper(),
}
mock_ilfi_node = {
"name": "mock_scenario",
"command": GremlinAttackCommandHelper(),
"target": GremlinAttackTargetHelper(),
}
mock_delay_node = {"delay": "42"}
mock_status_check_node = {
"description": "A mock status check node",
"endpoint_url": "definitely-fake-website1234.com",
"endpoint_headers": {"name": "mock header"},
"evaluation_response_body_evaluation": "mock evaluation",
"evaluation_ok_status_codes": ["24-42"],
"evaluation_ok_latency_max": 999,
}
|
11463365
|
def checking_subset():
    user_in = int(input())  # number of elements in set A (consumed, not used)
    set1 = set(map(int, input().split()))
    user_in2 = int(input())  # number of elements in set B (consumed, not used)
    set2 = set(map(int, input().split()))
print(set1.issubset(set2))
user = int(input())
for i in range(user):
checking_subset()
|
11463369
|
import numpy as np
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
# from noduleCADEvaluationLUNA16 import noduleCADEvaluation
from noduleCADEvaluationLUNA16compare import noduleCADEvaluation
import os
import csv
from multiprocessing import Pool
import functools
import SimpleITK as sitk
fold = 4
trainnum = 5
annotations_filename = '/home/zhaojie/zhaojie/Lung/code/evaluationScript/10FoldCsvFiles/annotations' +str(fold) + '.csv'# path for ground truth annotations for the fold
annotations_excluded_filename = '/home/zhaojie/zhaojie/Lung/code/evaluationScript/10FoldCsvFiles/annotations_excluded' +str(fold) + '.csv'# path for excluded annotations for the fold
seriesuids_filename = '/home/zhaojie/zhaojie/Lung/code/evaluationScript/10FoldCsvFiles/seriesuids' +str(fold) + '.csv'# path for seriesuid for the fold
results_path = '/home/zhaojie/zhaojie/Lung/code/detector_py3/results/dpn3d26/retrft96' + str(trainnum) + '/val'#val' #val' ft96'+'/val'#
sideinfopath = '/home/zhaojie/zhaojie/Lung/data/luna16/LUNA16PROPOCESSPATH/subset'+str(fold)+'/'
datapath = '/home/zhaojie/zhaojie/Lung/data/luna16/subset_data/subset'+str(fold)+'/'
maxeps = 995 #03 #150 #100#100
# eps = range(1, maxeps+1, 1)#6,7,1)#5,151,5)#5,151,5)#76,77,1)#40,41,1)#76,77,1)#1,101,1)#17,18,1)#38,39,1)#1, maxeps+1, 1) #maxeps+1, 1)
eps = range(995, maxeps+1, 1)#6,7,1)#5,151,5)#5,151,5)#76,77,1)#40,41,1)#76,77,1)#1,101,1)#17,18,1)#38,39,1)#1, maxeps+1, 1) #maxeps+1, 1)
detp = [-1.5, -1]#, -0.5, 0]#, 0.5, 1]#, 0.5, 1] #range(-1, 0, 1)
# detp = [-1]#, -0.5, 0]#, 0.5, 1]#, 0.5, 1] #range(-1, 0, 1)
isvis = False #True
nmsthresh = 0.1
nprocess = 38#4
use_softnms = False
frocarr = np.zeros((maxeps, len(detp)))
firstline = ['seriesuid', 'coordX', 'coordY', 'coordZ', 'probability']
def VoxelToWorldCoord(voxelCoord, origin, spacing):
    stretchedVoxelCoord = voxelCoord * spacing
    worldCoord = stretchedVoxelCoord + origin
return worldCoord
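# e.g. voxel coordinate (10, 20, 30) with spacing (1, 1, 1) and origin
# (-100, -100, -100) maps to world coordinate (-90, -80, -70).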
def load_itk_image(filename):
with open(filename) as f:
contents = f.readlines()
line = [k for k in contents if k.startswith('TransformMatrix')][0]
transformM = np.array(line.split(' = ')[1].split(' ')).astype('float')
transformM = np.round(transformM)
if np.any( transformM!=np.array([1,0,0, 0, 1, 0, 0, 0, 1])):
isflip = True
else:
isflip = False
itkimage = sitk.ReadImage(filename)
numpyImage = sitk.GetArrayFromImage(itkimage)
numpyOrigin = np.array(list(reversed(itkimage.GetOrigin())))
numpySpacing = np.array(list(reversed(itkimage.GetSpacing())))
return numpyImage, numpyOrigin, numpySpacing,isflip
def iou(box0, box1):
    # boxes are [z, y, x, diameter]; r is the radius
    r0 = box0[3] / 2
    s0 = box0[:3] - r0  # near corner (min z, y, x)
    e0 = box0[:3] + r0  # far corner (max z, y, x)
    r1 = box1[3] / 2
    s1 = box1[:3] - r1
    e1 = box1[:3] + r1
    overlap = []
    for i in range(len(s0)):  # per-axis overlap: smallest far corner minus largest near corner
        overlap.append(max(0, min(e0[i], e1[i]) - max(s0[i], s1[i])))
    intersection = overlap[0] * overlap[1] * overlap[2]
    union = box0[3] * box0[3] * box0[3] + box1[3] * box1[3] * box1[3] - intersection  # each cube's volume is its diameter cubed
    return intersection / union
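# Sanity-check sketch for iou() (illustration only, never called): boxes are
# [z, y, x, diameter]; a box against itself gives 1, disjoint boxes give 0.
def _demo_iou():
    box = np.array([10.0, 10.0, 10.0, 4.0])
    far = np.array([100.0, 100.0, 100.0, 4.0])
    assert abs(iou(box, box) - 1.0) < 1e-6
    assert iou(box, far) == 0.0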
def nms(output, nms_th):
if len(output) == 0:
return output
output = output[np.argsort(-output[:, 0])]
bboxes = [output[0]]
for i in np.arange(1, len(output)):
bbox = output[i]
flag = 1
for j in range(len(bboxes)):
if iou(bbox[1:5], bboxes[j][1:5]) >= nms_th:
flag = -1
break
if flag == 1:
bboxes.append(bbox)
bboxes = np.asarray(bboxes, np.float32)
return bboxes
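# Hedged usage sketch for nms() (illustration only, never called): rows are
# [probability, z, y, x, diameter]; the heavily overlapping lower-scoring
# detection is suppressed while the distant one survives.
def _demo_nms():
    dets = np.array([
        [0.9, 10.0, 10.0, 10.0, 4.0],
        [0.8, 10.5, 10.0, 10.0, 4.0],  # heavy overlap with the first row
        [0.7, 50.0, 50.0, 50.0, 4.0],  # far away, kept
    ])
    assert len(nms(dets, 0.1)) == 2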
def convertcsv(bboxfname, bboxpath, detp):  # given a pbb.npy filename, its directory and a probability threshold
    sliceim, origin, spacing, isflip = load_itk_image(datapath+bboxfname[:-8]+'.mhd')  # load the raw scan
    # load the preprocessed origin, spacing and extend box for this series
    origin = np.load(sideinfopath+bboxfname[:-8]+'_origin.npy', mmap_mode='r')
    spacing = np.load(sideinfopath+bboxfname[:-8]+'_spacing.npy', mmap_mode='r')
    resolution = np.array([1, 1, 1])
    extendbox = np.load(sideinfopath+bboxfname[:-8]+'_extendbox.npy', mmap_mode='r')
    print(bboxpath+bboxfname)
    pbb = np.load(bboxpath+bboxfname, mmap_mode='r')  # predicted boxes, e.g. shape (267, 5)
    print('pbb.shape', pbb.shape)
    pbbold = np.array(pbb[pbb[:, 0] > detp])  # drop low-probability candidates
    pbbold = np.array(pbbold[pbbold[:, -1] > 3])  # drop nodules with diameter below 3 mm
    pbbold = pbbold[np.argsort(-pbbold[:, 0])][:1000]  # keep the top 1000 by probability; running NMS on everything takes far too long
    # (a soft-NMS branch using cpu_soft_nms was left disabled here; see the use_softnms flag above)
    pbb = nms(pbbold, nmsthresh)  # non-maximum suppression over the surviving candidates
    print('pbb after nms:', pbb.shape)
    pbb = np.array(pbb[:, :-1])  # drop the diameter column
    # add the extend-box offset back, i.e. undo the preprocessing crop
    pbb[:, 1:] = np.array(pbb[:, 1:] + np.expand_dims(extendbox[:, 0], 1).T)
    # rescale to the original spacing so the values are voxel coordinates in the raw scan
    pbb[:, 1:] = np.array(pbb[:, 1:] * np.expand_dims(resolution, 1).T / np.expand_dims(spacing, 1).T)
    if isflip:  # the scan was flipped during preprocessing while the labels were not, so flip the predictions back
        Mask = np.load(sideinfopath+bboxfname[:-8]+'_mask.npy', mmap_mode='r')
        pbb[:, 2] = Mask.shape[1] - pbb[:, 2]
        pbb[:, 3] = Mask.shape[2] - pbb[:, 3]
    pos = VoxelToWorldCoord(pbb[:, 1:], origin, spacing)  # voxel -> world coordinates
    rowlist = []
    for nk in range(pos.shape[0]):
        # one row per predicted nodule: seriesuid, x, y, z, sigmoid(probability)
        rowlist.append([bboxfname[:-8], pos[nk, 2], pos[nk, 1], pos[nk, 0], 1/(1+np.exp(-pbb[nk, 0]))])
    return rowlist
def getfrocvalue(results_filename):
    return noduleCADEvaluation(annotations_filename, annotations_excluded_filename, seriesuids_filename, results_filename, './outputDir/')
p = Pool(nprocess)
# End result: one CSV per epoch and per threshold, holding the predicted
# nodule positions and probabilities for the ~80 test cases.
def getcsv(detp, eps):  # given the threshold list and the epoch list
    for ep in eps:
        bboxpath = results_path + str(ep) + '/'  # results directory for this epoch
        for detpthresh in detp:
            print('ep', ep, 'detp', detpthresh, bboxpath)
            f = open(bboxpath + 'predanno' + str(detpthresh) + '.csv', 'w')  # one CSV per threshold
            fwriter = csv.writer(f)
            fwriter.writerow(firstline)  # header: seriesuid, coordX, coordY, coordZ, probability
            fnamelist = []
            for fname in os.listdir(bboxpath):
                if fname.endswith('_pbb.npy'):  # collect the predicted-nodule files
                    fnamelist.append(fname)
            print('len(fnamelist)', len(fnamelist))
            # functools.partial pins bboxpath and detp, so the pool only has to pass filenames
            predannolist = p.map(functools.partial(convertcsv, bboxpath=bboxpath, detp=detpthresh), fnamelist)
            for predanno in predannolist:
                for row in predanno:
                    fwriter.writerow(row)
            f.close()
getcsv(detp, eps)
print(stop)  # deliberate NameError: halts the script after CSV generation; remove this line to continue to getfroc below
def getfroc(detp, eps):
    maxfroc = 0
    maxep = 0
    for ep in eps:  # evaluate each epoch separately
        bboxpath = results_path + str(ep) + '/'
        predannofnamalist = []
        # detp is the threshold list; in my own runs it holds a single value (-0.125)
        for detpthresh in detp:
            predannofnamalist.append(bboxpath + 'predanno' + str(detpthresh) + '.csv')
            print('DONE!', detpthresh)
        froclist = p.map(getfrocvalue, predannofnamalist)  # one FROC value per CSV
        if maxfroc < max(froclist):
            maxep = ep
            maxfroc = max(froclist)
        for detpthresh in detp:
            frocarr[(ep-eps[0])//(eps[1]-eps[0]), int((detpthresh-detp[0])/(detp[1]-detp[0]))] = froclist[int((detpthresh-detp[0])/(detp[1]-detp[0]))]
    print('maxfroc, maxep', maxfroc, maxep)
getfroc(detp, eps)
p.close()
fig = plt.imshow(frocarr.T)
plt.colorbar()
plt.xlabel('# Epochs')
plt.ylabel('# Detection Prob.')
xtick = detp #[36, 37, 38, 39, 40]
plt.yticks(range(len(xtick)), xtick)
ytick = eps #range(51, maxeps+1, 2)
plt.xticks(range(len(ytick)), ytick)
plt.title('Average FROC')
plt.savefig(results_path+'frocavg.png')
np.save(results_path+'frocavg.npy', frocarr)
frocarr = np.load(results_path+'frocavg.npy', 'r')
froc, x, y = 0, 0, 0
for i in range(frocarr.shape[0]):
for j in range(frocarr.shape[1]):
if froc < frocarr[i,j]:
froc, x, y = frocarr[i,j], i, j
print('FINISH:',fold, froc, x, y)
|
11463388
|
import logging
logger = logging.getLogger(__name__)
import time
from boto.exception import SDBResponseError
from nymms.scheduler.lock.SchedulerLock import SchedulerLock
class SDBLock(SchedulerLock):
def __init__(self, duration, conn, domain_name,
lock_name="scheduler_lock"):
super(SDBLock, self).__init__(duration, lock_name)
self.conn = conn
self.domain_name = domain_name
self.domain = None
self.lock = None
def setup_domain(self):
if self.domain:
return
logger.debug("Setting up lock domain %s", self.domain_name)
self.domain = self.conn.create_domain(self.domain_name)
def acquire(self):
logger.debug("Attempting to acquire lock %s:%s", self.domain_name,
self.lock_name)
self.setup_domain()
now = int(time.time())
existing_lock = self.domain.get_item(self.lock_name,
consistent_read=True)
lock_body = {'expiry': now + self.duration,
'timestamp': now,
'owner': self.id}
expected_value = ['timestamp', False]
if existing_lock:
logger.debug("Existing lock found: %s", existing_lock)
existing_ts = existing_lock['timestamp']
if not existing_lock['owner'] == self.id:
if not self.lock_expired(existing_lock['expiry'], now):
logger.debug("Lock still valid, not taking over.")
return False
else:
logger.info("Lock expired, attempting takeover.")
else:
logger.info("I already own the lock, updating.")
expected_value = ['timestamp', existing_ts]
try:
self.domain.put_attributes(self.lock_name, lock_body,
replace=bool(existing_lock),
expected_value=expected_value)
self.lock = lock_body
logger.debug("Acquired lock %s:%s", self.domain_name,
self.lock_name)
return True
except SDBResponseError as e:
if e.status == 409:
logger.debug('Looks like someone else has acquired the lock.')
return False
raise
return False
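# Hedged usage sketch (assumes valid AWS credentials and boto's SimpleDB
# support; the region and domain name are illustrative):
#
#   import boto.sdb
#   conn = boto.sdb.connect_to_region('us-east-1')
#   lock = SDBLock(duration=300, conn=conn, domain_name='nymms_locks')
#   if lock.acquire():
#       ...  # run one scheduler pass while holding the lock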
|
11463423
|
import numpy as np
# pythran export kernel(int, int, float64[:,:])
def kernel(TSTEPS, N, A):
for t in range(0, TSTEPS - 1):
for i in range(1, N - 1):
A[i, 1:-1] += (A[i - 1, :-2] + A[i - 1, 1:-1] + A[i - 1, 2:] +
A[i, 2:] + A[i + 1, :-2] + A[i + 1, 1:-1] +
A[i + 1, 2:])
for j in range(1, N - 1):
A[i, j] += A[i, j - 1]
A[i, j] /= 9.0
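# Hedged usage sketch (values are illustrative; works under plain CPython or
# against the pythran-compiled module): the kernel smooths interior points in
# place, Gauss-Seidel style, as in PolyBench's seidel-2d.
#
#   N = 8
#   A = np.arange(N * N, dtype=np.float64).reshape(N, N)
#   kernel(3, N, A)  # TSTEPS=3 performs two in-place sweeps over A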
|
11463460
|
from django import forms
from osf.models.storage import ProviderAssetFile
from osf.models.provider import AbstractProvider
class ProviderAssetFileForm(forms.ModelForm):
class Meta:
model = ProviderAssetFile
fields = ['name', 'file', 'providers', 'id']
id = forms.IntegerField(required=False, widget=forms.HiddenInput())
providers = forms.ModelMultipleChoiceField(AbstractProvider.objects.all(), widget=forms.CheckboxSelectMultiple(), required=False)
def clean(self):
cleaned_data = super(ProviderAssetFileForm, self).clean()
obj_id = int(cleaned_data.get('id', None) or 0)
for provider in cleaned_data.get('providers', []):
if provider.asset_files.exclude(id=obj_id).filter(name=cleaned_data.get('name', '')).exists():
raise forms.ValidationError('Naming conflict detected on Provider "{}"'.format(provider.name))
return cleaned_data
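# Hedged usage sketch (needs a configured Django environment; the provider and
# uploaded_file objects are placeholders): clean() rejects a second asset file
# whose name collides on the same provider.
#
#   form = ProviderAssetFileForm(
#       data={'name': 'style.css', 'providers': [provider.pk]},
#       files={'file': uploaded_file})
#   if form.is_valid():
#       form.save()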
|
11463518
|
import numpy as np
# import uproot
import json
import datetime
import pytz
import os
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.lines as mlines
import matplotlib.ticker as ticker
from matplotlib.font_manager import FontProperties
# Define some plot constants
NBINS=37
X_MIN=70
X_MAX=181
Y_MIN=0
Y_MAX=31.5
DELTA_Y = Y_MAX - Y_MIN
HIST_LINEWIDTH=2.0
# Colours
green_m5 = '#558953'
cyan_m9 = '#89bffe'
gray = '#a9a9a9'
edges = np.asarray([ 70., 73., 76., 79., 82., 85., 88., 91., 94., 97., 100.,
103., 106., 109., 112., 115., 118., 121., 124., 127., 130., 133.,
136., 139., 142., 145., 148., 151., 154., 157., 160., 163., 166.,
169., 172., 175., 178., 181.])
ctrs = edges[:-1] + 1.5
constants = {
'xsecs': {
'scalexsecHZZ12': 0.0065,
'scalexsecHZZ11': 0.0057,
'xsecZZ412': 0.077,
'xsecZZ2mu2e12': 0.18,
'xsecZZ411': 0.067,
'xsecZZ2mu2e11': 0.15,
'xsecTTBar12' : 200.,
'xsecTTBar11' : 177.31,
'xsecDY5012' : 2955.,
'xsecDY1012' : 10.742,
'xsecDY5011' : 2475.,
'xsecDY1011' : 9507.,
},
'sfs': {
'sfZZ': 1.386,
'sfTTBar11': 0.11,
'sfDY': 1.12,
},
}
lumi_settings = {
'2011': {
'mass4e_8TeV_low': 'el_stream_2011',
'mass4mu_8TeV_low': 'mu_stream_2011',
'mass2mu2e_8TeV_low': 'mu_stream_2011'
},
'2012': {
'mass4e_8TeV_low': 'el_stream_2012',
'mass4mu_8TeV_low': 'mu_stream_2012',
'mass2mu2e_8TeV_low': 'mu_stream_2012'
},
}
sampledata = {
# Higgs 2012
'sm12_dr53x_smhiggstozzto4l_m-125_8tev-pw15-j3-py6': {
'id': 'Higgs 2012',
'group': 'higgs',
'datatype': 'mc',
'lumi': '2012',
'xsec': constants['xsecs']['scalexsecHZZ12'],
'pickup': ['mass4mu_8TeV_low', 'mass4e_8TeV_low', 'mass2mu2e_8TeV_low'],
'nfiles': '34'
},
# Higgs 2011
'sm11legdr_smhiggstozzto4l_m-125_7tev-pw15-j3-py6': {
'id': 'Higgs 2011',
'group': 'higgs',
'datatype': 'mc',
'lumi': '2011',
'xsec': constants['xsecs']['scalexsecHZZ11'],
'pickup': ['mass4mu_8TeV_low', 'mass4e_8TeV_low', 'mass2mu2e_8TeV_low'],
'nfiles': '22'
},
# ZZ 2012
'sm12_dr53x_zzto4mu_8tev-pw-py6': {
'id': r'ZZ $\rightarrow 4\mu$ 2012',
'group': 'zz',
'datatype': 'mc',
'lumi': '2012',
'xsec': constants['xsecs']['xsecZZ412'] * constants['sfs']['sfZZ'],
'pickup': ['mass4mu_8TeV_low'],
'nfiles': '151'
},
'sm12_dr53x_zzto4e_8tev-pw-py6': {
'id': r'ZZ $\rightarrow 4$e 2012',
'group': 'zz',
'datatype': 'mc',
'lumi': '2012',
'xsec': constants['xsecs']['xsecZZ412'] * constants['sfs']['sfZZ'],
'pickup': ['mass4e_8TeV_low'],
'nfiles': '143'
},
'sm12_dr53x_zzto2e2mu_8tev-pw-py6': {
'id': r'ZZ $\rightarrow 2\mu$2e 2012',
'group': 'zz',
'datatype': 'mc',
'lumi': '2012',
'xsec': constants['xsecs']['xsecZZ2mu2e12'] * constants['sfs']['sfZZ'],
'pickup': ['mass2mu2e_8TeV_low'],
'nfiles': '161'
},
# ZZ 2011
'sm11legdr_zzto4mu_mll4_7tev-pw-py6': {
'id': r'ZZ $\rightarrow 4\mu$ 2011',
'group': 'zz',
'datatype': 'mc',
'lumi': '2011',
'xsec': constants['xsecs']['xsecZZ411'] * constants['sfs']['sfZZ'],
'pickup': ['mass4mu_8TeV_low'],
'nfiles': '92'
},
'sm11legdr_zzto4e_mll4_7tev-pw-py6': {
'id': r'ZZ $\rightarrow 4$e 2011',
'group': 'zz',
'datatype': 'mc',
'lumi': '2011',
'xsec': constants['xsecs']['xsecZZ411'] * constants['sfs']['sfZZ'],
'pickup': ['mass4e_8TeV_low'],
'nfiles': '96'
},
'sm11legdr_zzto2e2mu_mll4_7tev-pw-py6': {
'id': r'ZZ $\rightarrow 2\mu$2e 2011',
'group': 'zz',
'datatype': 'mc',
'lumi': '2011',
'xsec': constants['xsecs']['xsecZZ2mu2e11'] * constants['sfs']['sfZZ'],
'pickup': ['mass2mu2e_8TeV_low'],
'nfiles': '97'
},
# TTbar 2011
'sm11legdr_ttto2l2nu2b_7tev-pw-py6': {
'id': r'$t\bar{t}$ 2011',
'group': 'ttbar',
'datatype': 'mc',
'lumi': '2011',
'xsec': constants['xsecs']['xsecTTBar11'] * constants['sfs']['sfTTBar11'],
'pickup': ['mass4mu_8TeV_low', 'mass4e_8TeV_low', 'mass2mu2e_8TeV_low'],
'nfiles': '676'
},
# TTbar 2012
'sm12_dr53x_ttbar_8tev-madspin_amcatnlo-herwig': {
'id': r'$t\bar{t}$ 2012',
'group': 'ttbar',
'datatype': 'mc',
'lumi': '2012',
'xsec': constants['xsecs']['xsecTTBar12'],
'pickup': ['mass4mu_8TeV_low', 'mass4e_8TeV_low', 'mass2mu2e_8TeV_low'],
'nfiles': '696'
},
# DY 2011
'sm11legdr_dyjetstoll_m-10to50_tunez2_7tev-py6': {
'id': r'DY m10-50 2011',
'group': 'dy',
'datatype': 'mc',
'lumi': '2011',
'xsec': constants['xsecs']['xsecDY1011'] * constants['sfs']['sfDY'],
'pickup': ['mass4mu_8TeV_low', 'mass4e_8TeV_low', 'mass2mu2e_8TeV_low'],
'nfiles': '1771'
},
'sm11legdr_dyjetstoll_m-50_7tev-madgraph-py6-tauola': {
'id': r'DY m50 2011',
'group': 'dy',
'datatype': 'mc',
'lumi': '2011',
'xsec': constants['xsecs']['xsecDY1011'] * constants['sfs']['sfDY'],
'pickup': ['mass4mu_8TeV_low', 'mass4e_8TeV_low', 'mass2mu2e_8TeV_low'],
'nfiles': '7501'
},
# DY 2012
'sm12_dr53x_dyjetstoll_m-10to50_ht-200to400_tz2_8tev-mgt': {
'id': r'DY m10-50 low 2012',
'group': 'dy',
'datatype': 'mc',
'lumi': '2012',
'xsec': constants['xsecs']['xsecDY1012'] * constants['sfs']['sfDY'],
'pickup': ['mass4mu_8TeV_low', 'mass4e_8TeV_low', 'mass2mu2e_8TeV_low'],
'nfiles': '430',
},
'sm12_dr53x_dyjetstoll_m-10to50_ht-400toinf_tz2_8tev-mgt': {
'id': r'DY m10-50 high 2012',
'group': 'dy',
'datatype': 'mc',
'lumi': '2012',
'xsec': constants['xsecs']['xsecDY1012'] * constants['sfs']['sfDY'],
'pickup': ['mass4mu_8TeV_low', 'mass4e_8TeV_low', 'mass2mu2e_8TeV_low'],
'nfiles': '277',
},
'sm12_dr53x_dyjetstoll_m-50_tz2_8tev-mgtt-taupolaroff': {
'id': r'DY m50 2012',
'group': 'dy',
'datatype': 'mc',
'lumi': '2012',
'xsec': constants['xsecs']['xsecDY1012'] * constants['sfs']['sfDY'],
'pickup': ['mass4mu_8TeV_low', 'mass4e_8TeV_low', 'mass2mu2e_8TeV_low'],
'nfiles': '2467',
},
# Data
'cms_run2012b_doublemuparked_aod_22jan2013-v1': {
'id': r'Data Run 2012B $\mu\mu$',
'group': 'data',
'datatype': 'data',
'pickup': ['mass4mu_8TeV_low', 'mass2mu2e_8TeV_low'],
'nfiles': '2279'
},
'cms_run2012b_doubleelectron_aod_22jan2013-v1': {
'id': r'Data Run 2012B ee',
'group': 'data',
'datatype': 'data',
'pickup': ['mass4e_8TeV_low'],
'nfiles': '1643'
},
'cms_run2012c_doublemuparked_aod_22jan2013-v1': {
'id': r'Data Run 2012C $\mu\mu$',
'group': 'data',
'datatype': 'data',
'pickup': ['mass4mu_8TeV_low', 'mass2mu2e_8TeV_low'],
'nfiles': '2920'
},
'cms_run2012c_doubleelectron_aod_22jan2013-v1': {
'id': r'Data Run 2012C ee',
'group': 'data',
'datatype': 'data',
'pickup': ['mass4e_8TeV_low'],
'nfiles': '2389'
},
'cms_run2011a_doublemu_aod_12oct2013-v1': {
'id': r'Data Run 2011A $\mu\mu$',
'group': 'data',
'datatype': 'data',
'pickup': ['mass4mu_8TeV_low', 'mass2mu2e_8TeV_low'],
'nfiles': '1378'
},
'cms_run2011a_doubleelectron_aod_12oct2013-v1': {
'id': r'Data Run 2011A ee',
'group': 'data',
'datatype': 'data',
'pickup': ['mass4e_8TeV_low'],
'nfiles': '1697'
},
}
colour_dict = {
'higgs': 'red',
'zz': cyan_m9,
'dy': green_m5,
'ttbar': gray,
'data': 'black'
}
plotdata = {
'samples': {},
'lumi_data': {
'mu_stream_2012': 0,
'mu_stream_2011': 0,
'el_stream_2012': 0,
'el_stream_2011': 0
}
}
def update(d):
    if 'samplename' not in d: return
    sname = d['samplename']
    if sname not in sampledata: return
pickups = sampledata[sname]['pickup']
plotdata['samples'].setdefault(sname,{})
for pick in pickups:
plotdata['samples'][sname].setdefault(pick,[])
plotdata['samples'][sname][pick] += [d[pick]]
plotdata['samples'][sname].setdefault('processed',0)
plotdata['samples'][sname]['processed'] += d['processed']
if d.get('lumi'):
lumikey = d['lumi']['stream']
lumival = d['lumi']['value']
plotdata['lumi_data'][lumikey] += lumival/1000000.0
def weight_samples(pd):
weighted_and_summed = {}
for sname,s in pd['samples'].items():
for p in sampledata[sname]['pickup']:
if not 'cms_run' in sname:
processed = s['processed']
lumikey = sampledata[sname]['lumi']
lumikey = lumi_settings[lumikey][p]
lumi = pd['lumi_data'][lumikey]
xsec = sampledata[sname]['xsec']
x = np.asarray(s[p]) / processed * lumi * xsec
else:
x = np.asarray(s[p])
summed = np.sum(x, axis=0)
weighted_and_summed.setdefault(sname,{})[p] = summed
return weighted_and_summed
def group_samples(weighted_and_summed):
groups = {}
for k,v in weighted_and_summed.items():
group = sampledata[k]['group']
for p in sampledata[k]['pickup']:
counts = v[p]
groups.setdefault(group,[])
groups[group].append(counts.tolist())
return groups
def plot(ax,groups, hide = None):
hide = hide or []
ax.clear()
bottom = np.zeros(NBINS)
if 'zz' in groups and not 'zz' in hide:
summed = np.sum(groups['zz'],axis=0)
ax.bar(ctrs,summed, width = 3, facecolor = 'steelblue', label = 'zz')
bottom = summed
if 'higgs' in groups and not 'higgs' in hide:
summed = np.sum(groups['higgs'],axis=0)
ax.bar(ctrs,summed, width = 3, bottom = bottom, facecolor = 'red', label = 'higgs')
if 'data' in groups and not 'data' in hide:
data = np.sum(groups['data'],axis=0)
ax.errorbar(ctrs, data, yerr = np.sqrt(data), marker = 'o', fmt='o', c = 'k')
ax.errorbar(ctrs, data, xerr = 1.5*np.ones_like(data), marker = 'o', fmt='o', c = 'k', markersize = 5, linewidth = 3, label = 'data')
ax.set_xlim(70,181)
ax.set_ylim(0,25)
def reset_plotdata():
global plotdata
plotdata = {
'samples': {},
'lumi_data': {
'mu_stream_2012': 0,
'mu_stream_2011': 0,
'el_stream_2012': 0,
'el_stream_2011': 0
}
}
def init_mpl():
# matplotlib general settings
# plt.rc('text', usetex=True)
# plt.rcParams['text.latex.preamble']=[
# r"\usepackage{amsmath}",
# ]
plt.rcParams["mathtext.default"] = 'regular'
plt.rcParams["mathtext.fontset"] = "stix"
fontP = FontProperties()
fontP.set_size(32)
def plot_binned_data(axes, binedges, data, *args, **kwargs):
# The dataset values are the bin centres
x = (binedges[1:] + binedges[:-1]) / 2.0
# The weights are the y-values of the input binned data
weights = data
return axes.hist(x, bins=binedges, weights=weights,
*args, **kwargs)
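# Illustration only (never called): plot_binned_data re-histograms pre-binned
# counts so hist()'s step styles can draw them; the single populated bin is a
# made-up value.
def _demo_plot_binned_data():
    fig, ax = plt.subplots()
    counts = np.zeros(NBINS)
    counts[18] = 12.0  # pretend 12 events in one mass bin
    plot_binned_data(ax, edges, counts, histtype='step', color='red')
    fig.savefig('demo_binned.png')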
def get_legend_handles():
handles = []
handles.append(
(mlines.Line2D([], [], color='k', linestyle='-', marker = 'o', markersize=8, linewidth=3), 'Data')
)
handles.append(
(mpatches.Patch(facecolor='white', edgecolor='red', linewidth=HIST_LINEWIDTH), r'm$_\mathrm{\mathsf{H}}$ = 125 GeV')
)
handles.append(
(mpatches.Patch(facecolor=cyan_m9, edgecolor='black', linewidth=HIST_LINEWIDTH), r'ZZ $\rightarrow$ 4l')
)
handles.append(
(mpatches.Patch(facecolor=green_m5, edgecolor='black', linewidth=HIST_LINEWIDTH), r'Z$\gamma$* + X')
)
handles.append(
(mpatches.Patch(facecolor=gray, edgecolor='black', linewidth=HIST_LINEWIDTH), r'$\mathrm{\mathsf{t}}\bar{\mathrm{\mathsf{t}}}$')
)
return handles
def add_cms_label(ax):
cms_label = 'CMS Open Data'
# cms_label = r'''\textbf{CMS Open Data}'''
# cms_label = r'''\textbf{CMS} \textit{Open Data}'''
ax.text(
X_MIN+(X_MAX-X_MIN)*0.04, Y_MAX-0.08*DELTA_Y,
cms_label,
fontsize=32
)
def label_axes(ax):
# Label axes
ax.set_xlabel(
r'm$_{4\mathrm{\mathsf{l}}}$ (GeV)', fontsize=32,
horizontalalignment='right', x=1.0,
)
# by hand y label, in pyplot 1.4 it aligns properly, here not
ax.set_ylabel(
r'Events / 3 GeV', fontsize=32,
horizontalalignment='right',
y=0.94 #shifts the label down just right
)
def format_axes(ax):
ax.set_xlim(X_MIN, X_MAX)
ax.set_ylim(Y_MIN, Y_MAX)
# Major ticks every 20, minor ticks every 5
major_ticks = np.arange(80, 181, 20)
minor_ticks = np.arange(75, 181, 5)
ax.set_xticks(major_ticks)
ax.set_xticks(minor_ticks, minor=True)
ax.xaxis.set_major_formatter(
ticker.FormatStrFormatter("%d")
)
ax.yaxis.set_major_formatter(
ticker.FormatStrFormatter("%d")
)
# might need to replot this later
loc = ticker.MultipleLocator(base=1) # this locator puts ticks at regular intervals
ax.yaxis.set_minor_locator(loc)
ax.tick_params(axis='both', labelsize=29, which='both', direction='in')
def add_legend(handles, ax):
x = lambda vv: [i for i, _ in vv]
y = lambda vv: [i for _, i in vv]
ax.legend(
x(handles),
y(handles),
bbox_to_anchor=(0.52, 0.58, 0.38, 0.38),#, .55, .102),
loc=1,
ncol=1, mode="expand", borderaxespad=0.,
fontsize=27,
frameon=False,
)
def add_lumi(lumi_data, ax):
# Receiving a dict with the following keys: mu_stream_2012, mu_stream_2011, el_stream_2012, el_stream_2011
lumi_2011 = 0.001*(max(lumi_data['mu_stream_2011'], lumi_data['el_stream_2011']))
lumi_2012 = 0.001*(max(lumi_data['mu_stream_2012'], lumi_data['el_stream_2012']))
txt = ax.text(
X_MIN+(X_MAX-X_MIN)*0.01, Y_MAX+0.025*DELTA_Y,
r'%2.1f fb$^{\mathsf{-1}}$ (7 TeV), %2.1f fb$^{\mathsf{-1}}$ (8 TeV)' % (lumi_2011, lumi_2012),
fontsize=29,
horizontalalignment='left'
)
return txt
def add_processed(lumi_data, ax):
    # NOTE: currently a verbatim copy of add_lumi, presumably a placeholder for a processed-files readout.
    # Receiving a dict with the following keys: mu_stream_2012, mu_stream_2011, el_stream_2012, el_stream_2011
lumi_2011 = 0.001*(max(lumi_data['mu_stream_2011'], lumi_data['el_stream_2011']))
lumi_2012 = 0.001*(max(lumi_data['mu_stream_2012'], lumi_data['el_stream_2012']))
txt = ax.text(
X_MIN+(X_MAX-X_MIN)*0.01, Y_MAX+0.025*DELTA_Y,
r'%2.1f fb$^{\mathsf{-1}}$ (7 TeV), %2.1f fb$^{\mathsf{-1}}$ (8 TeV)' % (lumi_2011, lumi_2012),
fontsize=29,
horizontalalignment='left'
)
return txt
def add_timestamp(ax):
tz = pytz.timezone(os.environ.get('CMS_PLOT_TIMEZONE','Europe/Madrid'))
time_now = datetime.datetime.now(tz).time()
# cms_label = r'''\textbf{CMS} \textit{Open Data}'''
ax.text(
X_MIN-(X_MAX-X_MIN)*0.1, Y_MIN-(Y_MAX-Y_MIN)*0.11,
# 0,0,
time_now.strftime('%H:%M:%S'),
fontsize=32
)
def plot_cosmetics(ax, handles, lumidata):
    label_axes(ax)
    format_axes(ax)
    add_cms_label(ax)  # was called twice; once is enough
    add_timestamp(ax)
    add_legend(handles, ax)
    txt = add_lumi(lumidata, ax)
    return txt
def new_plot(ax, groups, handles, hide=None):
hide = hide or []
ax.clear()
txt = plot_cosmetics(ax, handles, plotdata['lumi_data'])
bottom = np.zeros(NBINS)
bottom_no_zz = np.zeros(NBINS)
summed = np.zeros(NBINS)
if 'ttbar' in groups and not 'ttbar' in hide:
summed = np.sum(groups['ttbar'], axis=0)
plot_binned_data(ax, edges, summed, histtype="stepfilled", bottom = bottom, facecolor = gray, edgecolor='black', label = 'ttbar', linewidth=HIST_LINEWIDTH)
bottom += summed
bottom_no_zz += summed
if 'dy' in groups and not 'dy' in hide:
summed = np.sum(groups['dy'], axis=0)
plot_binned_data(ax, edges, summed, histtype="stepfilled", bottom = bottom, facecolor = green_m5, edgecolor='black', label = 'dy', linewidth=HIST_LINEWIDTH)
bottom += summed
bottom_no_zz += summed
if 'zz' in groups and not 'zz' in hide:
summed = np.sum(groups['zz'], axis=0)
# ax.bar(ctrs,summed, width = 3, facecolor = cyan_m9, edgecolor='black', label = 'zz')
# plot_binned_data(ax, edges, summed, histtype="stepfilled", bottom = bottom, facecolor = cyan_m9, edgecolor='black', label = 'zz')
# ax.hist(summed, edges, histtype='stepfilled', facecolor = cyan_m9, edgecolor='black', label = 'zz')
bottom += summed
if 'higgs' in groups and not 'higgs' in hide:
summed = np.sum(groups['higgs'], axis=0)
# ax.bar(ctrs,summed, width = 3, bottom = bottom, facecolor = 'red', label = 'higgs')
plot_binned_data(ax, edges, summed, histtype="stepfilled", bottom = bottom, facecolor = 'white', edgecolor='red', label = 'higgs', linewidth=HIST_LINEWIDTH)
# redo zz for overlapping lines
if 'zz' in groups and not 'zz' in hide:
summed = np.sum(groups['zz'],axis=0)
plot_binned_data(ax, edges, summed, histtype="stepfilled", bottom = bottom_no_zz, facecolor = cyan_m9, edgecolor='black', label = 'zz', linewidth=HIST_LINEWIDTH)
if 'data' in groups and not 'data' in hide:
data = np.sum(groups['data'],axis=0)
ax.errorbar(ctrs, data, yerr = np.sqrt(data), marker = 'o', fmt='o', c = 'k')
ax.errorbar(ctrs, data, xerr = 1.5*np.ones_like(data), marker = 'o', fmt='o', c = 'k', markersize = 5, linewidth = 3, label = 'data')
return txt
def update_progress(ax, dict_files_processed):
samples = list(sampledata[key]['id'] for key in sampledata.keys())
y_pos = np.arange(len(samples))
progress = [dict_files_processed[key]/float(sampledata[key]['nfiles'])*100 if key in dict_files_processed else 0. for key in sampledata.keys()]
colours = list(colour_dict[sampledata[key]['group']] for key in sampledata.keys())
ax.barh(y_pos, progress, align='center',
color=colours, ecolor='black')
ax.set_yticks(y_pos)
ax.set_yticklabels(samples)
ax.invert_yaxis() # labels read top-to-bottom
    ax.set_xlabel('Jobs done per sample (%)',  # plain '%': usetex is disabled above
fontsize=32,
)
ax.set_title('Progress', fontsize=32)
ax.tick_params(axis='x', labelsize=29, which='both', direction='out')
ax.xaxis.set_major_formatter(ticker.FormatStrFormatter("%d"))
ax.set_xlim(0, 100)
|
11463543
|
import arcade
from triple_vision import Settings as s
from triple_vision.triple_vision import TripleVision
from triple_vision.views.leaderboard_view import LeaderboardView
class PlayButton(arcade.TextButton):
def __init__(self, view, *args, **kwargs) -> None:
super().__init__(text='', *args, **kwargs)
self.view = view
self.pressed = False
def on_press(self):
self.pressed = True
def on_release(self):
if self.pressed:
self.view.play()
self.pressed = False
class LeaderboardButton(arcade.TextButton):
def __init__(self, view, *args, **kwargs) -> None:
super().__init__(text='', *args, **kwargs)
self.view = view
self.pressed = False
def on_press(self):
self.pressed = True
def on_release(self):
if self.pressed:
self.view.leaderboard()
self.pressed = False
class MainView(arcade.View):
def __init__(self) -> None:
super().__init__()
self.play_button = None
self.leaderboard_button = None
self.game_title = None
self.background = arcade.load_texture('assets/background.png')
def play(self) -> None:
self.window.button_list.clear()
self.window.show_view(TripleVision(self))
def leaderboard(self) -> None:
self.window.button_list.clear()
self.window.show_view(LeaderboardView(self))
def on_show(self) -> None:
play_button_theme = arcade.Theme()
play_button_theme.add_button_textures(
clicked='assets/buttons/play_pressed.png',
normal='assets/buttons/play_released.png'
)
self.play_button = PlayButton(
self,
center_x=s.WINDOW_SIZE[0] / 2,
center_y=s.WINDOW_SIZE[1] / 2 + 50,
width=150,
height=80,
theme=play_button_theme
)
self.window.button_list.append(self.play_button)
leaderboard_button_theme = arcade.Theme()
leaderboard_button_theme.add_button_textures(
clicked='assets/buttons/leaderboard_pressed.png',
normal='assets/buttons/leaderboard_released.png'
)
self.leaderboard_button = LeaderboardButton(
self,
center_x=s.WINDOW_SIZE[0] / 2,
center_y=s.WINDOW_SIZE[1] / 2 - 50,
width=150,
height=80,
theme=leaderboard_button_theme
)
self.window.button_list.append(self.leaderboard_button)
self.game_title = arcade.load_texture(
'assets/title.png'
)
def on_draw(self) -> None:
arcade.start_render()
arcade.draw_lrwh_rectangle_textured(
bottom_left_x=0,
bottom_left_y=0,
width=s.WINDOW_SIZE[0],
height=s.WINDOW_SIZE[1],
texture=self.background
)
arcade.draw_lrwh_rectangle_textured(
bottom_left_x=s.WINDOW_SIZE[0] / 2 - 449,
bottom_left_y=s.WINDOW_SIZE[1] / 8 * 7 - 200,
width=898,
height=400,
texture=self.game_title
)
self.play_button.draw()
self.leaderboard_button.draw()
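# Hedged launch sketch (assumes the window object exposes a button_list, as the
# views above expect; Settings comes from triple_vision):
#
#   window = arcade.Window(*s.WINDOW_SIZE, 'Triple Vision')
#   window.button_list = []
#   window.show_view(MainView())
#   arcade.run()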
|
11463570
|
def True(): pass
def None(): pass
def False(): pass
def : meta.function.python, source.python, storage.type.function.python
: meta.function.python, source.python
True : keyword.illegal.name.python, meta.function.python, source.python
( : meta.function.parameters.python, meta.function.python, punctuation.definition.parameters.begin.python, source.python
) : meta.function.parameters.python, meta.function.python, punctuation.definition.parameters.end.python, source.python
: : meta.function.python, punctuation.section.function.begin.python, source.python
: source.python
pass : keyword.control.flow.python, source.python
def : meta.function.python, source.python, storage.type.function.python
: meta.function.python, source.python
None : keyword.illegal.name.python, meta.function.python, source.python
( : meta.function.parameters.python, meta.function.python, punctuation.definition.parameters.begin.python, source.python
) : meta.function.parameters.python, meta.function.python, punctuation.definition.parameters.end.python, source.python
: : meta.function.python, punctuation.section.function.begin.python, source.python
: source.python
pass : keyword.control.flow.python, source.python
def : meta.function.python, source.python, storage.type.function.python
: meta.function.python, source.python
False : keyword.illegal.name.python, meta.function.python, source.python
( : meta.function.parameters.python, meta.function.python, punctuation.definition.parameters.begin.python, source.python
) : meta.function.parameters.python, meta.function.python, punctuation.definition.parameters.end.python, source.python
: : meta.function.python, punctuation.section.function.begin.python, source.python
: source.python
pass : keyword.control.flow.python, source.python
|
11463579
|
from prml.nn.image.convolve2d import convolve2d, Convolve2d
from prml.nn.image.deconvolve2d import deconvolve2d, Deconvolve2d
from prml.nn.image.max_pooling2d import max_pooling2d
from prml.nn.image.util import img2patch, patch2img
|
11463586
|
import os
import sys
sys.path.insert(0, '../../')
import time
import glob
import numpy as np
import torch
import nasbench201.utils as ig_utils
import logging
import argparse
import shutil
import torch.nn as nn
import torch.utils
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
import sota.cnn.genotypes as genotypes
from sota.cnn.model import Network
from torch.utils.tensorboard import SummaryWriter
parser = argparse.ArgumentParser("cifar")
parser.add_argument('--data', type=str, default='../../data',
help='location of the data corpus')
parser.add_argument('--dataset', type=str, default='cifar10', help='choose dataset')
parser.add_argument('--batch_size', type=int, default=96, help='batch size')
parser.add_argument('--learning_rate', type=float, default=0.025, help='init learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=3e-4, help='weight decay')
parser.add_argument('--report_freq', type=float, default=50, help='report frequency')
parser.add_argument('--gpu', type=str, default='auto', help='gpu device id')
parser.add_argument('--epochs', type=int, default=600, help='num of training epochs')
parser.add_argument('--init_channels', type=int, default=36, help='num of init channels')
parser.add_argument('--layers', type=int, default=20, help='total number of layers')
parser.add_argument('--model_path', type=str, default='saved_models', help='path to save the model')
parser.add_argument('--auxiliary', action='store_true', default=False, help='use auxiliary tower')
parser.add_argument('--auxiliary_weight', type=float, default=0.4, help='weight for auxiliary loss')
parser.add_argument('--cutout', action='store_true', default=False, help='use cutout')
parser.add_argument('--cutout_length', type=int, default=16, help='cutout length')
parser.add_argument('--cutout_prob', type=float, default=1.0, help='cutout probability')
parser.add_argument('--drop_path_prob', type=float, default=0.2, help='drop path probability')
parser.add_argument('--save', type=str, default='exp', help='experiment name')
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument('--arch', type=str, default='c100_s4_pgd', help='which architecture to use')
parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping')
#### common
parser.add_argument('--resume_epoch', type=int, default=0, help="load ckpt, start training at resume_epoch")
parser.add_argument('--ckpt_interval', type=int, default=50, help="interval (epoch) for saving checkpoints")
parser.add_argument('--resume_expid', type=str, default='', help="full expid to resume from, name == ckpt folder name")
parser.add_argument('--fast', action='store_true', default=False, help="fast mode for debugging")
parser.add_argument('--queue', action='store_true', default=False, help="queueing for gpu")
args = parser.parse_args()
#### args augment
expid = args.save
args.save = '../../experiments/sota/{}/eval/{}-{}-{}'.format(
args.dataset, args.save, args.arch, args.seed)
if args.cutout:
args.save += '-cutout-' + str(args.cutout_length) + '-' + str(args.cutout_prob)
if args.auxiliary:
args.save += '-auxiliary-' + str(args.auxiliary_weight)
#### logging
if args.resume_epoch > 0: # do not delete dir if resume:
args.save = '../../experiments/sota/{}/{}'.format(args.dataset, args.resume_expid)
    assert os.path.exists(args.save), 'resume but {} does not exist!'.format(args.save)
else:
scripts_to_save = glob.glob('*.py')
if os.path.exists(args.save):
if input("WARNING: {} exists, override?[y/n]".format(args.save)) == 'y':
print('proceed to override saving directory')
shutil.rmtree(args.save)
else:
exit(0)
ig_utils.create_exp_dir(args.save, scripts_to_save=scripts_to_save)
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
log_file = 'log_resume_{}.txt'.format(args.resume_epoch) if args.resume_epoch > 0 else 'log.txt'
fh = logging.FileHandler(os.path.join(args.save, log_file), mode='w')
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
writer = SummaryWriter(args.save + '/runs')
if args.dataset == 'cifar100':
n_classes = 100
else:
n_classes = 10
def main():
torch.set_num_threads(3)
if not torch.cuda.is_available():
logging.info('no gpu device available')
sys.exit(1)
#### gpu queueing
if args.queue:
ig_utils.queue_gpu()
np.random.seed(args.seed)
gpu = ig_utils.pick_gpu_lowest_memory() if args.gpu == 'auto' else int(args.gpu)
torch.cuda.set_device(gpu)
cudnn.benchmark = True
torch.manual_seed(args.seed)
cudnn.enabled = True
torch.cuda.manual_seed(args.seed)
logging.info('gpu device = %d' % gpu)
logging.info("args = %s", args)
genotype = eval("genotypes.%s" % args.arch)
model = Network(args.init_channels, n_classes, args.layers, args.auxiliary, genotype)
model = model.cuda()
logging.info("param size = %fMB", ig_utils.count_parameters_in_MB(model))
criterion = nn.CrossEntropyLoss()
criterion = criterion.cuda()
optimizer = torch.optim.SGD(
model.parameters(),
args.learning_rate,
momentum=args.momentum,
weight_decay=args.weight_decay
)
if args.dataset == 'cifar10':
train_transform, valid_transform = ig_utils._data_transforms_cifar10(args)
train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)
valid_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=valid_transform)
elif args.dataset == 'cifar100':
train_transform, valid_transform = ig_utils._data_transforms_cifar100(args)
train_data = dset.CIFAR100(root=args.data, train=True, download=True, transform=train_transform)
valid_data = dset.CIFAR100(root=args.data, train=False, download=True, transform=valid_transform)
elif args.dataset == 'svhn':
train_transform, valid_transform = ig_utils._data_transforms_svhn(args)
train_data = dset.SVHN(root=args.data, split='train', download=True, transform=train_transform)
valid_data = dset.SVHN(root=args.data, split='test', download=True, transform=valid_transform)
train_queue = torch.utils.data.DataLoader(
train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=4)
valid_queue = torch.utils.data.DataLoader(
valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=4)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, float(args.epochs))
#### resume
start_epoch = 0
if args.resume_epoch > 0:
logging.info('loading checkpoint from {}'.format(expid))
filename = os.path.join(args.save, 'checkpoint_{}.pth.tar'.format(args.resume_epoch))
if os.path.isfile(filename):
print("=> loading checkpoint '{}'".format(filename))
checkpoint = torch.load(filename, map_location='cpu')
resume_epoch = checkpoint['epoch'] # epoch
model.load_state_dict(checkpoint['state_dict']) # model
scheduler.load_state_dict(checkpoint['scheduler'])
optimizer.load_state_dict(checkpoint['optimizer']) # optimizer
start_epoch = args.resume_epoch
print("=> loaded checkpoint '{}' (epoch {})".format(filename, resume_epoch))
else:
print("=> no checkpoint found at '{}'".format(filename))
#### main training
best_valid_acc = 0
for epoch in range(start_epoch, args.epochs):
lr = scheduler.get_lr()[0]
if args.cutout:
train_transform.transforms[-1].cutout_prob = args.cutout_prob
logging.info('epoch %d lr %e cutout_prob %e', epoch, lr,
train_transform.transforms[-1].cutout_prob)
else:
logging.info('epoch %d lr %e', epoch, lr)
model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
train_acc, train_obj = train(train_queue, model, criterion, optimizer)
logging.info('train_acc %f', train_acc)
writer.add_scalar('Acc/train', train_acc, epoch)
writer.add_scalar('Obj/train', train_obj, epoch)
## scheduler
scheduler.step()
valid_acc, valid_obj = infer(valid_queue, model, criterion)
logging.info('valid_acc %f', valid_acc)
writer.add_scalar('Acc/valid', valid_acc, epoch)
writer.add_scalar('Obj/valid', valid_obj, epoch)
## checkpoint
if (epoch + 1) % args.ckpt_interval == 0:
save_state_dict = {
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
'scheduler': scheduler.state_dict(),
}
ig_utils.save_checkpoint(save_state_dict, False, args.save, per_epoch=True)
best_valid_acc = max(best_valid_acc, valid_acc)
logging.info('best valid_acc %f', best_valid_acc)
writer.close()
def train(train_queue, model, criterion, optimizer):
objs = ig_utils.AvgrageMeter()
top1 = ig_utils.AvgrageMeter()
top5 = ig_utils.AvgrageMeter()
model.train()
for step, (input, target) in enumerate(train_queue):
input = input.cuda()
target = target.cuda(non_blocking=True)
optimizer.zero_grad()
logits, logits_aux = model(input)
loss = criterion(logits, target)
if args.auxiliary:
loss_aux = criterion(logits_aux, target)
loss += args.auxiliary_weight*loss_aux
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)
optimizer.step()
prec1, prec5 = ig_utils.accuracy(logits, target, topk=(1, 5))
n = input.size(0)
objs.update(loss.data, n)
top1.update(prec1.data, n)
top5.update(prec5.data, n)
if step % args.report_freq == 0:
logging.info('train %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
if args.fast:
logging.info('//// WARNING: FAST MODE')
break
return top1.avg, objs.avg
def infer(valid_queue, model, criterion):
objs = ig_utils.AvgrageMeter()
top1 = ig_utils.AvgrageMeter()
top5 = ig_utils.AvgrageMeter()
model.eval()
with torch.no_grad():
for step, (input, target) in enumerate(valid_queue):
input = input.cuda()
target = target.cuda(non_blocking=True)
logits, _ = model(input)
loss = criterion(logits, target)
prec1, prec5 = ig_utils.accuracy(logits, target, topk=(1, 5))
n = input.size(0)
objs.update(loss.data, n)
top1.update(prec1.data, n)
top5.update(prec5.data, n)
if step % args.report_freq == 0:
logging.info('valid %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
if args.fast:
logging.info('//// WARNING: FAST MODE')
break
return top1.avg, objs.avg
if __name__ == '__main__':
main()
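# Hedged invocation sketch (the script filename is assumed; the flags are the
# argparse options defined above, and torchvision downloads the dataset):
#
#   python train.py --dataset cifar10 --arch c100_s4_pgd \
#       --auxiliary --cutout --save exp1 --gpu auto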
|
11463591
|
KEYWORDS = ['cgi', ]
def rules(head='',context='',ip='',port='',productname={},keywords='',hackinfo=''):
if 'cgi-bin' in hackinfo or 'cgi-bin' in context:
return True
else:
return False
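# Hedged example (argument names mirror the plugin signature above; values are
# made up): the rule fires when 'cgi-bin' appears in the page body or hack info.
if __name__ == '__main__':
    print(rules(context='<a href="/cgi-bin/test.cgi">run</a>'))  # True
    print(rules(context='<html>plain page</html>'))              # False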
|
11463628
|
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import requests
class EmailSender(object):
def __init__(self, detail):
self.detail = detail
self.sender = None
self.msg_type = 'email'
def _get_server_connection(self):
email_host = self.detail.get('email_host')
email_port = int(self.detail.get('email_port'))
host_user = self.detail.get('host_user')
password = self.detail.get('password')
self.sender = host_user
try:
smtp = smtplib.SMTP(email_host, email_port)
smtp.starttls()
smtp.login(host_user, password)
        except Exception:
            # connection or auth failure: the caller gets a None server
            smtp = None
return smtp
def send_msg(self, msg, **kwargs):
smtp_server = self._get_server_connection()
if not smtp_server:
raise ValueError("The connection to the server of email sender failed")
msg_obj = MIMEMultipart()
content_body = MIMEText(msg)
send_to = kwargs.get('send_to', '')
subject = kwargs.get('subject', '')
source = kwargs.get('from', '')
copy_to = kwargs.get('copy_to', '')
reply_to = kwargs.get('reply_to')
if not send_to:
raise ValueError('The email is not valid in the email sender')
if not subject:
raise ValueError('The subject is not valid in the email sender')
if not isinstance(send_to, list):
send_to = [send_to, ]
if not isinstance(copy_to, list):
copy_to = [copy_to, ]
if not source:
source = self.sender
msg_obj['Subject'] = subject
msg_obj['From'] = source
msg_obj['To'] = ",".join(send_to)
msg_obj['Cc'] = copy_to and ",".join(copy_to) or ""
        if reply_to:
            msg_obj['Reply-to'] = reply_to
msg_obj.attach(content_body)
        receivers = copy_to and send_to + copy_to or send_to
        smtp_server.sendmail(self.sender, receivers, msg_obj.as_string())
smtp_server.quit()
class WechatSender(object):
def __init__(self, detail):
self.detail = detail
self.msg_type = 'wechat'
@property
def _headers(self):
return {
"Content-Type": "application/json"
}
def _format_msg(self, msg):
return {
"msgtype": "text",
"text": {
"content": msg,
}
}
def send_msg(self, msg):
webhook_url = self.detail.get('webhook_url')
requests.post(webhook_url, json=self._format_msg(msg), headers=self._headers)
def get_sender_by_account(account):
account_type = account.get('account_type', '')
detail = account.get('detail')
if account_type == 'email':
return EmailSender(detail)
if account_type == 'wechat_robot':
return WechatSender(detail)
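# Hedged usage sketch (the account layout follows get_sender_by_account above;
# the webhook URL is a placeholder, and the send itself is left commented out):
if __name__ == '__main__':
    account = {
        'account_type': 'wechat_robot',
        'detail': {'webhook_url': 'https://example.com/hook'},
    }
    sender = get_sender_by_account(account)
    # sender.send_msg('deploy finished')  # would POST the text to the webhook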
|
11463642
|
import collections.abc
from arekit.common.evaluation.cmp_opinions import OpinionCollectionsToCompare
class OpinionCollectionsToCompareUtils:
def __init__(self):
pass
@staticmethod
def iter_comparable_collections(doc_ids,
read_result_collection_func,
read_etalon_collection_func):
        assert(isinstance(doc_ids, collections.abc.Iterable))
for doc_id in doc_ids:
yield OpinionCollectionsToCompare(doc_id=doc_id,
read_result_collection_func=read_result_collection_func,
read_etalon_collection_func=read_etalon_collection_func)
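# Hedged usage sketch (the two reader callbacks are placeholders; each takes a
# document id and returns an opinion collection):
#
#   pairs = OpinionCollectionsToCompareUtils.iter_comparable_collections(
#       doc_ids=[1, 2, 3],
#       read_result_collection_func=read_predicted,
#       read_etalon_collection_func=read_gold)
#   for cmp_pair in pairs:
#       ...  # feed each pair to an evaluator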
|
11463674
|
PARSING_SCHEME = {
'name': 'a',
'conference': 'td[data-stat="conf_abbr"] a',
'games': 'td[data-stat="g"]:first',
'wins': 'td[data-stat="wins"]:first',
'losses': 'td[data-stat="losses"]:first',
'win_percentage': 'td[data-stat="win_loss_pct"]:first',
'conference_wins': 'td[data-stat="wins_conf"]:first',
'conference_losses': 'td[data-stat="losses_conf"]:first',
'conference_win_percentage': 'td[data-stat="win_loss_pct_conf"]:first',
'points_per_game': 'td[data-stat="points_per_g"]:first',
'points_against_per_game': 'td[data-stat="opp_points_per_g"]:first',
'simple_rating_system': 'td[data-stat="srs"]:first',
'strength_of_schedule': 'td[data-stat="sos"]:first',
'current_rank': 'td[data-stat="rank_current"]:first',
'preseason_rank': 'td[data-stat="rank_pre"]:first',
'highest_rank': 'td[data-stat="rank_min"]:first',
'pass_completions': 'td[data-stat="pass_cmp"]:first',
'opponents_pass_completions': 'td[data-stat="opp_pass_cmp"]:first',
'pass_attempts': 'td[data-stat="pass_att"]:first',
'opponents_pass_attempts': 'td[data-stat="opp_pass_att"]:first',
'pass_completion_percentage': 'td[data-stat="pass_cmp_pct"]:first',
'opponents_pass_completion_percentage':
'td[data-stat="opp_pass_cmp_pct"]:first',
'pass_yards': 'td[data-stat="pass_yds"]:first',
'opponents_pass_yards': 'td[data-stat="opp_pass_yds"]:first',
'interceptions': 'td[data-stat="pass_int"]:first',
'opponents_interceptions': 'td[data-stat="opp_pass_int"]:first',
'pass_touchdowns': 'td[data-stat="pass_td"]:first',
'opponents_pass_touchdowns': 'td[data-stat="opp_pass_td"]:first',
'rush_attempts': 'td[data-stat="rush_att"]:first',
'opponents_rush_attempts': 'td[data-stat="opp_rush_att"]:first',
'rush_yards': 'td[data-stat="rush_yds"]:first',
'opponents_rush_yards': 'td[data-stat="opp_rush_yds"]:first',
'rush_yards_per_attempt': 'td[data-stat="rush_yds_per_att"]:first',
'opponents_rush_yards_per_attempt':
'td[data-stat="opp_rush_yds_per_att"]:first',
'rush_touchdowns': 'td[data-stat="rush_td"]:first',
'opponents_rush_touchdowns': 'td[data-stat="opp_rush_td"]:first',
'plays': 'td[data-stat="tot_plays"]:first',
'opponents_plays': 'td[data-stat="opp_tot_plays"]:first',
'yards': 'td[data-stat="tot_yds"]:first',
'opponents_yards': 'td[data-stat="opp_tot_yds"]:first',
'turnovers': 'td[data-stat="turnovers"]:first',
'opponents_turnovers': 'td[data-stat="opp_turnovers"]:first',
'fumbles_lost': 'td[data-stat="fumbles_lost"]:first',
'opponents_fumbles_lost': 'td[data-stat="opp_fumbles_lost"]:first',
'yards_per_play': 'td[data-stat="tot_yds_per_play"]:first',
'opponents_yards_per_play': 'td[data-stat="opp_tot_yds_per_play"]:first',
'pass_first_downs': 'td[data-stat="first_down_pass"]:first',
'opponents_pass_first_downs': 'td[data-stat="opp_first_down_pass"]:first',
'rush_first_downs': 'td[data-stat="first_down_rush"]:first',
'opponents_rush_first_downs': 'td[data-stat="opp_first_down_rush"]:first',
'first_downs_from_penalties': 'td[data-stat="first_down_penalty"]:first',
'opponents_first_downs_from_penalties':
'td[data-stat="opp_first_down_penalty"]:first',
'first_downs': 'td[data-stat="first_down"]:first',
'opponents_first_downs': 'td[data-stat="opp_first_down"]:first',
'penalties': 'td[data-stat="penalty"]:first',
'opponents_penalties': 'td[data-stat="opp_penalty"]:first',
'yards_from_penalties': 'td[data-stat="penalty_yds"]:first',
'opponents_yards_from_penalties': 'td[data-stat="opp_penalty_yds"]:first'
}
SCHEDULE_SCHEME = {
'game': 'th[data-stat="g"]:first',
'date': 'td[data-stat="date_game"]:first',
'time': 'td[data-stat="time_game"]:first',
'day_of_week': 'td[data-stat="day_name"]:first',
'location': 'td[data-stat="game_location"]:first',
'rank': 'td[data-stat="school_name"]:first',
'opponent_rank': 'td[data-stat="opp_name"]:first',
'opponent_name': 'td[data-stat="opp_name"]:first',
'opponent_abbr': 'td[data-stat="opp_name"]:first',
'opponent_conference': 'td[data-stat="conf_abbr"]:first',
'result': 'td[data-stat="game_result"]:first',
'points_for': 'td[data-stat="points"]:first',
'points_against': 'td[data-stat="opp_points"]:first',
'wins': 'td[data-stat="wins"]:first',
'losses': 'td[data-stat="losses"]:first',
'streak': 'td[data-stat="game_streak"]:first'
}
BOXSCORE_SCHEME = {
'date': 'div[class="scorebox_meta"]:first',
'time': 'div[class="scorebox_meta"]:first',
'stadium': 'div[class="scorebox_meta"]:first',
'attendance': 'div[class="scorebox_meta"]:first',
'duration': 'div[class="scorebox_meta"]:first',
'home_name': 'a[itemprop="name"]:first',
'away_name': 'a[itemprop="name"]:last',
'away_points': 'div[class="scorebox"] div[class="score"]',
'away_first_downs': 'td[data-stat="vis_stat"]',
'away_rush_attempts': 'td[data-stat="vis_stat"]',
'away_rush_yards': 'td[data-stat="vis_stat"]',
'away_rush_touchdowns': 'td[data-stat="vis_stat"]',
'away_pass_completions': 'td[data-stat="vis_stat"]',
'away_pass_attempts': 'td[data-stat="vis_stat"]',
'away_pass_yards': 'td[data-stat="vis_stat"]',
'away_pass_touchdowns': 'td[data-stat="vis_stat"]',
'away_interceptions': 'td[data-stat="vis_stat"]',
'away_times_sacked': 'td[data-stat="vis_stat"]',
'away_yards_lost_from_sacks': 'td[data-stat="vis_stat"]',
'away_net_pass_yards': 'td[data-stat="vis_stat"]',
'away_total_yards': 'td[data-stat="vis_stat"]',
'away_fumbles': 'td[data-stat="vis_stat"]',
'away_fumbles_lost': 'td[data-stat="vis_stat"]',
'away_turnovers': 'td[data-stat="vis_stat"]',
'away_penalties': 'td[data-stat="vis_stat"]',
'away_yards_from_penalties': 'td[data-stat="vis_stat"]',
'away_third_down_conversions': 'td[data-stat="vis_stat"]',
'away_third_down_attempts': 'td[data-stat="vis_stat"]',
'away_fourth_down_conversions': 'td[data-stat="vis_stat"]',
'away_fourth_down_attempts': 'td[data-stat="vis_stat"]',
'away_time_of_possession': 'td[data-stat="vis_stat"]',
'home_points': 'div[class="scorebox"] div[class="score"]',
'home_first_downs': 'td[data-stat="home_stat"]',
'home_rush_attempts': 'td[data-stat="home_stat"]',
'home_rush_yards': 'td[data-stat="home_stat"]',
'home_rush_touchdowns': 'td[data-stat="home_stat"]',
'home_pass_completions': 'td[data-stat="home_stat"]',
'home_pass_attempts': 'td[data-stat="home_stat"]',
'home_pass_yards': 'td[data-stat="home_stat"]',
'home_pass_touchdowns': 'td[data-stat="home_stat"]',
'home_interceptions': 'td[data-stat="home_stat"]',
'home_times_sacked': 'td[data-stat="home_stat"]',
'home_yards_lost_from_sacks': 'td[data-stat="home_stat"]',
'home_net_pass_yards': 'td[data-stat="home_stat"]',
'home_total_yards': 'td[data-stat="home_stat"]',
'home_fumbles': 'td[data-stat="home_stat"]',
'home_fumbles_lost': 'td[data-stat="home_stat"]',
'home_turnovers': 'td[data-stat="home_stat"]',
'home_penalties': 'td[data-stat="home_stat"]',
'home_yards_from_penalties': 'td[data-stat="home_stat"]',
'home_third_down_conversions': 'td[data-stat="home_stat"]',
'home_third_down_attempts': 'td[data-stat="home_stat"]',
'home_fourth_down_conversions': 'td[data-stat="home_stat"]',
'home_fourth_down_attempts': 'td[data-stat="home_stat"]',
'home_time_of_possession': 'td[data-stat="home_stat"]'
}
# NOTE: BOXSCORE_SCHEME is redefined here; this second, trimmed mapping is the
# one that takes effect (it overrides the definition above).
BOXSCORE_SCHEME = {
'date': 'div[class="scorebox_meta"]:first',
'time': 'div[class="scorebox_meta"]:first',
'stadium': 'div[class="scorebox_meta"]:first',
'summary': 'table[class="linescore nohover stats_table no_freeze"]:first',
'home_name': 'a[itemprop="name"]:last',
'away_name': 'a[itemprop="name"]:first',
'away_points': 'div[class="scorebox"] div[class="score"]',
'away_first_downs': 'td[data-stat="vis_stat"]',
'away_rush_attempts': 'td[data-stat="vis_stat"]',
'away_rush_yards': 'td[data-stat="vis_stat"]',
'away_rush_touchdowns': 'td[data-stat="vis_stat"]',
'away_pass_completions': 'td[data-stat="vis_stat"]',
'away_pass_attempts': 'td[data-stat="vis_stat"]',
'away_pass_yards': 'td[data-stat="vis_stat"]',
'away_pass_touchdowns': 'td[data-stat="vis_stat"]',
'away_interceptions': 'td[data-stat="vis_stat"]',
'away_total_yards': 'td[data-stat="vis_stat"]',
'away_fumbles': 'td[data-stat="vis_stat"]',
'away_fumbles_lost': 'td[data-stat="vis_stat"]',
'away_turnovers': 'td[data-stat="vis_stat"]',
'away_penalties': 'td[data-stat="vis_stat"]',
'away_yards_from_penalties': 'td[data-stat="vis_stat"]',
'home_points': 'div[class="scorebox"] div[class="score"]',
'home_first_downs': 'td[data-stat="home_stat"]',
'home_rush_attempts': 'td[data-stat="home_stat"]',
'home_rush_yards': 'td[data-stat="home_stat"]',
'home_rush_touchdowns': 'td[data-stat="home_stat"]',
'home_pass_completions': 'td[data-stat="home_stat"]',
'home_pass_attempts': 'td[data-stat="home_stat"]',
'home_pass_yards': 'td[data-stat="home_stat"]',
'home_pass_touchdowns': 'td[data-stat="home_stat"]',
'home_interceptions': 'td[data-stat="home_stat"]',
'home_total_yards': 'td[data-stat="home_stat"]',
'home_fumbles': 'td[data-stat="home_stat"]',
'home_fumbles_lost': 'td[data-stat="home_stat"]',
'home_turnovers': 'td[data-stat="home_stat"]',
'home_penalties': 'td[data-stat="home_stat"]',
'home_yards_from_penalties': 'td[data-stat="home_stat"]',
}
BOXSCORE_ELEMENT_INDEX = {
'date': 0,
'time': 1,
'stadium': 2,
'away_points': 0,
'away_first_downs': 0,
'away_rush_attempts': 1,
'away_rush_yards': 1,
'away_rush_touchdowns': 1,
'away_pass_completions': 2,
'away_pass_attempts': 2,
'away_pass_yards': 2,
'away_pass_touchdowns': 2,
'away_interceptions': 2,
'away_total_yards': 3,
'away_fumbles': 4,
'away_fumbles_lost': 4,
'away_turnovers': 5,
'away_penalties': 6,
'away_yards_from_penalties': 6,
'home_points': 1,
'home_first_downs': 0,
'home_rush_attempts': 1,
'home_rush_yards': 1,
'home_rush_touchdowns': 1,
'home_pass_completions': 2,
'home_pass_attempts': 2,
'home_pass_yards': 2,
'home_pass_touchdowns': 2,
'home_interceptions': 2,
'home_total_yards': 3,
'home_fumbles': 4,
'home_fumbles_lost': 4,
'home_turnovers': 5,
'home_penalties': 6,
'home_yards_from_penalties': 6
}
# Designates the index of the item within the requested tag
BOXSCORE_ELEMENT_SUB_INDEX = {
'away_rush_attempts': 0,
'away_rush_yards': 1,
'away_rush_touchdowns': 2,
'away_pass_completions': 0,
'away_pass_attempts': 1,
'away_pass_yards': 2,
'away_pass_touchdowns': 3,
'away_interceptions': 4,
'away_fumbles': 0,
'away_fumbles_lost': 1,
'away_penalties': 0,
'away_yards_from_penalties': 1,
'home_rush_attempts': 0,
'home_rush_yards': 1,
'home_rush_touchdowns': 2,
'home_pass_completions': 0,
'home_pass_attempts': 1,
'home_pass_yards': 2,
'home_pass_touchdowns': 3,
'home_interceptions': 4,
'home_fumbles': 0,
'home_fumbles_lost': 1,
'home_penalties': 0,
'home_yards_from_penalties': 1
}
PLAYER_SCHEME = {
'summary': '[data-template="Partials/Teams/Summary"]',
'season': 'th[data-stat="year_id"]',
'name': 'h1[itemprop="name"]',
'team_abbreviation': 'td[data-stat="school_name"]',
'position': 'td[data-stat="pos"]',
'height': 'span[itemprop="height"]',
'weight': 'span[itemprop="weight"]',
'year': 'td[data-stat="class"]',
'games': 'td[data-stat="g"]',
'completed_passes': 'td[data-stat="pass_cmp"]',
'pass_attempts': 'td[data-stat="pass_att"]',
'passing_completion': 'td[data-stat="pass_cmp_pct"]',
'passing_touchdowns': 'td[data-stat="pass_td"]',
'interceptions_thrown': 'td[data-stat="pass_int"]',
'passing_yards_per_attempt': 'td[data-stat="pass_yds_per_att"]',
'adjusted_yards_per_attempt': 'td[data-stat="adj_pass_yds_per_att"]',
'quarterback_rating': 'td[data-stat="pass_rating"]',
'rush_attempts': 'td[data-stat="rush_att"]',
'rush_yards': 'td[data-stat="rush_yds"]',
'rush_yards_per_attempt': 'td[data-stat="rush_yds_per_att"]',
'rush_touchdowns': 'td[data-stat="rush_td"]',
'receptions': 'td[data-stat="rec"]',
'receiving_yards': 'td[data-stat="rec_yds"]',
'receiving_yards_per_reception': 'td[data-stat="rec_yds_per_rec"]',
'receiving_touchdowns': 'td[data-stat="rec_td"]',
'plays_from_scrimmage': 'td[data-stat="scrim_att"]',
'yards_from_scrimmage': 'td[data-stat="scrim_yds"]',
'yards_from_scrimmage_per_play': 'td[data-stat="scrim_yds_per_att"]',
'rushing_and_receiving_touchdowns': 'td[data-stat="scrim_td"]',
'solo_tackles': 'td[data-stat="tackles_solo"]',
'assists_on_tackles': 'td[data-stat="tackles_assists"]',
'total_tackles': 'td[data-stat="tackles_total"]',
'tackles_for_loss': 'td[data-stat="tackles_loss"]',
'sacks': 'td[data-stat="sacks"]',
'interceptions': 'td[data-stat="def_int"]',
'yards_returned_from_interceptions': 'td[data-stat="def_int_yds"]',
'yards_returned_per_interception': 'td[data-stat="def_int_yds_per_int"]',
'interceptions_returned_for_touchdown': 'td[data-stat="def_int_td"]',
'passes_defended': 'td[data-stat="pass_defended"]',
'fumbles_recovered': 'td[data-stat="fumbles_rec"]',
'yards_recovered_from_fumble': 'td[data-stat="fumbles_rec_yds"]',
'fumbles_recovered_for_touchdown': 'td[data-stat="fumbles_rec_td"]',
'fumbles_forced': 'td[data-stat="fumbles_forced"]',
'punt_return_touchdowns': 'td[data-stat="td_punt_ret"]',
'kickoff_return_touchdowns': 'td[data-stat="td_kick_ret"]',
'other_touchdowns': 'td[data-stat="td_other"]',
'total_touchdowns': 'td[data-stat="td_total"]',
'extra_points_made': 'td[data-stat="xpm"]',
'field_goals_made': 'td[data-stat="fgm"]',
'two_point_conversions': 'td[data-stat="two_pt_md"]',
'safeties': 'td[data-stat="safety_md"]',
'points': 'td[data-stat="points"]',
'passing_yards': 'td[data-stat="pass_yds"]',
'pass_yards_per_attempt': 'td[data-stat="pass_yds_per_att"]',
'kickoff_returns': 'td[data-stat="kick_ret"]',
'kickoff_return_yards': 'td[data-stat="kick_ret_yds"]',
'average_kickoff_return_yards': 'td[data-stat="kick_ret_yds_per_ret"]',
'punt_returns': 'td[data-stat="punt_ret"]',
'punt_return_yards': 'td[data-stat="punt_ret_yds"]',
    'average_punt_return_yards': 'td[data-stat="punt_ret_yds_per_ret"]',  # assumed per-return stat, mirroring the kickoff mapping above
'extra_points_attempted': 'td[data-stat="xpa"]',
'extra_point_percentage': 'td[data-stat="xp_pct"]',
'field_goals_attempted': 'td[data-stat="fga"]',
'field_goal_percentage': 'td[data-stat="fg_pct"]',
'points_kicking': 'td[data-stat="kick_points"]',
'punts': 'td[data-stat="punt"]',
'punting_yards': 'td[data-stat="punt_yds"]',
'punting_yards_per_attempt': 'td[data-stat="punt_yds_per_punt"]'
}
BOXSCORE_RETRY = {
'kickoff_return_touchdowns': 'td[data-stat="kick_ret_td"]',
'punt_return_touchdowns': 'td[data-stat="punt_ret_td"]'
}
RANKINGS_SCHEME = {
'name': 'td[data-stat="school_name"]',
'week': 'th[data-stat="week_poll"]',
'date': 'td[data-stat="date_poll"]',
'rank': 'td[data-stat="rank"]',
'previous': 'td[data-stat="rank_prev"]',
'change': 'td[data-stat="rank_diff"]'
}
SEASON_PAGE_URL = 'http://www.sports-reference.com/cfb/years/%s-standings.html'
OFFENSIVE_STATS_URL = ('https://www.sports-reference.com/cfb/years/'
'%s-team-offense.html')
DEFENSIVE_STATS_URL = ('https://www.sports-reference.com/cfb/years/'
'%s-team-defense.html')
SCHEDULE_URL = ('https://www.sports-reference.com/cfb/schools/%s/'
'%s-schedule.html')
BOXSCORE_URL = 'https://www.sports-reference.com/cfb/boxscores/%s.html'
BOXSCORES_URL = ('https://www.sports-reference.com/cfb/boxscores/index.cgi'
'?month=%s&day=%s&year=%s&conf_id=')
CONFERENCES_URL = 'https://www.sports-reference.com/cfb/years/%s.html'
CONFERENCE_URL = 'https://www.sports-reference.com/cfb/conferences/%s/%s.html'
CFP_RANKINGS_URL = 'https://www.sports-reference.com/cfb/years/%s-polls.html'
RANKINGS_URL = 'https://www.sports-reference.com/cfb/years/%s-polls.html'
PLAYER_URL = 'https://www.sports-reference.com/cfb/players/%s.html'
ROSTER_URL = 'https://www.sports-reference.com/cfb/schools/%s/%s-roster.html'
|
11463683
|
from flask import Flask, session
import meetyourmappers
app = Flask(__name__)
app.config.from_object("meetyourmappers.config")
try:
    # Optional local overrides; it is fine if the module does not exist.
    app.config.from_object("meetyourmappers.config_local")
except ImportError:
    pass
import meetyourmappers.views
|
11463704
|
from . import time
from . import grid
from .interp import interpNan
import numpy as np
def index2d(ind, ny, nx):
iy = np.floor(ind / nx)
ix = np.floor(ind % nx)
return int(iy), int(ix)
def fillNan(mat, mask):
temp = mat.copy()
temp[~mask] = np.nan
return temp
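# Hedged usage sketch (illustrative only, not part of the original module):
# index2d converts a flat index into (row, col) for an ny-by-nx grid, and
# fillNan blanks out entries of a matrix wherever the mask is False.
def _demo_index2d_fillnan():
    mat = np.arange(6, dtype=float).reshape(2, 3)
    mask = mat > 2                      # keep only values greater than 2
    iy, ix = index2d(4, ny=2, nx=3)     # flat index 4 maps to (1, 1)
    masked = fillNan(mat, mask)         # entries failing the mask become NaN
    return iy, ix, masked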
|
11463711
|
import os
from pathlib import Path
from unittest import mock
import click
import pytest
from cumulusci.cli.tests.utils import recursive_list_files, run_click_command
from cumulusci.core.dependencies.dependencies import PackageNamespaceVersionDependency
from cumulusci.core.exceptions import NotInProject
from cumulusci.utils import temporary_dir
from .. import project
from ..runtime import CliRuntime
class TestProjectCommands:
def test_validate_project_name(self):
with pytest.raises(click.UsageError):
project.validate_project_name("with spaces")
def test_validate_project_name__valid(self):
assert project.validate_project_name("valid") == "valid"
@mock.patch("cumulusci.cli.project.click")
def test_project_init(self, click):
with temporary_dir():
os.mkdir(".git")
Path(".git", "HEAD").write_text("ref: refs/heads/main")
click.prompt.side_effect = (
"testproj", # project_name
"testpkg", # package_name
"testns", # package_namespace
"43.0", # api_version
"mdapi", # source_format
"3", # extend other URL
"https://github.com/SalesforceFoundation/NPSP", # github_url
"main", # git_default_branch
"work/", # git_prefix_feature
"uat/", # git_prefix_beta
"rel/", # git_prefix_release
"%_TEST%", # test_name_match
"90", # code_coverage
)
click.confirm.side_effect = (
True,
True,
True,
) # is managed? extending? enforce Apex coverage?
runtime = CliRuntime(
config={"project": {"test": {"name_match": "%_TEST%"}}},
load_keychain=False,
)
run_click_command(project.project_init, runtime=runtime)
# Make sure expected files/dirs were created
assert [
".git/",
".git/HEAD",
".github/",
".github/PULL_REQUEST_TEMPLATE.md",
".gitignore",
"README.md",
"cumulusci.yml",
"datasets/",
"datasets/mapping.yml",
"orgs/",
"orgs/beta.json",
"orgs/dev.json",
"orgs/feature.json",
"orgs/release.json",
"robot/",
"robot/testproj/",
"robot/testproj/doc/",
"robot/testproj/resources/",
"robot/testproj/tests/",
"robot/testproj/tests/create_contact.robot",
"sfdx-project.json",
"src/",
] == recursive_list_files()
@mock.patch("cumulusci.cli.project.click")
def test_project_init_tasks(self, click):
"""Verify that the generated cumulusci.yml file is readable and has the proper robot task"""
with temporary_dir():
os.mkdir(".git")
Path(".git", "HEAD").write_text("ref: refs/heads/main")
click.prompt.side_effect = (
"testproj", # project_name
"testpkg", # package_name
"testns", # package_namespace
"43.0", # api_version
"mdapi", # source_format
"3", # extend other URL
"https://github.com/SalesforceFoundation/NPSP", # github_url
"main", # git_default_branch
"work/", # git_prefix_feature
"uat/", # git_prefix_beta
"rel/", # git_prefix_release
"%_TEST%", # test_name_match
"90", # code_coverage
)
click.confirm.side_effect = (
True,
True,
True,
) # is managed? extending? enforce code coverage?
run_click_command(project.project_init)
# verify we can load the generated yml
cli_runtime = CliRuntime(load_keychain=False)
# ...and verify it has the expected tasks
config = cli_runtime.project_config.config_project
expected_tasks = {
"robot": {
"options": {
"suites": "robot/testproj/tests",
"options": {"outputdir": "robot/testproj/results"},
}
},
"robot_testdoc": {
"options": {
"path": "robot/testproj/tests",
"output": "robot/testproj/doc/testproj_tests.html",
}
},
"run_tests": {"options": {"required_org_code_coverage_percent": 90}},
}
assert config["tasks"] == expected_tasks
def test_project_init_no_git(self):
with temporary_dir():
with pytest.raises(click.ClickException):
run_click_command(project.project_init)
def test_project_init_already_initted(self):
with temporary_dir():
os.mkdir(".git")
Path(".git", "HEAD").write_text("ref: refs/heads/main")
with open("cumulusci.yml", "w"):
pass # create empty file
with pytest.raises(click.ClickException):
run_click_command(project.project_init)
def test_project_init_dont_overwrite(self):
with temporary_dir():
# Gotta have a Repo
os.mkdir(".git")
Path(".git", "HEAD").write_text("ref: refs/heads/main")
os.mkdir("orgs")
orgs = "orgs/"
text = "Can't touch this"
path_list = [
Path("README.md"),
Path(".gitignore"),
Path(orgs + "dev.json"),
Path(orgs + "release.json"),
]
for path in path_list:
path.write_text(text)
runtime = mock.Mock()
runtime.project_config.project = {"test": "test"}
run_click_command(project.project_info, runtime=runtime)
# Project init must not overwrite project files or org defs
for path in path_list:
assert text == path.read_text()
@mock.patch("click.echo")
def test_project_info(self, echo):
runtime = mock.Mock()
runtime.project_config.project = {"test": "test"}
run_click_command(project.project_info, runtime=runtime)
echo.assert_called_once_with("\x1b[1mtest:\x1b[0m test")
def test_project_info__outside_project(self):
runtime = mock.Mock()
runtime.project_config = None
runtime.project_config_error = NotInProject()
with temporary_dir():
with pytest.raises(NotInProject):
run_click_command(project.project_info, runtime=runtime)
@mock.patch("cumulusci.cli.project.get_static_dependencies")
def test_project_dependencies(self, get_static_dependencies):
out = []
runtime = mock.Mock()
runtime.project_config.project__dependencies = [
{"namespace": "npe01", "version": "3.16"},
{"namespace": "npsp", "version": "3.193"},
]
get_static_dependencies.return_value = [
PackageNamespaceVersionDependency(namespace="npe01", version="3.16"),
PackageNamespaceVersionDependency(namespace="npsp", version="3.193"),
]
with mock.patch("click.echo", out.append):
run_click_command(
project.project_dependencies,
runtime=runtime,
resolution_strategy="production",
)
assert out == [
str(PackageNamespaceVersionDependency(namespace="npe01", version="3.16")),
str(PackageNamespaceVersionDependency(namespace="npsp", version="3.193")),
]
def test_render_recursive(self):
out = []
with mock.patch("click.echo", out.append):
project.render_recursive(
{"test": [{"list": ["list"], "dict": {"key": "value"}, "str": "str"}]}
)
assert """\x1b[1mtest:\x1b[0m
-
\x1b[1mlist:\x1b[0m
- list
\x1b[1mdict:\x1b[0m
\x1b[1mkey:\x1b[0m value
\x1b[1mstr:\x1b[0m str""" == "\n".join(
out
)
|
11463738
|
import pandas as pd
import numpy as np
import scipy as sp
import os
import errno
from sklearn.decomposition import PCA
import umap.distances as dist
from sklearn.utils.extmath import svd_flip
from sklearn.utils import check_array, check_random_state
from scipy import sparse
import sklearn.utils.sparsefuncs as sf
from umap.umap_ import nearest_neighbors
__version__ = "0.8.7"
def find_corr_genes(sam, input_gene):
"""Rank genes by their spatially averaged expression pattern correlations to
a desired gene.
Parameters
----------
sam - SAM
The analyzed SAM object
input_gene - string
The gene ID with respect to which correlations will be computed.
Returns
-------
A ranked list of gene IDs based on correlation to the input gene.
"""
all_gene_names = np.array(list(sam.adata.var_names))
D_avg = sam.adata.layers["X_knn_avg"]
input_gene = np.where(all_gene_names == input_gene)[0]
if input_gene.size == 0:
print(
"Gene note found in the filtered dataset. Note "
"that genes are case sensitive."
)
return
pw_corr = generate_correlation_map(D_avg.T.A, D_avg[:, input_gene].T.A)
return all_gene_names[np.argsort(-pw_corr.flatten())]
def _pca_with_sparse(X, npcs, solver='arpack', mu=None, seed=0):
random_state = check_random_state(seed)
np.random.set_state(random_state.get_state())
random_init = np.random.rand(np.min(X.shape))
X = check_array(X, accept_sparse=['csr', 'csc'])
if mu is None:
mu = X.mean(0).A.flatten()[None, :]
mdot = mu.dot
mmat = mdot
mhdot = mu.T.dot
mhmat = mu.T.dot
Xdot = X.dot
Xmat = Xdot
XHdot = X.T.conj().dot
XHmat = XHdot
ones = np.ones(X.shape[0])[None, :].dot
def matvec(x):
return Xdot(x) - mdot(x)
def matmat(x):
return Xmat(x) - mmat(x)
def rmatvec(x):
return XHdot(x) - mhdot(ones(x))
def rmatmat(x):
return XHmat(x) - mhmat(ones(x))
XL = sp.sparse.linalg.LinearOperator(
matvec=matvec,
dtype=X.dtype,
matmat=matmat,
shape=X.shape,
rmatvec=rmatvec,
rmatmat=rmatmat,
)
u, s, v = sp.sparse.linalg.svds(XL, solver=solver, k=npcs, v0=random_init)
u, v = svd_flip(u, v)
idx = np.argsort(-s)
v = v[idx, :]
X_pca = (u * s)[:, idx]
ev = s[idx] ** 2 / (X.shape[0] - 1)
total_var = sf.mean_variance_axis(X, axis=0)[1].sum()
ev_ratio = ev / total_var
output = {
'X_pca': X_pca,
'variance': ev,
'variance_ratio': ev_ratio,
'components': v,
}
return output
def nearest_neighbors_wrapper(X,n_neighbors=15,metric='correlation',metric_kwds={},angular=True,random_state=0):
random_state=np.random.RandomState(random_state)
return nearest_neighbors(X,n_neighbors,metric,metric_kwds,angular,random_state)[:2]
def knndist(nnma):
x, y = nnma.nonzero()
data = nnma.data
knn = y.reshape((nnma.shape[0], nnma[0, :].data.size))
val = data.reshape(knn.shape)
return knn, val
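# Hedged usage sketch (illustrative only): knndist unpacks a sparse kNN matrix
# that stores a fixed number of neighbors per row into dense (indices, values)
# arrays of shape (n_samples, k).
def _demo_knndist():
    row = np.repeat(np.arange(3), 2)    # 3 samples, 2 neighbors each
    col = np.array([1, 2, 0, 2, 0, 1])
    dat = np.array([0.1, 0.2, 0.1, 0.3, 0.2, 0.3])
    nnma = sparse.csr_matrix((dat, (row, col)), shape=(3, 3))
    knn, val = knndist(nnma)            # both arrays have shape (3, 2)
    return knn, val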
def save_figures(filename, fig_IDs=None, **kwargs):
"""
Save figures.
Parameters
----------
filename - str
Name of output file
fig_IDs - int, numpy.array, list, optional, default None
A list of open figure IDs or a figure ID that will be saved to a
pdf/png file respectively.
**kwargs -
Extra keyword arguments passed into 'matplotlib.pyplot.savefig'.
"""
import matplotlib.pyplot as plt
if fig_IDs is not None:
        if isinstance(fig_IDs, list):
savetype = "pdf"
else:
savetype = "png"
else:
savetype = "pdf"
if savetype == "pdf":
from matplotlib.backends.backend_pdf import PdfPages
if len(filename.split(".")) == 1:
filename = filename + ".pdf"
else:
filename = ".".join(filename.split(".")[:-1]) + ".pdf"
pdf = PdfPages(filename)
if fig_IDs is None:
figs = [plt.figure(n) for n in plt.get_fignums()]
else:
figs = [plt.figure(n) for n in fig_IDs]
for fig in figs:
fig.savefig(pdf, format="pdf", **kwargs)
pdf.close()
elif savetype == "png":
plt.figure(fig_IDs).savefig(filename, **kwargs)
def weighted_PCA(mat, do_weight=True, npcs=None, solver="auto",seed = 0):
# mat = (mat - np.mean(mat, axis=0))
if do_weight:
if min(mat.shape) >= 10000 and npcs is None:
print(
"More than 10,000 cells. Running with 'npcs' set to < 1000 is"
" recommended."
)
if npcs is None:
ncom = min(mat.shape)
else:
ncom = min((min(mat.shape), npcs))
pca = PCA(svd_solver=solver, n_components=ncom,random_state=check_random_state(seed))
reduced = pca.fit_transform(mat)
scaled_eigenvalues = pca.explained_variance_
scaled_eigenvalues = scaled_eigenvalues / scaled_eigenvalues.max()
reduced_weighted = reduced * scaled_eigenvalues[None, :] ** 0.5
else:
pca = PCA(n_components=npcs, svd_solver=solver,random_state=check_random_state(seed))
reduced = pca.fit_transform(mat)
if reduced.shape[1] == 1:
pca = PCA(n_components=2, svd_solver=solver,random_state=check_random_state(seed))
reduced = pca.fit_transform(mat)
reduced_weighted = reduced
return reduced_weighted, pca
def transform_wPCA(mat, pca):
mat = mat - pca.mean_
reduced = mat.dot(pca.components_.T)
v = pca.explained_variance_ # .var(0)
scaled_eigenvalues = v / v.max()
reduced_weighted = np.array(reduced) * scaled_eigenvalues[None, :] ** 0.5
return reduced_weighted
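# Hedged usage sketch (illustrative only): fit the eigenvalue-weighted PCA on
# one matrix and project a second matrix into the same weighted space.
def _demo_weighted_pca():
    rng = np.random.RandomState(0)
    X = rng.rand(50, 5)
    reduced, pca = weighted_PCA(X, npcs=3)
    projected = transform_wPCA(rng.rand(10, 5), pca)
    return reduced.shape, projected.shape   # (50, 3) and (10, 3)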
def search_string(vec, s, case_sensitive=False, invert=False):
vec = np.array(vec)
if isinstance(s,list):
S = s
else:
S = [s]
V=[]; M=[]
for s in S:
m = []
if not case_sensitive:
s = s.lower()
for i in range(len(vec)):
if case_sensitive:
st = vec[i]
else:
st = vec[i].lower()
b = st.find(s)
if not invert and b != -1 or invert and b == -1:
m.append(i)
if len(m) > 0:
V.append(vec[np.array(m)]); M.append(np.array(m))
if len(V)>0:
i = len(V)
if not invert:
V = np.concatenate(V); M = np.concatenate(M);
if i > 1:
ix = np.sort(np.unique(M,return_index=True)[1])
V=V[ix]; M=M[ix];
else:
for i in range(len(V)):
V[i]=list(set(V[i]).intersection(*V))
V = vec[np.in1d(vec,np.unique(np.concatenate(V)))]
M = np.array([np.where(vec==x)[0][0] for x in V])
return V,M
else:
return -1,-1
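# Hedged usage sketch (illustrative only): case-insensitive substring search
# over an array of gene-like names, returning matches and their indices.
def _demo_search_string():
    names = np.array(['Sox2', 'SOX9', 'Actb', 'Gapdh'])
    V, M = search_string(names, 'sox')
    return V, M                             # (['Sox2', 'SOX9'], [0, 1])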
def distance_matrix_error(dist1, dist2):
s = 0
for k in range(dist1.shape[0]):
s += np.corrcoef(dist1[k, :], dist2[k, :])[0, 1]
return 1 - s / dist1.shape[0]
def generate_euclidean_map(A, B):
a = (A ** 2).sum(1).flatten()
b = (B ** 2).sum(1).flatten()
x = a[:, None] + b[None, :] - 2 * np.dot(A, B.T)
x[x < 0] = 0
return np.sqrt(x)
def generate_correlation_map(x, y):
mu_x = x.mean(1)
mu_y = y.mean(1)
n = x.shape[1]
if n != y.shape[1]:
raise ValueError("x and y must " + "have the same number of timepoints.")
s_x = x.std(1, ddof=n - 1)
s_y = y.std(1, ddof=n - 1)
s_x[s_x == 0] = 1
s_y[s_y == 0] = 1
cov = np.dot(x, y.T) - n * np.dot(mu_x[:, None], mu_y[None, :])
return cov / np.dot(s_x[:, None], s_y[None, :])
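# Hedged usage sketch (illustrative only): the pairwise correlation map agrees
# with the cross block of np.corrcoef when rows are observations.
def _demo_generate_correlation_map():
    rng = np.random.RandomState(0)
    x, y = rng.rand(3, 8), rng.rand(2, 8)
    pw = generate_correlation_map(x, y)     # shape (3, 2)
    ref = np.corrcoef(x, y)[:3, 3:]
    return np.allclose(pw, ref)             # True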
def extract_annotation(cn, x, c="_"):
m = []
if x is not None:
for i in range(cn.size):
f = cn[i].split(c)
x = min(len(f) - 1, x)
m.append(f[x])
return np.array(m)
else:
ms = []
ls = []
for i in range(cn.size):
f = cn[i].split(c)
m = []
for x in range(len(f)):
m.append(f[x])
ms.append(m)
ls.append(len(m))
ml = max(ls)
for i in range(len(ms)):
ms[i].extend([""] * (ml - len(ms[i])))
if ml - len(ms[i]) > 0:
ms[i] = np.concatenate(ms[i])
ms = np.vstack(ms)
MS = []
for i in range(ms.shape[1]):
MS.append(ms[:, i])
return MS
def isolate(dt, x1, x2, y1, y2):
return np.where(
np.logical_and(
np.logical_and(dt[:, 0] > x1, dt[:, 0] < x2),
np.logical_and(dt[:, 1] > y1, dt[:, 1] < y2),
)
)[0]
def to_lower(y):
x = y.copy().flatten()
for i in range(x.size):
x[i] = x[i].lower()
return x
def to_upper(y):
x = y.copy().flatten()
for i in range(x.size):
x[i] = x[i].upper()
return x
def create_folder(path):
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def convert_annotations(A):
x = np.unique(A)
y = np.zeros(A.size)
z = 0
for i in x:
y[A == i] = z
z += 1
return y.astype("int")
def nearest_neighbors_hnsw(x,ef=200,M=48,n_neighbors = 100):
import hnswlib
labels = np.arange(x.shape[0])
p = hnswlib.Index(space = 'cosine', dim = x.shape[1])
p.init_index(max_elements = x.shape[0], ef_construction = ef, M = M)
p.add_items(x, labels)
p.set_ef(ef)
idx, dist = p.knn_query(x, k = n_neighbors)
return idx,dist
def calc_nnm(g_weighted, k, distance=None):
if g_weighted.shape[0] > 0:
if distance == 'cosine':
nnm, dists = nearest_neighbors_hnsw(g_weighted, n_neighbors=k)
else:
nnm, dists = nearest_neighbors_wrapper(g_weighted, n_neighbors=k, metric=distance)
EDM = gen_sparse_knn(nnm, dists)
EDM = EDM.tocsr()
return EDM
def compute_distances(A, dm):
if dm == "euclidean":
m = np.dot(A, A.T)
h = np.diag(m)
x = h[:, None] + h[None, :] - 2 * m
x[x < 0] = 0
dist = np.sqrt(x)
elif dm == "correlation":
dist = 1 - np.corrcoef(A)
else:
dist = sp.spatial.distance.squareform(sp.spatial.distance.pdist(A, metric=dm))
return dist
def dist_to_nn(d, K): # , offset = 0):
E = d.copy()
np.fill_diagonal(E, -1)
M = np.max(E) * 2
x = np.argsort(E, axis=1)[:, :K] # offset:K+offset]
E[
np.tile(
np.arange(E.shape[0]).reshape(E.shape[0], -1), (1, x.shape[1])
).flatten(),
x.flatten(),
] = M
E[E < M] = 0
E[E > 0] = 1
return E # ,x
def to_sparse_knn(D1, k):
for i in range(D1.shape[0]):
x = D1.data[D1.indptr[i] : D1.indptr[i + 1]]
idx = np.argsort(x)
if idx.size > k:
x[idx[:-k]] = 0
D1.data[D1.indptr[i] : D1.indptr[i + 1]] = x
D1.eliminate_zeros()
return D1
def gen_sparse_knn(knni, knnd, shape=None):
if shape is None:
shape = (knni.shape[0], knni.shape[0])
D1 = sp.sparse.lil_matrix(shape)
D1[
np.tile(np.arange(knni.shape[0])[:, None], (1, knni.shape[1])).flatten().astype('int32'),
knni.flatten().astype('int32'),
] = knnd.flatten()
D1 = D1.tocsr()
return D1
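# Hedged usage sketch (illustrative only): build a sparse kNN graph from index
# and weight arrays, then trim each row to its single largest-weight entry.
def _demo_sparse_knn():
    knni = np.array([[1, 2], [0, 2], [0, 1]])
    knnw = np.array([[0.9, 0.4], [0.9, 0.8], [0.4, 0.8]])
    D1 = gen_sparse_knn(knni, knnw)         # 3x3 CSR with two entries per row
    D1 = to_sparse_knn(D1, k=1)             # keep the k largest values per row
    return D1.toarray()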
|
11463742
|
if __name__ == "__main__":
with open("./lnbits/.env") as env_file:
env = {}
for line in env_file.readlines():
if "=" in line:
key, value = line.split("=", 1)
env[key] = value
user = input("Input your user ID: ")
allowed_users = env['LNBITS_ALLOWED_USERS'].strip('"\n')
if not allowed_users:
env['LNBITS_ALLOWED_USERS'] = f'"{user}"\n'
else:
env['LNBITS_ALLOWED_USERS'] = f'"{allowed_users},{user}"\n'
with open("./lnbits/.env", "w") as env_file:
for key, value in env.items():
env_file.write(f'{key}={value}')
print("User added to LNBITS_ALLOWED_USERS!")
|
11463743
|
import tests.hakoblog # noqa: F401
from datetime import datetime
from hakoblog.model.entry import Entry
def test_init():
    now = datetime.now()
entry = Entry(
id=0,
blog_id=1,
title='こんにちは',
body='今日は天気が良いですね。',
created=now,
modified=now,
)
assert entry.id == 0
assert entry.blog_id == 1
assert entry.title == 'こんにちは'
assert entry.body == '今日は天気が良いですね。'
assert entry.created == now
assert entry.modified == now
|
11463761
|
from .Zmod import Zmod, ZmodElement
class FiniteField(Zmod):
"""
Finite Field Class
"""
def __init__(s, p):
"""
Constructor of FiniteField
p should be prime
"""
s.n = s.p = p
s.element_class = ZmodElement
def __str__(s):
return Zmod.__str__(s, "p")
|
11463782
|
from global_utils import print_summary
from options import parse_options
from global_utils import set_global_seed, save_performance, plot_data
import time
from agent_env_params import design_agent_and_env
from multiprocessing import Process
import random
from environment import Environment
from agent import Agent
def run_HAC(FLAGS,env,agent, plot_figure=False, num=0):
    from global_utils import save_plot_figure  # imported here for multiprocessing
NUM_EPOCH = FLAGS.num_epochs
SAVE_FREQ = FLAGS.save_freq
# Print task summary
print_summary(FLAGS, env)
if not FLAGS.test:
num_episodes = FLAGS.num_exploration_episodes
else:
num_episodes = FLAGS.num_test_episodes
NUM_EPOCH = 1 # only test 1 epoch
performance_list = []
test_performance_list = []
if FLAGS.curriculum >= 2:
curriculum_epoch = NUM_EPOCH / FLAGS.curriculum
assert curriculum_epoch == int(curriculum_epoch), 'NUM_EPOCH / FLAGS.curriculum should be int'
for epoch in range(1, NUM_EPOCH + 1):
successful_episodes = 0
if not FLAGS.test and FLAGS.curriculum >= 2:
env.set_goal_range(env_params['curriculum_list'][int((epoch - 1) // curriculum_epoch)])
for episode in range(num_episodes):
print("\nEpoch %d, Episode %d" % (epoch, episode))
# Train for an epoch
success = agent.train(env, epoch * num_episodes + episode,test=FLAGS.test)
if success:
print("End Goal Achieved\n")
successful_episodes += 1
# Save agent
if epoch % SAVE_FREQ == 0 and not FLAGS.test and FLAGS.threadings == 1:
agent.save_model(epoch * num_episodes)
success_rate = successful_episodes / num_episodes * 100
print("\nEpoch %d, Success Rate %.2f%%" % (epoch, success_rate))
performance_list.append(success_rate)
if not FLAGS.test:
success_test = 0
if FLAGS.curriculum >= 2:
env.set_goal_range(env_params['curriculum_list'][-1])
print('\ntesting for %d episodes' % (FLAGS.num_test_episodes))
for episode in range(FLAGS.num_test_episodes):
success = agent.train(env, episode, test=True)
success_test += int(success)
success_rate = success_test / FLAGS.num_test_episodes * 100
print('testing accuracy: %.2f%%' % (success_rate))
test_performance_list.append(success_test)
if plot_figure:
save_plot_figure(performance_list)
save_plot_figure(test_performance_list, name='test-performance.jpg')
save_performance(performance_list, test_performance_list, FLAGS=FLAGS, thread_num=num)
if FLAGS.save_experience:
agent.save_experience()
def worker(agent_params, env_params, FLAGS, i):
seed = int(time.time()) + random.randint(0, 100)
set_global_seed(seed)
FLAGS.seed = seed
env = Environment(env_params, FLAGS)
agent = Agent(FLAGS, env, agent_params)
run_HAC(FLAGS, env, agent, plot_figure=False, num=i)
FLAGS = parse_options()
agent_params, env_params = design_agent_and_env(FLAGS)
assert FLAGS.threadings >= 1, "threadings should be at least 1!"
if FLAGS.threadings == 1:
seed = int(time.time()) + random.randint(0,100)
set_global_seed(seed)
FLAGS.seed = seed
env = Environment(env_params, FLAGS)
agent = Agent(FLAGS, env, agent_params)
run_HAC(FLAGS, env, agent, plot_figure=True)
else:
# parallel run
thread_list = []
for i in range(FLAGS.threadings):
p = Process(target=worker, args=(agent_params, env_params, FLAGS, i))
p.start()
thread_list.append(p)
for p in thread_list:
p.join()
|
11463792
|
import numpy as np
# --------------------------------------------------------------
# Rotation Functions
# --------------------------------------------------------------
def rotxM(theta):
"""Return x rotation matrix"""
theta = theta * np.pi / 180
M = [[1, 0, 0], [0, np.cos(theta), -np.sin(theta)], [0, np.sin(theta), np.cos(theta)]]
return M
def rotyM(theta):
"""Return y rotation matrix"""
theta = theta * np.pi / 180
M = [[np.cos(theta), 0, np.sin(theta)], [0, 1, 0], [-np.sin(theta), 0, np.cos(theta)]]
return M
def rotzM(theta):
"""Return z rotation matrix"""
theta = theta * np.pi / 180
M = [[np.cos(theta), -np.sin(theta), 0], [np.sin(theta), np.cos(theta), 0], [0, 0, 1]]
return M
def rotxV(x, theta):
"""Rotate a coordinate in the local x frame"""
M = [[1, 0, 0], [0, np.cos(theta), -np.sin(theta)], [0, np.sin(theta), np.cos(theta)]]
return np.dot(M, x)
def rotyV(x, theta):
"""Rotate a coordinate in the local y frame"""
M = [[np.cos(theta), 0, np.sin(theta)], [0, 1, 0], [-np.sin(theta), 0, np.cos(theta)]]
return np.dot(M, x)
def rotzV(x, theta):
"""Roate a coordinate in the local z frame"""
M = [[np.cos(theta), -np.sin(theta), 0], [np.sin(theta), np.cos(theta), 0], [0, 0, 1]]
return np.dot(M, x)
def rotVbyW(V, W, theta):
"""Rotate a vector V, about an axis W by angle theta"""
ux = W[0]
uy = W[1]
uz = W[2]
c = np.cos(theta)
s = np.sin(theta)
if (
np.array(theta).dtype == np.dtype("D")
or np.array(W).dtype == np.dtype("D")
or np.array(V).dtype == np.dtype("D")
):
dtype = "D"
else:
dtype = "d"
R = np.zeros((3, 3), dtype)
R[0, 0] = ux ** 2 + (1 - ux ** 2) * c
R[0, 1] = ux * uy * (1 - c) - uz * s
R[0, 2] = ux * uz * (1 - c) + uy * s
R[1, 0] = ux * uy * (1 - c) + uz * s
R[1, 1] = uy ** 2 + (1 - uy ** 2) * c
R[1, 2] = uy * uz * (1 - c) - ux * s
R[2, 0] = ux * uz * (1 - c) - uy * s
R[2, 1] = uy * uz * (1 - c) + ux * s
R[2, 2] = uz ** 2 + (1 - uz ** 2) * c
return np.dot(R, V)
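# Hedged usage sketch (illustrative only): rotVbyW takes theta in radians
# (unlike the rotxM/rotyM/rotzM helpers above, which convert from degrees);
# a quarter turn of the x unit vector about the z axis yields the y unit vector.
def _demo_rotVbyW():
    v = rotVbyW([1.0, 0.0, 0.0], [0.0, 0.0, 1.0], np.pi / 2)
    return np.allclose(v, [0.0, 1.0, 0.0])  # True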
# --------------------------------------------------------------
# Array Rotation and Flipping Functions
# --------------------------------------------------------------
def rotateCCW(inArray):
"""Rotate the inArray array 90 degrees CCW"""
rows = inArray.shape[0]
cols = inArray.shape[1]
output = np.empty([cols, rows], inArray.dtype)
for row in range(rows):
for col in range(cols):
output[cols - col - 1][row] = inArray[row][col]
return output
def rotateCW(inArray):
"""Rotate the inArray array 90 degrees CW"""
rows = inArray.shape[0]
cols = inArray.shape[1]
output = np.empty([cols, rows], inArray.dtype)
for row in range(rows):
for col in range(cols):
output[col][rows - row - 1] = inArray[row][col]
return output
def reverseRows(inArray):
"""Flip Rows (horizontally)"""
rows = inArray.shape[0]
cols = inArray.shape[1]
output = np.empty([rows, cols], inArray.dtype)
for row in range(rows):
output[row] = inArray[row][::-1].copy()
return output
def reverseCols(inArray):
"""Flip Cols (vertically)"""
rows = inArray.shape[0]
cols = inArray.shape[1]
output = np.empty([rows, cols], inArray.dtype)
for col in range(cols):
output[:, col] = inArray[:, col][::-1].copy()
return output
def orientArray(index, inArray):
"""Take an input array inArray, and rotate/flip according to the index
output from quadOrientation (in orientation.py)"""
if index == 0:
outArray = inArray.copy()
elif index == 1:
outArray = rotateCCW(inArray)
outArray = rotateCCW(outArray)
outArray = reverseRows(outArray)
elif index == 2:
outArray = reverseRows(inArray)
elif index == 3:
outArray = rotateCCW(inArray) # Verified working
outArray = rotateCCW(outArray)
elif index == 4:
outArray = rotateCW(inArray)
outArray = reverseRows(outArray)
elif index == 5:
outArray = rotateCCW(inArray)
elif index == 6:
outArray = rotateCW(inArray)
elif index == 7:
outArray = rotateCCW(inArray)
outArray = reverseRows(outArray)
return outArray
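# Hedged usage sketch (illustrative only): orientation index 5 is a single CCW
# quarter turn, which matches numpy's rot90 on a small test array.
def _demo_orientArray():
    arr = np.arange(6).reshape(2, 3)
    return np.array_equal(orientArray(5, arr), np.rot90(arr))  # True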
|
11463806
|
class Cache:
def __init__(self, application, store_config=None):
self.application = application
self.drivers = {}
self.store_config = store_config or {}
self.options = {}
def add_driver(self, name, driver):
self.drivers.update({name: driver})
def set_configuration(self, config):
self.store_config = config
return self
def get_driver(self, name=None):
if name is None:
return self.drivers[self.store_config.get("default")]
return self.drivers[name]
def get_store_config(self, name=None):
if name is None or name == "default":
return self.store_config.get(self.store_config.get("default"))
return self.store_config.get(name)
def get_config_options(self, name=None):
if name is None or name == "default":
return self.store_config.get(self.store_config.get("default"))
return self.store_config.get(name)
def store(self, name="default"):
store_config = self.get_config_options(name)
driver = self.get_driver(self.get_config_options(name).get("driver"))
return driver.set_options(store_config)
def add(self, *args, store=None, **kwargs):
return self.store(name=store).add(*args, **kwargs)
def get(self, *args, store=None, **kwargs):
return self.store(name=store).get(*args, **kwargs)
def put(self, *args, store=None, **kwargs):
return self.store(name=store).put(*args, **kwargs)
def has(self, *args, store=None, **kwargs):
return self.store(name=store).has(*args, **kwargs)
def forget(self, *args, store=None, **kwargs):
return self.store(name=store).forget(*args, **kwargs)
def increment(self, *args, store=None, **kwargs):
return self.store(name=store).increment(*args, **kwargs)
def decrement(self, *args, store=None, **kwargs):
return self.store(name=store).decrement(*args, **kwargs)
def flush(self, *args, store=None, **kwargs):
return self.store(name=store).flush(*args, **kwargs)
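# Hedged usage sketch (illustrative only): wiring a driver into the Cache
# manager; "application", "redis_driver", and the config keys below are
# assumptions for demonstration, not part of this module.
def _demo_cache_setup(application, redis_driver):
    cache = Cache(application, store_config={
        "default": "redis",
        "redis": {"driver": "redis", "host": "localhost"},
    })
    cache.add_driver("redis", redis_driver)     # driver must expose set_options()
    return cache.store()                        # resolves the default store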
|
11463809
|
from template import testcase, base_test, trace_list
from pathlib import Path
import bitmath
import csv
class test(base_test):
def generate_trace(self, pattern: str, sz: int, testcase_out_path, repeat, cache_trace: bool) -> Path:
generate_trace = False
if cache_trace:
trace_file_path = self.basedata['out_path'].parent / \
'trace' / 'bandwidth' / pattern / f'{sz}.trace.txt'
if not trace_file_path.exists():
trace_file_path.resolve().parent.mkdir(parents=True, exist_ok=True)
generate_trace = True
else:
trace_file_path = testcase_out_path / 'trace.txt'
generate_trace = True
if generate_trace:
if self.metadata['input']['idle_clk']:
clk = self.metadata['input']['idle_clk']
else:
clk = 0
trace_access_type = 'r' if pattern == 'read' else 'w'
tl = trace_list(0, sz, 'seq', trace_access_type, step=64,
critical_load=False,
repeat_round=repeat,
idle_clk=clk)
with trace_file_path.open('w') as f:
f.writelines(tl)
return trace_file_path
def generate_testcases(self):
for pattern in self.metadata['input']['pattern']:
out_path = self.metadata['out_path'] + '_' + pattern
out_path = self.basedata['out_path'] / out_path
for sz in self.metadata['input']['access_size']:
testcase_out_path = out_path / str(sz)
testcase_out_path.mkdir(parents=True)
dump_path = testcase_out_path / 'vans_dump'
dump_path.mkdir(parents=True)
run_script_lines = ['#!/bin/bash']
repeat = 1
# generate trace file
run_script_lines.append(
f"# trace_list: trace_list(0, {sz}, 'seq', {pattern}, step=64, critical_load=False, repeat_round={repeat})")
trace_file_path = self.generate_trace(
pattern, sz, testcase_out_path, repeat, self.metadata['input']['cache_trace'])
# generate run script file
run_script_lines.append(f"cd $(dirname $0)")
run_script_lines.append(
f"{self.basedata['vans']['dramtrace_bin'].resolve()} \\")
run_script_lines.append(
f"\t -c {self.metadata['local_cfg_path'].resolve()} \\")
run_script_lines.append(
f"\t -t {trace_file_path.resolve()} \\")
run_script_lines.append(f"\t 2>&1 \\")
run_script_lines.append(f"\t > vans_dump/stdout")
run_script_file = testcase_out_path / 'run.sh'
with run_script_file.open('w') as f:
f.write('\n'.join(run_script_lines))
# generate testcase instances
size_str = str(bitmath.Byte(bytes=sz).best_prefix())
info = {}
info['name'] = f"{self.metadata['name']}\t(Pattern: {pattern:>6} |\tSize: {size_str:>14} |\tRepeat: {repeat})"
info['job_id'] = f"bandwidth_{pattern}"
info['access_size'] = sz
info['path'] = testcase_out_path
info['repeat_cnt'] = repeat
info['run_script'] = 'run.sh'
self.testcases.append(testcase(info))
def form_subsection(self, pattern):
rl = []
rl.append(f'### Bandwidth {pattern}')
rl.append('```')
rl.append(f'test:')
rl.append(f' pattern: {pattern}')
rl.append('```')
cl = []
cl.append(f'result_df <- read_data("{self.metadata["result_csv_path"].resolve()}")')
cl.append(f'result_df <- result_df[result_df$job_id == "bandwidth_{pattern}",]')
cl.append(f'\n')
if pattern == 'read':
ref_bw = 7 * 1024 * 1024 * 1024
elif pattern == 'write':
ref_bw = 1.5 * 1024 * 1024 * 1024
else:
            assert False, f'unsupported pattern: {pattern}'
cl.append(f'naplot() +')
cl.append(f' geom_line(data = result_df, aes(x = access_size, y = access_size / total_ns * 1e9, color = "result"), size=line_default_size) +')
cl.append(f' hline({ref_bw}) +')
cl.append(f' scale_y_continuous(name = "Bandwidth (Byte/Sec)", trans = "log2", labels = byte_scale) +')
cl.append(f' scale_x_continuous(name = "Access region size (Byte)", trans = "log2", labels = byte_scale)')
rl += self.form_code_region(cl)
rl.append(f'\n')
if self.metadata['input']['draw_counters']:
rl.append(f'#### Counters')
counters = []
with self.metadata['result_csv_path'].open('r') as f:
reader = csv.DictReader(f)
for row in reader:
counters = [x for x in row.keys() if x.startswith('cnt.')]
break
cl = []
cl.append(f'result_df <- read_data("{self.metadata["result_csv_path"].resolve()}")')
cl.append(f'result_df <- result_df[result_df$job_id == "bandwidth_{pattern}",]')
cl.append(f'\n')
for cnt in counters:
cl.append(f'naplot() +')
cl.append(f' geom_line(data = result_df, aes(x = access_size, y = ({cnt} / repeat_cnt) / access_size * 64, color = "{cnt}"), size=line_default_size) +')
cl.append(f' scale_y_continuous(name = "Counter per Access") +')
cl.append(f' scale_x_continuous(name = "Access region size (Byte)", trans = "log2", labels = byte_scale) +')
cl.append(f' ggtitle("{cnt}") +')
cl.append(f' theme(text=element_text(size=20))')
rl += self.form_code_region(cl, 'fig.hold="hold", out.width="30%"')
return rl
def form_report(self):
rl = self.form_report_base()
for pattern in self.metadata['input']['pattern']:
rl += self.form_subsection(pattern)
return rl
|
11463816
|
import numpy as np
class BaseDynamics:
"""Base class for all dynamics processes.
The basic usage is as follows:
>>> ground_truth = nx.read_edgelist("ground_truth.txt")
    >>> dynamics_model = BaseDynamics()
>>> synthetic_TS = dynamics_model.simulate(ground_truth, <some_params>)
>>> # G = Reconstructor().fit(synthetic_TS)
This produces a numpy array of time series data.
"""
def __init__(self):
self.results = {}
def simulate(self, G, L):
r"""Simulate dynamics on a ground truth network.
The results dictionary stores the ground truth network as
`'ground_truth'`.
Parameters
----------
G (nx.Graph)
the input (ground-truth) graph with :math:`N` nodes.
L (int)
the length of the desired time series.
Returns
-------
TS (np.ndarray)
        an :math:`N \times L` array of synthetic time series data.
"""
N = G.number_of_nodes()
self.results['ground_truth'] = G
self.results['TS'] = np.ones((N, L))
return self.results['TS']
|
11463817
|
import os
import unittest
from brazil_srag import srag
_SOURCE_ID = "abc123"
_SOURCE_URL = "foo.bar"
class BrazilSRAGTest(unittest.TestCase):
def setUp(self):
# Default of 1500 is not enough to show diffs when there is one.
self.maxDiff = 10000
def test_parse(self):
current_dir = os.path.dirname(__file__)
sample_data_file = os.path.join(current_dir, "sample_data.csv")
result = srag.parse_cases(sample_data_file, _SOURCE_ID, _SOURCE_URL)
self.assertCountEqual(list(result), [
{
"caseReference": {"sourceId": _SOURCE_ID, "sourceUrl": _SOURCE_URL},
"location": {
"country": "Brazil",
"administrativeAreaLevel1": "Amazonas",
"administrativeAreaLevel2": "Manaus",
"geoResolution": "Admin2",
"name": "Manaus, Amazonas, Brazil",
"geometry": {
"longitude": -60.25962801,
"latitude": -2.625919383
}
},
"events": [
{
"name": "confirmed",
"dateRange": {
"start": '01/26/2021Z',
"end": '01/26/2021Z'
},
"value": "PCR test"
},
{
"name": "onsetSymptoms",
"dateRange": {
"start": '01/23/2021Z',
"end": '01/23/2021Z'
}
},
{
"name": "hospitalAdmission",
"value": "Yes",
"dateRange": {
"start": '01/26/2021Z',
"end": '01/26/2021Z'
}
},
{
"name": "icuAdmission",
"value": "Yes",
"dateRange": {
"start": '01/28/2021Z',
"end": None
}
},
{
"name": "outcome",
"value": "Recovered",
"dateRange": {
"start": None,
"end": None
}
}
],
"symptoms": {
"status": "Symptomatic",
"values": [
"dyspnea", "fever", "cough", "hypoxemia"
]
},
"demographics": {
"gender": "Male",
"ageRange": {
"start": 18.0,
"end": 18.0
},
"ethnicity": "Mixed"
},
"preexistingConditions": {
"hasPreexistingConditions": True,
"values": [
"nervous system disease"
]
},
"travelHistory": None
},
{
"caseReference": {"sourceId": _SOURCE_ID, "sourceUrl": _SOURCE_URL},
"location": {
"country": "Brazil",
"administrativeAreaLevel1": "Amazonas",
"administrativeAreaLevel2": "Manacapuru",
"geoResolution": "Admin2",
"name": "Manacapuru, Amazonas, Brazil",
"geometry": {
"longitude": -60.9587578,
"latitude": -3.291538169
}
},
"events": [
{
"name": "confirmed",
"dateRange": {
"start": '03/10/2021Z',
"end": '03/10/2021Z'
},
"value": "Serological test"
},
{
"name": "onsetSymptoms",
"dateRange": {
"start": '02/27/2021Z',
"end": '02/27/2021Z'
}
},
{
"name": "hospitalAdmission",
"value": "Yes",
"dateRange": {
"start": '03/09/2021Z',
"end": '03/09/2021Z'
}
},
{
"name": "icuAdmission",
"value": "No",
},
{
"name": "outcome",
"value": "Death",
"dateRange": {
"start": '03/17/2021Z',
"end": '03/17/2021Z'
}
}
],
"symptoms": {
"status": "Symptomatic",
"values": [
"dyspnea", "fever", "cough"
]
},
"demographics": {
"gender": "Male",
"ageRange": {
"start": 46.0,
"end": 46.0
},
"ethnicity": "White"
},
"preexistingConditions": None,
"travelHistory": {
"traveledPrior30Days": True,
"travel": [
{
"location": {
"country": "Antarctica",
"geoResolution": "Country",
"name": "Antarctica",
"geometry": {
"latitude": -75.250973,
"longitude": -0.071389
}
},
}
],
"dateRange": {
"start": None,
"end": None
}
},
"restrictedNotes": "Patient died from other causes"
},
{
"caseReference": {"sourceId": _SOURCE_ID, "sourceUrl": _SOURCE_URL},
"location": {
"country": "Brazil",
"administrativeAreaLevel1": "Minas Gerais",
"administrativeAreaLevel2": "Montes Claros",
"geoResolution": "Admin2",
"name": "<NAME>, Minas Gerais, Brazil",
"geometry": {
"latitude": -16.62071806,
"longitude": -43.92881683
}
},
"events": [
{
"name": "confirmed",
"dateRange": {
"start": '04/07/2021Z',
"end": '04/07/2021Z'
},
"value": "PCR test"
},
{
"name": "onsetSymptoms",
"dateRange": {
"start": '04/01/2021Z',
"end": '04/01/2021Z'
}
},
{
"name": "hospitalAdmission",
"value": "Yes",
"dateRange": {
"start": '04/07/2021Z',
"end": '04/07/2021Z'
}
},
{
"name": "icuAdmission",
"value": "No",
},
{
"name": "outcome",
"value": "Death",
"dateRange": {
"start": '04/13/2021Z',
"end": '04/13/2021Z'
}
}
],
"symptoms": {
"status": "Symptomatic",
"values": [
"dyspnea", "cough", "hypoxemia"
]
},
"demographics": {
"gender": "Female",
"ageRange": {
"start": 74.0,
"end": 74.0
},
"ethnicity": "White"
},
"preexistingConditions": {
"hasPreexistingConditions": True,
"values": [
'other comorbidity listed as: HAS'
]
},
"travelHistory": None
},
])
|
11463957
|
from .kb import KB, Boolean, Integer
# Initialise all variables that you need for your strategies and game knowledge.
# Add those variables here. The following list is complete for the Play Jack strategy.
J0 = Boolean('j0')
J1 = Boolean('j1')
J2 = Boolean('j2')
J3 = Boolean('j3')
J4 = Boolean('j4')
J5 = Boolean('j5')
J6 = Boolean('j6')
J7 = Boolean('j7')
J8 = Boolean('j8')
J9 = Boolean('j9')
J10 = Boolean('j10')
J11 = Boolean('j11')
J12 = Boolean('j12')
J13 = Boolean('j13')
J14 = Boolean('j14')
J15 = Boolean('j15')
J16 = Boolean('j16')
J17 = Boolean('j17')
J18 = Boolean('j18')
J19 = Boolean('j19')
PJ0 = Boolean('pj0')
PJ1 = Boolean('pj1')
PJ2 = Boolean('pj2')
PJ3 = Boolean('pj3')
PJ4 = Boolean('pj4')
PJ5 = Boolean('pj5')
PJ6 = Boolean('pj6')
PJ7 = Boolean('pj7')
PJ8 = Boolean('pj8')
PJ9 = Boolean('pj9')
PJ10 = Boolean('pj10')
PJ11 = Boolean('pj11')
PJ12 = Boolean('pj12')
PJ13 = Boolean('pj13')
PJ14 = Boolean('pj14')
PJ15 = Boolean('pj15')
PJ16 = Boolean('pj16')
PJ17 = Boolean('pj17')
PJ18 = Boolean('pj18')
PJ19 = Boolean('pj19')
def general_information(kb):
# GENERAL INFORMATION ABOUT THE CARDS
# This adds information which cards are Jacks
kb.add_clause(J4)
kb.add_clause(J9)
kb.add_clause(J14)
kb.add_clause(J19)
# Add here whatever is needed for your strategy.
def strategy_knowledge(kb):
# DEFINITION OF THE STRATEGY
# Add clauses (This list is sufficient for this strategy)
    # PJ is the strategy to play jacks first, so all we need to model is: for all x, PJ(x) <-> J(x).
    # In other words, the PJ strategy should play a card exactly when it is a jack.
kb.add_clause(~J4, PJ4)
kb.add_clause(~J9, PJ9)
kb.add_clause(~J14, PJ14)
kb.add_clause(~J19, PJ19)
kb.add_clause(~PJ4, J4)
kb.add_clause(~PJ9, J9)
kb.add_clause(~PJ14, J14)
kb.add_clause(~PJ19, J19)
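# Hedged note (illustrative only): each pair of clauses above is the CNF form
# of one biconditional. For card 4, (~J4 v PJ4) and (~PJ4 v J4) together
# assert PJ4 <-> J4: the strategy plays position 4 iff that card is a jack.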
|
11464028
|
from pydantic import BaseModel
class A(BaseModel):
cde: str
xyz: str
class B(A):
cde: int
xyz: str
A(cde='abc', xyz='123')
B(cde='abc', xyz='123')
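# Hedged note (illustrative only): because B narrows cde from str to int, the
# B(cde='abc', xyz='123') call above is expected to raise a pydantic
# ValidationError ('abc' is not coercible to int), while A(...) validates fine.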
|
11464031
|
import os
import json
import pickle
import zipfile
from tatk.util.camrest.state import default_state
from tatk.util.dataloader.module_dataloader import ActPolicyDataloader
from tatk.policy.vector.vector_camrest import CamrestVector
class ActPolicyDataLoaderCamrest(ActPolicyDataloader):
def __init__(self):
root_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))))
voc_file = os.path.join(root_dir, 'data/camrest/sys_da_voc.txt')
voc_opp_file = os.path.join(root_dir, 'data/camrest/usr_da_voc.txt')
self.vector = CamrestVector(voc_file, voc_opp_file)
processed_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'processed_data')
if os.path.exists(processed_dir):
print('Load processed data file')
self._load_data(processed_dir)
else:
print('Start preprocessing the dataset')
self._build_data(root_dir, processed_dir)
def _build_data(self, root_dir, processed_dir): # TODO
raw_data = {}
for part in ['train', 'val', 'test']:
archive = zipfile.ZipFile(os.path.join(root_dir, 'data/camrest/{}.json.zip'.format(part)), 'r')
with archive.open('{}.json'.format(part), 'r') as f:
raw_data[part] = json.load(f)
self.data = {}
for part in ['train', 'val', 'test']:
self.data[part] = []
            for dialog in raw_data[part]:
                sess = dialog['dial']
state = default_state()
action = {}
for i, turn in enumerate(sess):
state['user_action'] = turn['usr']['dialog_act']
if i + 1 == len(sess):
state['terminated'] = True
for da in turn['usr']['slu']:
if da['slots'][0][0] != 'slot':
state['belief_state'][da['slots'][0][0]] = da['slots'][0][1]
action = turn['sys']['dialog_act']
self.data[part].append([self.vector.state_vectorize(state),
self.vector.action_vectorize(action)])
state['system_action'] = turn['sys']['dialog_act']
os.makedirs(processed_dir)
for part in ['train', 'val', 'test']:
with open(os.path.join(processed_dir, '{}.pkl'.format(part)), 'wb') as f:
pickle.dump(self.data[part], f)
|
11464050
|
from click.testing import CliRunner
from indexpy.cli import index_cli
def test_custom_command():
@index_cli.command(name="only-print")
def only_print():
print("Custom command")
cli = CliRunner()
assert cli.invoke(index_cli, ["only-print"]).output == "Custom command\n"
|
11464053
|
import copy
import re
def normalize(s):
    return re.sub(r'\s+', ' ', s.strip())
DEMO_SYNONYMS = [
('rate', 'ratings'),
('rating', 'ratings'),
('approval', 'popularity'),
]
def console(synonym_queries):
synonym_list = copy.copy(DEMO_SYNONYMS)
print('Using default synonyms:')
for s1, s2 in synonym_list:
print(' {} <-> {}'.format(s1, s2))
print()
try:
print('Input additional synonym pairs, e.g. \'rate ratings\'')
while True:
raw_synonyms = normalize(input('(Ctrl-D to finish synonyms): '))
synonyms = raw_synonyms.split()
if len(synonyms) != 2:
print('Two words, please')
continue
w1, w2 = synonyms
print('Added synonyms {} <-> {}'.format(w1, w2))
synonym_list.append( (w1, w2) )
except EOFError:
pass
try:
print('Input queries:')
while True:
q1 = normalize(input('Query 1: ').strip())
q2 = normalize(input('Query 2: ').strip())
synonymous = synonym_queries(synonym_list, [(q1, q2)])[0]
print('\'{}\' and \'{}\' {} synonymous'.format(q1, q2,
'ARE' if synonymous else 'ARE NOT'))
except EOFError:
pass
print()
|
11464099
|
from scipy.optimize import curve_fit
import numpy as np
def curve_func(x, a, b, c):
    return a * (1 - np.exp(-1/c * x**b))
def fit(func, x, y):
popt, pcov = curve_fit(func, x, y, p0 =(1,1,1), method= 'trf', sigma = np.geomspace(1,.1,len(y)), absolute_sigma=True, bounds= ([0,0,0],[1,1,np.inf]) )
return tuple(popt)
def derivation(x, a, b, c):
x = x + 1e-6 # numerical robustness
return a * b * 1/c * np.exp(-1/c * x**b) * (x**(b-1))
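# Hedged note (illustrative only): derivation() is the analytic derivative of
# curve_func, since d/dx [a * (1 - exp(-x**b / c))] = a * b / c * exp(-x**b / c) * x**(b - 1),
# matching the expression above term for term.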
def label_update_epoch(ydata_fit, n_epoch = 16, threshold = 0.9, eval_interval = 100, num_iter_per_epoch= 10581/10):
xdata_fit = np.linspace(0, len(ydata_fit)*eval_interval/num_iter_per_epoch, len(ydata_fit))
a, b, c = fit(curve_func, xdata_fit, ydata_fit)
epoch = np.arange(1, n_epoch)
y_hat = curve_func(epoch, a, b, c)
relative_change = abs(abs(derivation(epoch, a, b, c)) - abs(derivation(1, a, b, c)))/ abs(derivation(1, a, b, c))
relative_change[relative_change > 1] = 0
update_epoch = np.sum(relative_change <= threshold) + 1
return update_epoch#, a, b, c
def if_update(iou_value, current_epoch, n_epoch = 16, threshold = 0.90, eval_interval=1, num_iter_per_epoch=1):
# check iou_value
start_iter = 0
print("len(iou_value)=",len(iou_value))
for k in range(len(iou_value)-1):
if iou_value[k+1]-iou_value[k] < 0.1:
start_iter = max(start_iter, k + 1)
else:
break
shifted_epoch = start_iter*eval_interval/num_iter_per_epoch
#cut out the first few entries
iou_value = iou_value[start_iter: ]
update_epoch = label_update_epoch(iou_value, n_epoch = n_epoch, threshold=threshold, eval_interval=eval_interval, num_iter_per_epoch=num_iter_per_epoch)
# Shift back
update_epoch = shifted_epoch + update_epoch
return current_epoch >= update_epoch#, update_epoch
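# Hedged usage sketch (illustrative only): feed a synthetic saturating IoU
# curve to if_update and ask whether epoch 10 lies past the fitted update point.
def _demo_if_update():
    x = np.linspace(0, 16, 160)
    iou = 0.9 * (1 - np.exp(-2 * x))    # toy curve shaped like curve_func
    return if_update(iou, current_epoch=10, n_epoch=16,
                     eval_interval=1, num_iter_per_epoch=10)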
def merge_labels_with_skip(original_labels, model_predictions, need_label_correction_dict, conf_threshold=0.8, logic_255=False,class_constraint=True, conf_threshold_bg = 0.95):
new_label_dict = {}
update_list = []
for c in need_label_correction_dict:
if need_label_correction_dict[c]:
update_list.append(c)
for pid in model_predictions:
pred_prob = model_predictions[pid]
pred = np.argmax(pred_prob, axis=0)
label = original_labels[pid]
# print(np.unique(label))
# print(update_list)
        # if the image contains none of the classes that need updating, skip the correction below
if set(np.unique(label)).isdisjoint(set(update_list)):
new_label_dict[pid] = label
continue
        # if the prediction is confident; the code supports different thresholds
        # for foreground and background, but during the experiments both were
        # always set to the same value for simplicity
        # confident = np.max(pred_prob, axis=0) > conf_threshold
confident = (np.max(pred_prob[1:], axis=0) > conf_threshold) |(pred_prob[0] > conf_threshold_bg)
# before update: only class that need correction will be replaced
belong_to_correction_class = label==0
for c in need_label_correction_dict:
if need_label_correction_dict[c]:
belong_to_correction_class |= (label==c)
# after update: only pixels that will be flipped to the allowed classes will be updated
after_belong = pred==0
for c in need_label_correction_dict:
if need_label_correction_dict[c]:
after_belong |= (pred==c)
# combine all three masks together
replace_flag = confident & belong_to_correction_class & after_belong
# the class constraint
if class_constraint:
unique_class = np.unique(label)
# print(unique_class)
# indx = torch.zeros((h, w), dtype=torch.long)
class_constraint_indx = (pred==0)
for element in unique_class:
class_constraint_indx = class_constraint_indx | (pred == element)
replace_flag = replace_flag & (class_constraint_indx != 0)
# replace with the new label
next_label = np.where(replace_flag, pred, label).astype("int32")
# logic 255:
# - rule# 1: if label[i,j] != 0, and pred[i,j] = 0, then next_label[i,j] = 255
# - rule# 2: if label[i,j] = 255 and pred[i,j] != 0 and confident, then next_label[i,j] = pred[i,j]
# rule 2 is already enforced above, don't need additional code
if logic_255:
rule_1_flag = (label != 0) & (pred == 0)
next_label = np.where(rule_1_flag, np.ones(next_label.shape)*255, next_label).astype("int32")
new_label_dict[pid] = next_label
return new_label_dict
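# Hedged usage sketch (illustrative only): correct the labels of one image
# whose class-1 pixels may be wrong, given per-pixel class probabilities of
# shape (num_classes, height, width).
def _demo_merge_labels():
    probs = np.zeros((2, 2, 2))
    probs[1] = [[0.9, 0.9], [0.1, 0.1]]     # confident class 1 on the top row
    probs[0] = 1 - probs[1]
    labels = {'img0': np.array([[0, 0], [1, 1]], dtype='int32')}
    return merge_labels_with_skip(labels, {'img0': probs},
                                  need_label_correction_dict={1: True},
                                  conf_threshold=0.8, conf_threshold_bg=0.8)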
|
11464108
|
import collections
import chainer
import numpy as np
import onnx
from typing import List, Mapping
from chainer_compiler.ch2o import env
from chainer_compiler.ch2o import utils
def _is_float_value(v):
# The latter is for numpy-like things.
return isinstance(v, float) or int(v) != v
class Value(object):
"""An object which holds either an ONNX value or a Python object."""
def __init__(self, value):
if isinstance(value, Value):
self.const_value = value.const_value
value = value.value
else:
self.const_value = None
self.value = value
self.is_py = not isinstance(self.value, onnx.ValueInfoProto)
if not self.is_py:
assert self.is_tensor() or self.is_sequence()
assert not (self.is_tensor() and self.is_sequence())
def __repr__(self):
if self.is_py:
return 'Value(%s)' % str(self.value)
else:
return 'Value(%s)' % self.value.name
def get_attribute(self, key: str, env: 'utils.Env') -> 'Value':
if not self.is_py:
raise TypeError('Unsupported attribute %s for an ONNX value' % key)
value = Value(getattr(self.value, key))
if (value.is_py and
(value.value is None or
not isinstance(value.value, type) and
# TODO(hamaji): We probably need to create a ValueInfo
# for Variable.
not isinstance(value.value, chainer.Variable) and
             np.array(value.value).dtype != object)):
value.to_value_info(env.root())
setattr(self.value, key, value)
if not value.is_py:
env.read_attrs.append((self, key, value))
return value
def is_none(self) -> bool:
return self.is_py and self.value is None
def is_tensor(self) -> bool:
return not self.is_py and self.value.type.HasField('tensor_type')
def is_sequence(self) -> bool:
return not self.is_py and self.value.type.HasField('sequence_type')
def copy(self, env: 'utils.Env', name=None) -> 'Value':
self.to_value_info(env)
vi = self.value
nvi = onnx.ValueInfoProto()
if self.is_tensor():
nvi.name = utils.gen_id(name, 'T')
else:
assert self.is_sequence(), self
nvi.name = utils.gen_id(name, 'S')
nvi.type.CopyFrom(vi.type)
return Value(nvi)
def identity(self, env: 'utils.Env', name=None) -> 'Value':
nv = self.copy(env, name=name)
env.addnode('Identity',
inputs=[self.value.name], outputs=[nv.value.name])
return nv
def to_value_info(self, env: 'utils.Env') -> onnx.ValueInfoProto:
if self.is_py:
            if isinstance(self.value, collections.abc.Iterable):
return self.to_sequence(env)
else:
return self.to_tensor(env)
return self.value
def to_tensor(self, env: 'utils.Env',
dtype: type = None) -> onnx.ValueInfoProto:
if self.is_py:
self.const_value = Value(self.value)
# TODO(hamaji): Rewrite `totensor` to convert a Python
# list to a tensor.
self.value = utils.totensor(self.value, env, dtype=dtype)
self.is_py = False
else:
if self.is_sequence():
self.value = env.calc('ConcatFromSequence',
inputs=[self.value.name],
axis=0,
new_axis=True)
self.is_py = False
if dtype is not None:
dt = utils.onnx_dtype(dtype)
self.value = env.calc(
'Cast',
inputs=[self.value.name],
to=dt
)
self.value.type.tensor_type.elem_type = dt
assert self.is_tensor()
return self.value
def to_sequence(self, env: 'utils.Env') -> onnx.ValueInfoProto:
if self.is_py:
self.const_value = Value(self.value)
            if not isinstance(self.value, collections.abc.Iterable):
raise TypeError('Expected a sequence: %s' % self.value)
res = env.calc_seq(
"SequenceConstruct",
inputs=[],
)
for v in self.value:
v = Value(v).to_tensor(env)
res = env.calc_seq(
"SequenceInsert",
inputs=[res.name, v.name],
)
self.value = res
self.is_py = False
elif self.is_tensor():
self.value = env.calc_seq(
'SplitToSequence',
inputs=[self.value.name],
keepdims=False
)
assert self.is_sequence()
return self.value
def _const(self) -> 'Value':
if not self.is_py and self.const_value is not None:
return self.const_value
return self
@property
def has_py_value(self):
return self.is_py or self.const_value is not None
def to_py_value(self):
if self.is_py:
return self
if self.const_value is not None:
return self.const_value
assert False, self
def to_float(self) -> float:
value = self._const()
if not value.is_py:
raise TypeError('Expected a float scalar: %s' % value.value)
return float(value.value)
def to_int(self) -> int:
value = self._const()
if not value.is_py or _is_float_value(value.value):
print(value.const_value)
raise TypeError('Expected an int scalar: %s' % value.value)
return int(value.value)
def to_bool(self) -> bool:
value = self._const()
if not value.is_py or not isinstance(value.value, bool):
raise TypeError('Expected a bool scalar: %s' % value.value)
return bool(value.value)
def to_int_list(self) -> List[int]:
value = self._const()
        if not value.is_py or not isinstance(value.value, collections.abc.Iterable):
raise TypeError('Expected an int list: %s' % value.value)
ints = list(Value(v).value for v in value.value)
if ints and _is_float_value(ints[0]):
raise TypeError('Expected an int list: %s' % value.value)
return ints
|
11464113
|
import sys
import os
import numpy as np
from scipy.interpolate import interp1d
# Get the inputs from the terminal line
L = float(sys.argv[1])
theta = float(sys.argv[2])
folderNum = int(sys.argv[3])
# Delete the previous blockMeshDict
os.system("rm ./baseCase/system/blockMeshDict")
# Delete the individual folder if it exists
if os.path.isdir('./ind%i' %folderNum):
os.system('rm -r ind%i' %folderNum)
# Interpolate the inside region of the diffuser
triX = np.array([2+L*np.cos(np.deg2rad(theta)),
2.5+L*np.cos(np.deg2rad(theta)),
3+L*np.cos(np.deg2rad(theta))])
triY = np.array([L*np.sin(np.deg2rad(theta)),
(3*L*np.sin(np.deg2rad(theta))+0.2)/5,
0.1])
f2 = interp1d(triX, triY, kind='quadratic')
x = np.linspace(2+L*np.cos(np.deg2rad(theta)),3+L*np.cos(np.deg2rad(theta)),100)
y = f2(x)
# Header
bMD1 = """/*--------------------------------*- C++ -*----------------------------------*\
| ========= | |
| \\ / F ield | OpenFOAM: The Open Source CFD Toolbox |
| \\ / O peration | Version: 5 |
| \\ / A nd | Web: www.OpenFOAM.org |
| \\/ M anipulation | |
\*---------------------------------------------------------------------------*/
FoamFile
{
version 2.0;
format ascii;
class dictionary;
object blockMeshDict;
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //"""
# Body
bMD2 = """
z1 0.0;
z2 0.1;
Nup 80;
Nuu 40;
Ncowl 20;
Naxis 150;
Ninf 200;
Nramp 200;
Ndown 100;
convertToMeters 1;
vertices
(
(1.0 0.0 $z1) // Point 0
(2.0 0.0 $z1) // Point 1
($DLcO $LsO $z1) // Point 2
($TLcO 0.1 $z1) // Point 3
(1.0 0.8 $z1) // Point 4
(2.0 0.8 $z1) // Point 5
($DLcO 0.8 $z1) // Point 6
($TLcO 0.8 $z1) // Point 7
(1.0 0.85 $z1) // Point 8
(2.0 0.85 $z1) // Point 9
($ULcO 0.85 $z1) // Point 10
($TLcO 0.85 $z1) // Point 11
(1.0 1.85 $z1) // Point 12
(2.0 1.85 $z1) // Point 13
($ULcO 1.85 $z1) // Point 14
($TLcO 1.85 $z1) // Point 15
(1.0 0.0 $z2) // Point 16
(2.0 0.0 $z2) // Point 17
($DLcO $LsO $z2) // Point 18
($TLcO 0.1 $z2) // Point 19
(1.0 0.8 $z2) // Point 20
(2.0 0.8 $z2) // Point 21
($DLcO 0.8 $z2) // Point 22
($TLcO 0.8 $z2) // Point 23
(1.0 0.85 $z2) // Point 24
(2.0 0.85 $z2) // Point 25
($ULcO 0.85 $z2) // Point 26
($TLcO 0.85 $z2) // Point 27
(1.0 1.85 $z2) // Point 28
(2.0 1.85 $z2) // Point 29
($ULcO 1.85 $z2) // Point 30
($TLcO 1.85 $z2) // Point 31
);
blocks
(
//block 0
hex (0 1 5 4 16 17 21 20) ($Ninf $Naxis 1) simpleGrading
(
0.1
(
(0.10 0.15 1)
(0.40 0.35 3.2)
(0.40 0.35 0.3125)
(0.10 0.15 1)
)
1
)
//block 1
hex (1 2 6 5 17 18 22 21) ($Nramp $Naxis 1) simpleGrading
(
1
(
(0.10 0.15 1)
(0.40 0.35 3.2)
(0.40 0.35 0.3125)
(0.10 0.15 1)
)
1
)
//block 2
hex (2 3 7 6 18 19 23 22) ($Ndown $Naxis 1) simpleGrading
(
10
(
(0.10 0.15 1)
(0.40 0.35 3.2)
(0.40 0.35 0.3125)
(0.10 0.15 1)
)
1
)
//block 3
hex (4 5 9 8 20 21 25 24) ($Ninf $Ncowl 1) simpleGrading (0.1 1 1)
//block 4
hex (5 6 10 9 21 22 26 25) ($Nramp $Ncowl 1) simpleGrading (1 1 1)
//block 5
hex (8 9 13 12 24 25 29 28) ($Ninf $Nup 1) simpleGrading (0.1 15 1)
//block 6
hex (9 10 14 13 25 26 30 29) ($Nramp $Nup 1) simpleGrading (1 15 1)
//block 7
hex (10 11 15 14 26 27 31 30) ($Nuu $Nup 1) simpleGrading (10 15 1)
);
edges
("""
# End
bMD3 = """
);
boundary
(
inlet
{
type patch;
faces
(
(12 28 24 8)
(8 24 20 4)
(4 20 16 0)
);
}
outlet
{
type patch;
faces
(
(31 15 11 27)
);
}
compressor
{
type patch;
faces
(
(23 7 3 19)
);
}
upper
{
type patch;
faces
(
(28 12 13 29)
(29 13 14 30)
(30 14 15 31)
);
}
lower
{
type patch;
faces
(
(0 16 17 1)
);
}
cowl
{
type patch;
faces
(
(10 26 27 11)
(26 10 6 22)
(22 6 7 23)
);
}
axis
{
type patch;
faces
(
(18 2 1 17)
(19 3 2 18)
);
}
);
// ************************************************************************* //
"""
# Writing the data in the file
with open('./baseCase/system/blockMeshDict', "a") as bMD:
bMD.write(bMD1)
bMD.write('\nLsO %.8f;\nULcO %.8f;\nDLcO %.8f;\nTLcO %.8f;\n'
%(L*np.sin(np.deg2rad(theta)),1.95+L*np.cos(np.deg2rad(theta)),
2+L*np.cos(np.deg2rad(theta)),3+L*np.cos(np.deg2rad(theta))))
bMD.write(bMD2)
bMD.write(' spline 2 3 ( \n')
for i in range(len(x)):
bMD.write(' (%.8f %.8f 0.0) \n' %(x[i], y[i]))
bMD.write(' ) \n')
bMD.write(' spline 18 19 ( \n')
for i in range(len(x)):
bMD.write(' (%.8f %.8f 0.1) \n' %(x[i], y[i]))
bMD.write(' ) \n')
bMD.write(bMD3)
# Copy the base case folder with
os.system("cp -r baseCase/ ind%i/" %folderNum)
# blockMesh and paraFoam calling
os.system("blockMesh -case ind%i > bmOut%i 2>&1 && paraFoam -case ind%i" %(folderNum,folderNum,folderNum))
|
11464167
|
import os
import shutil
import unittest
from mock import patch
import activity.activity_GenerateSWHReadme as activity_module
from activity.activity_GenerateSWHReadme import (
activity_GenerateSWHReadme as activity_object,
)
from provider.article import article
import tests.activity.settings_mock as settings_mock
from tests.activity.classes_mock import (
FakeLogger,
FakeStorageContext,
FakeSession,
)
import tests.activity.test_activity_data as testdata
import tests.activity.helpers as helpers
def fake_download_xml(filename, to_dir):
source_doc = os.path.join("tests", "files_source", "software_heritage", filename)
dest_doc = os.path.join(to_dir, filename)
try:
shutil.copy(source_doc, dest_doc)
return filename
except IOError:
pass
    # default return: assume failure if the copy did not succeed
return False
class TestGenerateSWHReadme(unittest.TestCase):
def setUp(self):
fake_logger = FakeLogger()
self.activity = activity_object(settings_mock, fake_logger, None, None, None)
def tearDown(self):
helpers.delete_files_in_folder("tests/tmp", filter_out=[".keepme"])
helpers.delete_files_in_folder(
testdata.ExpandArticle_files_dest_folder, filter_out=[".gitkeep"]
)
@patch.object(article, "download_article_xml_from_s3")
@patch.object(activity_module, "get_session")
@patch.object(activity_module, "storage_context")
def test_do_activity(
self, mock_storage_context, mock_session, fake_download_article_xml
):
article_xml_file = "elife-30274-v2.xml"
mock_storage_context.return_value = FakeStorageContext(
testdata.ExpandArticle_files_dest_folder
)
mock_session.return_value = FakeSession(
testdata.SoftwareHeritageDeposit_session_example
)
fake_download_article_xml.return_value = fake_download_xml(
article_xml_file, self.activity.get_tmp_dir()
)
return_value = self.activity.do_activity(
testdata.SoftwareHeritageDeposit_data_example
)
self.assertEqual(return_value, self.activity.ACTIVITY_SUCCESS)
# look at the file contents
files = os.listdir(testdata.ExpandArticle_files_dest_folder)
readme_files = [
file_name
for file_name in files
if file_name != ".gitkeep" and file_name.startswith("README")
]
readme_file = readme_files[0]
with open(
os.path.join(testdata.ExpandArticle_files_dest_folder, readme_file), "rb"
) as open_file:
readme_string = open_file.read()
self.assertTrue(
b'# Executable Research Article for "Replication Study: Transcriptional '
in readme_string
)
@patch.object(article, "download_article_xml_from_s3")
@patch.object(activity_module, "get_session")
@patch.object(activity_module, "storage_context")
def test_do_activity_article_xml_exception(
self, mock_storage_context, mock_session, fake_download_article_xml
):
mock_storage_context.return_value = FakeStorageContext()
mock_session.return_value = FakeSession(
testdata.SoftwareHeritageDeposit_session_example
)
fake_download_article_xml.side_effect = Exception(
"Exception in downloading article XML"
)
return_value = self.activity.do_activity(
testdata.SoftwareHeritageDeposit_data_example
)
self.assertEqual(return_value, self.activity.ACTIVITY_PERMANENT_FAILURE)
@patch("elifearticle.parse.build_article_from_xml")
@patch.object(article, "download_article_xml_from_s3")
@patch.object(activity_module, "get_session")
@patch.object(activity_module, "storage_context")
def test_do_activity_article_parse_exception(
self, mock_storage_context, mock_session, fake_download_article_xml, fake_parse
):
article_xml_file = "elife-30274-v2.xml"
mock_storage_context.return_value = FakeStorageContext()
mock_session.return_value = FakeSession(
testdata.SoftwareHeritageDeposit_session_example
)
fake_download_article_xml.return_value = fake_download_xml(
article_xml_file, self.activity.get_tmp_dir()
)
fake_parse.side_effect = Exception("Exception parsing article XML")
return_value = self.activity.do_activity(
testdata.SoftwareHeritageDeposit_data_example
)
self.assertEqual(return_value, self.activity.ACTIVITY_PERMANENT_FAILURE)
@patch("provider.software_heritage.readme")
@patch.object(article, "download_article_xml_from_s3")
@patch.object(activity_module, "get_session")
@patch.object(activity_module, "storage_context")
def test_do_activity_readme_exception(
self,
mock_storage_context,
mock_session,
fake_download_article_xml,
fake_readme,
):
article_xml_file = "elife-30274-v2.xml"
mock_storage_context.return_value = FakeStorageContext()
mock_session.return_value = FakeSession(
testdata.SoftwareHeritageDeposit_session_example
)
fake_download_article_xml.return_value = fake_download_xml(
article_xml_file, self.activity.get_tmp_dir()
)
fake_readme.side_effect = Exception("Exception generating readme")
return_value = self.activity.do_activity(
testdata.SoftwareHeritageDeposit_data_example
)
self.assertEqual(return_value, self.activity.ACTIVITY_PERMANENT_FAILURE)
@patch.object(FakeStorageContext, "set_resource_from_string")
@patch.object(article, "download_article_xml_from_s3")
@patch.object(activity_module, "get_session")
@patch.object(activity_module, "storage_context")
def test_do_activity_bucket_exception(
self,
mock_storage_context,
mock_session,
fake_download_article_xml,
fake_set_resource,
):
article_xml_file = "elife-30274-v2.xml"
mock_storage_context.return_value = FakeStorageContext()
mock_session.return_value = FakeSession(
testdata.SoftwareHeritageDeposit_session_example
)
fake_download_article_xml.return_value = fake_download_xml(
article_xml_file, self.activity.get_tmp_dir()
)
fake_set_resource.side_effect = Exception("Exception uploading readme")
return_value = self.activity.do_activity(
testdata.SoftwareHeritageDeposit_data_example
)
self.assertEqual(return_value, self.activity.ACTIVITY_PERMANENT_FAILURE)
|
11464168
|
class RedisStorage:
def __init__(self, guild_id, plugin_name, redis):
self.guild_id = guild_id
self.plugin_name = plugin_name
self.prefix = '{}.{}:'.format(plugin_name,
guild_id)
self.redis = redis
def set(self, key, value, ex=None):
key = self.prefix + key
return self.redis.set(key, value, ex)
def get(self, key):
key = self.prefix + key
return self.redis.get(key)
def smembers(self, key):
key = self.prefix + key
return self.redis.smembers(key)
def sadd(self, key, member, *members):
key = self.prefix + key
return self.redis.sadd(key, member, *members)
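# A minimal usage sketch, assuming a reachable Redis server and the redis-py
# client (the guild id and plugin name below are hypothetical):
#
#     import redis
#     storage = RedisStorage(1234, "levels", redis.Redis())
#     storage.set("xp", 42, ex=3600)   # stored under key "levels.1234:xp"
#     storage.get("xp")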
|
11464187
|
from rlzoo.common.policy_networks import *
from rlzoo.common.value_networks import *
from rlzoo.common.utils import set_seed
"""
full list of algorithm parameters (alg_params)
-----------------------------------------------
net_list: a list of networks (value and policy) used in the algorithm, from common functions or customization
optimizers_list: a list of optimizers for all networks and differentiable variables
epsilon: clip parameter (for method 'clip')
kl_target: controls bounds of policy update and adaptive lambda (for method 'penalty')
lam: KL-regularization coefficient (for method 'penalty')
-----------------------------------------------
full list of learning parameters (learn_params)
-----------------------------------------------
train_episodes: total number of episodes for training
test_episodes: total number of episodes for testing
max_steps: maximum number of steps for one episode
save_interval: time steps for saving
gamma: reward discount factor
mode: train or test
batch_size: update batch size
a_update_steps: actor update iteration steps
c_update_steps: critic update iteration steps
n_workers: number of workers
-----------------------------------------------
"""
def atari(env, default_seed=True):
if default_seed:
assert isinstance(env, list)
seed = np.arange(len(env)).tolist() # a list of seeds for each env
set_seed(seed, env) # reproducible
# for multi-threading
    if isinstance(env, list):  # check whether multiple envs were passed in for parallel computing
        num_env = len(env)  # number of envs passed in
        env = env[0]  # take one of the envs, as they are all the same
else:
num_env = 1
alg_params = dict(method='clip', # method can be clip or penalty
epsilon=0.2, # for method 'clip'
kl_target=0.01, # for method 'penalty'
lam=0.5 # for method 'penalty'
)
if alg_params.get('net_list') is None:
num_hidden_layer = 2 # number of hidden layers for the networks
hidden_dim = 64 # dimension of hidden layers for the networks
with tf.name_scope('DPPO'):
with tf.name_scope('V_Net'):
v_net = ValueNetwork(env.observation_space, [hidden_dim] * num_hidden_layer)
with tf.name_scope('Policy'):
policy_net = StochasticPolicyNetwork(env.observation_space, env.action_space,
[hidden_dim] * num_hidden_layer)
net_list = v_net, policy_net
alg_params['net_list'] = net_list
if alg_params.get('optimizers_list') is None:
actor_lr = 1e-4
critic_lr = 2e-4
optimizers_list = [tf.optimizers.Adam(critic_lr), tf.optimizers.Adam(actor_lr)]
alg_params['optimizers_list'] = optimizers_list
learn_params = dict(train_episodes=1000,
test_episodes=100,
max_steps=200,
save_interval=50,
gamma=0.9,
a_update_steps=10,
c_update_steps=10,
n_workers=num_env,
batch_size=32)
return alg_params, learn_params
def classic_control(env, default_seed=True):
if default_seed:
assert isinstance(env, list)
seed = np.arange(len(env)).tolist() # a list of seeds for each env
set_seed(seed, env) # reproducible
# for multi-threading
    if isinstance(env, list):  # check whether multiple envs were passed in for parallel computing
        num_env = len(env)  # number of envs passed in
        env = env[0]  # take one of the envs, as they are all the same
else:
num_env = 1
alg_params = dict(method='clip', # method can be clip or penalty
epsilon=0.2, # for method 'clip'
kl_target=0.01, # for method 'penalty'
lam=0.5 # for method 'penalty'
)
if alg_params.get('net_list') is None:
num_hidden_layer = 2 # number of hidden layers for the networks
hidden_dim = 64 # dimension of hidden layers for the networks
with tf.name_scope('DPPO'):
with tf.name_scope('V_Net'):
v_net = ValueNetwork(env.observation_space, [hidden_dim] * num_hidden_layer)
with tf.name_scope('Policy'):
policy_net = StochasticPolicyNetwork(env.observation_space, env.action_space,
[hidden_dim] * num_hidden_layer)
net_list = v_net, policy_net
alg_params['net_list'] = net_list
if alg_params.get('optimizers_list') is None:
actor_lr = 1e-4
critic_lr = 2e-4
optimizers_list = [tf.optimizers.Adam(critic_lr), tf.optimizers.Adam(actor_lr)]
alg_params['optimizers_list'] = optimizers_list
learn_params = dict(train_episodes=1000,
test_episodes=100,
max_steps=200,
save_interval=50,
gamma=0.9,
a_update_steps=10,
c_update_steps=10,
n_workers=num_env,
batch_size=32)
return alg_params, learn_params
def box2d(env, default_seed=True):
if default_seed:
assert isinstance(env, list)
seed = np.arange(len(env)).tolist() # a list of seeds for each env
set_seed(seed, env) # reproducible
# for multi-threading
    if isinstance(env, list):  # check whether multiple envs were passed in for parallel computing
        num_env = len(env)  # number of envs passed in
        env = env[0]  # take one of the envs, as they are all the same
else:
num_env = 1
alg_params = dict(method='clip', # method can be clip or penalty
epsilon=0.2, # for method 'clip'
kl_target=0.01, # for method 'penalty'
lam=0.5 # for method 'penalty'
)
if alg_params.get('net_list') is None:
num_hidden_layer = 2 # number of hidden layers for the networks
hidden_dim = 64 # dimension of hidden layers for the networks
with tf.name_scope('DPPO'):
with tf.name_scope('V_Net'):
v_net = ValueNetwork(env.observation_space, [hidden_dim] * num_hidden_layer)
with tf.name_scope('Policy'):
policy_net = StochasticPolicyNetwork(env.observation_space, env.action_space,
[hidden_dim] * num_hidden_layer)
net_list = v_net, policy_net
alg_params['net_list'] = net_list
if alg_params.get('optimizers_list') is None:
actor_lr = 1e-4
critic_lr = 2e-4
optimizers_list = [tf.optimizers.Adam(critic_lr), tf.optimizers.Adam(actor_lr)]
alg_params['optimizers_list'] = optimizers_list
learn_params = dict(train_episodes=1000,
test_episodes=100,
max_steps=200,
save_interval=50,
gamma=0.9,
a_update_steps=10,
c_update_steps=10,
n_workers=num_env,
batch_size=32)
return alg_params, learn_params
def mujoco(env, default_seed=True):
if default_seed:
assert isinstance(env, list)
seed = np.arange(len(env)).tolist() # a list of seeds for each env
set_seed(seed, env) # reproducible
# for multi-threading
    if isinstance(env, list):  # check whether multiple envs were passed in for parallel computing
        num_env = len(env)  # number of envs passed in
        env = env[0]  # take one of the envs, as they are all the same
else:
num_env = 1
alg_params = dict(method='clip', # method can be clip or penalty
epsilon=0.2, # for method 'clip'
kl_target=0.01, # for method 'penalty'
lam=0.5 # for method 'penalty'
)
if alg_params.get('net_list') is None:
num_hidden_layer = 2 # number of hidden layers for the networks
hidden_dim = 64 # dimension of hidden layers for the networks
with tf.name_scope('DPPO'):
with tf.name_scope('V_Net'):
v_net = ValueNetwork(env.observation_space, [hidden_dim] * num_hidden_layer)
with tf.name_scope('Policy'):
policy_net = StochasticPolicyNetwork(env.observation_space, env.action_space,
[hidden_dim] * num_hidden_layer)
net_list = v_net, policy_net
alg_params['net_list'] = net_list
if alg_params.get('optimizers_list') is None:
actor_lr = 1e-4
critic_lr = 2e-4
optimizers_list = [tf.optimizers.Adam(critic_lr), tf.optimizers.Adam(actor_lr)]
alg_params['optimizers_list'] = optimizers_list
learn_params = dict(train_episodes=1000,
test_episodes=100,
max_steps=200,
save_interval=50,
gamma=0.9,
a_update_steps=10,
c_update_steps=10,
n_workers=num_env,
batch_size=32)
return alg_params, learn_params
def robotics(env, default_seed=True):
if default_seed:
assert isinstance(env, list)
seed = np.arange(len(env)).tolist() # a list of seeds for each env
set_seed(seed, env) # reproducible
# for multi-threading
    if isinstance(env, list):  # check whether multiple envs were passed in for parallel computing
        num_env = len(env)  # number of envs passed in
        env = env[0]  # take one of the envs, as they are all the same
else:
num_env = 1
alg_params = dict(method='clip', # method can be clip or penalty
epsilon=0.2, # for method 'clip'
kl_target=0.01, # for method 'penalty'
lam=0.5 # for method 'penalty'
)
if alg_params.get('net_list') is None:
num_hidden_layer = 2 # number of hidden layers for the networks
hidden_dim = 64 # dimension of hidden layers for the networks
with tf.name_scope('DPPO'):
with tf.name_scope('V_Net'):
v_net = ValueNetwork(env.observation_space, [hidden_dim] * num_hidden_layer)
with tf.name_scope('Policy'):
policy_net = StochasticPolicyNetwork(env.observation_space, env.action_space,
[hidden_dim] * num_hidden_layer)
net_list = v_net, policy_net
alg_params['net_list'] = net_list
if alg_params.get('optimizers_list') is None:
actor_lr = 1e-4
critic_lr = 2e-4
optimizers_list = [tf.optimizers.Adam(critic_lr), tf.optimizers.Adam(actor_lr)]
alg_params['optimizers_list'] = optimizers_list
learn_params = dict(train_episodes=1000,
test_episodes=100,
max_steps=200,
save_interval=50,
gamma=0.9,
a_update_steps=10,
c_update_steps=10,
n_workers=num_env,
batch_size=32)
return alg_params, learn_params
def dm_control(env, default_seed=True):
if default_seed:
assert isinstance(env, list)
seed = np.arange(len(env)).tolist() # a list of seeds for each env
set_seed(seed, env) # reproducible
# for multi-threading
    if isinstance(env, list):  # check whether multiple envs were passed in for parallel computing
        num_env = len(env)  # number of envs passed in
        env = env[0]  # take one of the envs, as they are all the same
else:
num_env = 1
alg_params = dict(method='clip', # method can be clip or penalty
epsilon=0.2, # for method 'clip'
kl_target=0.01, # for method 'penalty'
lam=0.5 # for method 'penalty'
)
if alg_params.get('net_list') is None:
num_hidden_layer = 2 # number of hidden layers for the networks
hidden_dim = 64 # dimension of hidden layers for the networks
with tf.name_scope('DPPO'):
with tf.name_scope('V_Net'):
v_net = ValueNetwork(env.observation_space, [hidden_dim] * num_hidden_layer)
with tf.name_scope('Policy'):
policy_net = StochasticPolicyNetwork(env.observation_space, env.action_space,
[hidden_dim] * num_hidden_layer)
net_list = v_net, policy_net
alg_params['net_list'] = net_list
if alg_params.get('optimizers_list') is None:
actor_lr = 1e-4
critic_lr = 2e-4
optimizers_list = [tf.optimizers.Adam(critic_lr), tf.optimizers.Adam(actor_lr)]
alg_params['optimizers_list'] = optimizers_list
learn_params = dict(train_episodes=1000,
test_episodes=100,
max_steps=200,
save_interval=50,
gamma=0.9,
a_update_steps=10,
c_update_steps=10,
n_workers=num_env,
batch_size=32)
return alg_params, learn_params
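# A hypothetical sketch of how these per-suite defaults are consumed by a
# DPPO launcher (build_env and DPPO are assumed to come from rlzoo itself and
# are not defined in this file; the exact learn() signature is an assumption):
#
#     envs = [build_env('CartPole-v0', 'classic_control') for _ in range(4)]
#     alg_params, learn_params = classic_control(envs)
#     agent = DPPO(**alg_params)
#     agent.learn(envs, mode='train', **learn_params)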
|
11464209
|
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from src.genotype.cdn.nodes.module_node import ModuleNode
from typing import List, Tuple
from runs import runs_manager
from configuration import config
from src.genotype.neat.connection import Connection
from src.genotype.neat.genome import Genome
from src.genotype.neat.node import Node
from src.phenotype.neural_network.layers.layer import Layer
from src.analysis.visualisation.genome_visualiser import get_graph_of
class ModuleGenome(Genome):
    def __init__(self, nodes: List[Node], connections: List[Connection]):
super().__init__(nodes, connections)
def to_phenotype(self, **kwargs) -> Tuple[Layer, Layer]:
return super().to_phenotype(**kwargs)
def visualize(self):
get_graph_of(self).render(directory=runs_manager.get_graphs_folder_path(config.run_name),
view=config.view_graph_plots)
def get_size_estimate(self):
node: ModuleNode
size = 0
for node_id in self.get_fully_connected_node_ids():
node = self.nodes[node_id]
out_features = node.layer_type.get_subvalue('out_features')
if node.is_conv():
window_size = node.layer_type.get_subvalue('conv_window_size')
size += window_size**2 + out_features
if node.is_linear():
size += out_features **2
return size
|
11464246
|
from ctypes import *
cliodb = cdll.LoadLibrary("cliodb-ffi/target/debug/libcliodbffi.so")
cliodb.connect.argtypes = [c_char_p, c_char_p, c_void_p]  # store uri, tx uri, out connection pointer
cliodb.connect.restype = c_int
cliodb.transact.argtypes = [c_void_p, c_char_p]
cliodb.transact.restype = c_int
# ValueTag enum
(VAL_ENTITY, VAL_IDENT, VAL_STRING, VAL_TIMESTAMP) = (0, 1, 2, 3)
class CValue(Structure):
_fields_ = [
("tag", c_int64),
("string_val", c_char_p),
("int_val", c_int64)
]
def value(self):
if self.tag == VAL_ENTITY:
return self.int_val
elif self.tag == VAL_IDENT:
# TODO: return an interned ident type
return self.string_val.decode()
elif self.tag == VAL_STRING:
return self.string_val.decode()
elif self.tag == VAL_TIMESTAMP:
# TODO: return a real timestamp
return self.string_val.decode()
else:
pass
# raise Exception("Unsupported tag: {}".format(self.tag))
ROW_CALLBACK = CFUNCTYPE(None, c_int32, POINTER(CValue))
def print_row(num_cols, c_val_p):
row = []
for i in range(num_cols):
row.append(c_val_p[i].value())
print(row)
cliodb.query.argtypes = [c_void_p, c_char_p, ROW_CALLBACK]
class Db(object):
def __init__(self, db_ptr):
self.db_ptr = db_ptr
def __del__(self):
cliodb.drop_db(self.db_ptr)
class ClioDB(object):
def __init__(self, store_uri, tx_uri):
"""Takes a ClioDB URL and returns a connection."""
if not store_uri:
raise Exception("store_uri must be provided")
if not tx_uri:
raise Exception("tx_uri must be provided")
self.conn_ptr = c_void_p()
cliodb.connect(store_uri.encode('utf-8'), tx_uri.encode('utf-8'), byref(self.conn_ptr))
def db(self):
db_ptr = c_void_p()
err = cliodb.get_db(self.conn_ptr, byref(db_ptr))
if err < 0:
# TODO: Set an error string
raise Exception("Error opening db")
return Db(db_ptr)
def transact(self, tx_string):
tx_bytes = tx_string.encode('utf-8')
ret = cliodb.transact(self.conn_ptr, tx_bytes)
if ret < 0:
# TODO: Set an error string
print("return value {}".format(ret))
raise Exception("Error executing transaction")
def close(self):
cliodb.close(self.conn_ptr)
class Query(object):
# TODO: queries should be parameterizable
def __init__(self, query_string):
self.query_string = query_string.encode('utf-8')
def run(self, db):
self.results = []
def row_cb(num_cols, row_ptr):
row = []
for i in range(num_cols):
row.append(row_ptr[i].value())
self.results.append(row)
cliodb.query(db.db_ptr, self.query_string, ROW_CALLBACK(row_cb))
return self.results
|
11464253
|
import os
from locust import TaskSet, task, HttpUser, between
QUIET_MODE = True if os.getenv("QUIET_MODE", "true").lower() in ['1', 'true', 'yes'] else False
TASK_DELAY_FROM = int(os.getenv("TASK_DELAY", "5"))
TASK_DELAY_TO = int(os.getenv("TASK_DELAY", "30"))
def log(message):
if not QUIET_MODE:
print(message)
class TestBehaviour(TaskSet):
@task
def task1(self):
log("running task1")
self.client.get("/")
class TestUser(HttpUser):
tasks = [TestBehaviour]
# wait between 5 and 30 seconds
wait_time = between(TASK_DELAY_FROM, TASK_DELAY_TO)
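# Typically launched with the locust CLI (the file name and host below are
# placeholders):
#
#     locust -f locustfile.py --host https://example.com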
|
11464277
|
from relogic.logickit.dataflow import DataFlow, Example, Feature, MiniBatch
from typing import List, Dict, Tuple
import torch
from relogic.logickit.utils import create_tensor
from relogic.logickit.tokenizer.tokenization import BertTokenizer
class DependencyParsingExample(Example):
"""DependencyParsingExample
"""
def __init__(self, text, arcs=None, labels=None, lang=None):
super(DependencyParsingExample, self).__init__()
self.text = text
self.raw_tokens = text.split()
self.arcs = arcs
self.labels = labels
self.lang = lang
self.label_padding = "X"
def process(self, tokenizers: Dict, *inputs, **kwargs):
for tokenizer in tokenizers.values():
if isinstance(tokenizer, BertTokenizer):
self.text_tokens, self.text_is_head = tokenizer.tokenize(self.text)
self.tokens = ["[CLS]"] + self.text_tokens + ["[SEP]"]
self.segment_ids = [0] * (len(self.tokens))
self.is_head = [2] + self.text_is_head + [2]
self.head_index = [idx for idx, value in enumerate(self.is_head) if value == 1]
self.input_ids = tokenizer.convert_tokens_to_ids(self.tokens)
self.input_mask = [1] * len(self.input_ids)
if self.arcs is not None and self.labels is not None:
label_mapping = kwargs.get("label_mapping")
self.label_padding_id = label_mapping[self.label_padding]
self.label_ids = [self.label_padding_id] * len(self.input_ids)
self.arcs_ids = [-1] * len(self.input_ids)
assert(len(self.labels) == len(self.arcs))
for idx, label, arc in zip(self.head_index, self.labels, self.arcs):
self.label_ids[idx] = label_mapping[label]
self.arcs_ids[idx] = arc
else:
self.label_ids = None
self.arcs_ids = None
if self.lang is not None:
language_name2id = kwargs.get("language_name2id")
if language_name2id is not None:
self.lang_id = language_name2id[self.lang]
@classmethod
def from_structure(cls, structure):
return cls(text=structure.text)
@classmethod
def from_json(cls, example):
return cls(text=" ".join(example["tokens"]),
arcs=example.get("arcs", None),
labels=example.get("labels", None),
lang=example.get("lang", None))
@property
def len(self):
return len(self.input_ids)
class DependencyParsingFeature(Feature):
"""
Sequence Features
"""
def __init__(self, *inputs, **kwargs):
super().__init__()
self.input_ids = kwargs.pop("input_ids")
self.input_mask = kwargs.pop("input_mask")
self.segment_ids = kwargs.pop("segment_ids")
self.is_head = kwargs.pop("is_head")
self.arcs_ids = kwargs.pop("arcs_ids")
self.label_ids = kwargs.pop("label_ids")
self.lang_ids = kwargs.pop("lang_ids", None)
class DependencyParsingMiniBatch(MiniBatch):
def __init__(self, *inputs, **kwargs):
super().__init__(*inputs, **kwargs)
def generate_input(self, device, use_label):
inputs = {}
inputs["task_name"] = self.task_name
inputs["input_ids"] = create_tensor(self.input_features, "input_ids", torch.long, device)
inputs["input_mask"] = create_tensor(self.input_features, "input_mask", torch.long, device)
inputs["segment_ids"] = create_tensor(self.input_features, "segment_ids", torch.long, device)
inputs["input_head"] = create_tensor(self.input_features, "is_head", torch.long, device)
if use_label:
label_ids = create_tensor(self.input_features, "label_ids", torch.long, device)
inputs["label_ids"] = label_ids
arcs_ids = create_tensor(self.input_features, "arcs_ids", torch.long, device)
inputs["arcs_ids"] = arcs_ids
else:
inputs["label_ids"] = None
inputs["arcs_ids"] = None
inputs["lang_ids"] = create_tensor(self.input_features, "lang_ids",
torch.long, device)
inputs["extra_args"] = {}
return inputs
class DependencyParsingDataFlow(DataFlow):
def __init__(self, config, task_name, tokenizers, label_mapping):
super().__init__(config, task_name, tokenizers, label_mapping)
self._inv_label_mapping = {v: k for k, v in label_mapping.items()}
@property
def example_class(self):
return DependencyParsingExample
@property
def minibatch_class(self):
return DependencyParsingMiniBatch
def process_example(self, example: DependencyParsingExample):
example.process(tokenizers=self.tokenizers,
label_mapping=self.label_mapping,
language_name2id=None)
def convert_examples_to_features(self, examples: List[DependencyParsingExample]):
features = []
max_token_length = max([example.len for example in examples])
for idx, example in enumerate(examples):
padding = [0] * (max_token_length - example.len)
input_ids = example.input_ids + padding
input_mask = example.input_mask + padding
segment_ids = example.segment_ids + padding
is_head = example.is_head + [2] * (max_token_length - example.len)
if example.label_ids is not None and example.arcs_ids is not None:
label_ids = example.label_ids + [example.label_padding_id] * (max_token_length - example.len)
arcs_ids = example.arcs_ids + [-1] * (max_token_length - example.len)
else:
label_ids = None
arcs_ids = None
features.append(DependencyParsingFeature(
input_ids=input_ids,
input_mask=input_mask,
is_head=is_head,
segment_ids=segment_ids,
label_ids=label_ids,
arcs_ids=arcs_ids))
return features
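# Padding sketch for convert_examples_to_features: in a batch whose longest
# example has 8 wordpieces, an example of length 5 is padded with three 0s on
# input_ids/input_mask/segment_ids, three 2s on is_head, the "X" padding label
# id on label_ids and -1 on arcs_ids, so downstream code can mask them out.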
|
11464291
|
import os
import sys
import subprocess
import filecmp
testfile = '/home/rcarley/toe/hls/toe/toe_prj/solution2/ctest.tcl'
number_of_tests = 5
projectname = 'toe_prj'
##projectname = '/home/rcarley/toe/hls/toe/toe_prj/'
vivado_path = "vivado_hls"
# infile = "in_toe.dat"
# outfile = "out_toe.dat"
curr_path = os.getcwd()
##curr_dir = os.getcwd().split(os.sep)[-1]
dir = curr_path+'/'
io_dir = '/home/rcarley/toe/tools/tcpPacketGenerate/'
# /home/rcarley/toe/tools/tcpPacketGenerate/in_ooo1.dat /home/rcarley/toe/tools/tcpPacketGenerate/rx_ooo1.dat /home/rcarley/toe/tools/tcpPacketGenerate/tx_ooo1.dat
def run_ctest(testNumber,ooo):
testNumber = str(testNumber)
    retcode = 0
# if os.path.exists(testNumber+'.out'):
# os.remove(testNumber+'.out')
# testfile = dir+projectname+'/solution2/ctest.tcl'
f = open(testfile, 'w')
f.write('open_project '+projectname+'\n')
f.write('open_solution "solution2"\n')
# set_part {xc7vx690tffg1761-2}
# create_clock -period 6.4 -name default
# set_clock_uncertainty 0.83
if (ooo == 0):
stringOOO = ""
sys.stdout.write("Running in-order test \""+testNumber+"\" \n")
else:
stringOOO = "_ooo"
sys.stdout.write("Running OOO test \""+testNumber+"\" \n")
sys.stdout.flush()
inputFileName =io_dir+"in"+stringOOO+testNumber+".dat "
rxOutputFileName =io_dir+"rx"+stringOOO+testNumber+".dat "
txOutputFileName =io_dir+"tx"+stringOOO+testNumber+".dat "
goldRxFileName =io_dir+"rx"+stringOOO+testNumber+".gold "
goldTxFileName =io_dir+"tx"+stringOOO+testNumber+".gold "
# f.write('csim_design -argv {'+io_dir+'in'+stringOOO+testNumber+'.dat '+io_dir+'rx'+stringOOO+testNumber+'.dat '+io_dir+'tx'+stringOOO+testNumber+'.dat '+io_dir+'rx'+stringOOO+testNumber+'.gold} -clean\n')
f.write('csim_design -argv {'+ inputFileName + rxOutputFileName + txOutputFileName + goldRxFileName +'} -clean\n')
f.write('exit\n')
f.close()
DEVNULL = open('test.log', 'a')
# subprocess.call([vivado_path, "-f", testfile], stdout=DEVNULL, stderr=subprocess.STDOUT)
# print('csim_design -argv {'+ inputFileName + rxOutputFileName + txOutputFileName + goldRxFileName +'} -clean\n')
# Uncomment this:::
retcode = subprocess.call([vivado_path, "-f", testfile], stdout=DEVNULL, stderr=subprocess.STDOUT)
DEVNULL.close()
    if retcode == 0:
        print('\033[32m[Test Successful]\033[0m')
    else:
        print('\033[31m[Test Unsuccessful]\033[0m')
# if os.path.exists(testname+'.out'):
# diff = filecmp.cmp(testname+'.out', testname+'.gold')
# else:
# diff = False
# for i in range(len(testname), 50):
# sys.stdout.write(' ')
# if diff:
# print '\033[32m[PASSED]\033[0m'
# else:
# print '\033[31m[FAILED]\033[0m'
# def test_dir(dir, projectname):
# print "Testing project \""+dir+"\"."
# for filename in os.listdir(dir):
# if (len(filename) > 5) and (filename[-5:] == ".gold"):
# #print "valid gold file: "+filename
# if os.path.exists(filename[:-5]+".in"):
# run_ctest(filename[:-5], dir+'/', projectname)
def main():
    print('Testing TOE Project')
# run_ctest(0) # run base test
for testNumber in range(5,(number_of_tests+1)):
run_ctest(testNumber,0)
run_ctest(testNumber,1)
# for filename in os.listdir(dir):
# if filename[] and (filename[-5:] == ".gold"):
# #print "valid gold file: "+filename
# if os.path.exists(filename[:-5]+".in"):
# run_ctest(filename[:-5], dir+'/', projectname)
# valid_dir = False
# for filename in os.listdir(os.getcwd()):
# if filename == curr_dir+"_proj":
# valid_dir = True
# test_dir(os.getcwd(), filename)
# break
# if not valid_dir:
# print "The directory \""+curr_dir+"\" is not valid"
# sys.exit()
#subprocess.call([vivado_path, "-f", solution_dir+"blub.tcl", "-tclargs", "blubl.in"])
if __name__ == '__main__':
    main()
|
11464308
|
import requests
import time
from datetime import datetime,timedelta
import dateutil.parser
class GetTopPostTime():
def set_info(self,parameters):
"""
Set the info about a parameter
Args:
self: (todo): write your description
parameters: (dict): write your description
"""
self.access_token = parameters.get("access_token","")#add your access token
self.no_of_days_from_now = parameters.get("no_of_days",7)
self.since = parameters.get("since",str((datetime.now()-timedelta(days=self.no_of_days_from_now)).date()))
self.upto = parameters.get("upto",str(datetime.now().date()-timedelta(1)))
self.page_id = parameters.get("page_id",'508160905939091')
self.like_score = parameters.get('like_score', 2)
self.comment_score = parameters.get('comment_score',3)
self.share_score = parameters.get('share_score',4)
self.reaction_score = parameters.get('reaction_score',1)
self.max_post_score = 2000000000
def get_all_posts_between_range(self):
"""
Get all posts in the blog.
Args:
self: (todo): write your description
"""
output_format="%Y-%m-%d"
result = requests.get("https://graph.facebook.com/"+str(self.page_id)+"/feed?&access_token="+self.access_token+"&limit=100")
posts = []
no_of_iterations = 4
loop_break = False
while no_of_iterations > 0:
if result.status_code == 200 and "data" in result.json() and result.json()["data"]:
for single_post in result.json()["data"]:
created_date = dateutil.parser.parse(single_post["created_time"],fuzzy=True).strftime(output_format)
if created_date>self.upto:
continue
elif created_date>=self.since and created_date<=self.upto:
posts.append(single_post)
else:
loop_break = True
break
if loop_break:
break
else:
if "paging" in result.json() and "next" in result.json()["paging"]:
result = requests.get(result.json()["paging"]["next"])
else:
break
no_of_iterations-=1
return posts
def get_stats_for_each_post(self,result_posts):
"""
Obtain the stats for a list of posts.
Args:
self: (todo): write your description
result_posts: (str): write your description
"""
max_count,time = 0,0
top_posts,top_posts_ids = [],[]
index = 0
for each_post in result_posts:
count = 0
result = requests.get("https://graph.facebook.com/"+str(each_post["id"])+"/insights/post_impressions?&access_token="+self.access_token)
if result.status_code == 200 and "data" in result.json() and result.json()["data"]:
count+=result.json()["data"][0]["values"][0]["value"]
result = requests.get("https://graph.facebook.com/"+str(each_post["id"])+"/insights/post_engagements?&access_token="+self.access_token)
if result.status_code == 200 and "data" in result.json() and result.json()["data"]:
count+=result.json()["data"][0]["values"][0]["value"]
result = requests.get("https://graph.facebook.com/"+str(each_post["id"])+"/insights/post_consumptions_by_type_unique?access_token="+self.access_token)
if result.status_code == 200 and "data" in result.json() and result.json()["data"]:
if "other clicks" in result.json()["data"][0]["values"][0]["value"]:
count+=result.json()["data"][0]["values"][0]["value"]["other clicks"]
if "link clicks" in result.json()["data"][0]["values"][0]["value"]:
count+=result.json()["data"][0]["values"][0]["value"]["link clicks"]
top_posts.append((count,index))
top_posts_ids.append(each_post["id"])
index+=1
if count>max_count:
max_count = count
time = each_post["created_time"]
output = {}
time_format = "%H:%M:%S"
output["time"] = k=dateutil.parser.parse(time,fuzzy=True).strftime(time_format)
days = {0:"munday",1:"tuesday",2:"wednesday",3:"thursday",4:"friday",5:"saturday",6:"sunday"}
date_format = "%Y-%m-%d"
date = dateutil.parser.parse(time,fuzzy=True).strftime(date_format)
date = date.split("-")
output["day"] = days[datetime(int(date[0]),int(date[1]),int(date[2])).weekday()]
top_posts = sorted(top_posts)[-5:]
final_result = []
for each_post in top_posts:
final_result.append(top_posts_ids[each_post[1]])
result = self.get_full_details_of_ids(final_result)
output["top_posts"] = result
return output
def get_full_details_of_ids(self,ids):
"""
A method to get details about all posts.
Args:
self: (todo): write your description
ids: (list): write your description
"""
top_posts = []
for post_id in ids:
post_dictionary = {}
post = requests.get("https://graph.facebook.com/"+str(post_id)+"/?fields=id,message,description,picture,created_time,type,attachments,link,permalink_url&access_token="+self.access_token).json()
post_dictionary['page_id'] = self.page_id
post_dictionary['page_name'] = self.page_id
post_dictionary['page_url'] = 'https://www.facebook.com/'+str(self.page_id)
post_dictionary['post_id'] = post_id
post_dictionary['post_title'] = post['message'] if 'message' in post.keys() else "<No Message>"
post_dictionary['post_description'] = post['description'] if 'description' in post.keys() else ""
if 'attachments' in post.keys() and 'data' in post['attachments'] and post['attachments']['data'] and 'media' in post['attachments']['data'][0]:
post_dictionary['post_image_url'] = post['attachments']['data'][0]['media']['image']['src']
elif 'picture' in post.keys():
post_dictionary['post_image_url'] = post['picture']
else:
post_dictionary['post_image_url'] = ""
if post_dictionary['post_title'] == "<No Message>" and 'attachments' in post.keys() and 'data' in post['attachments'] and post['attachments']['data'] and 'title' in post['attachments']['data'][0]:
post_dictionary['post_title'] = post['attachments']['data'][0]['title']
post_dictionary['score'] = 0
#post_dictionary['post_url'] = 'https://www.facebook.com/'+post['id']
post_dictionary['post_url'] = post['link'] if 'link' in post else post['permalink_url'] #copying 3rd party link if not exists copying FB permalink
#post_dictionary['source_url'] = urlparse(post['link']).netloc if 'link' in post else "" #we were storing only domain previously
post_dictionary['source_url'] = post['permalink_url']
post_dictionary['post_type'] = post['type']
post_dictionary['FB_post_id'] = post_dictionary['post_id']
post_dictionary['normalised_score'] = float('%.10f'%(post_dictionary['score']/self.max_post_score *100)) # Take upto 10 decimals
top_posts.append(post_dictionary)
return top_posts
def get_result(self):
"""
Returns a list of all posts.
Args:
self: (todo): write your description
"""
result = self.get_all_posts_between_range()
top_time = self.get_stats_for_each_post(result)
return top_time
if __name__=="__main__":
parameters = {
}
obj = GetTopPostTime()
obj.set_info(parameters)
result = obj.get_result()
print(result)
|
11464344
|
import logging
import os
import numpy as np
bigrams_pool = []
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'bigrams_new.txt'), 'r') as f:
for line in f:
bigrams_pool.append(line.split())
def build_phoc(words,
phoc_unigrams,
unigram_levels,
bigram_levels=None,
phoc_bigrams=None,
split_character=None,
on_unknown_unigram='error'):
"""Calculate Pyramidal Histogram of Characters (PHOC) descriptor (see
Almazan 2014).
Args:
        words (list of str): words to calculate descriptors for
        phoc_unigrams (str): string of all unigrams to use in the PHOC
        unigram_levels (list of int): the levels for the unigrams in PHOC
        phoc_bigrams (list of str): list of bigrams to be used in the PHOC
        bigram_levels (list of int): the levels of the bigrams in the PHOC
        split_character (str): special character to split the word strings into characters
        on_unknown_unigram (str): what to do if a unigram appearing in a word
            is not among the supplied phoc_unigrams. Possible: 'warn', 'error'
    Returns:
        the PHOC array for the given words, one row per word
"""
# prepare output matrix
logger = logging.getLogger('PHOCGenerator')
if on_unknown_unigram not in ['error', 'warn']:
raise ValueError('I don\'t know the on_unknown_unigram parameter \'%s\'' % on_unknown_unigram)
phoc_size = len(phoc_unigrams) * np.sum(unigram_levels)
if phoc_bigrams is not None:
phoc_size += len(phoc_bigrams) * np.sum(bigram_levels)
phocs = np.zeros((len(words), phoc_size))
# prepare some lambda functions
# occupancy = lambda k, n: [float(k) / n, float(k + 1) / n]
def occupancy(k, n):
return [float(k) / n, float(k + 1) / n]
# overlap = lambda a, b: [max(a[0], b[0]), min(a[1], b[1])]
def overlap(a, b):
return [max(a[0], b[0]), min(a[1], b[1])]
# size = lambda region: region[1] - region[0]
def size(region):
return region[1] - region[0]
# map from character to alphabet position
char_indices = {d: i for i, d in enumerate(phoc_unigrams)}
# iterate through all the words
for word_index, word in enumerate(words):
if split_character is not None:
word = word.split(split_character)
n = len(word)
for index, char in enumerate(word):
word = word.replace(' ', '').replace('-', '')
char_occ = occupancy(index, n)
if char not in char_indices:
if on_unknown_unigram == 'warn':
# logger.warning('The unigram \'%s\' is unknown, skipping this character', char)
continue
else:
print('The unigram \'%s\' is unknown' % char)
logger.fatal('The unigram \'%s\' is unknown', char)
raise ValueError()
char_index = char_indices[char]
for level in unigram_levels:
for region in range(level):
region_occ = occupancy(region, level)
if size(overlap(char_occ, region_occ)) / size(char_occ) >= 0.5:
feat_vec_index = sum([l for l in unigram_levels if l < level
]) * len(phoc_unigrams) + region * len(phoc_unigrams) + char_index
phocs[word_index, feat_vec_index] = 1
# add bigrams
if phoc_bigrams is not None:
ngram_features = np.zeros(len(phoc_bigrams) * np.sum(bigram_levels))
# ngram_occupancy = lambda k, n: [float(k) / n, float(k + 2) / n]
def ngram_occupancy(k, n):
return [float(k) / n, float(k + 2) / n]
for i in range(n - 1):
ngram = word[i:i + 2]
phoc_dict = {k: v for v, k in enumerate(phoc_bigrams)}
                if ngram not in phoc_dict:
continue
occ = ngram_occupancy(i, n)
for level in bigram_levels:
for region in range(level):
region_occ = occupancy(region, level)
overlap_size = size(overlap(occ, region_occ)) / size(occ)
if overlap_size >= 0.5:
ngram_features[region * len(phoc_bigrams) + phoc_dict[ngram]] = 1
phocs[word_index, -ngram_features.shape[0]:] = ngram_features
return phocs
def phoc(raw_word):
'''
:param raw_word: string of word to be converted
:return: phoc representation as a np.array (1,604)
'''
word = [raw_word]
word_lowercase = word[0].lower()
word = [word_lowercase]
phoc_unigrams = 'abcdefghijklmnopqrstuvwxyz0123456789'
unigram_levels = [2, 3, 4, 5]
bigram_levels = []
bigram_levels.append(2)
phoc_bigrams = []
i = 0
for a in bigrams_pool:
phoc_bigrams.append(a[0].lower())
# phoc_bigrams.append(list(a[0])[0])
# phoc_bigrams.append(list(a[0])[1])
i = i + 1
if i >= 50:
break
qry_phocs = build_phoc(
words=word,
phoc_unigrams=phoc_unigrams,
unigram_levels=unigram_levels,
bigram_levels=bigram_levels,
phoc_bigrams=phoc_bigrams,
on_unknown_unigram='warn')
return qry_phocs
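# A minimal usage sketch (requires bigrams_new.txt next to this module, as
# loaded at import time above):
#
#     vec = phoc("hello")
#     print(vec.shape)  # (1, 604): 36 unigrams * (2+3+4+5) levels + 50 bigrams * 2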
|
11464349
|
import math
import numpy as np
import copy
import random
from fireplace.exceptions import GameOver, InvalidAction
EPS = 1e-8
class MCTS():
"""
This class handles the MCTS tree.
"""
def __init__(self, game, nnet, args):
self.game = game
self.nnet = nnet
self.args = args
self.Qsa = {} # stores Q values for s,a (as defined in the paper)
self.Nsa = {} # stores #times edge s,a was visited
self.Ns = {} # stores #times board s was visited
self.Ps = {} # stores initial policy (returned by neural net)
self.Es = {} # stores game.getGameEnded ended for board s
self.Vs = {} # stores game.getValidMoves for board s
def getActionProb(self, state, temp=1):
"""
This function performs numMCTSSims simulations of MCTS starting from
the state.
Returns:
probs: a policy vector where the probability of the ith action is
proportional to Nsa[(s,a)]**(1./temp)
"""
for i in range(self.args.numMCTSSims):
self.search(state, create_copy=True)
s = self.game.stringRepresentation(state)
counts = [self.Nsa[(s,(a,b))] if (s,(a,b)) in self.Nsa else 0 for a in range(21) for b in range(18)]
if temp==0:
bestA = np.argmax(counts)
probs = [0]*len(counts)
probs[bestA]=1
return probs
counts = [x**(1./temp) for x in counts]
probs = [x/float(sum(counts)) for x in counts]
return probs
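    # Worked example of the temperature scaling above: with visit counts
    # [8, 2] and temp=1 the returned policy is [0.8, 0.2]; as temp -> 0 the
    # temp==0 branch collapses it to a one-hot on the most-visited action.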
def cloneAndRandomize(self, game):
""" Create a deep clone of this game state, randomizing any information not visible to the specified observer player.
"""
game_copy = copy.deepcopy(game)
enemy = game_copy.current_player.opponent
random.shuffle(enemy.hand)
random.shuffle(enemy.deck)
# for idx, card in enumerate(enemy.hand):
# if card.id == 'GAME_005':
# coin = enemy.hand.pop(idx)
#
# combined = enemy.hand + enemy.deck
# random.shuffle(combined)
# enemy.hand, enemy.deck = combined[:len(enemy.hand)], combined[len(enemy.hand):]
# enemy.hand.append(coin)
return game_copy
def search(self, state, create_copy):
"""
NEEDS TO RUN ON DEEPCOPY!!!
This function performs one iteration of MCTS. It is recursively called
till a leaf node is found. The action chosen at each node is one that
has the maximum upper confidence bound as in the paper.
Once a leaf node is found, the neural network is called to return an
        initial policy P and a value v for the state. This value is propagated
        up the search path. In case the leaf node is a terminal state, the
        outcome is propagated up the search path. The values of Ns, Nsa, Qsa are
updated.
NOTE: the return values are the negative of the value of the current
state. This is done since v is in [-1,1] and if v is the value of a
state for the current player, then its value is -v for the other player.
Returns:
v: the negative of the value of the current state
"""
if create_copy:
self.game_copy = self.cloneAndRandomize(self.game.b.game)
s = self.game.stringRepresentation(state)
if s not in self.Es:
self.Es[s] = self.game.getGameEnded(self.game_copy)
if self.game_copy.ended or self.game_copy.turn > 180:
# terminal node
return -self.Es[s]
if s not in self.Ps:
# leaf node
self.Ps[s], v = self.nnet.predict(state)
valids = self.game.getValidMoves(self.game_copy)
self.Ps[s] = self.Ps[s]*valids # masking invalid moves
sum_Ps_s = np.sum(self.Ps[s])
if sum_Ps_s > 0:
self.Ps[s] /= sum_Ps_s # renormalize
else:
# if all valid moves were masked make all valid moves equally probable
                # NB! All valid moves may be masked if either your NNet architecture is insufficient or you've got overfitting or something else.
# If you have got dozens or hundreds of these messages you should pay attention to your NNet and/or training process.
print("All valid moves were masked, do workaround.")
self.Ps[s] = self.Ps[s] + valids
self.Ps[s] /= np.sum(self.Ps[s])
self.Vs[s] = valids
self.Ns[s] = 0
return -v
valids = self.Vs[s]
cur_best = -float('inf')
best_act = -1
# pick the action with the highest upper confidence bound
for a in range(21):
for b in range(18):
if valids[a,b]:
if (s,(a,b)) in self.Qsa:
u = self.Qsa[(s,(a,b))] + self.args.cpuct*self.Ps[s][a,b]*math.sqrt(self.Ns[s])/(1+self.Nsa[(s,(a,b))])
else:
u = self.args.cpuct*self.Ps[s][a,b]*math.sqrt(self.Ns[s] + EPS) # Q = 0 ?
if u > cur_best:
cur_best = u
best_act = (a,b)
a = best_act
next_s, next_player = self.game.getNextState(1, a, self.game_copy)
next_s = self.game.getState(self.game_copy)
if not self.game_copy.ended:
v = self.search(next_s, create_copy=False) #call recursively
else:
v = -self.Es[s]
if (s,a) in self.Qsa:
self.Qsa[(s,a)] = (self.Nsa[(s,a)]*self.Qsa[(s,a)] + v)/(self.Nsa[(s,a)]+1)
self.Nsa[(s,a)] += 1
else:
self.Qsa[(s,a)] = v
self.Nsa[(s,a)] = 1
self.Ns[s] += 1
return -v
|
11464422
|
import contextlib
import io
import time
import asyncio
import nest_asyncio
from logging import StreamHandler
from aiohttp import ClientSession
nest_asyncio.apply()
DEFAULT_PAYLOAD = {"disable_web_page_preview": True, "parse_mode": "Markdown"}
class TelegramLogHandler(StreamHandler):
"""
Handler to send logs to telegram chats.
Parameters:
token: a telegram bot token to interact with telegram API.
        log_chat_id: chat id of the chat to which logs are to be sent.
        update_interval: interval between two posts in seconds.
            Lower intervals will lead to flood waits;
            a value greater than 5 seconds is recommended.
        minimum_lines: minimum number of new lines required to post / edit a message.
        pending_logs: maximum number of characters of pending logs before they are sent as a file.
            Defaults to 200000. Useful for apps producing lengthy logs within a few minutes.
"""
def __init__(
self,
token: str,
log_chat_id: int,
update_interval: int = 5,
minimum_lines: int = 1,
pending_logs: int = 200000,
):
StreamHandler.__init__(self)
self.loop = asyncio.get_event_loop()
self.token = token
self.log_chat_id = log_chat_id
self.wait_time = update_interval
self.minimum = minimum_lines
self.pending = pending_logs
self.messages = ""
self.current_msg = ""
self.floodwait = 0
self.message_id = 0
self.lines = 0
self.last_update = 0
self.base_url = f"https://api.telegram.org/bot{token}"
DEFAULT_PAYLOAD.update({"chat_id": log_chat_id})
def emit(self, record):
msg = self.format(record)
self.lines += 1
self.messages += f"{msg}\n"
diff = time.time() - self.last_update
if diff >= max(self.wait_time, self.floodwait) and self.lines >= self.minimum:
if self.floodwait:
self.floodwait = 0
self.loop.run_until_complete(self.handle_logs())
self.lines = 0
self.last_update = time.time()
async def handle_logs(self):
if len(self.messages) > self.pending:
_msg = self.messages
msg = _msg.rsplit("\n", 1)[0]
if not msg:
msg = _msg
self.current_msg = ""
self.message_id = 0
self.messages = self.messages[len(msg) :]
await self.send_as_file(msg) # sending as document
return
_message = self.messages[:4050] # taking first 4050 characters
msg = _message.rsplit("\n", 1)[0]
if not msg:
msg = _message
letter_count = len(msg)
# removing these messages from the list
self.messages = self.messages[letter_count:]
if not self.message_id:
uname, is_alive = await self.verify_bot()
if not is_alive:
print("TGLogger: [ERROR] - Invalid bot token provided.")
await self.initialise() # Initializing by sending a message
await self.edit_message(f"Logging started by @{uname}")
computed_message = self.current_msg + msg
if len(computed_message) > 4050:
_to_edit = computed_message[:4050]
to_edit = _to_edit.rsplit("\n", 1)[0]
if not to_edit:
to_edit = _to_edit # incase of lengthy lines
to_new = computed_message[len(to_edit) :]
if to_edit != self.current_msg:
await self.edit_message(to_edit)
self.current_msg = to_new
await self.send_message(to_new)
else:
await self.edit_message(computed_message)
self.current_msg = computed_message
async def send_request(self, url, payload):
async with ClientSession() as session:
async with session.request("POST", url, json=payload) as response:
e = await response.json()
return e
async def verify_bot(self):
res = await self.send_request(f"{self.base_url}/getMe", {})
if res.get("error_code") == 401 and res.get("description") == "Unauthorized":
return None, False
elif res.get("result").get("username"):
return res.get("result").get("username"), True
async def initialise(self):
payload = DEFAULT_PAYLOAD.copy()
payload["text"] = "```Initializing```"
url = f"{self.base_url}/sendMessage"
res = await self.send_request(url, payload)
if res.get("ok"):
result = res.get("result")
self.message_id = result.get("message_id")
else:
await self.handle_error(res)
async def send_message(self, message):
payload = DEFAULT_PAYLOAD.copy()
payload["text"] = f"```{message}```"
url = f"{self.base_url}/sendMessage"
res = await self.send_request(url, payload)
if res.get("ok"):
result = res.get("result")
self.message_id = result.get("message_id")
else:
await self.handle_error(res)
async def edit_message(self, message):
payload = DEFAULT_PAYLOAD.copy()
payload["message_id"] = self.message_id
payload["text"] = f"```{message}```"
url = f"{self.base_url}/editMessageText"
res = await self.send_request(url, payload)
if not res.get("ok"):
await self.handle_error(res)
async def send_as_file(self, logs):
file = io.BytesIO(logs.encode())
file.name = "tglogging.logs"
url = f"{self.base_url}/sendDocument"
payload = DEFAULT_PAYLOAD.copy()
payload["caption"] = "Too much logs to send and hence sending as file."
files = {"document": file}
with contextlib.suppress(BaseException):
del payload["disable_web_page_preview"]
async with ClientSession() as session:
async with session.request(
"POST", url, params=payload, data=files
) as response:
res = await response.json()
if res.get("ok"):
print("Logs send as a file since there were too much lines to print.")
else:
await self.handle_error(res)
async def handle_error(self, resp: dict):
error = resp.get("parameters", {})
if not error:
if (
resp.get("error_code") == 401
and resp.get("description") == "Unauthorized"
):
return
print(f"Errors while updating TG logs {resp}")
return
if error.get("retry_after"):
self.floodwait = error.get("retry_after")
            print(f'Floodwait of {error.get("retry_after")} seconds, sleeping')
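# A minimal usage sketch (the bot token and chat id below are placeholders):
#
#     import logging
#     handler = TelegramLogHandler(token="123456:ABC-DEF", log_chat_id=-1001234567890)
#     handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
#     logging.getLogger().addHandler(handler)
#     logging.warning("hello from tglogging")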
|
11464438
|
import sys
from json import loads
from os import getenv
from os.path import join, dirname
standard_location = join(dirname(__file__), './config.json')
env_var_config_file_path = getenv('TELEGRAM_PI_BOT_CONFIG', standard_location)
input_config_file_path = sys.argv[1] if len(sys.argv) > 1 and sys.argv[1] else env_var_config_file_path
try:
config_file = open(input_config_file_path, 'r', encoding='utf-8')
except FileNotFoundError:
print('A config.json file needs to be provided')
exit(1)
content = loads(config_file.read())
def get(key):
return content[key]
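# A minimal usage sketch, assuming this module is importable as `config` and
# that the loaded config.json contains a hypothetical "token" key:
#
#     import config
#     bot_token = config.get("token")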
|
11464479
|
from .PBXResolver import *
from .PBX_Constants import *
class PBX_Base(object):
    def __init__(self, lookup_func, dictionary, project, identifier):
        # default 'name' property of a PBX object is the type
        self.name = self.__class__.__name__
        # this is the identifier for this object
        self.identifier = str(identifier)
        # set of any referenced identifiers on this object
        self.referencedIdentifiers = set()
    def __attrs(self):
        return (self.identifier)
    def __repr__(self):
        return '(%s : %s : %s)' % (type(self), self.name, self.identifier)
    def __eq__(self, other):
        return isinstance(other, type(self)) and self.identifier == other.identifier
    def __hash__(self):
        return hash(self.__attrs())
    def resolve(self, type, item_list):
        return filter(lambda item: isinstance(item, type), item_list)
    def fetchObjectFromProject(self, lookup_func, identifier, project):
        # memoized lookup: reuse an already-parsed object from the project if
        # present, otherwise parse it from the raw dictionary and cache it
        find_object = project.objectForIdentifier(identifier)
        if find_object is None:
            result = lookup_func(project.contents[kPBX_objects][identifier])
            if result[0] == True:
                find_object = result[1](lookup_func, project.contents[kPBX_objects][identifier], project, identifier)
                project.objects.add(find_object)
        return find_object
    def parseProperty(self, prop_name, lookup_func, dictionary, project, is_array):
        dict_item = dictionary[prop_name]
        if is_array == True:
            property_list = []
            for item in dict_item:
                self.referencedIdentifiers.add(item)
                find_object = self.fetchObjectFromProject(lookup_func, item, project)
                property_list.append(find_object)
            return property_list
        else:
            self.referencedIdentifiers.add(dict_item)
            return self.fetchObjectFromProject(lookup_func, dict_item, project)
|
11464487
|
from datetime import datetime, timedelta
import re
from ebedke.utils.date import on_workdays, days_lower
from ebedke.utils.http import get_dom
from ebedke.pluginmanager import EbedkePlugin
URL = "http://www.monks.hu/etlap"
@on_workdays
def get_menu(today):
dom = get_dom(URL)
week_date = dom.xpath("/html/body//div//li//a[contains(text(), 'MENÜ')]/text()")
from_date, to_date = re.split(r" |-", week_date.pop())[-2:]
from_date = datetime.strptime(f"{today.year}.{from_date}", "%Y.%m.%d.")
to_date = datetime.strptime(f"{today.year}.{to_date}", "%Y.%m.%d.")
menu = []
if from_date.date() <= today.date() <= to_date.date():
rows = dom.xpath("/html/body//tr[count(td)=2]")
for row in rows:
if days_lower[today.weekday()] in row.text_content().lower():
menu = row.xpath(".//td[2]//text()")
return menu
plugin = EbedkePlugin(
enabled=True,
groups=["ferenciek"],
name="Monks",
id="mo",
url=URL,
downloader=get_menu,
ttl=timedelta(hours=24),
cards=[],
coord=(47.492625, 19.052698)
)
|
11464504
|
import numpy as np
from scipy.stats import norm
from PIL import Image, ImageDraw, ImageFont, ImageMath
from pyray.shapes.twod.paraboloid import *
from pyray.shapes.zerod.pointswarm import *
from pyray.rotation import *
from pyray.imageutils import *
from pyray.axes import *
from pyray.shapes.oned.curve import draw_curve
from pyray.misc import zigzag2
from pyray.global_vars import *
def betafn(alpha, effect, std):
    """Type II error rate (beta) of a one-sided z-test run at significance
    level alpha, when the true effect size is `effect` and the test statistic
    has standard deviation `std`."""
    return norm.cdf(-effect + norm.isf(alpha, 0, std), 0, std)
def draw_axes(draw, base_x=250, base_y=180):
font = ImageFont.truetype(font_loc, 15)
# Draw the axes first.
draw.line((base_x,base_y,base_x,base_y+150),fill=(255,0,0))
draw.line((base_x,base_y+150,base_x+150,base_y+150),fill=(0,255,0))
draw.line((base_x,base_y+150,base_x,base_y+150*2),fill=(255,0,0))
draw.line((base_x,base_y+150,base_x-150,base_y+150),fill=(0,255,0))
draw.text((base_x+150, base_y+150), "FPR", font=font)
draw.text((base_x, base_y), "TPR", font=font)
draw.text((base_x-150, base_y+150), "-FPR", font=font)
draw.text((base_x, base_y+150*2), "-TPR", font=font)
def draw_pt(draw,alpha=0.15865525393145707,effect=50,std=30,
base_x=250, base_y=180):
    # Compute the type II error rate for this alpha.
    beta = betafn(alpha, effect, std)
# Draw the point.
x1 = base_x+(alpha)*150; y1 = base_y+(beta)*150
draw.ellipse((x1-3,y1-3,x1+3,y1+3),outline=(255,255,0),fill=(255,255,0,150))
# The two lines from point to axes.
draw.line((x1,y1,base_x,y1),fill=(0,255,0))
draw.line((x1,y1,x1,base_y+150),fill=(255,0,0))
def draw_main_curve(draw, effect=50, std=30, base_x=250, base_y=180, alpha_mx=1.0):
pt1 = np.array([base_x,base_y+150]); moving_beta=0.0
for alp in np.arange(0.05,alpha_mx+.05,0.05):
moving_beta = betafn(alp,effect,std)
x1 = base_x+alp*150
y1 = base_y+(moving_beta)*150
draw.line((pt1[0],pt1[1],x1,y1))
pt1 = np.array([x1,y1])
def draw_neg_curve(draw, effect=50, std=30, base_x=250, base_y=180, alpha_mx=1.0):
pt1 = np.array([base_x,base_y+150])
moving_beta=0.0
for alp in np.arange(0.05,alpha_mx+.05,0.05):
moving_beta = betafn(alp,effect,std)
x1 = base_x-alp*150
y1 = base_y+150+(1-moving_beta)*150
draw.line((pt1[0],pt1[1],x1,y1),fill="orange",width=3)
pt1 = np.array([x1,y1])
effect = 50
std=30
for i in range(16):
pp = np.sin(i*2*np.pi/30)**2
im = Image.new("RGB", (512,512), "black")
draw = ImageDraw.Draw(im, 'RGBA')
draw_axes(draw)
draw_pt(draw, alpha=pp)
draw_main_curve(draw, effect, std, alpha_mx=pp)
draw_neg_curve(draw, alpha_mx=pp)
im.save(basedir + 'im' + str(i) + '.png')
|
11464506
|
import argparse
import logging
import pathlib
import joblib
import numpy as np
from sklearn.preprocessing import PowerTransformer, StandardScaler, MinMaxScaler
from util import init
def parse_args(run_name):
parser = argparse.ArgumentParser(description=run_name)
parser.add_argument('--scaler', type=str, default='PowerTransformer')
return parser.parse_args()
def dump(target_encoding, file_name):
save_dir = pathlib.Path('../data/07_te')
if not save_dir.exists():
save_dir.mkdir(parents=True)
joblib.dump(target_encoding, save_dir / f'{file_name}.joblib', compress=True)
def target_encode(v_sales, calendar, transform_sales=True, scaler=None):
v_sales = v_sales[['id', 'd', 'sales']].merge(calendar[['d', 'day_of_week']])
target_encoding = v_sales.groupby(['id', 'day_of_week'])['sales'].mean().reset_index()
if transform_sales:
target_encoding = target_encoding.pivot(index='id', columns='day_of_week', values='sales').T.to_dict(orient='list')
sales_transformers = joblib.load('../data/05_preprocess/agg_item/sales_transformers.joblib')
for data_id, scaler in sales_transformers.items():
target_encoding[data_id] = list(scaler.transform(np.array(target_encoding[data_id]).reshape(-1, 1)).reshape(-1))
else:
if scaler == 'StandardScaler':
scaler = StandardScaler()
elif scaler == 'MinMaxScaler':
scaler = MinMaxScaler()
else:
scaler = PowerTransformer()
target_encoding['sales'] = scaler.fit_transform(target_encoding[['sales']])
target_encoding = target_encoding.pivot(index='id', columns='day_of_week', values='sales').T.to_dict(orient='list')
return target_encoding
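# The returned mapping goes from each series id to a list with one
# (optionally transformed) mean-sales value per day_of_week, e.g.
# {"some_id": [m_0, ..., m_6]} where "some_id" is a hypothetical id.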
def main(run_name):
    args = parse_args(run_name)
    calendar = joblib.load('../data/02_fe/calendar.joblib')
    # Aggregated series reuse the per-id transformers; item-level series get a
    # freshly fitted scaler chosen on the command line.
    v_sales_agg = joblib.load('../data/04_agg/v_sales_agg.joblib')
    dump(target_encode(v_sales_agg, calendar), 'agg_te')
    v_sales_item = joblib.load('../data/04_agg/v_sales_each.joblib')
    dump(target_encode(v_sales_item, calendar, transform_sales=False, scaler=args.scaler), 'each_te')
if __name__ == "__main__":
run_name = init(__file__)
try:
main(run_name)
    except Exception:
logging.exception('exception')
finally:
logging.info('end')
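# Hedged consumption sketch (illustrative, not part of the pipeline): each
# dump maps an id to a list of encoded values, one per day_of_week column of
# the pivot above (position == day_of_week, assuming days are encoded 0-6).
def lookup_te(te, data_id, day_of_week):
    return te[data_id][day_of_week]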
|
11464509
|
from moto.core.exceptions import JsonRESTError
class NotFoundException(JsonRESTError):
code = 400
def __init__(self, message):
super(NotFoundException, self).__init__("NotFoundException", message)
class ValidationException(JsonRESTError):
code = 400
def __init__(self, message):
super(ValidationException, self).__init__("ValidationException", message)
class AlreadyExistsException(JsonRESTError):
code = 400
def __init__(self, message):
super(AlreadyExistsException, self).__init__("AlreadyExistsException", message)
class NotAuthorizedException(JsonRESTError):
code = 400
def __init__(self):
super(NotAuthorizedException, self).__init__("NotAuthorizedException", None)
self.description = '{"__type":"NotAuthorizedException"}'
class AccessDeniedException(JsonRESTError):
code = 400
def __init__(self, message):
super(AccessDeniedException, self).__init__("AccessDeniedException", message)
self.description = '{"__type":"AccessDeniedException"}'
class InvalidCiphertextException(JsonRESTError):
code = 400
def __init__(self):
super(InvalidCiphertextException, self).__init__(
"InvalidCiphertextException", None
)
self.description = '{"__type":"InvalidCiphertextException"}'
|
11464520
|
from datetime import datetime
from core.models import Follow, Pin
from django import forms
class PinForm(forms.Form):
item = forms.IntegerField()
influencer = forms.IntegerField()
remove = forms.IntegerField(required=False)
def __init__(self, user, *args, **kwargs):
self.user = user
super(PinForm, self).__init__(*args, **kwargs)
    def save(self, *args, **kwargs):
        item = self.cleaned_data['item']
        influencer = self.cleaned_data['influencer']
        remove = bool(int(self.cleaned_data.get('remove', 0) or 0))
        if remove:
            # Soft delete: stamp deleted_at rather than deleting rows.
            now = datetime.now()
            pins = Pin.objects.filter(user=self.user, item_id=item)
            for pin in pins:
                pin.deleted_at = now
                pin.save()
        else:
            pin, created = Pin.objects.get_or_create(user=self.user, item_id=item, influencer_id=influencer)
            if not created and pin.deleted_at is not None:
                # Re-pinning revives a soft-deleted pin.
                pin.deleted_at = None
                pin.save()
class FollowForm(forms.Form):
target = forms.IntegerField()
remove = forms.IntegerField(required=False)
def __init__(self, user, *args, **kwargs):
self.user = user
super(FollowForm, self).__init__(*args, **kwargs)
    def save(self):
        target = self.cleaned_data['target']
        remove = bool(int(self.cleaned_data.get('remove', 0) or 0))
        if remove:
            # Soft delete: stamp deleted_at rather than deleting rows.
            follows = Follow.objects.filter(user=self.user, target_id=target)
            now = datetime.now()
            for follow in follows:
                follow.deleted_at = now
                follow.save()
        else:
            follow, created = Follow.objects.get_or_create(user=self.user, target_id=target)
            if not created and follow.deleted_at is not None:
                # Re-following revives a soft-deleted follow.
                follow.deleted_at = None
                follow.save()
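# Hedged usage sketch (an illustrative helper, not part of the forms above):
# drive FollowForm programmatically; the field names mirror its definition.
def toggle_follow(user, target_id, remove=False):
    form = FollowForm(user, {'target': target_id, 'remove': int(remove)})
    if form.is_valid():
        form.save()
    return form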
|
11464524
|
from guillotina import error_reasons
from guillotina import logger
from guillotina import response
from guillotina import task_vars
from guillotina._settings import app_settings
from guillotina.browser import View
from guillotina.i18n import default_message_factory as _
from guillotina.interfaces import IRequest
from guillotina.traversal import apply_rendering
from typing import Optional
import asyncio
import traceback
import uuid
class ErrorsMiddleware:
def __init__(self, app):
self.next_app = app
async def __call__(self, scope, receive, send):
        headers_sent = False
        async def _send(msg):
            # Track whether the downstream app already started the response;
            # once headers are out, a replacement 500 can no longer be sent.
            nonlocal headers_sent
            headers_sent = True
            await send(msg)
try:
resp = await self.next_app(scope, receive, _send)
except Exception as exc:
request = task_vars.request.get()
view_result = generate_error_response(exc, request=request)
resp = await apply_rendering(View(None, request), request, view_result)
if headers_sent:
# Too late to send status 500, headers already sent
raise
return resp
def generate_error_response(
exc: Exception, request: Optional[IRequest] = None
) -> response.HTTPInternalServerError:
# We may need to check the roles of the users to show the real error
eid = uuid.uuid4().hex
if isinstance(exc, asyncio.CancelledError): # pragma: no cover
message = _("Cancelled execution of view") + " " + eid
logger.warning(message, exc_info=exc, eid=eid, request=request)
else:
message = _("Error on execution of view") + " " + eid
logger.error(message, exc_info=exc, eid=eid, request=request)
data = {
"message": message,
"reason": error_reasons.UNKNOWN.name,
"details": error_reasons.UNKNOWN.details,
"eid": eid,
}
if app_settings.get("debug"):
data["traceback"] = traceback.format_exc()
return response.HTTPInternalServerError(content=data)
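# Hedged wiring sketch: the middleware wraps any ASGI callable; the failing
# inner app below is illustrative only, to show where ErrorsMiddleware sits.
async def _inner_app(scope, receive, send):
    raise RuntimeError("boom")  # forces the middleware's error path
wrapped_app = ErrorsMiddleware(_inner_app)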
|
11464557
|
from PuzzleLib.Backend import gpuarray
from PuzzleLib.Modules import SubtractMean, LCN
from PuzzleLib.Visual import loadImage, showImage
def main():
    # Subtract the local mean over a 7x7 neighbourhood, then apply local
    # contrast normalization with the same window size.
    subtractMean = SubtractMean(size=7)
    lcn = LCN(N=7)
img = gpuarray.to_gpu(loadImage("../TestData/Bench.png"))
subtractMean(img)
showImage(subtractMean.data.get(), "../TestData/ResultSubtractNorm.png")
lcn(img)
showImage(lcn.data.get(), "../TestData/ResultLCN.png")
if __name__ == "__main__":
main()
|
11464628
|
from sqlalchemy import Column, String, Integer, ForeignKey, Text, Boolean
from models import models
from models.models import Base, datetime_to_string, string_to_datetime, db, optional_encoded_field
class SampleSubmission(Base):
__tablename__ = 'sample_submission'
name = Column(String(255), default="Blank Submission")
status = Column(String(255), default='unknown')
STATUSES = ['unknown', 'passed', 'failed', 'error', 'skipped']
code = Column(Text(), default="")
extra_files = Column(Text(), default="")
score = Column(Integer(), default=0)
correct = Column(Boolean(), default=False)
output = Column(Text(), default="")
inputs = Column(Text(), default="")
feedback = Column(Text(), default="")
forked_id = Column(Integer(), ForeignKey('submission.id'))
forked_version = Column(Integer(), default=0)
owner_id = Column(Integer(), ForeignKey('user.id'))
assignment_id = Column(Integer(), ForeignKey('assignment.id'))
version = Column(Integer(), default=0)
    def __str__(self):
        return '<SampleSubmission {} for assignment {}>'.format(self.name, self.assignment_id)
def encode_json(self, use_owner=True):
return {
'_schema_version': 2,
'name': self.name,
'status': self.status,
'code': self.code,
'extra_files': self.extra_files,
'score': self.score,
'correct': self.correct,
'output': self.output,
'inputs': self.inputs,
'feedback': self.feedback,
'forked_id': self.forked_id,
'forked_version': self.forked_version,
'owner_id': self.owner_id,
'owner_id__email': optional_encoded_field(self.owner_id, use_owner, models.User.query.get, 'email'),
'assignment_id': self.assignment_id,
'version': self.version,
'id': self.id,
'date_modified': datetime_to_string(self.date_modified),
'date_created': datetime_to_string(self.date_created)
}
    @staticmethod
    def decode_json(data, **kwargs):
        # encode_json() writes schema version 2; version 1 carries the same
        # server-generated fields, so both decode identically.
        if data['_schema_version'] in (1, 2):
            data = dict(data)  # shallow copy
            del data['_schema_version']
            del data['owner_id__email']
            del data['id']
            del data['date_modified']
            data['date_created'] = string_to_datetime(data['date_created'])
            for key, value in kwargs.items():
                data[key] = value
            return SampleSubmission(**data)
        raise Exception("Unknown schema version: {}".format(data.get('_schema_version', "Unknown")))
    @staticmethod
    def new(owner_id, assignment_id, name):
        sample_submission = SampleSubmission(owner_id=owner_id, assignment_id=assignment_id, name=name)
        db.session.add(sample_submission)
        db.session.commit()
        return sample_submission
    @staticmethod
    def remove(sample_submission_id):
        SampleSubmission.query.filter_by(id=sample_submission_id).delete()
        db.session.commit()
@staticmethod
def by_assignment(assignment_id):
return (SampleSubmission.query.filter_by(assignment_id=assignment_id)
.order_by(SampleSubmission.name)
.all())
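# Hedged round-trip sketch (illustrative, not part of the model): rebuild a
# submission from its encoded form; extra kwargs flow through decode_json().
def clone_sample(sample, **overrides):
    return SampleSubmission.decode_json(sample.encode_json(use_owner=False), **overrides)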
|